path: root/block/blk-mq.c
author	Christoph Hellwig <hch@lst.de>	2016-10-20 15:12:13 +0200
committer	Jens Axboe <axboe@fb.com>	2016-10-28 08:45:17 -0600
commit	e806402130c9c494e22c73ae9ead4e79d2a5811c (patch)
tree	bac59e1eb3f1b5945409bd0780a4824e9b8383f8 /block/blk-mq.c
parent	block: replace REQ_THROTTLED with a bio flag (diff)
block: split out request-only flags into a new namespace
A lot of the REQ_* flags are only used on struct request, and are only of use to the block layer and a few drivers that dig into struct request internals.

This patch adds a new req_flags_t rq_flags field to struct request for them, and thus dramatically shrinks the number of flags that struct bio and struct request have in common. It also removes the unfortunate situation where we had to fit the fields from the same enum into 32 bits for struct bio and 64 bits for struct request.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Shaun Tancheff <shaun.tancheff@seagate.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
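To illustrate the split, here is a minimal userspace sketch, not the kernel code itself: the struct layout, the flag bit values, and the rq_init() helper are simplified stand-ins chosen only for this example. Bio-level REQ_* op flags stay in cmd_flags, while request-only state such as RQF_IO_STAT lives in a separate req_flags_t rq_flags word, mirroring the blk_mq_rq_ctx_init() change in the first hunk below.

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int req_flags_t;

/* Hypothetical bit values, chosen only for this example. */
#define REQ_SYNC	(1U << 0)			/* bio-level flag, shared with struct bio */
#define RQF_IO_STAT	((req_flags_t)(1U << 0))	/* request-only flag */

/* Simplified stand-in for struct request: two separate flag words. */
struct request {
	unsigned int cmd_flags;	/* op + bio-level REQ_* flags */
	req_flags_t rq_flags;	/* block-layer-internal RQF_* flags */
};

/* Mirrors the blk_mq_rq_ctx_init() hunk below: I/O accounting is now
 * recorded in rq_flags instead of being OR'd into the shared op flags. */
static void rq_init(struct request *rq, unsigned int op_flags, bool io_stat)
{
	rq->cmd_flags = op_flags;
	rq->rq_flags = 0;
	if (io_stat)
		rq->rq_flags |= RQF_IO_STAT;
}

int main(void)
{
	struct request rq;

	rq_init(&rq, REQ_SYNC, true);
	printf("cmd_flags=%#x rq_flags=%#x\n", rq.cmd_flags, rq.rq_flags);
	return 0;
}

The same pattern applies to RQF_MQ_INFLIGHT and RQF_SOFTBARRIER in the hunks further down: tests and updates that previously went through cmd_flags now use rq_flags.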
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--	block/blk-mq.c	19
1 file changed, 9 insertions(+), 10 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index d74a74a9f9ef..b49c6658eb05 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -142,14 +142,13 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 			       struct request *rq, int op,
 			       unsigned int op_flags)
 {
-	if (blk_queue_io_stat(q))
-		op_flags |= REQ_IO_STAT;
-
 	INIT_LIST_HEAD(&rq->queuelist);
 	/* csd/requeue_work/fifo_time is initialized before use */
 	rq->q = q;
 	rq->mq_ctx = ctx;
 	req_set_op_attrs(rq, op, op_flags);
+	if (blk_queue_io_stat(q))
+		rq->rq_flags |= RQF_IO_STAT;
 	/* do not touch atomic flags, it needs atomic ops against the timer */
 	rq->cpu = -1;
 	INIT_HLIST_NODE(&rq->hash);
@@ -198,7 +197,7 @@ __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int op, int op_flags)
 		rq = data->hctx->tags->rqs[tag];
 
 		if (blk_mq_tag_busy(data->hctx)) {
-			rq->cmd_flags = REQ_MQ_INFLIGHT;
+			rq->rq_flags = RQF_MQ_INFLIGHT;
 			atomic_inc(&data->hctx->nr_active);
 		}
@@ -298,9 +297,9 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
 	const int tag = rq->tag;
 	struct request_queue *q = rq->q;
 
-	if (rq->cmd_flags & REQ_MQ_INFLIGHT)
+	if (rq->rq_flags & RQF_MQ_INFLIGHT)
 		atomic_dec(&hctx->nr_active);
-	rq->cmd_flags = 0;
+	rq->rq_flags = 0;
 
 	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
 	blk_mq_put_tag(hctx, ctx, tag);
@@ -489,10 +488,10 @@ static void blk_mq_requeue_work(struct work_struct *work)
 	spin_unlock_irqrestore(&q->requeue_lock, flags);
 
 	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
-		if (!(rq->cmd_flags & REQ_SOFTBARRIER))
+		if (!(rq->rq_flags & RQF_SOFTBARRIER))
 			continue;
 
-		rq->cmd_flags &= ~REQ_SOFTBARRIER;
+		rq->rq_flags &= ~RQF_SOFTBARRIER;
 		list_del_init(&rq->queuelist);
 		blk_mq_insert_request(rq, true, false, false);
 	}
@@ -519,11 +518,11 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
 	 * We abuse this flag that is otherwise used by the I/O scheduler to
 	 * request head insertation from the workqueue.
 	 */
-	BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);
+	BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);
 
 	spin_lock_irqsave(&q->requeue_lock, flags);
 	if (at_head) {
-		rq->cmd_flags |= REQ_SOFTBARRIER;
+		rq->rq_flags |= RQF_SOFTBARRIER;
 		list_add(&rq->queuelist, &q->requeue_list);
 	} else {
 		list_add_tail(&rq->queuelist, &q->requeue_list);