author    Jens Axboe <axboe@kernel.dk>  2018-11-15 12:22:51 -0700
committer Jens Axboe <axboe@kernel.dk>  2018-11-16 08:34:06 -0700
commit    344e9ffcbd1898e1dc04085564a6e05c30ea8199
tree      ba71320bc66d1158790acf1cdeedd21d2da9dead  /block/blk-cgroup.c
parent    nvme: provide optimized poll function for separate poll queues
block: add queue_is_mq() helper
Various spots check for q->mq_ops being non-NULL; provide a helper to do this instead. Where the ->mq_ops != NULL check is redundant, remove it. Since mq == rq-based now that the legacy path is gone, get rid of queue_is_rq_based() and just use queue_is_mq() everywhere.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
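For reference, the helper introduced by this patch is a thin wrapper around the ->mq_ops test; it is defined in include/linux/blkdev.h rather than in the file shown below, so a rough sketch of its shape is:

/* Sketch of the helper: a request_queue is blk-mq managed exactly
 * when its mq_ops table is set, so the check reduces to testing
 * that pointer.
 */
static inline bool queue_is_mq(struct request_queue *q)
{
	return q->mq_ops;
}

Callers such as blkcg_activate_policy() and blkcg_deactivate_policy() below then freeze and unfreeze the queue only when it is actually blk-mq managed, without open-coding the pointer check.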
Diffstat (limited to 'block/blk-cgroup.c')
-rw-r--r--  block/blk-cgroup.c | 8
1 file changed, 4 insertions, 4 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 0f6b44614165..63d226a084cd 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1324,7 +1324,7 @@ int blkcg_activate_policy(struct request_queue *q,
 	if (blkcg_policy_enabled(q, pol))
 		return 0;
-	if (q->mq_ops)
+	if (queue_is_mq(q))
 		blk_mq_freeze_queue(q);
 pd_prealloc:
 	if (!pd_prealloc) {
@@ -1363,7 +1363,7 @@ pd_prealloc:
 	spin_unlock_irq(&q->queue_lock);
 out_bypass_end:
-	if (q->mq_ops)
+	if (queue_is_mq(q))
 		blk_mq_unfreeze_queue(q);
 	if (pd_prealloc)
 		pol->pd_free_fn(pd_prealloc);
@@ -1387,7 +1387,7 @@ void blkcg_deactivate_policy(struct request_queue *q,
 	if (!blkcg_policy_enabled(q, pol))
 		return;
-	if (q->mq_ops)
+	if (queue_is_mq(q))
 		blk_mq_freeze_queue(q);
 	spin_lock_irq(&q->queue_lock);
@@ -1405,7 +1405,7 @@ void blkcg_deactivate_policy(struct request_queue *q,
 	spin_unlock_irq(&q->queue_lock);
-	if (q->mq_ops)
+	if (queue_is_mq(q))
 		blk_mq_unfreeze_queue(q);
 }
 EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);