author    Bart Van Assche <bart.vanassche@wdc.com>  2018-03-07 17:10:03 -0800
committer Jens Axboe <axboe@kernel.dk>              2018-03-08 14:13:48 -0700
commit    f78bac2c8e69144781e271d9771bae8dbb4e7098 (patch)
tree      6897788655ef507c7a0c158fde32e2d15dadf539 /block
parent    block: Reorder the queue flag manipulation function definitions (diff)
download  wireguard-linux-f78bac2c8e69144781e271d9771bae8dbb4e7098.tar.xz
          wireguard-linux-f78bac2c8e69144781e271d9771bae8dbb4e7098.zip
block: Use the queue_flag_*() functions instead of open-coding these
Except for changing the atomic queue flag manipulations that are protected by
the queue lock into non-atomic manipulations, this patch does not change any
functionality.

Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
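For context, the queue_flag_*() helpers that the patch switches to live in
include/linux/blkdev.h (reordered by the parent commit referenced above). The
snippet below is only a rough sketch of their shape around this kernel
version, simplified for illustration; the header is the authoritative source.
The point it illustrates is the one the commit message makes: the locked
variants assert that the queue lock is held and then use non-atomic bit
operations, while the *_unlocked variants skip the assertion.

	/*
	 * Sketch of the queue flag helpers, modeled on
	 * include/linux/blkdev.h at roughly this point in time.
	 * Details (and surrounding helpers) may differ.
	 */
	static inline void queue_lockdep_assert_held(struct request_queue *q)
	{
		if (q->queue_lock)
			lockdep_assert_held(q->queue_lock);
	}

	/* Caller guarantees no concurrent flag updates (e.g. queue not yet visible). */
	static inline void queue_flag_set_unlocked(unsigned int flag,
						   struct request_queue *q)
	{
		__set_bit(flag, &q->queue_flags);
	}

	/* Caller must hold q->queue_lock; the bit op itself is non-atomic. */
	static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
	{
		queue_lockdep_assert_held(q);
		__set_bit(flag, &q->queue_flags);
	}

	static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
	{
		queue_lockdep_assert_held(q);
		__clear_bit(flag, &q->queue_flags);
	}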
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c      2
-rw-r--r--  block/blk-mq.c        2
-rw-r--r--  block/blk-settings.c  4
-rw-r--r--  block/blk-stat.c      6
4 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 6febc69a58aa..241b73088617 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -994,7 +994,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
* registered by blk_register_queue().
*/
q->bypass_depth = 1;
- __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
+ queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q);
init_waitqueue_head(&q->mq_freeze_wq);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 75336848f7a7..e70cc7d48f58 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2678,7 +2678,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
if (!(set->flags & BLK_MQ_F_SG_MERGE))
- q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
+ queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
q->sg_reserved_size = INT_MAX;
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 48ebe6be07b7..7f719da0eadd 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -861,9 +861,9 @@ void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
{
spin_lock_irq(q->queue_lock);
if (queueable)
- clear_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
+ queue_flag_clear(QUEUE_FLAG_FLUSH_NQ, q);
else
- set_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
+ queue_flag_set(QUEUE_FLAG_FLUSH_NQ, q);
spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
diff --git a/block/blk-stat.c b/block/blk-stat.c
index 28003bf9941c..b664aa6df725 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -152,7 +152,7 @@ void blk_stat_add_callback(struct request_queue *q,
spin_lock(&q->stats->lock);
list_add_tail_rcu(&cb->list, &q->stats->callbacks);
- set_bit(QUEUE_FLAG_STATS, &q->queue_flags);
+ queue_flag_set(QUEUE_FLAG_STATS, q);
spin_unlock(&q->stats->lock);
}
EXPORT_SYMBOL_GPL(blk_stat_add_callback);
@@ -163,7 +163,7 @@ void blk_stat_remove_callback(struct request_queue *q,
spin_lock(&q->stats->lock);
list_del_rcu(&cb->list);
if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)
- clear_bit(QUEUE_FLAG_STATS, &q->queue_flags);
+ queue_flag_clear(QUEUE_FLAG_STATS, q);
spin_unlock(&q->stats->lock);
del_timer_sync(&cb->timer);
@@ -191,7 +191,7 @@ void blk_stat_enable_accounting(struct request_queue *q)
{
spin_lock(&q->stats->lock);
q->stats->enable_accounting = true;
- set_bit(QUEUE_FLAG_STATS, &q->queue_flags);
+ queue_flag_set(QUEUE_FLAG_STATS, q);
spin_unlock(&q->stats->lock);
}