author	Linus Torvalds <torvalds@linux-foundation.org>	2022-08-02 13:46:35 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-08-02 13:46:35 -0700
commit	c013d0af81f60cc7dbe357c4e2a925fb6738dbfe (patch)
tree	171dfdf928d0450a3fa98a58b2297d857804bb35 /block/blk-mq-tag.c
parent	Merge tag 'for-5.20/io_uring-zerocopy-send-2022-07-29' of git://git.kernel.dk/linux-block (diff)
parent	ublk_drv: fix double shift bug (diff)
Merge tag 'for-5.20/block-2022-07-29' of git://git.kernel.dk/linux-block
Pull block updates from Jens Axboe:

 - Improve the type checking of request flags (Bart)

 - Ensure queue mapping for a single queue always picks the right queue (Bart)

 - Sanitize the io priority handling (Jan)

 - rq-qos race fix (Jinke)

 - Reserved tags handling improvements (John)

 - Separate memory alignment from file/disk offset alignment for O_DIRECT (Keith)

 - Add new ublk driver, a userspace block driver using io_uring for communication with the userspace backend (Ming)

 - Use try_cmpxchg() to clean up the code in various spots (Uros); see the sketch below

 - Finally remove bdevname() (Christoph)

 - Clean up the zoned device handling (Christoph)

 - Clean up independent access range support (Christoph)

 - Clean up and improve block sysfs handling (Christoph)

 - Clean up and improve teardown of block devices. This turns the usual two-step process into something that is simpler to implement and handle in block drivers (Christoph)

 - Clean up chunk size handling (Christoph)

 - Misc cleanups and fixes (Bart, Bo, Dan, GuoYong, Jason, Keith, Liu, Ming, Sebastian, Yang, Ying)

* tag 'for-5.20/block-2022-07-29' of git://git.kernel.dk/linux-block: (178 commits)
  ublk_drv: fix double shift bug
  ublk_drv: make sure that correct flags(features) returned to userspace
  ublk_drv: fix error handling of ublk_add_dev
  ublk_drv: fix lockdep warning
  block: remove __blk_get_queue
  block: call blk_mq_exit_queue from disk_release for never added disks
  blk-mq: fix error handling in __blk_mq_alloc_disk
  ublk: defer disk allocation
  ublk: rewrite ublk_ctrl_get_queue_affinity to not rely on hctx->cpumask
  ublk: fold __ublk_create_dev into ublk_ctrl_add_dev
  ublk: cleanup ublk_ctrl_uring_cmd
  ublk: simplify ublk_ch_open and ublk_ch_release
  ublk: remove the empty open and release block device operations
  ublk: remove UBLK_IO_F_PREFLUSH
  ublk: add a MAINTAINERS entry
  block: don't allow the same type rq_qos add more than once
  mmc: fix disk/queue leak in case of adding disk failure
  ublk_drv: fix an IS_ERR() vs NULL check
  ublk: remove UBLK_IO_F_INTEGRITY
  ublk_drv: remove unneeded semicolon
  ...
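As a hedged aside on the "Use try_cmpxchg() to clean up the code in various spots (Uros)" item above: the usual conversion replaces an open-coded atomic_cmpxchg() retry loop with atomic_try_cmpxchg(), which returns false and refreshes the expected value on contention. A minimal sketch of the pattern, with illustrative names (example_counter, example_bump) that are not from this tree:

#include <linux/atomic.h>

static atomic_t example_counter = ATOMIC_INIT(0);

static void example_bump(void)
{
	/* Illustrative only: atomic_try_cmpxchg() reloads 'old' with the
	 * current value when the exchange fails, so no explicit re-read
	 * of the counter is needed in the loop body.
	 */
	int old = atomic_read(&example_counter);

	do {
	} while (!atomic_try_cmpxchg(&example_counter, &old, old + 1));
}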
Diffstat (limited to 'block/blk-mq-tag.c')
-rw-r--r--	block/blk-mq-tag.c	31
1 file changed, 12 insertions(+), 19 deletions(-)
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 2dcd738c6952..8e3b36d1cb57 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -37,29 +37,25 @@ static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,
* to get tag when first time, the other shared-tag users could reserve
* budget for it.
*/
-bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
+void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
unsigned int users;
if (blk_mq_is_shared_tags(hctx->flags)) {
struct request_queue *q = hctx->queue;
- if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) ||
- test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags)) {
- return true;
- }
+ if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
+ return;
+ set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags);
} else {
- if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) ||
- test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) {
- return true;
- }
+ if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+ return;
+ set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state);
}
users = atomic_inc_return(&hctx->tags->active_queues);
blk_mq_update_wake_batch(hctx->tags, users);
-
- return true;
}
/*
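The hunk above drops the always-true return value of __blk_mq_tag_busy(); once no caller consumes it, the test_and_set_bit() shortcut is no longer needed and a plain test_bit() followed by set_bit() suffices. A hedged sketch of how the caller-side wrapper in block/blk-mq.h would look with the void signature (reconstructed for illustration, not part of this diff):

static inline void blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	/* Only shared-tag users need to account active queues. */
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_tag_busy(hctx);
}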
@@ -266,7 +262,6 @@ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
struct blk_mq_hw_ctx *hctx = iter_data->hctx;
struct request_queue *q = iter_data->q;
struct blk_mq_tag_set *set = q->tag_set;
- bool reserved = iter_data->reserved;
struct blk_mq_tags *tags;
struct request *rq;
bool ret = true;
@@ -276,7 +271,7 @@ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
else
tags = hctx->tags;
- if (!reserved)
+ if (!iter_data->reserved)
bitnr += tags->nr_reserved_tags;
/*
* We can hit rq == NULL here, because the tagging functions
@@ -287,7 +282,7 @@ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
return true;
if (rq->q == q && (!hctx || rq->mq_hctx == hctx))
- ret = iter_data->fn(rq, iter_data->data, reserved);
+ ret = iter_data->fn(rq, iter_data->data);
blk_mq_put_rq_ref(rq);
return ret;
}
@@ -337,12 +332,11 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
struct bt_tags_iter_data *iter_data = data;
struct blk_mq_tags *tags = iter_data->tags;
- bool reserved = iter_data->flags & BT_TAG_ITER_RESERVED;
struct request *rq;
bool ret = true;
bool iter_static_rqs = !!(iter_data->flags & BT_TAG_ITER_STATIC_RQS);
- if (!reserved)
+ if (!(iter_data->flags & BT_TAG_ITER_RESERVED))
bitnr += tags->nr_reserved_tags;
/*
@@ -358,7 +352,7 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
if (!(iter_data->flags & BT_TAG_ITER_STARTED) ||
blk_mq_request_started(rq))
- ret = iter_data->fn(rq, iter_data->data, reserved);
+ ret = iter_data->fn(rq, iter_data->data);
if (!iter_static_rqs)
blk_mq_put_rq_ref(rq);
return ret;
@@ -448,8 +442,7 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
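The bt_iter() and bt_tags_iter() hunks above, together with the blk_mq_tagset_count_completed_rqs() hunk below, show the same interface change from this series: busy tag iteration callbacks lose their trailing 'bool reserved' argument and now take only the request and the opaque data pointer. A hedged sketch of a driver-style callback and its blk_mq_tagset_busy_iter() call under the new signature (foo_count_inflight and foo_inflight are illustrative names, not from this tree):

#include <linux/blk-mq.h>

/* Return true to keep iterating over busy requests. */
static bool foo_count_inflight(struct request *rq, void *priv)
{
	unsigned int *count = priv;

	if (blk_mq_request_started(rq))
		(*count)++;
	return true;
}

static unsigned int foo_inflight(struct blk_mq_tag_set *set)
{
	unsigned int count = 0;

	/* Walks all busy tags in the set, invoking the callback per request. */
	blk_mq_tagset_busy_iter(set, foo_count_inflight, &count);
	return count;
}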
-static bool blk_mq_tagset_count_completed_rqs(struct request *rq,
- void *data, bool reserved)
+static bool blk_mq_tagset_count_completed_rqs(struct request *rq, void *data)
{
unsigned *count = data;