author     Jens Axboe <axboe@kernel.dk>    2021-11-03 05:47:09 -0600
committer  Jens Axboe <axboe@kernel.dk>    2021-11-04 23:20:10 -0600
commit     900e080752025f0016128f07c9ed4c50eba3654b (patch)
tree       0429d43fed6fcb41ab394987d423bc08a042e730  /block/blk.h
parent     block: make bio_queue_enter() fast-path available inline (diff)
download   wireguard-linux-900e080752025f0016128f07c9ed4c50eba3654b.tar.xz
           wireguard-linux-900e080752025f0016128f07c9ed4c50eba3654b.zip
block: move queue enter logic into blk_mq_submit_bio()
Retain the old logic for the fops based submit, but for our internal
blk_mq_submit_bio(), move the queue entering logic into the core function
itself.

We need to be a bit careful if going into the scheduler, as a scheduler or
queue mappings can arbitrarily change before we have entered the queue. Have
the bio scheduler mapping do that separately; it's a very cheap operation
compared to actually doing the merging, locking, and lookups.

Reviewed-by: Christoph Hellwig <hch@lst.de>
[axboe: update to check merge post submit_bio_checks() doing remap...]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
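For orientation, here is a minimal C sketch of the control flow the message
describes; it is not the verbatim kernel code, and sched_merge_sketch() is a
hypothetical stand-in for the scheduler merge path. The checks run first, the
merge path takes its own (cheap) queue reference because scheduler and queue
mappings can still change, and only the core path that will actually allocate
a request calls bio_queue_enter() itself.

	#include <linux/blkdev.h>
	#include "blk.h"	/* submit_bio_checks(), bio_queue_enter() */

	/*
	 * Hypothetical stand-in for the scheduler merge path; per the commit
	 * message it enters the queue on its own, which is cheap next to the
	 * locking and lookups that merging itself requires.
	 */
	static bool sched_merge_sketch(struct request_queue *q, struct bio *bio);

	/* Sketch only: the rough shape blk_mq_submit_bio() takes with this patch. */
	static void blk_mq_submit_bio_sketch(struct bio *bio)
	{
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);

		/* Validity checks and partition remap happen before anything else. */
		if (unlikely(!submit_bio_checks(bio)))
			return;

		/* Merge attempt; this path holds its own queue reference internally. */
		if (sched_merge_sketch(q, bio))
			return;

		/*
		 * The core submit path now enters the queue right here, instead of
		 * relying on the caller having done it.
		 */
		if (unlikely(bio_queue_enter(bio)))
			return;

		/*
		 * ... allocate and issue the request; the request keeps the queue
		 * reference until it completes or fails ...
		 */
	}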
Diffstat (limited to 'block/blk.h')
-rw-r--r--   block/blk.h   1
1 file changed, 1 insertion(+), 0 deletions(-)
diff --git a/block/blk.h b/block/blk.h
index 814d9632d43e..b4fed2033e48 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -56,6 +56,7 @@ void blk_freeze_queue(struct request_queue *q);
void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
void blk_queue_start_drain(struct request_queue *q);
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
+bool submit_bio_checks(struct bio *bio);
static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
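The hunk itself only makes submit_bio_checks() visible through block/blk.h;
the matching blk-core.c and blk-mq.c changes fall outside this diffstat. A
small sketch of what the declaration implies, with a placeholder body rather
than the real definition:

	/* block/blk.h: internal declaration added by the hunk above. */
	bool submit_bio_checks(struct bio *bio);

	/*
	 * block/blk-core.c (sketch): with a declaration in blk.h the definition
	 * can no longer stay file-local, so blk-mq.c is able to run the checks
	 * itself before it enters the queue.
	 */
	bool submit_bio_checks(struct bio *bio)
	{
		/* ... validate the bio, remap it to the whole device, etc. ... */
		return true;
	}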