Diffstat (limited to 'block/blk-core.c')
-rw-r--r-- | block/blk-core.c | 63 |
1 file changed, 40 insertions, 23 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 9321767470dc..9e5e0277a4d9 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -283,12 +283,9 @@ static void blk_free_queue(struct request_queue *q)
  *
  * Decrements the refcount of the request_queue and free it when the refcount
  * reaches 0.
- *
- * Context: Can sleep.
  */
 void blk_put_queue(struct request_queue *q)
 {
-	might_sleep();
 	if (refcount_dec_and_test(&q->refs))
 		blk_free_queue(q);
 }
@@ -573,7 +570,7 @@ static inline blk_status_t blk_check_zone_append(struct request_queue *q,
 		return BLK_STS_NOTSUPP;
 
 	/* The bio sector must point to the start of a sequential zone */
-	if (bio->bi_iter.bi_sector & (bdev_zone_sectors(bio->bi_bdev) - 1) ||
+	if (!bdev_is_zone_start(bio->bi_bdev, bio->bi_iter.bi_sector) ||
 	    !bio_zone_is_seq(bio))
 		return BLK_STS_IOERR;
 
@@ -687,6 +684,18 @@ static void __submit_bio_noacct_mq(struct bio *bio)
 
 void submit_bio_noacct_nocheck(struct bio *bio)
 {
+	blk_cgroup_bio_start(bio);
+	blkcg_bio_issue_init(bio);
+
+	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
+		trace_block_bio_queue(bio);
+		/*
+		 * Now that enqueuing has been traced, we need to trace
+		 * completion as well.
+		 */
+		bio_set_flag(bio, BIO_TRACE_COMPLETION);
+	}
+
 	/*
 	 * We only want one ->submit_bio to be active at a time, else stack
 	 * usage with stacked devices could be a problem. Use current->bio_list
@@ -744,12 +753,16 @@ void submit_bio_noacct(struct bio *bio)
 	 * Filter flush bio's early so that bio based drivers without flush
 	 * support don't have to worry about them.
 	 */
-	if (op_is_flush(bio->bi_opf) &&
-	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
-		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
-		if (!bio_sectors(bio)) {
-			status = BLK_STS_OK;
+	if (op_is_flush(bio->bi_opf)) {
+		if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_WRITE &&
+				 bio_op(bio) != REQ_OP_ZONE_APPEND))
 			goto end_io;
+		if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
+			bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
+			if (!bio_sectors(bio)) {
+				status = BLK_STS_OK;
+				goto end_io;
+			}
 		}
 	}
 
@@ -791,17 +804,6 @@ void submit_bio_noacct(struct bio *bio)
 
 	if (blk_throtl_bio(bio))
 		return;
-
-	blk_cgroup_bio_start(bio);
-	blkcg_bio_issue_init(bio);
-
-	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
-		trace_block_bio_queue(bio);
-		/* Now that enqueuing has been traced, we need to trace
-		 * completion as well.
-		 */
-		bio_set_flag(bio, BIO_TRACE_COMPLETION);
-	}
 
 	submit_bio_noacct_nocheck(bio);
 	return;
@@ -856,10 +858,16 @@ EXPORT_SYMBOL(submit_bio);
  */
 int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
 {
-	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 	blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
+	struct block_device *bdev;
+	struct request_queue *q;
 	int ret = 0;
 
+	bdev = READ_ONCE(bio->bi_bdev);
+	if (!bdev)
+		return 0;
+
+	q = bdev_get_queue(bdev);
 	if (cookie == BLK_QC_T_NONE ||
 	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
 		return 0;
@@ -872,7 +880,16 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
 	 */
 	blk_flush_plug(current->plug, false);
-	if (bio_queue_enter(bio))
+	/*
+	 * We need to be able to enter a frozen queue, similar to how
+	 * timeouts also need to do that. If that is blocked, then we can
+	 * have pending IO when a queue freeze is started, and then the
+	 * wait for the freeze to finish will wait for polled requests to
+	 * timeout as the poller is prevented from entering the queue and
+	 * completing them. As long as we prevent new IO from being queued,
+	 * that should be all that matters.
+	 */
+	if (!percpu_ref_tryget(&q->q_usage_counter))
 		return 0;
 
 	if (queue_is_mq(q)) {
 		ret = blk_mq_poll(q, cookie, iob, flags);
@@ -919,7 +936,7 @@ int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
 	 */
 	rcu_read_lock();
 	bio = READ_ONCE(kiocb->private);
-	if (bio && bio->bi_bdev)
+	if (bio)
 		ret = bio_poll(bio, iob, flags);
 	rcu_read_unlock();
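Note on the blk_check_zone_append() hunk: the open-coded test only works because zone sizes are a power of two, and the bdev_is_zone_start() conversion hides that mask trick behind a name that states the intent. A minimal userspace sketch of the equivalence (is_zone_start() and the sector_t typedef here are illustrative stand-ins, not the kernel helper):

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t sector_t;	/* stand-in for the kernel's sector_t */

/*
 * A sector is the start of a zone iff it is aligned to the zone size;
 * for power-of-two zone sizes the alignment check reduces to a mask,
 * which is exactly the expression the hunk above replaces.
 */
static inline bool is_zone_start(sector_t sector, sector_t zone_sectors)
{
	return (sector & (zone_sectors - 1)) == 0;
}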
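Note on the flush filtering hunk in submit_bio_noacct(): the rewrite keeps the old rule, that on a device without a volatile write cache (QUEUE_FLAG_WC clear) REQ_PREFLUSH and REQ_FUA carry no meaning and a payload-less flush can complete immediately, and adds a sanity check that flush semantics are only attached to writes and zone appends. A simplified userspace model of the filtering rule (the helper name and flag bit positions are made up for illustration):

#include <stdbool.h>
#include <stdint.h>

#define MY_REQ_PREFLUSH	(1u << 0)	/* hypothetical bit positions */
#define MY_REQ_FUA	(1u << 1)

/*
 * Returns true if the bio can be completed immediately with success,
 * mirroring the "status = BLK_STS_OK; goto end_io;" path above.
 */
static bool filter_flush(uint32_t *opf, uint32_t nr_sectors,
			 bool has_write_cache)
{
	if (!(*opf & (MY_REQ_PREFLUSH | MY_REQ_FUA)))
		return false;		/* not a flush, submit as-is */
	if (has_write_cache)
		return false;		/* cache present, keep the flags */
	*opf &= ~(MY_REQ_PREFLUSH | MY_REQ_FUA); /* no cache: flags are no-ops */
	return nr_sectors == 0;		/* payload-less flush: done now */
}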
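Note on the bio_poll() gate change: bio_queue_enter() refuses entry (or waits) while a queue freeze is in progress, so a poller using it could never reap the very requests the freeze is waiting on, whereas percpu_ref_tryget() on q_usage_counter only fails once the queue is shutting down, which the new comment argues is sufficient. A userspace analogy of the two gates (a simplified model, not the kernel's implementation):

#include <stdatomic.h>
#include <stdbool.h>

struct usage_counter {
	atomic_long count;	/* models q->q_usage_counter */
	atomic_bool frozen;	/* a freeze is in progress */
	atomic_bool dead;	/* the queue is being torn down */
};

/* models bio_queue_enter(): new IO must not enter a frozen queue */
static bool enter_for_new_io(struct usage_counter *uc)
{
	if (atomic_load(&uc->frozen) || atomic_load(&uc->dead))
		return false;
	atomic_fetch_add(&uc->count, 1);
	return true;
}

/* models percpu_ref_tryget(): polling may still enter while frozen */
static bool enter_for_poll(struct usage_counter *uc)
{
	if (atomic_load(&uc->dead))
		return false;
	atomic_fetch_add(&uc->count, 1);
	return true;
}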