From 59c3d45e487315e6e05a3f2310b61109f8e503e7 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 8 Apr 2014 09:15:35 -0600 Subject: block: remove 'q' parameter from kblockd_schedule_*_work() The queue parameter is never used, just get rid of it. Signed-off-by: Jens Axboe --- block/blk-core.c | 6 +++--- block/blk-flush.c | 2 +- block/blk-mq.c | 7 ++----- block/cfq-iosched.c | 2 +- 4 files changed, 7 insertions(+), 10 deletions(-) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index 34d7c196338b..f7d2c3335dfa 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -2904,14 +2904,14 @@ free_and_out: } EXPORT_SYMBOL_GPL(blk_rq_prep_clone); -int kblockd_schedule_work(struct request_queue *q, struct work_struct *work) +int kblockd_schedule_work(struct work_struct *work) { return queue_work(kblockd_workqueue, work); } EXPORT_SYMBOL(kblockd_schedule_work); -int kblockd_schedule_delayed_work(struct request_queue *q, - struct delayed_work *dwork, unsigned long delay) +int kblockd_schedule_delayed_work(struct delayed_work *dwork, + unsigned long delay) { return queue_delayed_work(kblockd_workqueue, dwork, delay); } diff --git a/block/blk-flush.c b/block/blk-flush.c index 43e6b4755e9a..77f20458910c 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -144,7 +144,7 @@ static bool blk_flush_queue_rq(struct request *rq, bool add_front) { if (rq->q->mq_ops) { INIT_WORK(&rq->mq_flush_work, mq_flush_run); - kblockd_schedule_work(rq->q, &rq->mq_flush_work); + kblockd_schedule_work(&rq->mq_flush_work); return false; } else { if (add_front) diff --git a/block/blk-mq.c b/block/blk-mq.c index 1d2a9bdbee57..9c8f1f4ada7f 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -608,11 +608,8 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) if (!async) __blk_mq_run_hw_queue(hctx); - else { - struct request_queue *q = hctx->queue; - - kblockd_schedule_delayed_work(q, &hctx->delayed_work, 0); - } + else + kblockd_schedule_delayed_work(&hctx->delayed_work, 0); } void blk_mq_run_queues(struct request_queue *q, bool async) diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index e0985f1955e7..5063a0bd831a 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -908,7 +908,7 @@ static inline void cfq_schedule_dispatch(struct cfq_data *cfqd) { if (cfqd->busy_queues) { cfq_log(cfqd, "schedule dispatch"); - kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work); + kblockd_schedule_work(&cfqd->unplug_work); } } -- cgit v1.2.3-59-g8ed1b From 8ab14595b6dffecea264dcca2d6d9eea7c59273a Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 8 Apr 2014 09:17:40 -0600 Subject: block: add kblockd_schedule_delayed_work_on() Same function as kblockd_schedule_delayed_work(), but allow the caller to pass in a CPU that the work should be executed on. This just directly extends and maps into the workqueue API, and will be used to make the blk-mq mappings more strict. 
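As a rough usage sketch (not part of this patch, but mirroring how the next patch in this series ends up calling it), a caller that wants the work to run on one of the CPUs mapped to a hardware queue would do something like:

	/* sketch: pin the delayed work to the first CPU mapped to this hctx */
	unsigned int cpu = cpumask_first(hctx->cpumask);

	kblockd_schedule_delayed_work_on(cpu, &hctx->delayed_work, 0);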
Signed-off-by: Jens Axboe --- block/blk-core.c | 7 +++++++ include/linux/blkdev.h | 1 + 2 files changed, 8 insertions(+) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index f7d2c3335dfa..7af4a4898dcb 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -2917,6 +2917,13 @@ int kblockd_schedule_delayed_work(struct delayed_work *dwork, } EXPORT_SYMBOL(kblockd_schedule_delayed_work); +int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, + unsigned long delay) +{ + return queue_delayed_work_on(cpu, kblockd_workqueue, dwork, delay); +} +EXPORT_SYMBOL(kblockd_schedule_delayed_work_on); + #define PLUG_MAGIC 0x91827364 /** diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 2425945d36ab..5a31307c5ded 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1361,6 +1361,7 @@ static inline void put_dev_sector(Sector p) struct work_struct; int kblockd_schedule_work(struct work_struct *work); int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay); +int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay); #ifdef CONFIG_BLK_CGROUP /* -- cgit v1.2.3-59-g8ed1b From e4043dcf30811f5db15181168e2aac172514302a Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 9 Apr 2014 10:18:23 -0600 Subject: blk-mq: ensure that hardware queues are always run on the mapped CPUs Instead of providing soft mappings with no guarantees on hardware queues always being run on the right CPU, switch to a hard mapping guarantee that ensure that we always run the hardware queue on (one of, if more) the mapped CPU. Signed-off-by: Jens Axboe --- block/blk-mq.c | 66 ++++++++++++++++++++++++++++++++++++++------------ include/linux/blk-mq.h | 1 + 2 files changed, 52 insertions(+), 15 deletions(-) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index 9c8f1f4ada7f..5455ed19de1c 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -209,11 +209,14 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q, break; } - blk_mq_put_ctx(ctx); - if (!(gfp & __GFP_WAIT)) + if (gfp & __GFP_WAIT) { + __blk_mq_run_hw_queue(hctx); + blk_mq_put_ctx(ctx); + } else { + blk_mq_put_ctx(ctx); break; + } - __blk_mq_run_hw_queue(hctx); blk_mq_wait_for_tags(hctx->tags); } while (1); @@ -514,6 +517,8 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) LIST_HEAD(rq_list); int bit, queued; + WARN_ON(!preempt_count()); + if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state))) return; @@ -606,10 +611,22 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state))) return; - if (!async) + if (!async && cpumask_test_cpu(smp_processor_id(), hctx->cpumask)) __blk_mq_run_hw_queue(hctx); - else + else if (hctx->queue->nr_hw_queues == 1) kblockd_schedule_delayed_work(&hctx->delayed_work, 0); + else { + unsigned int cpu; + + /* + * It'd be great if the workqueue API had a way to pass + * in a mask and had some smarts for more clever placement + * than the first CPU. Or we could round-robin here. For now, + * just queue on the first CPU. 
+ */ + cpu = cpumask_first(hctx->cpumask); + kblockd_schedule_delayed_work_on(cpu, &hctx->delayed_work, 0); + } } void blk_mq_run_queues(struct request_queue *q, bool async) @@ -623,7 +640,9 @@ void blk_mq_run_queues(struct request_queue *q, bool async) test_bit(BLK_MQ_S_STOPPED, &hctx->state)) continue; + preempt_disable(); blk_mq_run_hw_queue(hctx, async); + preempt_enable(); } } EXPORT_SYMBOL(blk_mq_run_queues); @@ -648,7 +667,10 @@ EXPORT_SYMBOL(blk_mq_stop_hw_queues); void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx) { clear_bit(BLK_MQ_S_STOPPED, &hctx->state); + + preempt_disable(); __blk_mq_run_hw_queue(hctx); + preempt_enable(); } EXPORT_SYMBOL(blk_mq_start_hw_queue); @@ -662,7 +684,9 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q) continue; clear_bit(BLK_MQ_S_STOPPED, &hctx->state); + preempt_disable(); blk_mq_run_hw_queue(hctx, true); + preempt_enable(); } } EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues); @@ -672,7 +696,10 @@ static void blk_mq_work_fn(struct work_struct *work) struct blk_mq_hw_ctx *hctx; hctx = container_of(work, struct blk_mq_hw_ctx, delayed_work.work); + + preempt_disable(); __blk_mq_run_hw_queue(hctx); + preempt_enable(); } static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, @@ -716,10 +743,10 @@ void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue, spin_unlock(&ctx->lock); } - blk_mq_put_ctx(current_ctx); - if (run_queue) blk_mq_run_hw_queue(hctx, async); + + blk_mq_put_ctx(current_ctx); } static void blk_mq_insert_requests(struct request_queue *q, @@ -755,9 +782,8 @@ static void blk_mq_insert_requests(struct request_queue *q, } spin_unlock(&ctx->lock); - blk_mq_put_ctx(current_ctx); - blk_mq_run_hw_queue(hctx, from_schedule); + blk_mq_put_ctx(current_ctx); } static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b) @@ -876,7 +902,6 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio) if (unlikely(is_flush_fua)) { blk_mq_bio_to_request(rq, bio); - blk_mq_put_ctx(ctx); blk_insert_flush(rq); goto run_queue; } @@ -914,7 +939,6 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio) } spin_unlock(&ctx->lock); - blk_mq_put_ctx(ctx); /* * For a SYNC request, send it to the hardware immediately. For an @@ -923,6 +947,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio) */ run_queue: blk_mq_run_hw_queue(hctx, !is_sync || is_flush_fua); + blk_mq_put_ctx(ctx); } /* @@ -990,9 +1015,9 @@ static void blk_mq_hctx_notify(void *data, unsigned long action, blk_mq_hctx_mark_pending(hctx, ctx); spin_unlock(&ctx->lock); - blk_mq_put_ctx(ctx); blk_mq_run_hw_queue(hctx, true); + blk_mq_put_ctx(ctx); } static int blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx, @@ -1255,12 +1280,13 @@ static void blk_mq_init_cpu_queues(struct request_queue *q, __ctx->queue = q; /* If the cpu isn't online, the cpu is mapped to first hctx */ - hctx = q->mq_ops->map_queue(q, i); - hctx->nr_ctx++; - if (!cpu_online(i)) continue; + hctx = q->mq_ops->map_queue(q, i); + cpumask_set_cpu(i, hctx->cpumask); + hctx->nr_ctx++; + /* * Set local node, IFF we have more than one hw queue. 
If * not, we remain on the home node of the device @@ -1277,6 +1303,7 @@ static void blk_mq_map_swqueue(struct request_queue *q) struct blk_mq_ctx *ctx; queue_for_each_hw_ctx(q, hctx, i) { + cpumask_clear(hctx->cpumask); hctx->nr_ctx = 0; } @@ -1285,7 +1312,11 @@ static void blk_mq_map_swqueue(struct request_queue *q) */ queue_for_each_ctx(q, ctx, i) { /* If the cpu isn't online, the cpu is mapped to first hctx */ + if (!cpu_online(i)) + continue; + hctx = q->mq_ops->map_queue(q, i); + cpumask_set_cpu(i, hctx->cpumask); ctx->index_hw = hctx->nr_ctx; hctx->ctxs[hctx->nr_ctx++] = ctx; } @@ -1329,6 +1360,9 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg, if (!hctxs[i]) goto err_hctxs; + if (!zalloc_cpumask_var(&hctxs[i]->cpumask, GFP_KERNEL)) + goto err_hctxs; + hctxs[i]->numa_node = NUMA_NO_NODE; hctxs[i]->queue_num = i; } @@ -1392,6 +1426,7 @@ err_hctxs: for (i = 0; i < reg->nr_hw_queues; i++) { if (!hctxs[i]) break; + free_cpumask_var(hctxs[i]->cpumask); reg->ops->free_hctx(hctxs[i], i); } kfree(hctxs); @@ -1413,6 +1448,7 @@ void blk_mq_free_queue(struct request_queue *q) blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); if (q->mq_ops->exit_hctx) q->mq_ops->exit_hctx(hctx, i); + free_cpumask_var(hctx->cpumask); q->mq_ops->free_hctx(hctx, i); } diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 0120451545d8..b6ee48740458 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -19,6 +19,7 @@ struct blk_mq_hw_ctx { unsigned long state; /* BLK_MQ_S_* flags */ struct delayed_work delayed_work; + cpumask_var_t cpumask; unsigned long flags; /* BLK_MQ_F_* flags */ -- cgit v1.2.3-59-g8ed1b From cb2da43e3d02b745408252fe4c447b23b1f4fc7b Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 9 Apr 2014 10:53:21 -0600 Subject: blk-mq: simplify blk_mq_hw_sysfs_cpus_show() Now that we have a cpu mask of CPUs that are mapped to a specific hardware queue, we can just iterate that to display the sysfs num-hw-queue/cpu_list file. Signed-off-by: Jens Axboe --- block/blk-mq-sysfs.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'block') diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c index b0ba264b0522..9176a6984857 100644 --- a/block/blk-mq-sysfs.c +++ b/block/blk-mq-sysfs.c @@ -246,16 +246,12 @@ static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page) static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page) { - unsigned int i, queue_num, first = 1; + unsigned int i, first = 1; ssize_t ret = 0; blk_mq_disable_hotplug(); - for_each_online_cpu(i) { - queue_num = hctx->queue->mq_map[i]; - if (queue_num != hctx->queue_num) - continue; - + for_each_cpu(i, hctx->cpumask) { if (first) ret += sprintf(ret + page, "%u", i); else -- cgit v1.2.3-59-g8ed1b From 21f9fcd81593e201172160853b8647336fb81f4f Mon Sep 17 00:00:00 2001 From: Duan Jiong Date: Fri, 11 Apr 2014 15:58:56 +0800 Subject: block: replace IS_ERR and PTR_ERR with PTR_ERR_OR_ZERO This patch fixes coccinelle error regarding usage of IS_ERR and PTR_ERR instead of PTR_ERR_OR_ZERO. 
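For reference, PTR_ERR_OR_ZERO() is simply shorthand for the open-coded pattern being removed; roughly:

	/* equivalent of PTR_ERR_OR_ZERO(dir) */
	if (IS_ERR(dir))
		return PTR_ERR(dir);
	return 0;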
Signed-off-by: Duan Jiong Signed-off-by: Jens Axboe --- block/blk-core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index 7af4a4898dcb..c320def0fe81 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1654,7 +1654,7 @@ static int __init fail_make_request_debugfs(void) struct dentry *dir = fault_create_debugfs_attr("fail_make_request", NULL, &fail_make_request); - return IS_ERR(dir) ? PTR_ERR(dir) : 0; + return PTR_ERR_OR_ZERO(dir); } late_initcall(fail_make_request_debugfs); -- cgit v1.2.3-59-g8ed1b From b4f42e2831ff9b9fa19252265d7c8985d47eefb9 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 10 Apr 2014 09:46:28 -0600 Subject: block: remove struct request buffer member This was used in the olden days, back when onions were proper yellow. Basically it mapped to the current buffer to be transferred. With highmem being added more than a decade ago, most drivers map pages out of a bio, and rq->buffer isn't pointing at anything valid. Convert old style drivers to just use bio_data(). For the discard payload use case, just reference the page in the bio. Signed-off-by: Jens Axboe --- block/blk-core.c | 21 ++++++--------------- block/blk-map.c | 3 --- drivers/block/amiflop.c | 2 +- drivers/block/ataflop.c | 2 +- drivers/block/floppy.c | 18 +++++++++--------- drivers/block/hd.c | 10 +++++----- drivers/block/mg_disk.c | 12 ++++++------ drivers/block/paride/pcd.c | 2 +- drivers/block/paride/pd.c | 4 ++-- drivers/block/paride/pf.c | 4 ++-- drivers/block/skd_main.c | 5 ++--- drivers/block/swim.c | 2 +- drivers/block/swim3.c | 6 +++--- drivers/block/xen-blkfront.c | 4 ++-- drivers/block/xsysace.c | 4 ++-- drivers/block/z2ram.c | 6 ++++-- drivers/ide/ide-disk.c | 5 ++--- drivers/md/dm.c | 1 - drivers/mtd/mtd_blkdevs.c | 3 +-- drivers/mtd/ubi/block.c | 2 +- drivers/scsi/scsi_lib.c | 3 --- drivers/scsi/sd.c | 10 ++++------ include/linux/blkdev.h | 1 - 23 files changed, 55 insertions(+), 75 deletions(-) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index 1fe9ff6e6802..ae6227fd07aa 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -146,8 +146,8 @@ void blk_dump_rq_flags(struct request *rq, char *msg) printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n", (unsigned long long)blk_rq_pos(rq), blk_rq_sectors(rq), blk_rq_cur_sectors(rq)); - printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n", - rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq)); + printk(KERN_INFO " bio %p, biotail %p, len %u\n", + rq->bio, rq->biotail, blk_rq_bytes(rq)); if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { printk(KERN_INFO " cdb: "); @@ -1360,7 +1360,6 @@ void blk_add_request_payload(struct request *rq, struct page *page, rq->__data_len = rq->resid_len = len; rq->nr_phys_segments = 1; - rq->buffer = bio_data(bio); } EXPORT_SYMBOL_GPL(blk_add_request_payload); @@ -1402,12 +1401,6 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req, bio->bi_next = req->bio; req->bio = bio; - /* - * may not be valid. if the low level driver said - * it didn't need a bounce buffer then it better - * not touch req->buffer either... 
- */ - req->buffer = bio_data(bio); req->__sector = bio->bi_iter.bi_sector; req->__data_len += bio->bi_iter.bi_size; req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); @@ -2434,7 +2427,6 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) } req->__data_len -= total_bytes; - req->buffer = bio_data(req->bio); /* update sector only for requests with clear definition of sector */ if (req->cmd_type == REQ_TYPE_FS) @@ -2752,10 +2744,9 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq, /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */ rq->cmd_flags |= bio->bi_rw & REQ_WRITE; - if (bio_has_data(bio)) { + if (bio_has_data(bio)) rq->nr_phys_segments = bio_phys_segments(q, bio); - rq->buffer = bio_data(bio); - } + rq->__data_len = bio->bi_iter.bi_size; rq->bio = rq->biotail = bio; @@ -2831,7 +2822,7 @@ EXPORT_SYMBOL_GPL(blk_rq_unprep_clone); /* * Copy attributes of the original request to the clone request. - * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied. + * The actual data parts (e.g. ->cmd, ->sense) are not copied. */ static void __blk_rq_prep_clone(struct request *dst, struct request *src) { @@ -2857,7 +2848,7 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src) * * Description: * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq. - * The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense) + * The actual data parts of @rq_src (e.g. ->cmd, ->sense) * are not copied, and copying such parts is the caller's responsibility. * Also, pages which the original bios are pointing to are not copied * and the cloned bios just point same pages. diff --git a/block/blk-map.c b/block/blk-map.c index f7b22bc21518..f890d4345b0c 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -155,7 +155,6 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq, if (!bio_flagged(bio, BIO_USER_MAPPED)) rq->cmd_flags |= REQ_COPY_USER; - rq->buffer = NULL; return 0; unmap_rq: blk_rq_unmap_user(bio); @@ -238,7 +237,6 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, blk_queue_bounce(q, &bio); bio_get(bio); blk_rq_bio_prep(q, rq, bio); - rq->buffer = NULL; return 0; } EXPORT_SYMBOL(blk_rq_map_user_iov); @@ -325,7 +323,6 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, } blk_queue_bounce(q, &rq->bio); - rq->buffer = NULL; return 0; } EXPORT_SYMBOL(blk_rq_map_kern); diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index 748dea4f34dc..758da2287d9a 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c @@ -1406,7 +1406,7 @@ next_segment: track = block / (floppy->dtype->sects * floppy->type->sect_mult); sector = block % (floppy->dtype->sects * floppy->type->sect_mult); - data = rq->buffer + 512 * cnt; + data = bio_data(rq->bio) + 512 * cnt; #ifdef DEBUG printk("access to track %d, sector %d, with buffer at " "0x%08lx\n", track, sector, data); diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index 96b629e1f0c9..7e8a55f8917c 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c @@ -1484,7 +1484,7 @@ repeat: ReqCnt = 0; ReqCmd = rq_data_dir(fd_request); ReqBlock = blk_rq_pos(fd_request); - ReqBuffer = fd_request->buffer; + ReqBuffer = bio_data(fd_request->bio); setup_req_params( drive ); do_fd_action( drive ); diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 8f5565bf34cd..5f69c910c3ac 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -2351,7 
+2351,7 @@ static void rw_interrupt(void) } if (CT(COMMAND) != FD_READ || - raw_cmd->kernel_data == current_req->buffer) { + raw_cmd->kernel_data == bio_data(current_req->bio)) { /* transfer directly from buffer */ cont->done(1); } else if (CT(COMMAND) == FD_READ) { @@ -2640,7 +2640,7 @@ static int make_raw_rw_request(void) raw_cmd->flags &= ~FD_RAW_WRITE; raw_cmd->flags |= FD_RAW_READ; COMMAND = FM_MODE(_floppy, FD_READ); - } else if ((unsigned long)current_req->buffer < MAX_DMA_ADDRESS) { + } else if ((unsigned long)bio_data(current_req->bio) < MAX_DMA_ADDRESS) { unsigned long dma_limit; int direct, indirect; @@ -2654,13 +2654,13 @@ static int make_raw_rw_request(void) */ max_size = buffer_chain_size(); dma_limit = (MAX_DMA_ADDRESS - - ((unsigned long)current_req->buffer)) >> 9; + ((unsigned long)bio_data(current_req->bio))) >> 9; if ((unsigned long)max_size > dma_limit) max_size = dma_limit; /* 64 kb boundaries */ - if (CROSS_64KB(current_req->buffer, max_size << 9)) + if (CROSS_64KB(bio_data(current_req->bio), max_size << 9)) max_size = (K_64 - - ((unsigned long)current_req->buffer) % + ((unsigned long)bio_data(current_req->bio)) % K_64) >> 9; direct = transfer_size(ssize, max_sector, max_size) - fsector_t; /* @@ -2677,7 +2677,7 @@ static int make_raw_rw_request(void) (DP->read_track & (1 << DRS->probed_format)))))) { max_size = blk_rq_sectors(current_req); } else { - raw_cmd->kernel_data = current_req->buffer; + raw_cmd->kernel_data = bio_data(current_req->bio); raw_cmd->length = current_count_sectors << 9; if (raw_cmd->length == 0) { DPRINT("%s: zero dma transfer attempted\n", __func__); @@ -2731,7 +2731,7 @@ static int make_raw_rw_request(void) raw_cmd->length = ((raw_cmd->length - 1) | (ssize - 1)) + 1; raw_cmd->length <<= 9; if ((raw_cmd->length < current_count_sectors << 9) || - (raw_cmd->kernel_data != current_req->buffer && + (raw_cmd->kernel_data != bio_data(current_req->bio) && CT(COMMAND) == FD_WRITE && (aligned_sector_t + (raw_cmd->length >> 9) > buffer_max || aligned_sector_t < buffer_min)) || @@ -2739,7 +2739,7 @@ static int make_raw_rw_request(void) raw_cmd->length <= 0 || current_count_sectors <= 0) { DPRINT("fractionary current count b=%lx s=%lx\n", raw_cmd->length, current_count_sectors); - if (raw_cmd->kernel_data != current_req->buffer) + if (raw_cmd->kernel_data != bio_data(current_req->bio)) pr_info("addr=%d, length=%ld\n", (int)((raw_cmd->kernel_data - floppy_track_buffer) >> 9), @@ -2756,7 +2756,7 @@ static int make_raw_rw_request(void) return 0; } - if (raw_cmd->kernel_data != current_req->buffer) { + if (raw_cmd->kernel_data != bio_data(current_req->bio)) { if (raw_cmd->kernel_data < floppy_track_buffer || current_count_sectors < 0 || raw_cmd->length < 0 || diff --git a/drivers/block/hd.c b/drivers/block/hd.c index bf397bf108b7..8a290c08262f 100644 --- a/drivers/block/hd.c +++ b/drivers/block/hd.c @@ -464,11 +464,11 @@ static void read_intr(void) ok_to_read: req = hd_req; - insw(HD_DATA, req->buffer, 256); + insw(HD_DATA, bio_data(req->bio), 256); #ifdef DEBUG printk("%s: read: sector %ld, remaining = %u, buffer=%p\n", req->rq_disk->disk_name, blk_rq_pos(req) + 1, - blk_rq_sectors(req) - 1, req->buffer+512); + blk_rq_sectors(req) - 1, bio_data(req->bio)+512); #endif if (hd_end_request(0, 512)) { SET_HANDLER(&read_intr); @@ -505,7 +505,7 @@ static void write_intr(void) ok_to_write: if (hd_end_request(0, 512)) { SET_HANDLER(&write_intr); - outsw(HD_DATA, req->buffer, 256); + outsw(HD_DATA, bio_data(req->bio), 256); return; } @@ -624,7 +624,7 @@ repeat: 
printk("%s: %sing: CHS=%d/%d/%d, sectors=%d, buffer=%p\n", req->rq_disk->disk_name, req_data_dir(req) == READ ? "read" : "writ", - cyl, head, sec, nsect, req->buffer); + cyl, head, sec, nsect, bio_data(req->bio)); #endif if (req->cmd_type == REQ_TYPE_FS) { switch (rq_data_dir(req)) { @@ -643,7 +643,7 @@ repeat: bad_rw_intr(); goto repeat; } - outsw(HD_DATA, req->buffer, 256); + outsw(HD_DATA, bio_data(req->bio), 256); break; default: printk("unknown hd-command\n"); diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c index eb59b1241366..e352cac707e8 100644 --- a/drivers/block/mg_disk.c +++ b/drivers/block/mg_disk.c @@ -479,7 +479,7 @@ static unsigned int mg_out(struct mg_host *host, static void mg_read_one(struct mg_host *host, struct request *req) { - u16 *buff = (u16 *)req->buffer; + u16 *buff = (u16 *)bio_data(req->bio); u32 i; for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) @@ -496,7 +496,7 @@ static void mg_read(struct request *req) mg_bad_rw_intr(host); MG_DBG("requested %d sects (from %ld), buffer=0x%p\n", - blk_rq_sectors(req), blk_rq_pos(req), req->buffer); + blk_rq_sectors(req), blk_rq_pos(req), bio_data(req->bio)); do { if (mg_wait(host, ATA_DRQ, @@ -514,7 +514,7 @@ static void mg_read(struct request *req) static void mg_write_one(struct mg_host *host, struct request *req) { - u16 *buff = (u16 *)req->buffer; + u16 *buff = (u16 *)bio_data(req->bio); u32 i; for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) @@ -534,7 +534,7 @@ static void mg_write(struct request *req) } MG_DBG("requested %d sects (from %ld), buffer=0x%p\n", - rem, blk_rq_pos(req), req->buffer); + rem, blk_rq_pos(req), bio_data(req->bio)); if (mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) { @@ -585,7 +585,7 @@ ok_to_read: mg_read_one(host, req); MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n", - blk_rq_pos(req), blk_rq_sectors(req) - 1, req->buffer); + blk_rq_pos(req), blk_rq_sectors(req) - 1, bio_data(req->bio)); /* send read confirm */ outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); @@ -624,7 +624,7 @@ ok_to_write: /* write 1 sector and set handler if remains */ mg_write_one(host, req); MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n", - blk_rq_pos(req), blk_rq_sectors(req), req->buffer); + blk_rq_pos(req), blk_rq_sectors(req), bio_data(req->bio)); host->mg_do_intr = mg_write_intr; mod_timer(&host->timer, jiffies + 3 * HZ); } diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c index e76bdc074dbe..719cb1bc1640 100644 --- a/drivers/block/paride/pcd.c +++ b/drivers/block/paride/pcd.c @@ -747,7 +747,7 @@ static void do_pcd_request(struct request_queue * q) pcd_current = cd; pcd_sector = blk_rq_pos(pcd_req); pcd_count = blk_rq_cur_sectors(pcd_req); - pcd_buf = pcd_req->buffer; + pcd_buf = bio_data(pcd_req->bio); pcd_busy = 1; ps_set_intr(do_pcd_read, NULL, 0, nice); return; diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c index 19ad8f0c83ef..fea7e76a00de 100644 --- a/drivers/block/paride/pd.c +++ b/drivers/block/paride/pd.c @@ -454,7 +454,7 @@ static enum action do_pd_io_start(void) if (pd_block + pd_count > get_capacity(pd_req->rq_disk)) return Fail; pd_run = blk_rq_sectors(pd_req); - pd_buf = pd_req->buffer; + pd_buf = bio_data(pd_req->bio); pd_retries = 0; if (pd_cmd == READ) return do_pd_read_start(); @@ -485,7 +485,7 @@ static int pd_next_buf(void) spin_lock_irqsave(&pd_lock, saved_flags); __blk_end_request_cur(pd_req, 0); pd_count = blk_rq_cur_sectors(pd_req); - pd_buf = pd_req->buffer; + pd_buf = bio_data(pd_req->bio); 
spin_unlock_irqrestore(&pd_lock, saved_flags); return 0; } diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c index f5c86d523ba0..9a15fd3c9349 100644 --- a/drivers/block/paride/pf.c +++ b/drivers/block/paride/pf.c @@ -795,7 +795,7 @@ repeat: } pf_cmd = rq_data_dir(pf_req); - pf_buf = pf_req->buffer; + pf_buf = bio_data(pf_req->bio); pf_retries = 0; pf_busy = 1; @@ -827,7 +827,7 @@ static int pf_next_buf(void) if (!pf_req) return 1; pf_count = blk_rq_cur_sectors(pf_req); - pf_buf = pf_req->buffer; + pf_buf = bio_data(pf_req->bio); } return 0; } diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c index a69dd93d1bd5..36bcedfd930c 100644 --- a/drivers/block/skd_main.c +++ b/drivers/block/skd_main.c @@ -563,7 +563,6 @@ skd_prep_discard_cdb(struct skd_scsi_request *scsi_req, req = skreq->req; blk_add_request_payload(req, page, len); - req->buffer = buf; } static void skd_request_fn_not_online(struct request_queue *q); @@ -856,10 +855,10 @@ static void skd_end_request(struct skd_device *skdev, if ((io_flags & REQ_DISCARD) && (skreq->discard_page == 1)) { + struct bio *bio = req->bio; pr_debug("%s:%s:%d, free the page!", skdev->name, __func__, __LINE__); - free_page((unsigned long)req->buffer); - req->buffer = NULL; + __free_page(bio->bi_io_vec->bv_page); } if (unlikely(error)) { diff --git a/drivers/block/swim.c b/drivers/block/swim.c index b02d53a399f3..6b44bbe528b7 100644 --- a/drivers/block/swim.c +++ b/drivers/block/swim.c @@ -549,7 +549,7 @@ static void redo_fd_request(struct request_queue *q) case READ: err = floppy_read_sectors(fs, blk_rq_pos(req), blk_rq_cur_sectors(req), - req->buffer); + bio_data(req->bio)); break; } done: diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index c74f7b56e7c4..523ee8fd4c15 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c @@ -342,7 +342,7 @@ static void start_request(struct floppy_state *fs) swim3_dbg("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n", req->rq_disk->disk_name, req->cmd, (long)blk_rq_pos(req), blk_rq_sectors(req), - req->buffer); + bio_data(req->bio)); swim3_dbg(" errors=%d current_nr_sectors=%u\n", req->errors, blk_rq_cur_sectors(req)); #endif @@ -479,11 +479,11 @@ static inline void setup_transfer(struct floppy_state *fs) /* Set up 3 dma commands: write preamble, data, postamble */ init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble)); ++cp; - init_dma(cp, OUTPUT_MORE, req->buffer, 512); + init_dma(cp, OUTPUT_MORE, bio_data(req->bio), 512); ++cp; init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble)); } else { - init_dma(cp, INPUT_LAST, req->buffer, n * 512); + init_dma(cp, INPUT_LAST, bio_data(req->bio), n * 512); } ++cp; out_le16(&cp->command, DBDMA_STOP); diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index efe1b4761735..283a30e88287 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -612,10 +612,10 @@ static void do_blkif_request(struct request_queue *rq) } pr_debug("do_blk_req %p: cmd %p, sec %lx, " - "(%u/%u) buffer:%p [%s]\n", + "(%u/%u) [%s]\n", req, req->cmd, (unsigned long)blk_rq_pos(req), blk_rq_cur_sectors(req), blk_rq_sectors(req), - req->buffer, rq_data_dir(req) ? "write" : "read"); + rq_data_dir(req) ? 
"write" : "read"); if (blkif_queue_request(req)) { blk_requeue_request(rq, req); diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c index 1393b8871a28..ab3ea62e5dfc 100644 --- a/drivers/block/xsysace.c +++ b/drivers/block/xsysace.c @@ -661,7 +661,7 @@ static void ace_fsm_dostate(struct ace_device *ace) rq_data_dir(req)); ace->req = req; - ace->data_ptr = req->buffer; + ace->data_ptr = bio_data(req->bio); ace->data_count = blk_rq_cur_sectors(req) * ACE_BUF_PER_SECTOR; ace_out32(ace, ACE_MPULBA, blk_rq_pos(req) & 0x0FFFFFFF); @@ -733,7 +733,7 @@ static void ace_fsm_dostate(struct ace_device *ace) * blk_rq_sectors(ace->req), * blk_rq_cur_sectors(ace->req)); */ - ace->data_ptr = ace->req->buffer; + ace->data_ptr = bio_data(ace->req->bio); ace->data_count = blk_rq_cur_sectors(ace->req) * 16; ace_fsm_yieldirq(ace); break; diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c index 27de5046708a..968f9e52effa 100644 --- a/drivers/block/z2ram.c +++ b/drivers/block/z2ram.c @@ -87,13 +87,15 @@ static void do_z2_request(struct request_queue *q) while (len) { unsigned long addr = start & Z2RAM_CHUNKMASK; unsigned long size = Z2RAM_CHUNKSIZE - addr; + void *buffer = bio_data(req->bio); + if (len < size) size = len; addr += z2ram_map[ start >> Z2RAM_CHUNKSHIFT ]; if (rq_data_dir(req) == READ) - memcpy(req->buffer, (char *)addr, size); + memcpy(buffer, (char *)addr, size); else - memcpy((char *)addr, req->buffer, size); + memcpy((char *)addr, buffer, size); start += size; len -= size; } diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index 16f69be820c7..ee880382e3bc 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c @@ -188,10 +188,9 @@ static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq, ledtrig_ide_activity(); - pr_debug("%s: %sing: block=%llu, sectors=%u, buffer=0x%08lx\n", + pr_debug("%s: %sing: block=%llu, sectors=%u\n", drive->name, rq_data_dir(rq) == READ ? "read" : "writ", - (unsigned long long)block, blk_rq_sectors(rq), - (unsigned long)rq->buffer); + (unsigned long long)block, blk_rq_sectors(rq)); if (hwif->rw_disk) hwif->rw_disk(drive, rq); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 455e64916498..6a71bc7c9133 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1544,7 +1544,6 @@ static int setup_clone(struct request *clone, struct request *rq, clone->cmd = rq->cmd; clone->cmd_len = rq->cmd_len; clone->sense = rq->sense; - clone->buffer = rq->buffer; clone->end_io = end_clone_request; clone->end_io_data = tio; diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index 0b2ccb68c0d0..4dbfaee9aa95 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c @@ -82,8 +82,7 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr, block = blk_rq_pos(req) << 9 >> tr->blkshift; nsect = blk_rq_cur_bytes(req) >> tr->blkshift; - - buf = req->buffer; + buf = bio_data(req->bio); if (req->cmd_type != REQ_TYPE_FS) return -EIO; diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c index 7ff473c871a9..ee774ba3728d 100644 --- a/drivers/mtd/ubi/block.c +++ b/drivers/mtd/ubi/block.c @@ -253,7 +253,7 @@ static int do_ubiblock_request(struct ubiblock *dev, struct request *req) * flash access anyway. 
*/ mutex_lock(&dev->dev_mutex); - ret = ubiblock_read(dev, req->buffer, sec, len); + ret = ubiblock_read(dev, bio_data(req->bio), sec, len); mutex_unlock(&dev->dev_mutex); return ret; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 0f3bddcb6b1a..3cc82d3dec78 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1018,8 +1018,6 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, return BLKPREP_DEFER; } - req->buffer = NULL; - /* * Next, walk the list, and fill in the addresses and sizes of * each segment. @@ -1156,7 +1154,6 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req) BUG_ON(blk_rq_bytes(req)); memset(&cmd->sdb, 0, sizeof(cmd->sdb)); - req->buffer = NULL; } cmd->cmd_len = req->cmd_len; diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index efcbcd182863..06d154d20faa 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -739,14 +739,11 @@ static int sd_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq) blk_add_request_payload(rq, page, len); ret = scsi_setup_blk_pc_cmnd(sdp, rq); - rq->buffer = page_address(page); rq->__data_len = nr_bytes; out: - if (ret != BLKPREP_OK) { + if (ret != BLKPREP_OK) __free_page(page); - rq->buffer = NULL; - } return ret; } @@ -843,8 +840,9 @@ static void sd_unprep_fn(struct request_queue *q, struct request *rq) struct scsi_cmnd *SCpnt = rq->special; if (rq->cmd_flags & REQ_DISCARD) { - free_page((unsigned long)rq->buffer); - rq->buffer = NULL; + struct bio *bio = rq->bio; + + __free_page(bio->bi_io_vec->bv_page); } if (SCpnt->cmnd != rq->cmd) { mempool_free(SCpnt->cmnd, sd_cdb_pool); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 86a8df13a5fe..eb5e94803892 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -178,7 +178,6 @@ struct request { unsigned short ioprio; void *special; /* opaque pointer available for LLD use */ - char *buffer; /* kaddr of the current segment if available */ int tag; int errors; -- cgit v1.2.3-59-g8ed1b From 742ee69b92d9559e968f70cf0a49157866f8fbef Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 14 Apr 2014 10:30:06 +0200 Subject: blk-mq: initialize resid_len Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index 5455ed19de1c..658428a28faf 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -353,6 +353,8 @@ static void blk_mq_start_request(struct request *rq, bool last) trace_block_rq_issue(q, rq); + rq->resid_len = blk_rq_bytes(rq); + /* * Just mark start time and set the started bit. Due to memory * ordering, we know we'll see the correct deadline as long as -- cgit v1.2.3-59-g8ed1b From 9d74e25737d73e93ccddeb5a61bcd56b7b8eb57b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 14 Apr 2014 10:30:07 +0200 Subject: blk-mq: do not initialize req->special Drivers can reach their private data easily using the blk_mq_rq_to_pdu helper and don't need req->special. By not initializing it code can be simplified nicely, and we also shave off a few more instructions from the I/O path. 
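The driver-side conversion is mechanical; as a sketch (the my_cmd name is hypothetical, the real conversions for null_blk and virtio_blk are in the diff below):

	/* before: private data reached through the request */
	struct my_cmd *cmd = rq->special;

	/* after: the per-request pdu sits directly behind struct request */
	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);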
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-flush.c | 10 ++-------- block/blk-mq.c | 15 ++------------- block/blk-mq.h | 1 - drivers/block/null_blk.c | 4 ++-- drivers/block/virtio_blk.c | 6 +++--- 5 files changed, 9 insertions(+), 27 deletions(-) (limited to 'block') diff --git a/block/blk-flush.c b/block/blk-flush.c index 77f20458910c..0e42adcfb55e 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -306,22 +306,16 @@ static bool blk_kick_flush(struct request_queue *q) */ q->flush_pending_idx ^= 1; + blk_rq_init(q, q->flush_rq); if (q->mq_ops) { - struct blk_mq_ctx *ctx = first_rq->mq_ctx; - struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu); - - blk_mq_rq_init(hctx, q->flush_rq); - q->flush_rq->mq_ctx = ctx; - /* * Reuse the tag value from the fist waiting request, * with blk-mq the tag is generated during request * allocation and drivers can rely on it being inside * the range they asked for. */ + q->flush_rq->mq_ctx = first_rq->mq_ctx; q->flush_rq->tag = first_rq->tag; - } else { - blk_rq_init(q, q->flush_rq); } q->flush_rq->cmd_type = REQ_TYPE_FS; diff --git a/block/blk-mq.c b/block/blk-mq.c index 658428a28faf..d9d0984d2f01 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -251,24 +251,13 @@ struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, } EXPORT_SYMBOL(blk_mq_alloc_reserved_request); -/* - * Re-init and set pdu, if we have it - */ -void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq) -{ - blk_rq_init(hctx->queue, rq); - - if (hctx->cmd_size) - rq->special = blk_mq_rq_to_pdu(rq); -} - static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, struct request *rq) { const int tag = rq->tag; struct request_queue *q = rq->q; - blk_mq_rq_init(hctx, rq); + blk_rq_init(hctx->queue, rq); blk_mq_put_tag(hctx->tags, tag); blk_mq_queue_exit(q); @@ -1165,7 +1154,7 @@ static int blk_mq_init_rq_map(struct blk_mq_hw_ctx *hctx, left -= to_do * rq_size; for (j = 0; j < to_do; j++) { hctx->rqs[i] = p; - blk_mq_rq_init(hctx, hctx->rqs[i]); + blk_rq_init(hctx->queue, hctx->rqs[i]); p += rq_size; i++; } diff --git a/block/blk-mq.h b/block/blk-mq.h index ebbe6bac9d61..238379a612e4 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -27,7 +27,6 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); void blk_mq_init_flush(struct request_queue *q); void blk_mq_drain_queue(struct request_queue *q); void blk_mq_free_queue(struct request_queue *q); -void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq); /* * CPU hotplug helpers diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 091b9ea14feb..71df69d90900 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -226,7 +226,7 @@ static void null_cmd_end_timer(struct nullb_cmd *cmd) static void null_softirq_done_fn(struct request *rq) { - end_cmd(rq->special); + end_cmd(blk_mq_rq_to_pdu(rq)); } static inline void null_handle_cmd(struct nullb_cmd *cmd) @@ -311,7 +311,7 @@ static void null_request_fn(struct request_queue *q) static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq) { - struct nullb_cmd *cmd = rq->special; + struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq); cmd->rq = rq; cmd->nq = hctx->driver_data; diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 6d8a87f252de..c7d02bc9d945 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -112,7 +112,7 @@ static int __virtblk_add_req(struct virtqueue *vq, static inline 
void virtblk_request_done(struct request *req) { - struct virtblk_req *vbr = req->special; + struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); int error = virtblk_result(vbr); if (req->cmd_type == REQ_TYPE_BLOCK_PC) { @@ -154,7 +154,7 @@ static void virtblk_done(struct virtqueue *vq) static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req) { struct virtio_blk *vblk = hctx->queue->queuedata; - struct virtblk_req *vbr = req->special; + struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); unsigned long flags; unsigned int num; const bool last = (req->cmd_flags & REQ_END) != 0; @@ -501,7 +501,7 @@ static int virtblk_init_vbr(void *data, struct blk_mq_hw_ctx *hctx, struct request *rq, unsigned int nr) { struct virtio_blk *vblk = data; - struct virtblk_req *vbr = rq->special; + struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq); sg_init_table(vbr->sg, vblk->sg_elems); return 0; -- cgit v1.2.3-59-g8ed1b From 8727af4b9d45c7503042e3fbd926c1a173876e9c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 14 Apr 2014 10:30:08 +0200 Subject: blk-mq: make ->flush_rq fully transparent to drivers Drivers shouldn't have to care about the block layer setting aside a request to implement the flush state machine. We already override the mq context and tag to make it more transparent, but so far haven't deal with the driver private data in the request. Make sure to override this as well, and while we're at it add a proper helper sitting in blk-mq.c that implements the full impersonation. Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-flush.c | 12 ++---------- block/blk-mq.c | 20 ++++++++++++++++++++ block/blk-mq.h | 2 ++ 3 files changed, 24 insertions(+), 10 deletions(-) (limited to 'block') diff --git a/block/blk-flush.c b/block/blk-flush.c index 0e42adcfb55e..c41fc19f75d1 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -307,16 +307,8 @@ static bool blk_kick_flush(struct request_queue *q) q->flush_pending_idx ^= 1; blk_rq_init(q, q->flush_rq); - if (q->mq_ops) { - /* - * Reuse the tag value from the fist waiting request, - * with blk-mq the tag is generated during request - * allocation and drivers can rely on it being inside - * the range they asked for. - */ - q->flush_rq->mq_ctx = first_rq->mq_ctx; - q->flush_rq->tag = first_rq->tag; - } + if (q->mq_ops) + blk_mq_clone_flush_request(q->flush_rq, first_rq); q->flush_rq->cmd_type = REQ_TYPE_FS; q->flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ; diff --git a/block/blk-mq.c b/block/blk-mq.c index d9d0984d2f01..e644feec068c 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -275,6 +275,26 @@ void blk_mq_free_request(struct request *rq) __blk_mq_free_request(hctx, ctx, rq); } +/* + * Clone all relevant state from a request that has been put on hold in + * the flush state machine into the preallocated flush request that hangs + * off the request queue. + * + * For a driver the flush request should be invisible, that's why we are + * impersonating the original request here. 
+ */ +void blk_mq_clone_flush_request(struct request *flush_rq, + struct request *orig_rq) +{ + struct blk_mq_hw_ctx *hctx = + orig_rq->q->mq_ops->map_queue(orig_rq->q, orig_rq->mq_ctx->cpu); + + flush_rq->mq_ctx = orig_rq->mq_ctx; + flush_rq->tag = orig_rq->tag; + memcpy(blk_mq_rq_to_pdu(flush_rq), blk_mq_rq_to_pdu(orig_rq), + hctx->cmd_size); +} + bool blk_mq_end_io_partial(struct request *rq, int error, unsigned int nr_bytes) { if (blk_update_request(rq, error, blk_rq_bytes(rq))) diff --git a/block/blk-mq.h b/block/blk-mq.h index 238379a612e4..7964dadb7d64 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -27,6 +27,8 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); void blk_mq_init_flush(struct request_queue *q); void blk_mq_drain_queue(struct request_queue *q); void blk_mq_free_queue(struct request_queue *q); +void blk_mq_clone_flush_request(struct request *flush_rq, + struct request *orig_rq); /* * CPU hotplug helpers -- cgit v1.2.3-59-g8ed1b From e9b267d91f6ddbc694cb40aa962b0b2cec03971d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 15 Apr 2014 13:59:10 -0600 Subject: blk-mq: add ->init_request and ->exit_request methods The current blk_mq_init_commands/blk_mq_free_commands interface has a two problems: 1) Because only the constructor is passed to blk_mq_init_commands there is no easy way to clean up when a comman initialization failed. The current code simply leaks the allocations done in the constructor. 2) There is no good place to call blk_mq_free_commands: before blk_cleanup_queue there is no guarantee that all outstanding commands have completed, so we can't free them yet. After blk_cleanup_queue the queue has usually been freed. This can be worked around by grabbing an unconditional reference before calling blk_cleanup_queue and dropping it after blk_mq_free_commands is done, although that's not exatly pretty and driver writers are guaranteed to get it wrong sooner or later. Both issues are easily fixed by making the request constructor and destructor normal blk_mq_ops methods. 
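A minimal sketch of the new hooks (the my_* names are hypothetical; the signatures follow the init_request_fn/exit_request_fn typedefs added below, and virtio_blk's conversion in this patch is the real example):

	static int my_init_request(void *data, struct blk_mq_hw_ctx *hctx,
				   struct request *rq, unsigned int nr)
	{
		struct my_dev *dev = data;
		struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);

		/* one-time, per-request setup done when the queue is set up */
		sg_init_table(cmd->sg, dev->sg_elems);
		return 0;
	}

	static struct blk_mq_ops my_mq_ops = {
		.queue_rq	= my_queue_rq,
		.map_queue	= blk_mq_map_queue,
		/* other mandatory ops omitted for brevity */
		.init_request	= my_init_request,
		/* .exit_request, if set, undoes init_request on teardown */
	};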
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq.c | 105 ++++++++++++++------------------------------- drivers/block/virtio_blk.c | 23 +++++----- include/linux/blk-mq.h | 14 +++++- 3 files changed, 55 insertions(+), 87 deletions(-) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index e644feec068c..48d2d8495f5e 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1031,74 +1031,20 @@ static void blk_mq_hctx_notify(void *data, unsigned long action, blk_mq_put_ctx(ctx); } -static int blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx, - int (*init)(void *, struct blk_mq_hw_ctx *, - struct request *, unsigned int), - void *data) +static void blk_mq_free_rq_map(struct blk_mq_hw_ctx *hctx, void *driver_data) { - unsigned int i; - int ret = 0; - - for (i = 0; i < hctx->queue_depth; i++) { - struct request *rq = hctx->rqs[i]; - - ret = init(data, hctx, rq, i); - if (ret) - break; - } - - return ret; -} - -int blk_mq_init_commands(struct request_queue *q, - int (*init)(void *, struct blk_mq_hw_ctx *, - struct request *, unsigned int), - void *data) -{ - struct blk_mq_hw_ctx *hctx; - unsigned int i; - int ret = 0; - - queue_for_each_hw_ctx(q, hctx, i) { - ret = blk_mq_init_hw_commands(hctx, init, data); - if (ret) - break; - } - - return ret; -} -EXPORT_SYMBOL(blk_mq_init_commands); - -static void blk_mq_free_hw_commands(struct blk_mq_hw_ctx *hctx, - void (*free)(void *, struct blk_mq_hw_ctx *, - struct request *, unsigned int), - void *data) -{ - unsigned int i; + struct page *page; - for (i = 0; i < hctx->queue_depth; i++) { - struct request *rq = hctx->rqs[i]; + if (hctx->rqs && hctx->queue->mq_ops->exit_request) { + int i; - free(data, hctx, rq, i); + for (i = 0; i < hctx->queue_depth; i++) { + if (!hctx->rqs[i]) + continue; + hctx->queue->mq_ops->exit_request(driver_data, hctx, + hctx->rqs[i], i); + } } -} - -void blk_mq_free_commands(struct request_queue *q, - void (*free)(void *, struct blk_mq_hw_ctx *, - struct request *, unsigned int), - void *data) -{ - struct blk_mq_hw_ctx *hctx; - unsigned int i; - - queue_for_each_hw_ctx(q, hctx, i) - blk_mq_free_hw_commands(hctx, free, data); -} -EXPORT_SYMBOL(blk_mq_free_commands); - -static void blk_mq_free_rq_map(struct blk_mq_hw_ctx *hctx) -{ - struct page *page; while (!list_empty(&hctx->page_list)) { page = list_first_entry(&hctx->page_list, struct page, lru); @@ -1123,10 +1069,12 @@ static size_t order_to_size(unsigned int order) } static int blk_mq_init_rq_map(struct blk_mq_hw_ctx *hctx, - unsigned int reserved_tags, int node) + struct blk_mq_reg *reg, void *driver_data, int node) { + unsigned int reserved_tags = reg->reserved_tags; unsigned int i, j, entries_per_page, max_order = 4; size_t rq_size, left; + int error; INIT_LIST_HEAD(&hctx->page_list); @@ -1175,14 +1123,23 @@ static int blk_mq_init_rq_map(struct blk_mq_hw_ctx *hctx, for (j = 0; j < to_do; j++) { hctx->rqs[i] = p; blk_rq_init(hctx->queue, hctx->rqs[i]); + if (reg->ops->init_request) { + error = reg->ops->init_request(driver_data, + hctx, hctx->rqs[i], i); + if (error) + goto err_rq_map; + } + p += rq_size; i++; } } - if (i < (reserved_tags + BLK_MQ_TAG_MIN)) + if (i < (reserved_tags + BLK_MQ_TAG_MIN)) { + error = -ENOMEM; goto err_rq_map; - else if (i != hctx->queue_depth) { + } + if (i != hctx->queue_depth) { hctx->queue_depth = i; pr_warn("%s: queue depth set to %u because of low memory\n", __func__, i); @@ -1190,12 +1147,14 @@ static int blk_mq_init_rq_map(struct blk_mq_hw_ctx *hctx, hctx->tags = blk_mq_init_tags(hctx->queue_depth, 
reserved_tags, node); if (!hctx->tags) { -err_rq_map: - blk_mq_free_rq_map(hctx); - return -ENOMEM; + error = -ENOMEM; + goto err_rq_map; } return 0; +err_rq_map: + blk_mq_free_rq_map(hctx, driver_data); + return error; } static int blk_mq_init_hw_queues(struct request_queue *q, @@ -1228,7 +1187,7 @@ static int blk_mq_init_hw_queues(struct request_queue *q, blk_mq_hctx_notify, hctx); blk_mq_register_cpu_notifier(&hctx->cpu_notifier); - if (blk_mq_init_rq_map(hctx, reg->reserved_tags, node)) + if (blk_mq_init_rq_map(hctx, reg, driver_data, node)) break; /* @@ -1268,7 +1227,7 @@ static int blk_mq_init_hw_queues(struct request_queue *q, reg->ops->exit_hctx(hctx, j); blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); - blk_mq_free_rq_map(hctx); + blk_mq_free_rq_map(hctx, driver_data); kfree(hctx->ctxs); } @@ -1455,7 +1414,7 @@ void blk_mq_free_queue(struct request_queue *q) queue_for_each_hw_ctx(q, hctx, i) { kfree(hctx->ctx_map); kfree(hctx->ctxs); - blk_mq_free_rq_map(hctx); + blk_mq_free_rq_map(hctx, q->queuedata); blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); if (q->mq_ops->exit_hctx) q->mq_ops->exit_hctx(hctx, i); diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index c7d02bc9d945..d06206abd340 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -480,11 +480,22 @@ static const struct device_attribute dev_attr_cache_type_rw = __ATTR(cache_type, S_IRUGO|S_IWUSR, virtblk_cache_type_show, virtblk_cache_type_store); +static int virtblk_init_request(void *data, struct blk_mq_hw_ctx *hctx, + struct request *rq, unsigned int nr) +{ + struct virtio_blk *vblk = data; + struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq); + + sg_init_table(vbr->sg, vblk->sg_elems); + return 0; +} + static struct blk_mq_ops virtio_mq_ops = { .queue_rq = virtio_queue_rq, .map_queue = blk_mq_map_queue, .alloc_hctx = blk_mq_alloc_single_hw_queue, .free_hctx = blk_mq_free_single_hw_queue, + .init_request = virtblk_init_request, .complete = virtblk_request_done, }; @@ -497,16 +508,6 @@ static struct blk_mq_reg virtio_mq_reg = { }; module_param_named(queue_depth, virtio_mq_reg.queue_depth, uint, 0444); -static int virtblk_init_vbr(void *data, struct blk_mq_hw_ctx *hctx, - struct request *rq, unsigned int nr) -{ - struct virtio_blk *vblk = data; - struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq); - - sg_init_table(vbr->sg, vblk->sg_elems); - return 0; -} - static int virtblk_probe(struct virtio_device *vdev) { struct virtio_blk *vblk; @@ -577,8 +578,6 @@ static int virtblk_probe(struct virtio_device *vdev) goto out_put_disk; } - blk_mq_init_commands(q, virtblk_init_vbr, vblk); - q->queuedata = vblk; virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index b6ee48740458..29c1a6e83814 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -67,6 +67,10 @@ typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_reg *,unsigned int); typedef void (free_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int); typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int); typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int); +typedef int (init_request_fn)(void *, struct blk_mq_hw_ctx *, + struct request *, unsigned int); +typedef void (exit_request_fn)(void *, struct blk_mq_hw_ctx *, + struct request *, unsigned int); struct blk_mq_ops { /* @@ -99,6 +103,14 @@ struct blk_mq_ops { */ init_hctx_fn *init_hctx; exit_hctx_fn *exit_hctx; + + /* + * Called for every command 
allocated by the block layer to allow + * the driver to set up driver specific data. + * Ditto for exit/teardown. + */ + init_request_fn *init_request; + exit_request_fn *exit_request; }; enum { @@ -118,8 +130,6 @@ enum { struct request_queue *blk_mq_init_queue(struct blk_mq_reg *, void *); int blk_mq_register_disk(struct gendisk *); void blk_mq_unregister_disk(struct gendisk *); -int blk_mq_init_commands(struct request_queue *, int (*init)(void *data, struct blk_mq_hw_ctx *, struct request *, unsigned int), void *data); -void blk_mq_free_commands(struct request_queue *, void (*free)(void *data, struct blk_mq_hw_ctx *, struct request *, unsigned int), void *data); void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule); -- cgit v1.2.3-59-g8ed1b From ed44832dea8a74f909e0187f350201402927f5e5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 14 Apr 2014 10:30:10 +0200 Subject: blk-mq: initialize request on allocation If we want to share tag and request allocation between queues we cannot initialize the request at init/free time, but need to initialize it at allocation time as it might get used for different queues over its lifetime. Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index 48d2d8495f5e..2a5a0fed10a3 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -82,6 +82,7 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx, tag = blk_mq_get_tag(hctx->tags, gfp, reserved); if (tag != BLK_MQ_TAG_FAIL) { rq = hctx->rqs[tag]; + blk_rq_init(hctx->queue, rq); rq->tag = tag; return rq; @@ -257,9 +258,7 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx, const int tag = rq->tag; struct request_queue *q = rq->q; - blk_rq_init(hctx->queue, rq); blk_mq_put_tag(hctx->tags, tag); - blk_mq_queue_exit(q); } @@ -1122,7 +1121,6 @@ static int blk_mq_init_rq_map(struct blk_mq_hw_ctx *hctx, left -= to_do * rq_size; for (j = 0; j < to_do; j++) { hctx->rqs[i] = p; - blk_rq_init(hctx->queue, hctx->rqs[i]); if (reg->ops->init_request) { error = reg->ops->init_request(driver_data, hctx, hctx->rqs[i], i); -- cgit v1.2.3-59-g8ed1b From 24d2f90309b23f2cfe016b2aebc5f0d6e01c57fd Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 15 Apr 2014 14:14:00 -0600 Subject: blk-mq: split out tag initialization, support shared tags Add a new blk_mq_tag_set structure that gets set up before we initialize the queue. A single blk_mq_tag_set structure can be shared by multiple queues. Signed-off-by: Christoph Hellwig Modular export of blk_mq_{alloc,free}_tagset added by me. 
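As a hedged setup sketch (the my_* names and values are invented, error handling is trimmed, and the blk_mq_alloc_tag_set()/blk_mq_free_tag_set() calls are assumed from the export note above), a driver now fills in one tag set and builds its queue(s) from it:

	struct blk_mq_tag_set *set = &my_dev->tag_set;

	set->ops		= &my_mq_ops;
	set->nr_hw_queues	= 1;
	set->queue_depth	= 64;
	set->numa_node		= NUMA_NO_NODE;
	set->cmd_size		= sizeof(struct my_cmd);	/* per-request pdu */
	set->driver_data	= my_dev;

	if (blk_mq_alloc_tag_set(set))
		return -ENOMEM;

	q = blk_mq_init_queue(set);
	if (IS_ERR(q)) {
		blk_mq_free_tag_set(set);
		return PTR_ERR(q);
	}

Because the tags (and the requests allocated behind them) now live in the tag set rather than in each hardware context, several queues can share a single set.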
Signed-off-by: Jens Axboe --- block/blk-mq-cpumap.c | 6 +- block/blk-mq-tag.c | 14 --- block/blk-mq-tag.h | 19 +++- block/blk-mq.c | 244 +++++++++++++++++++++++++-------------------- block/blk-mq.h | 5 +- drivers/block/null_blk.c | 92 ++++++++++------- drivers/block/virtio_blk.c | 48 +++++---- include/linux/blk-mq.h | 34 +++---- 8 files changed, 262 insertions(+), 200 deletions(-) (limited to 'block') diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c index 097921329619..5d0f93cf358c 100644 --- a/block/blk-mq-cpumap.c +++ b/block/blk-mq-cpumap.c @@ -80,17 +80,17 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues) return 0; } -unsigned int *blk_mq_make_queue_map(struct blk_mq_reg *reg) +unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set) { unsigned int *map; /* If cpus are offline, map them to first hctx */ map = kzalloc_node(sizeof(*map) * num_possible_cpus(), GFP_KERNEL, - reg->numa_node); + set->numa_node); if (!map) return NULL; - if (!blk_mq_update_queue_map(map, reg->nr_hw_queues)) + if (!blk_mq_update_queue_map(map, set->nr_hw_queues)) return map; kfree(map); diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 83ae96c51a27..7a799c46c32d 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -1,25 +1,11 @@ #include #include -#include #include #include "blk.h" #include "blk-mq.h" #include "blk-mq-tag.h" -/* - * Per tagged queue (tag address space) map - */ -struct blk_mq_tags { - unsigned int nr_tags; - unsigned int nr_reserved_tags; - unsigned int nr_batch_move; - unsigned int nr_max_cache; - - struct percpu_ida free_tags; - struct percpu_ida reserved_tags; -}; - void blk_mq_wait_for_tags(struct blk_mq_tags *tags) { int tag = blk_mq_get_tag(tags, __GFP_WAIT, false); diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index 947ba2c6148e..b602e3fa66ea 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -1,7 +1,24 @@ #ifndef INT_BLK_MQ_TAG_H #define INT_BLK_MQ_TAG_H -struct blk_mq_tags; +#include + +/* + * Tag address space map. 
+ */ +struct blk_mq_tags { + unsigned int nr_tags; + unsigned int nr_reserved_tags; + unsigned int nr_batch_move; + unsigned int nr_max_cache; + + struct percpu_ida free_tags; + struct percpu_ida reserved_tags; + + struct request **rqs; + struct list_head page_list; +}; + extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node); extern void blk_mq_free_tags(struct blk_mq_tags *tags); diff --git a/block/blk-mq.c b/block/blk-mq.c index 2a5a0fed10a3..9180052d42cc 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -81,7 +81,7 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx, tag = blk_mq_get_tag(hctx->tags, gfp, reserved); if (tag != BLK_MQ_TAG_FAIL) { - rq = hctx->rqs[tag]; + rq = hctx->tags->rqs[tag]; blk_rq_init(hctx->queue, rq); rq->tag = tag; @@ -404,6 +404,12 @@ static void blk_mq_requeue_request(struct request *rq) rq->nr_phys_segments--; } +struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag) +{ + return tags->rqs[tag]; +} +EXPORT_SYMBOL(blk_mq_tag_to_rq); + struct blk_mq_timeout_data { struct blk_mq_hw_ctx *hctx; unsigned long *next; @@ -425,12 +431,13 @@ static void blk_mq_timeout_check(void *__data, unsigned long *free_tags) do { struct request *rq; - tag = find_next_zero_bit(free_tags, hctx->queue_depth, tag); - if (tag >= hctx->queue_depth) + tag = find_next_zero_bit(free_tags, hctx->tags->nr_tags, tag); + if (tag >= hctx->tags->nr_tags) break; - rq = hctx->rqs[tag++]; - + rq = blk_mq_tag_to_rq(hctx->tags, tag++); + if (rq->q != hctx->queue) + continue; if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) continue; @@ -969,11 +976,11 @@ struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu) } EXPORT_SYMBOL(blk_mq_map_queue); -struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_reg *reg, +struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *set, unsigned int hctx_index) { return kmalloc_node(sizeof(struct blk_mq_hw_ctx), - GFP_KERNEL | __GFP_ZERO, reg->numa_node); + GFP_KERNEL | __GFP_ZERO, set->numa_node); } EXPORT_SYMBOL(blk_mq_alloc_single_hw_queue); @@ -1030,31 +1037,31 @@ static void blk_mq_hctx_notify(void *data, unsigned long action, blk_mq_put_ctx(ctx); } -static void blk_mq_free_rq_map(struct blk_mq_hw_ctx *hctx, void *driver_data) +static void blk_mq_free_rq_map(struct blk_mq_tag_set *set, + struct blk_mq_tags *tags, unsigned int hctx_idx) { struct page *page; - if (hctx->rqs && hctx->queue->mq_ops->exit_request) { + if (tags->rqs && set->ops->exit_request) { int i; - for (i = 0; i < hctx->queue_depth; i++) { - if (!hctx->rqs[i]) + for (i = 0; i < tags->nr_tags; i++) { + if (!tags->rqs[i]) continue; - hctx->queue->mq_ops->exit_request(driver_data, hctx, - hctx->rqs[i], i); + set->ops->exit_request(set->driver_data, tags->rqs[i], + hctx_idx, i); } } - while (!list_empty(&hctx->page_list)) { - page = list_first_entry(&hctx->page_list, struct page, lru); + while (!list_empty(&tags->page_list)) { + page = list_first_entry(&tags->page_list, struct page, lru); list_del_init(&page->lru); __free_pages(page, page->private); } - kfree(hctx->rqs); + kfree(tags->rqs); - if (hctx->tags) - blk_mq_free_tags(hctx->tags); + blk_mq_free_tags(tags); } static size_t order_to_size(unsigned int order) @@ -1067,30 +1074,36 @@ static size_t order_to_size(unsigned int order) return ret; } -static int blk_mq_init_rq_map(struct blk_mq_hw_ctx *hctx, - struct blk_mq_reg *reg, void *driver_data, int node) +static struct blk_mq_tags 
*blk_mq_init_rq_map(struct blk_mq_tag_set *set, + unsigned int hctx_idx) { - unsigned int reserved_tags = reg->reserved_tags; + struct blk_mq_tags *tags; unsigned int i, j, entries_per_page, max_order = 4; size_t rq_size, left; - int error; - INIT_LIST_HEAD(&hctx->page_list); + tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags, + set->numa_node); + if (!tags) + return NULL; - hctx->rqs = kmalloc_node(hctx->queue_depth * sizeof(struct request *), - GFP_KERNEL, node); - if (!hctx->rqs) - return -ENOMEM; + INIT_LIST_HEAD(&tags->page_list); + + tags->rqs = kmalloc_node(set->queue_depth * sizeof(struct request *), + GFP_KERNEL, set->numa_node); + if (!tags->rqs) { + blk_mq_free_tags(tags); + return NULL; + } /* * rq_size is the size of the request plus driver payload, rounded * to the cacheline size */ - rq_size = round_up(sizeof(struct request) + hctx->cmd_size, + rq_size = round_up(sizeof(struct request) + set->cmd_size, cache_line_size()); - left = rq_size * hctx->queue_depth; + left = rq_size * set->queue_depth; - for (i = 0; i < hctx->queue_depth;) { + for (i = 0; i < set->queue_depth; ) { int this_order = max_order; struct page *page; int to_do; @@ -1100,7 +1113,8 @@ static int blk_mq_init_rq_map(struct blk_mq_hw_ctx *hctx, this_order--; do { - page = alloc_pages_node(node, GFP_KERNEL, this_order); + page = alloc_pages_node(set->numa_node, GFP_KERNEL, + this_order); if (page) break; if (!this_order--) @@ -1110,22 +1124,22 @@ static int blk_mq_init_rq_map(struct blk_mq_hw_ctx *hctx, } while (1); if (!page) - break; + goto fail; page->private = this_order; - list_add_tail(&page->lru, &hctx->page_list); + list_add_tail(&page->lru, &tags->page_list); p = page_address(page); entries_per_page = order_to_size(this_order) / rq_size; - to_do = min(entries_per_page, hctx->queue_depth - i); + to_do = min(entries_per_page, set->queue_depth - i); left -= to_do * rq_size; for (j = 0; j < to_do; j++) { - hctx->rqs[i] = p; - if (reg->ops->init_request) { - error = reg->ops->init_request(driver_data, - hctx, hctx->rqs[i], i); - if (error) - goto err_rq_map; + tags->rqs[i] = p; + if (set->ops->init_request) { + if (set->ops->init_request(set->driver_data, + tags->rqs[i], hctx_idx, i, + set->numa_node)) + goto fail; } p += rq_size; @@ -1133,30 +1147,16 @@ static int blk_mq_init_rq_map(struct blk_mq_hw_ctx *hctx, } } - if (i < (reserved_tags + BLK_MQ_TAG_MIN)) { - error = -ENOMEM; - goto err_rq_map; - } - if (i != hctx->queue_depth) { - hctx->queue_depth = i; - pr_warn("%s: queue depth set to %u because of low memory\n", - __func__, i); - } + return tags; - hctx->tags = blk_mq_init_tags(hctx->queue_depth, reserved_tags, node); - if (!hctx->tags) { - error = -ENOMEM; - goto err_rq_map; - } - - return 0; -err_rq_map: - blk_mq_free_rq_map(hctx, driver_data); - return error; +fail: + pr_warn("%s: failed to allocate requests\n", __func__); + blk_mq_free_rq_map(set, tags, hctx_idx); + return NULL; } static int blk_mq_init_hw_queues(struct request_queue *q, - struct blk_mq_reg *reg, void *driver_data) + struct blk_mq_tag_set *set) { struct blk_mq_hw_ctx *hctx; unsigned int i, j; @@ -1170,23 +1170,21 @@ static int blk_mq_init_hw_queues(struct request_queue *q, node = hctx->numa_node; if (node == NUMA_NO_NODE) - node = hctx->numa_node = reg->numa_node; + node = hctx->numa_node = set->numa_node; INIT_DELAYED_WORK(&hctx->delayed_work, blk_mq_work_fn); spin_lock_init(&hctx->lock); INIT_LIST_HEAD(&hctx->dispatch); hctx->queue = q; hctx->queue_num = i; - hctx->flags = reg->flags; - hctx->queue_depth = 
reg->queue_depth; - hctx->cmd_size = reg->cmd_size; + hctx->flags = set->flags; + hctx->cmd_size = set->cmd_size; blk_mq_init_cpu_notifier(&hctx->cpu_notifier, blk_mq_hctx_notify, hctx); blk_mq_register_cpu_notifier(&hctx->cpu_notifier); - if (blk_mq_init_rq_map(hctx, reg, driver_data, node)) - break; + hctx->tags = set->tags[i]; /* * Allocate space for all possible cpus to avoid allocation in @@ -1206,8 +1204,8 @@ static int blk_mq_init_hw_queues(struct request_queue *q, hctx->nr_ctx_map = num_maps; hctx->nr_ctx = 0; - if (reg->ops->init_hctx && - reg->ops->init_hctx(hctx, driver_data, i)) + if (set->ops->init_hctx && + set->ops->init_hctx(hctx, set->driver_data, i)) break; } @@ -1221,11 +1219,10 @@ static int blk_mq_init_hw_queues(struct request_queue *q, if (i == j) break; - if (reg->ops->exit_hctx) - reg->ops->exit_hctx(hctx, j); + if (set->ops->exit_hctx) + set->ops->exit_hctx(hctx, j); blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); - blk_mq_free_rq_map(hctx, driver_data); kfree(hctx->ctxs); } @@ -1290,41 +1287,25 @@ static void blk_mq_map_swqueue(struct request_queue *q) } } -struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg, - void *driver_data) +struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) { struct blk_mq_hw_ctx **hctxs; struct blk_mq_ctx *ctx; struct request_queue *q; int i; - if (!reg->nr_hw_queues || - !reg->ops->queue_rq || !reg->ops->map_queue || - !reg->ops->alloc_hctx || !reg->ops->free_hctx) - return ERR_PTR(-EINVAL); - - if (!reg->queue_depth) - reg->queue_depth = BLK_MQ_MAX_DEPTH; - else if (reg->queue_depth > BLK_MQ_MAX_DEPTH) { - pr_err("blk-mq: queuedepth too large (%u)\n", reg->queue_depth); - reg->queue_depth = BLK_MQ_MAX_DEPTH; - } - - if (reg->queue_depth < (reg->reserved_tags + BLK_MQ_TAG_MIN)) - return ERR_PTR(-EINVAL); - ctx = alloc_percpu(struct blk_mq_ctx); if (!ctx) return ERR_PTR(-ENOMEM); - hctxs = kmalloc_node(reg->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL, - reg->numa_node); + hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL, + set->numa_node); if (!hctxs) goto err_percpu; - for (i = 0; i < reg->nr_hw_queues; i++) { - hctxs[i] = reg->ops->alloc_hctx(reg, i); + for (i = 0; i < set->nr_hw_queues; i++) { + hctxs[i] = set->ops->alloc_hctx(set, i); if (!hctxs[i]) goto err_hctxs; @@ -1335,11 +1316,11 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg, hctxs[i]->queue_num = i; } - q = blk_alloc_queue_node(GFP_KERNEL, reg->numa_node); + q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node); if (!q) goto err_hctxs; - q->mq_map = blk_mq_make_queue_map(reg); + q->mq_map = blk_mq_make_queue_map(set); if (!q->mq_map) goto err_map; @@ -1347,33 +1328,34 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg, blk_queue_rq_timeout(q, 30000); q->nr_queues = nr_cpu_ids; - q->nr_hw_queues = reg->nr_hw_queues; + q->nr_hw_queues = set->nr_hw_queues; q->queue_ctx = ctx; q->queue_hw_ctx = hctxs; - q->mq_ops = reg->ops; + q->mq_ops = set->ops; q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; q->sg_reserved_size = INT_MAX; blk_queue_make_request(q, blk_mq_make_request); - blk_queue_rq_timed_out(q, reg->ops->timeout); - if (reg->timeout) - blk_queue_rq_timeout(q, reg->timeout); + blk_queue_rq_timed_out(q, set->ops->timeout); + if (set->timeout) + blk_queue_rq_timeout(q, set->timeout); - if (reg->ops->complete) - blk_queue_softirq_done(q, reg->ops->complete); + if (set->ops->complete) + blk_queue_softirq_done(q, set->ops->complete); blk_mq_init_flush(q); - blk_mq_init_cpu_queues(q, reg->nr_hw_queues); + 
blk_mq_init_cpu_queues(q, set->nr_hw_queues); - q->flush_rq = kzalloc(round_up(sizeof(struct request) + reg->cmd_size, - cache_line_size()), GFP_KERNEL); + q->flush_rq = kzalloc(round_up(sizeof(struct request) + + set->cmd_size, cache_line_size()), + GFP_KERNEL); if (!q->flush_rq) goto err_hw; - if (blk_mq_init_hw_queues(q, reg, driver_data)) + if (blk_mq_init_hw_queues(q, set)) goto err_flush_rq; blk_mq_map_swqueue(q); @@ -1391,11 +1373,11 @@ err_hw: err_map: blk_cleanup_queue(q); err_hctxs: - for (i = 0; i < reg->nr_hw_queues; i++) { + for (i = 0; i < set->nr_hw_queues; i++) { if (!hctxs[i]) break; free_cpumask_var(hctxs[i]->cpumask); - reg->ops->free_hctx(hctxs[i], i); + set->ops->free_hctx(hctxs[i], i); } kfree(hctxs); err_percpu: @@ -1412,7 +1394,6 @@ void blk_mq_free_queue(struct request_queue *q) queue_for_each_hw_ctx(q, hctx, i) { kfree(hctx->ctx_map); kfree(hctx->ctxs); - blk_mq_free_rq_map(hctx, q->queuedata); blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); if (q->mq_ops->exit_hctx) q->mq_ops->exit_hctx(hctx, i); @@ -1473,6 +1454,53 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb, return NOTIFY_OK; } +int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) +{ + int i; + + if (!set->nr_hw_queues) + return -EINVAL; + if (!set->queue_depth || set->queue_depth > BLK_MQ_MAX_DEPTH) + return -EINVAL; + if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) + return -EINVAL; + + if (!set->nr_hw_queues || + !set->ops->queue_rq || !set->ops->map_queue || + !set->ops->alloc_hctx || !set->ops->free_hctx) + return -EINVAL; + + + set->tags = kmalloc_node(set->nr_hw_queues * sizeof(struct blk_mq_tags), + GFP_KERNEL, set->numa_node); + if (!set->tags) + goto out; + + for (i = 0; i < set->nr_hw_queues; i++) { + set->tags[i] = blk_mq_init_rq_map(set, i); + if (!set->tags[i]) + goto out_unwind; + } + + return 0; + +out_unwind: + while (--i >= 0) + blk_mq_free_rq_map(set, set->tags[i], i); +out: + return -ENOMEM; +} +EXPORT_SYMBOL(blk_mq_alloc_tag_set); + +void blk_mq_free_tag_set(struct blk_mq_tag_set *set) +{ + int i; + + for (i = 0; i < set->nr_hw_queues; i++) + blk_mq_free_rq_map(set, set->tags[i], i); +} +EXPORT_SYMBOL(blk_mq_free_tag_set); + void blk_mq_disable_hotplug(void) { mutex_lock(&all_q_mutex); diff --git a/block/blk-mq.h b/block/blk-mq.h index 7964dadb7d64..5fa14f19f752 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -1,6 +1,8 @@ #ifndef INT_BLK_MQ_H #define INT_BLK_MQ_H +struct blk_mq_tag_set; + struct blk_mq_ctx { struct { spinlock_t lock; @@ -46,8 +48,7 @@ void blk_mq_disable_hotplug(void); /* * CPU -> queue mappings */ -struct blk_mq_reg; -extern unsigned int *blk_mq_make_queue_map(struct blk_mq_reg *reg); +extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set); extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues); void blk_mq_add_timer(struct request *rq); diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 71df69d90900..8e7e3a0b0d24 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -32,6 +32,7 @@ struct nullb { unsigned int index; struct request_queue *q; struct gendisk *disk; + struct blk_mq_tag_set tag_set; struct hrtimer timer; unsigned int queue_depth; spinlock_t lock; @@ -320,10 +321,11 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq) return BLK_MQ_RQ_QUEUE_OK; } -static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index) +static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_tag_set *set, + 
unsigned int hctx_index) { - int b_size = DIV_ROUND_UP(reg->nr_hw_queues, nr_online_nodes); - int tip = (reg->nr_hw_queues % nr_online_nodes); + int b_size = DIV_ROUND_UP(set->nr_hw_queues, nr_online_nodes); + int tip = (set->nr_hw_queues % nr_online_nodes); int node = 0, i, n; /* @@ -338,7 +340,7 @@ static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned in tip--; if (!tip) - b_size = reg->nr_hw_queues / nr_online_nodes; + b_size = set->nr_hw_queues / nr_online_nodes; } } @@ -387,13 +389,17 @@ static struct blk_mq_ops null_mq_ops = { .map_queue = blk_mq_map_queue, .init_hctx = null_init_hctx, .complete = null_softirq_done_fn, + .alloc_hctx = blk_mq_alloc_single_hw_queue, + .free_hctx = blk_mq_free_single_hw_queue, }; -static struct blk_mq_reg null_mq_reg = { - .ops = &null_mq_ops, - .queue_depth = 64, - .cmd_size = sizeof(struct nullb_cmd), - .flags = BLK_MQ_F_SHOULD_MERGE, +static struct blk_mq_ops null_mq_ops_pernode = { + .queue_rq = null_queue_rq, + .map_queue = blk_mq_map_queue, + .init_hctx = null_init_hctx, + .complete = null_softirq_done_fn, + .alloc_hctx = null_alloc_hctx, + .free_hctx = null_free_hctx, }; static void null_del_dev(struct nullb *nullb) @@ -402,6 +408,8 @@ static void null_del_dev(struct nullb *nullb) del_gendisk(nullb->disk); blk_cleanup_queue(nullb->q); + if (queue_mode == NULL_Q_MQ) + blk_mq_free_tag_set(&nullb->tag_set); put_disk(nullb->disk); kfree(nullb); } @@ -506,7 +514,7 @@ static int null_add_dev(void) nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node); if (!nullb) - return -ENOMEM; + goto out; spin_lock_init(&nullb->lock); @@ -514,49 +522,47 @@ static int null_add_dev(void) submit_queues = nr_online_nodes; if (setup_queues(nullb)) - goto err; + goto out_free_nullb; if (queue_mode == NULL_Q_MQ) { - null_mq_reg.numa_node = home_node; - null_mq_reg.queue_depth = hw_queue_depth; - null_mq_reg.nr_hw_queues = submit_queues; - - if (use_per_node_hctx) { - null_mq_reg.ops->alloc_hctx = null_alloc_hctx; - null_mq_reg.ops->free_hctx = null_free_hctx; - } else { - null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue; - null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue; - } - - nullb->q = blk_mq_init_queue(&null_mq_reg, nullb); + if (use_per_node_hctx) + nullb->tag_set.ops = &null_mq_ops_pernode; + else + nullb->tag_set.ops = &null_mq_ops; + nullb->tag_set.nr_hw_queues = submit_queues; + nullb->tag_set.queue_depth = hw_queue_depth; + nullb->tag_set.numa_node = home_node; + nullb->tag_set.cmd_size = sizeof(struct nullb_cmd); + nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; + nullb->tag_set.driver_data = nullb; + + if (blk_mq_alloc_tag_set(&nullb->tag_set)) + goto out_cleanup_queues; + + nullb->q = blk_mq_init_queue(&nullb->tag_set); + if (!nullb->q) + goto out_cleanup_tags; } else if (queue_mode == NULL_Q_BIO) { nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node); + if (!nullb->q) + goto out_cleanup_queues; blk_queue_make_request(nullb->q, null_queue_bio); init_driver_queues(nullb); } else { nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node); + if (!nullb->q) + goto out_cleanup_queues; blk_queue_prep_rq(nullb->q, null_rq_prep_fn); - if (nullb->q) - blk_queue_softirq_done(nullb->q, null_softirq_done_fn); + blk_queue_softirq_done(nullb->q, null_softirq_done_fn); init_driver_queues(nullb); } - if (!nullb->q) - goto queue_fail; - nullb->q->queuedata = nullb; queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); disk = nullb->disk = alloc_disk_node(1, home_node); - if (!disk) { -queue_fail: - 
blk_cleanup_queue(nullb->q); - cleanup_queues(nullb); -err: - kfree(nullb); - return -ENOMEM; - } + if (!disk) + goto out_cleanup_blk_queue; mutex_lock(&lock); list_add_tail(&nullb->list, &nullb_list); @@ -579,6 +585,18 @@ err: sprintf(disk->disk_name, "nullb%d", nullb->index); add_disk(disk); return 0; + +out_cleanup_blk_queue: + blk_cleanup_queue(nullb->q); +out_cleanup_tags: + if (queue_mode == NULL_Q_MQ) + blk_mq_free_tag_set(&nullb->tag_set); +out_cleanup_queues: + cleanup_queues(nullb); +out_free_nullb: + kfree(nullb); +out: + return -ENOMEM; } static int __init null_init(void) diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index d06206abd340..f909a8821e65 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -30,6 +30,9 @@ struct virtio_blk /* The disk structure for the kernel. */ struct gendisk *disk; + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + /* Process context for config space updates */ struct work_struct config_work; @@ -480,8 +483,9 @@ static const struct device_attribute dev_attr_cache_type_rw = __ATTR(cache_type, S_IRUGO|S_IWUSR, virtblk_cache_type_show, virtblk_cache_type_store); -static int virtblk_init_request(void *data, struct blk_mq_hw_ctx *hctx, - struct request *rq, unsigned int nr) +static int virtblk_init_request(void *data, struct request *rq, + unsigned int hctx_idx, unsigned int request_idx, + unsigned int numa_node) { struct virtio_blk *vblk = data; struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq); @@ -495,18 +499,12 @@ static struct blk_mq_ops virtio_mq_ops = { .map_queue = blk_mq_map_queue, .alloc_hctx = blk_mq_alloc_single_hw_queue, .free_hctx = blk_mq_free_single_hw_queue, - .init_request = virtblk_init_request, .complete = virtblk_request_done, + .init_request = virtblk_init_request, }; -static struct blk_mq_reg virtio_mq_reg = { - .ops = &virtio_mq_ops, - .nr_hw_queues = 1, - .queue_depth = 0, /* Set in virtblk_probe */ - .numa_node = NUMA_NO_NODE, - .flags = BLK_MQ_F_SHOULD_MERGE, -}; -module_param_named(queue_depth, virtio_mq_reg.queue_depth, uint, 0444); +static unsigned int virtblk_queue_depth; +module_param_named(queue_depth, virtblk_queue_depth, uint, 0444); static int virtblk_probe(struct virtio_device *vdev) { @@ -562,20 +560,32 @@ static int virtblk_probe(struct virtio_device *vdev) } /* Default queue sizing is to fill the ring. */ - if (!virtio_mq_reg.queue_depth) { - virtio_mq_reg.queue_depth = vblk->vq->num_free; + if (!virtblk_queue_depth) { + virtblk_queue_depth = vblk->vq->num_free; /* ... 
but without indirect descs, we use 2 descs per req */ if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC)) - virtio_mq_reg.queue_depth /= 2; + virtblk_queue_depth /= 2; } - virtio_mq_reg.cmd_size = + + memset(&vblk->tag_set, 0, sizeof(vblk->tag_set)); + vblk->tag_set.ops = &virtio_mq_ops; + vblk->tag_set.nr_hw_queues = 1; + vblk->tag_set.queue_depth = virtblk_queue_depth; + vblk->tag_set.numa_node = NUMA_NO_NODE; + vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; + vblk->tag_set.cmd_size = sizeof(struct virtblk_req) + sizeof(struct scatterlist) * sg_elems; + vblk->tag_set.driver_data = vblk; - q = vblk->disk->queue = blk_mq_init_queue(&virtio_mq_reg, vblk); + err = blk_mq_alloc_tag_set(&vblk->tag_set); + if (err) + goto out_put_disk; + + q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set); if (!q) { err = -ENOMEM; - goto out_put_disk; + goto out_free_tags; } q->queuedata = vblk; @@ -678,6 +688,8 @@ static int virtblk_probe(struct virtio_device *vdev) out_del_disk: del_gendisk(vblk->disk); blk_cleanup_queue(vblk->disk->queue); +out_free_tags: + blk_mq_free_tag_set(&vblk->tag_set); out_put_disk: put_disk(vblk->disk); out_free_vq: @@ -704,6 +716,8 @@ static void virtblk_remove(struct virtio_device *vdev) del_gendisk(vblk->disk); blk_cleanup_queue(vblk->disk->queue); + blk_mq_free_tag_set(&vblk->tag_set); + /* Stop all the virtqueues. */ vdev->config->reset(vdev); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 29c1a6e83814..a4ea0ce83b07 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -33,8 +33,6 @@ struct blk_mq_hw_ctx { unsigned int nr_ctx_map; unsigned long *ctx_map; - struct request **rqs; - struct list_head page_list; struct blk_mq_tags *tags; unsigned long queued; @@ -42,7 +40,6 @@ struct blk_mq_hw_ctx { #define BLK_MQ_MAX_DISPATCH_ORDER 10 unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER]; - unsigned int queue_depth; unsigned int numa_node; unsigned int cmd_size; /* per-request extra data */ @@ -50,7 +47,7 @@ struct blk_mq_hw_ctx { struct kobject kobj; }; -struct blk_mq_reg { +struct blk_mq_tag_set { struct blk_mq_ops *ops; unsigned int nr_hw_queues; unsigned int queue_depth; @@ -59,18 +56,22 @@ struct blk_mq_reg { int numa_node; unsigned int timeout; unsigned int flags; /* BLK_MQ_F_* */ + void *driver_data; + + struct blk_mq_tags **tags; }; typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *); typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int); -typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_reg *,unsigned int); +typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_tag_set *, + unsigned int); typedef void (free_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int); typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int); typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int); -typedef int (init_request_fn)(void *, struct blk_mq_hw_ctx *, - struct request *, unsigned int); -typedef void (exit_request_fn)(void *, struct blk_mq_hw_ctx *, - struct request *, unsigned int); +typedef int (init_request_fn)(void *, struct request *, unsigned int, + unsigned int, unsigned int); +typedef void (exit_request_fn)(void *, struct request *, unsigned int, + unsigned int); struct blk_mq_ops { /* @@ -127,10 +128,13 @@ enum { BLK_MQ_MAX_DEPTH = 2048, }; -struct request_queue *blk_mq_init_queue(struct blk_mq_reg *, void *); +struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *); int blk_mq_register_disk(struct gendisk *); void blk_mq_unregister_disk(struct 
gendisk *); +int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set); +void blk_mq_free_tag_set(struct blk_mq_tag_set *set); + void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule); void blk_mq_insert_request(struct request *, bool, bool, bool); @@ -139,10 +143,10 @@ void blk_mq_free_request(struct request *rq); bool blk_mq_can_queue(struct blk_mq_hw_ctx *); struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp); struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp); -struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag); +struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag); struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); -struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_reg *, unsigned int); +struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int); void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int); bool blk_mq_end_io_partial(struct request *rq, int error, @@ -173,12 +177,6 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq) return (void *) rq + sizeof(*rq); } -static inline struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx, - unsigned int tag) -{ - return hctx->rqs[tag]; -} - #define queue_for_each_hw_ctx(q, hctx, i) \ for ((i) = 0; (i) < (q)->nr_hw_queues && \ ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++) -- cgit v1.2.3-59-g8ed1b From fd1270d5df6a005e1248e87042159a799cc4b2c9 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 16 Apr 2014 09:23:48 -0600 Subject: blk-mq: don't use preempt_count() to check for right CPU UP or CONFIG_PREEMPT_NONE will return 0, and what we really want to check is whether or not we are on the right CPU. So don't make PREEMPT part of this, just test the CPU in the mask directly. Signed-off-by: Jens Axboe --- block/blk-mq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index 9180052d42cc..1fa01ff0f9bc 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -534,7 +534,7 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) LIST_HEAD(rq_list); int bit, queued; - WARN_ON(!preempt_count()); + WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)); if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state))) return; -- cgit v1.2.3-59-g8ed1b From 6700a678c02e47b6d50c51da2a46ff80efedb8c7 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 16 Apr 2014 10:38:35 -0600 Subject: blk-mq: kill preempt disable/enable in blk_mq_work_fn() blk_mq_work_fn() is always invoked off the bounded workqueues, so it can happily preempt among the queues in that set without causing any issues for blk-mq. 
Signed-off-by: Jens Axboe --- block/blk-mq.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index 1fa01ff0f9bc..b59a8d027dff 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -714,9 +714,7 @@ static void blk_mq_work_fn(struct work_struct *work) hctx = container_of(work, struct blk_mq_hw_ctx, delayed_work.work); - preempt_disable(); __blk_mq_run_hw_queue(hctx); - preempt_enable(); } static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, -- cgit v1.2.3-59-g8ed1b From 63151a449ebaef062ffac5b302206565ff5ef62e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 16 Apr 2014 09:44:52 +0200 Subject: blk-mq: allow drivers to hook into I/O completion Split out the bottom half of blk_mq_end_io so that drivers can perform work when they know a request has been completed, but before it has been freed. This also obsoletes blk_mq_end_io_partial as drivers can now pass any value to blk_update_request directly. Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq.c | 16 ++++++++++------ include/linux/blk-mq.h | 9 ++------- 2 files changed, 12 insertions(+), 13 deletions(-) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index b59a8d027dff..86d66e0e900c 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -294,20 +294,24 @@ void blk_mq_clone_flush_request(struct request *flush_rq, hctx->cmd_size); } -bool blk_mq_end_io_partial(struct request *rq, int error, unsigned int nr_bytes) +inline void __blk_mq_end_io(struct request *rq, int error) { - if (blk_update_request(rq, error, blk_rq_bytes(rq))) - return true; - blk_account_io_done(rq); if (rq->end_io) rq->end_io(rq, error); else blk_mq_free_request(rq); - return false; } -EXPORT_SYMBOL(blk_mq_end_io_partial); +EXPORT_SYMBOL(__blk_mq_end_io); + +void blk_mq_end_io(struct request *rq, int error) +{ + if (blk_update_request(rq, error, blk_rq_bytes(rq))) + BUG(); + __blk_mq_end_io(rq, error); +} +EXPORT_SYMBOL(blk_mq_end_io); static void __blk_mq_complete_request_remote(void *data) { diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index a4ea0ce83b07..a81b474b794f 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -149,13 +149,8 @@ struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_ind struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int); void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int); -bool blk_mq_end_io_partial(struct request *rq, int error, - unsigned int nr_bytes); -static inline void blk_mq_end_io(struct request *rq, int error) -{ - bool done = !blk_mq_end_io_partial(rq, error, blk_rq_bytes(rq)); - BUG_ON(!done); -} +void blk_mq_end_io(struct request *rq, int error); +void __blk_mq_end_io(struct request *rq, int error); void blk_mq_complete_request(struct request *rq); -- cgit v1.2.3-59-g8ed1b From 91b63639c7d59dbf86da5311faff43691dcae832 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 16 Apr 2014 09:44:53 +0200 Subject: blk-mq: bidi support Add two unlikely branches to make sure the resid is initialized correctly for bidi request pairs, and the second request gets properly freed.
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index 86d66e0e900c..963a82109386 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -298,10 +298,13 @@ inline void __blk_mq_end_io(struct request *rq, int error) { blk_account_io_done(rq); - if (rq->end_io) + if (rq->end_io) { rq->end_io(rq, error); - else + } else { + if (unlikely(blk_bidi_rq(rq))) + blk_mq_free_request(rq->next_rq); blk_mq_free_request(rq); + } } EXPORT_SYMBOL(__blk_mq_end_io); @@ -366,6 +369,8 @@ static void blk_mq_start_request(struct request *rq, bool last) trace_block_rq_issue(q, rq); rq->resid_len = blk_rq_bytes(rq); + if (unlikely(blk_bidi_rq(rq))) + rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq); /* * Just mark start time and set the started bit. Due to memory -- cgit v1.2.3-59-g8ed1b From 1b4a325858f695a9b5041313602d34b36f463724 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 16 Apr 2014 09:44:54 +0200 Subject: blk-mq: add async parameter to blk_mq_start_stopped_hw_queues Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq.c | 4 ++-- drivers/block/virtio_blk.c | 4 ++-- include/linux/blk-mq.h | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index 963a82109386..da3808823e44 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -700,7 +700,7 @@ void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx) } EXPORT_SYMBOL(blk_mq_start_hw_queue); -void blk_mq_start_stopped_hw_queues(struct request_queue *q) +void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async) { struct blk_mq_hw_ctx *hctx; int i; @@ -711,7 +711,7 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q) clear_bit(BLK_MQ_S_STOPPED, &hctx->state); preempt_disable(); - blk_mq_run_hw_queue(hctx, true); + blk_mq_run_hw_queue(hctx, async); preempt_enable(); } } diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index f909a8821e65..7a51f065edcd 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -151,7 +151,7 @@ static void virtblk_done(struct virtqueue *vq) /* In case queue is stopped waiting for more buffers. */ if (req_done) - blk_mq_start_stopped_hw_queues(vblk->disk->queue); + blk_mq_start_stopped_hw_queues(vblk->disk->queue, true); } static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req) @@ -762,7 +762,7 @@ static int virtblk_restore(struct virtio_device *vdev) vblk->config_enable = true; ret = init_vq(vdev->priv); if (!ret) - blk_mq_start_stopped_hw_queues(vblk->disk->queue); + blk_mq_start_stopped_hw_queues(vblk->disk->queue, true); return ret; } diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index a81b474b794f..9ecfab96d8c9 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -157,7 +157,7 @@ void blk_mq_complete_request(struct request *rq); void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); void blk_mq_stop_hw_queues(struct request_queue *q); -void blk_mq_start_stopped_hw_queues(struct request_queue *q); +void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); /* * Driver command data is immediately after the request. 
So subtract request -- cgit v1.2.3-59-g8ed1b From 70f4db639c5b2479e08657392cbf3ba3cceea11c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 16 Apr 2014 10:48:08 -0600 Subject: blk-mq: add blk_mq_delay_queue Add a blk-mq equivalent to blk_delay_queue so that the scsi layer can ask to be kicked again after a delay. Signed-off-by: Christoph Hellwig Modified by me to kill the unnecessary preempt disable/enable in the delayed workqueue handler. Signed-off-by: Jens Axboe --- block/blk-core.c | 6 ++++-- block/blk-mq.c | 45 +++++++++++++++++++++++++++++++++++++++------ include/linux/blk-mq.h | 4 +++- 3 files changed, 46 insertions(+), 9 deletions(-) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index ae6227fd07aa..90b6e63b8769 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -251,8 +251,10 @@ void blk_sync_queue(struct request_queue *q) struct blk_mq_hw_ctx *hctx; int i; - queue_for_each_hw_ctx(q, hctx, i) - cancel_delayed_work_sync(&hctx->delayed_work); + queue_for_each_hw_ctx(q, hctx, i) { + cancel_delayed_work_sync(&hctx->run_work); + cancel_delayed_work_sync(&hctx->delay_work); + } } else { cancel_delayed_work_sync(&q->delay_work); } diff --git a/block/blk-mq.c b/block/blk-mq.c index da3808823e44..0cf52dddfa6b 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -640,7 +640,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) if (!async && cpumask_test_cpu(smp_processor_id(), hctx->cpumask)) __blk_mq_run_hw_queue(hctx); else if (hctx->queue->nr_hw_queues == 1) - kblockd_schedule_delayed_work(&hctx->delayed_work, 0); + kblockd_schedule_delayed_work(&hctx->run_work, 0); else { unsigned int cpu; @@ -651,7 +651,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) * just queue on the first CPU. */ cpu = cpumask_first(hctx->cpumask); - kblockd_schedule_delayed_work_on(cpu, &hctx->delayed_work, 0); + kblockd_schedule_delayed_work_on(cpu, &hctx->run_work, 0); } } @@ -675,7 +675,8 @@ EXPORT_SYMBOL(blk_mq_run_queues); void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx) { - cancel_delayed_work(&hctx->delayed_work); + cancel_delayed_work(&hctx->run_work); + cancel_delayed_work(&hctx->delay_work); set_bit(BLK_MQ_S_STOPPED, &hctx->state); } EXPORT_SYMBOL(blk_mq_stop_hw_queue); @@ -717,15 +718,46 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async) } EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues); -static void blk_mq_work_fn(struct work_struct *work) +static void blk_mq_run_work_fn(struct work_struct *work) { struct blk_mq_hw_ctx *hctx; - hctx = container_of(work, struct blk_mq_hw_ctx, delayed_work.work); + hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work); __blk_mq_run_hw_queue(hctx); } +static void blk_mq_delay_work_fn(struct work_struct *work) +{ + struct blk_mq_hw_ctx *hctx; + + hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work); + + if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state)) + __blk_mq_run_hw_queue(hctx); +} + +void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs) +{ + unsigned long tmo = msecs_to_jiffies(msecs); + + if (hctx->queue->nr_hw_queues == 1) + kblockd_schedule_delayed_work(&hctx->delay_work, tmo); + else { + unsigned int cpu; + + /* + * It'd be great if the workqueue API had a way to pass + * in a mask and had some smarts for more clever placement + * than the first CPU. Or we could round-robin here. For now, + * just queue on the first CPU. 
+ */ + cpu = cpumask_first(hctx->cpumask); + kblockd_schedule_delayed_work_on(cpu, &hctx->delay_work, tmo); + } +} +EXPORT_SYMBOL(blk_mq_delay_queue); + static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, bool at_head) { @@ -1179,7 +1211,8 @@ static int blk_mq_init_hw_queues(struct request_queue *q, if (node == NUMA_NO_NODE) node = hctx->numa_node = set->numa_node; - INIT_DELAYED_WORK(&hctx->delayed_work, blk_mq_work_fn); + INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn); + INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn); spin_lock_init(&hctx->lock); INIT_LIST_HEAD(&hctx->dispatch); hctx->queue = q; diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 9ecfab96d8c9..ae868e77bc2f 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -18,7 +18,8 @@ struct blk_mq_hw_ctx { } ____cacheline_aligned_in_smp; unsigned long state; /* BLK_MQ_S_* flags */ - struct delayed_work delayed_work; + struct delayed_work run_work; + struct delayed_work delay_work; cpumask_var_t cpumask; unsigned long flags; /* BLK_MQ_F_* flags */ @@ -158,6 +159,7 @@ void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); void blk_mq_stop_hw_queues(struct request_queue *q); void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); +void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); /* * Driver command data is immediately after the request. So subtract request -- cgit v1.2.3-59-g8ed1b From 2f268556567ebeb3538f99b9bdad177581439dcb Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 16 Apr 2014 09:44:56 +0200 Subject: blk-mq: add blk_mq_start_hw_queues Add a helper to unconditionally kick contexts of a queue. This will be needed by the SCSI layer to provide fair queueing between multiple devices on a single host. Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq.c | 11 +++++++++++ include/linux/blk-mq.h | 1 + 2 files changed, 12 insertions(+) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index 0cf52dddfa6b..543bbc08a261 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -701,6 +701,17 @@ void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx) } EXPORT_SYMBOL(blk_mq_start_hw_queue); +void blk_mq_start_hw_queues(struct request_queue *q) +{ + struct blk_mq_hw_ctx *hctx; + int i; + + queue_for_each_hw_ctx(q, hctx, i) + blk_mq_start_hw_queue(hctx); +} +EXPORT_SYMBOL(blk_mq_start_hw_queues); + + void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async) { struct blk_mq_hw_ctx *hctx; diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index ae868e77bc2f..391377e53367 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -158,6 +158,7 @@ void blk_mq_complete_request(struct request *rq); void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); void blk_mq_stop_hw_queues(struct request_queue *q); +void blk_mq_start_hw_queues(struct request_queue *q); void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); -- cgit v1.2.3-59-g8ed1b From ed0791b2f83cec4e77d88c4e9baabcebf9254a78 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 16 Apr 2014 09:44:57 +0200 Subject: blk-mq: add blk_mq_requeue_request This allows to requeue a request that has been accepted by ->queue_rq earlier. 
This is needed by the SCSI layer in various error conditions. The existing internal blk_mq_requeue_request is renamed to __blk_mq_requeue_request as it is a lower level building block for this functionality. Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq.c | 18 ++++++++++++++++-- include/linux/blk-mq.h | 2 ++ 2 files changed, 18 insertions(+), 2 deletions(-) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index 543bbc08a261..ee225cc312b8 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -400,7 +400,7 @@ static void blk_mq_start_request(struct request *rq, bool last) rq->cmd_flags |= REQ_END; } -static void blk_mq_requeue_request(struct request *rq) +static void __blk_mq_requeue_request(struct request *rq) { struct request_queue *q = rq->q; @@ -413,6 +413,20 @@ static void blk_mq_requeue_request(struct request *rq) rq->nr_phys_segments--; } +void blk_mq_requeue_request(struct request *rq) +{ + struct request_queue *q = rq->q; + + __blk_mq_requeue_request(rq); + blk_clear_rq_complete(rq); + + trace_block_rq_requeue(q, rq); + + BUG_ON(blk_queued_rq(rq)); + blk_mq_insert_request(rq, true, true, false); +} +EXPORT_SYMBOL(blk_mq_requeue_request); + struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag) { return tags->rqs[tag]; @@ -602,7 +616,7 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) * time */ list_add(&rq->queuelist, &rq_list); - blk_mq_requeue_request(rq); + __blk_mq_requeue_request(rq); break; default: pr_err("blk-mq: bad return on queue: %d\n", ret); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 391377e53367..ab469d525894 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -153,6 +153,8 @@ void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int); void blk_mq_end_io(struct request *rq, int error); void __blk_mq_end_io(struct request *rq, int error); +void blk_mq_requeue_request(struct request *rq); + void blk_mq_complete_request(struct request *rq); void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); -- cgit v1.2.3-59-g8ed1b From f88a164b72bd51fe4c89e06ac9939f2afe39c7ed Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 16 Apr 2014 09:44:58 +0200 Subject: blk-mq: rename mq_flush_work struct request member We will use this work_struct to requeue SCSI commands from the completion handler as well, so give it a more generic name.
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-flush.c | 6 +++--- include/linux/blkdev.h | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'block') diff --git a/block/blk-flush.c b/block/blk-flush.c index c41fc19f75d1..ec7a224d6733 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -134,7 +134,7 @@ static void mq_flush_run(struct work_struct *work) { struct request *rq; - rq = container_of(work, struct request, mq_flush_work); + rq = container_of(work, struct request, requeue_work); memset(&rq->csd, 0, sizeof(rq->csd)); blk_mq_insert_request(rq, false, true, false); @@ -143,8 +143,8 @@ static bool blk_flush_queue_rq(struct request *rq, bool add_front) { if (rq->q->mq_ops) { - INIT_WORK(&rq->mq_flush_work, mq_flush_run); - kblockd_schedule_work(&rq->mq_flush_work); + INIT_WORK(&rq->requeue_work, mq_flush_run); + kblockd_schedule_work(&rq->requeue_work); return false; } else { if (add_front) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 95bb551273ab..71288083a46f 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -98,7 +98,7 @@ struct request { struct list_head queuelist; union { struct call_single_data csd; - struct work_struct mq_flush_work; + struct work_struct requeue_work; unsigned long fifo_time; }; -- cgit v1.2.3-59-g8ed1b From 12120077b2612a243d158605640cd39266906667 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 16 Apr 2014 09:44:59 +0200 Subject: block: export blk_finish_request This allows mirroring the blk-mq code flow for a more readable I/O completion handler in SCSI. Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-core.c | 3 ++- include/linux/blkdev.h | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index 90b6e63b8769..c4269701cb4f 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -2497,7 +2497,7 @@ EXPORT_SYMBOL_GPL(blk_unprep_request); /* * queue lock must be held */ -static void blk_finish_request(struct request *req, int error) +void blk_finish_request(struct request *req, int error) { if (blk_rq_tagged(req)) blk_queue_end_tag(req->q, req); @@ -2523,6 +2523,7 @@ static void blk_finish_request(struct request *req, int error) __blk_put_request(req->q, req); } } +EXPORT_SYMBOL(blk_finish_request); /** * blk_end_bidi_request - Complete a bidi request diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 71288083a46f..20b26d4e53a2 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -936,6 +936,7 @@ extern struct request *blk_fetch_request(struct request_queue *q); */ extern bool blk_update_request(struct request *rq, int error, unsigned int nr_bytes); +extern void blk_finish_request(struct request *rq, int error); extern bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes); extern void blk_end_request_all(struct request *rq, int error); -- cgit v1.2.3-59-g8ed1b From f793aa53786668c9e0db5900f70f560e99d62fa0 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 16 Apr 2014 11:36:54 -0600 Subject: block: relax when to modify the timeout timer Since we are now, by default, applying timer slack to expiry times, the logic for when to modify a timer in the block code is suboptimal. The block layer keeps a forward rolling timer per queue for all requests, and modifies this timer if a request has a shorter timeout than what the current expiry time is.
However, this breaks down when our rounded timer values get applied slack. Then each new request ends up modifying the timer, since we're still a little in front of the timer + slack. Fix this by allowing a tolerance of HZ / 2, the timeout handling doesn't need to be very precise. This drastically cuts down the number of timer modifications we have to make. Signed-off-by: Jens Axboe --- block/blk-timeout.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) (limited to 'block') diff --git a/block/blk-timeout.c b/block/blk-timeout.c index d96f7061c6fd..a09e8af8186c 100644 --- a/block/blk-timeout.c +++ b/block/blk-timeout.c @@ -199,8 +199,19 @@ void __blk_add_timer(struct request *req, struct list_head *timeout_list) expiry = round_jiffies_up(req->deadline); if (!timer_pending(&q->timeout) || - time_before(expiry, q->timeout.expires)) - mod_timer(&q->timeout, expiry); + time_before(expiry, q->timeout.expires)) { + unsigned long diff = q->timeout.expires - expiry; + + /* + * Due to added timer slack to group timers, the timer + * will often be a little in front of what we asked for. + * So apply some tolerance here too, otherwise we keep + * modifying the timer because expires for value X + * will be X + something. + */ + if (diff >= HZ / 2) + mod_timer(&q->timeout, expiry); + } } -- cgit v1.2.3-59-g8ed1b From 11471e0d04f3762b9216d8421ce6e9d89b0bf450 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Sat, 19 Apr 2014 18:00:16 +0800 Subject: blk-mq: free hctx->ctx_map when init failed Avoid memory leak in the failure path. Reviewed-by: Christoph Hellwig Signed-off-by: Ming Lei Signed-off-by: Jens Axboe --- block/blk-mq.c | 1 + 1 file changed, 1 insertion(+) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index ee225cc312b8..5fbbb221d499 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1289,6 +1289,7 @@ static int blk_mq_init_hw_queues(struct request_queue *q, blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); kfree(hctx->ctxs); + kfree(hctx->ctx_map); } return 1; -- cgit v1.2.3-59-g8ed1b From 4847900532c2dc0fc77b67f745c20e636d5b3125 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Sat, 19 Apr 2014 18:00:17 +0800 Subject: blk-mq: fix allocation of set->tags type of set->tags is struct blk_mq_tags **. 
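As a quick illustration of the bug class this fix addresses (an editor's sketch in plain C, not kernel code; the names are invented): an array that stores pointers must be sized by the pointer type, not by the structure it points to, otherwise the allocation is sized wrongly.

#include <stdlib.h>

struct tags { unsigned int nr_tags; /* ... */ };

static struct tags **alloc_tag_array(unsigned int nr_hw_queues)
{
	/* Wrong: sizes each element as the whole struct rather than a pointer. */
	/* return malloc(nr_hw_queues * sizeof(struct tags)); */

	/* Right: the array stores pointers, so size it by the pointer type. */
	return malloc(nr_hw_queues * sizeof(struct tags *));
}
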
Reviewed-by: Christoph Hellwig Signed-off-by: Ming Lei Signed-off-by: Jens Axboe --- block/blk-mq.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index 5fbbb221d499..c2777970f28b 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1537,7 +1537,8 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) return -EINVAL; - set->tags = kmalloc_node(set->nr_hw_queues * sizeof(struct blk_mq_tags), + set->tags = kmalloc_node(set->nr_hw_queues * + sizeof(struct blk_mq_tags *), GFP_KERNEL, set->numa_node); if (!set->tags) goto out; -- cgit v1.2.3-59-g8ed1b From 4ca085009f447abc1c818f8802e908e9afdd77fa Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Sat, 19 Apr 2014 18:00:18 +0800 Subject: blk-mq: use (1 << order) to implement order_to_size() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Cc: Jörg-Volker Peetz Cc: Max Filippov Signed-off-by: Ming Lei Signed-off-by: Jens Axboe --- block/blk-mq.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index c2777970f28b..3a560a4db0b4 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1130,12 +1130,7 @@ static void blk_mq_free_rq_map(struct blk_mq_tag_set *set, static size_t order_to_size(unsigned int order) { - size_t ret = PAGE_SIZE; - - while (order--) - ret *= 2; - - return ret; + return (size_t)PAGE_SIZE << order; } static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set, -- cgit v1.2.3-59-g8ed1b From 6a3c8a3ac0e68dcfc2a01f4aa1ca0edd1a1701eb Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Sat, 19 Apr 2014 18:00:19 +0800 Subject: blk-mq: initialize req->q in allocation The patch basically reverts the patch of (blk-mq: initialize request on allocation) in Jens's tree (already in -next), and only initializes req->q in allocation for two reasons: - presumed cache hotness on completion - blk_rq_tagged(rq) depends on reset of req->mq_ctx Signed-off-by: Ming Lei Signed-off-by: Jens Axboe --- block/blk-mq.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index 3a560a4db0b4..c26b3be1893c 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -82,7 +82,6 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx, tag = blk_mq_get_tag(hctx->tags, gfp, reserved); if (tag != BLK_MQ_TAG_FAIL) { rq = hctx->tags->rqs[tag]; - blk_rq_init(hctx->queue, rq); rq->tag = tag; return rq; @@ -187,6 +186,7 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx, if (blk_queue_io_stat(q)) rw_flags |= REQ_IO_STAT; + rq->q = q; rq->mq_ctx = ctx; rq->cmd_flags = rw_flags; rq->start_time = jiffies; @@ -258,6 +258,7 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx, const int tag = rq->tag; struct request_queue *q = rq->q; + blk_rq_init(hctx->queue, rq); blk_mq_put_tag(hctx->tags, tag); blk_mq_queue_exit(q); } @@ -1194,6 +1195,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set, left -= to_do * rq_size; for (j = 0; j < to_do; j++) { tags->rqs[i] = p; + blk_rq_init(NULL, tags->rqs[i]); if (set->ops->init_request) { if (set->ops->init_request(set->driver_data, tags->rqs[i], hctx_idx, i, -- cgit v1.2.3-59-g8ed1b From 8876e140ec7b9c57ab5a5dc39b7bb9815f96e3f5 Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Thu, 17 Apr 2014 21:41:16 +0200 Subject: block/blk-throttle.c: add static to blk_throtl_dispatch_work_fn
blk_throtl_dispatch_work_fn is only used in blk-throttle.c Cc: Jens Axboe Cc: Andrew Morton Signed-off-by: Fabian Frederick Signed-off-by: Jens Axboe --- block/blk-throttle.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'block') diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 033745cd7fba..680a0cc116dd 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -1258,7 +1258,7 @@ out_unlock: * of throtl_data->service_queue. Those bio's are ready and issued by this * function. */ -void blk_throtl_dispatch_work_fn(struct work_struct *work) +static void blk_throtl_dispatch_work_fn(struct work_struct *work) { struct throtl_data *td = container_of(work, struct throtl_data, dispatch_work); -- cgit v1.2.3-59-g8ed1b From 981bd189f80f34c07ac0d87e4760d09bcda5f4cb Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Thu, 24 Apr 2014 00:07:34 +0800 Subject: blk-mq: fix leak of set->tags set->tags should be freed in blk_mq_free_tag_set(). Signed-off-by: Ming Lei Signed-off-by: Jens Axboe --- block/blk-mq.c | 1 + 1 file changed, 1 insertion(+) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index c26b3be1893c..cea1bd87a416 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1562,6 +1562,7 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set) for (i = 0; i < set->nr_hw_queues; i++) blk_mq_free_rq_map(set, set->tags[i], i); + kfree(set->tags); } EXPORT_SYMBOL(blk_mq_free_tag_set); -- cgit v1.2.3-59-g8ed1b From 70ab0b2d51f84fc7d9eb6ed81c3986595efaa33d Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 24 Apr 2014 08:50:38 -0600 Subject: Revert "blk-mq: initialize req->q in allocation" This reverts commit 6a3c8a3ac0e68dcfc2a01f4aa1ca0edd1a1701eb. We need selective clearing of the request to make the init-at-free time completely safe. Otherwise we end up stomping on rq->atomic_flags, which we don't want to do. --- block/blk-mq.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index cea1bd87a416..7d5650d75aef 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -82,6 +82,7 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx, tag = blk_mq_get_tag(hctx->tags, gfp, reserved); if (tag != BLK_MQ_TAG_FAIL) { rq = hctx->tags->rqs[tag]; + blk_rq_init(hctx->queue, rq); rq->tag = tag; return rq; @@ -186,7 +187,6 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx, if (blk_queue_io_stat(q)) rw_flags |= REQ_IO_STAT; - rq->q = q; rq->mq_ctx = ctx; rq->cmd_flags = rw_flags; rq->start_time = jiffies; @@ -258,7 +258,6 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx, const int tag = rq->tag; struct request_queue *q = rq->q; - blk_rq_init(hctx->queue, rq); blk_mq_put_tag(hctx->tags, tag); blk_mq_queue_exit(q); } @@ -1195,7 +1194,6 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set, left -= to_do * rq_size; for (j = 0; j < to_do; j++) { tags->rqs[i] = p; - blk_rq_init(NULL, tags->rqs[i]); if (set->ops->init_request) { if (set->ops->init_request(set->driver_data, tags->rqs[i], hctx_idx, i, -- cgit v1.2.3-59-g8ed1b From 87ee7b112193bd081ba1a171fa5f6f39c429ef56 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 24 Apr 2014 08:51:47 -0600 Subject: blk-mq: fix race with timeouts and requeue events If a requeue event races with a timeout, we can get into the situation where we attempt to complete a request from the timeout handler when it's not started anymore. This causes a crash.
So have the timeout handler check that REQ_ATOM_STARTED is still set on the request - if not, we ignore the event. If this happens, the request has now been marked as complete. As a consequence, we need to ensure to clear REQ_ATOM_COMPLETE in blk_mq_start_request(), as to maintain proper request state. Signed-off-by: Jens Axboe --- block/blk-mq.c | 39 ++++++++++++++++++++++++++++++++------- block/blk-mq.h | 2 -- block/blk-timeout.c | 16 +++++++++------- block/blk.h | 3 +-- 4 files changed, 42 insertions(+), 18 deletions(-) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index 7d5650d75aef..a84112c94e74 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -378,7 +378,15 @@ static void blk_mq_start_request(struct request *rq, bool last) * REQ_ATOMIC_STARTED is seen. */ rq->deadline = jiffies + q->rq_timeout; + + /* + * Mark us as started and clear complete. Complete might have been + * set if requeue raced with timeout, which then marked it as + * complete. So be sure to clear complete again when we start + * the request, otherwise we'll ignore the completion event. + */ set_bit(REQ_ATOM_STARTED, &rq->atomic_flags); + clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags); if (q->dma_drain_size && blk_rq_bytes(rq)) { /* @@ -485,6 +493,28 @@ static void blk_mq_hw_ctx_check_timeout(struct blk_mq_hw_ctx *hctx, blk_mq_tag_busy_iter(hctx->tags, blk_mq_timeout_check, &data); } +static enum blk_eh_timer_return blk_mq_rq_timed_out(struct request *rq) +{ + struct request_queue *q = rq->q; + + /* + * We know that complete is set at this point. If STARTED isn't set + * anymore, then the request isn't active and the "timeout" should + * just be ignored. This can happen due to the bitflag ordering. + * Timeout first checks if STARTED is set, and if it is, assumes + * the request is active. But if we race with completion, then + * we both flags will get cleared. So check here again, and ignore + * a timeout event with a request that isn't active. + */ + if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) + return BLK_EH_NOT_HANDLED; + + if (!q->mq_ops->timeout) + return BLK_EH_RESET_TIMER; + + return q->mq_ops->timeout(rq); +} + static void blk_mq_rq_timer(unsigned long data) { struct request_queue *q = (struct request_queue *) data; @@ -538,11 +568,6 @@ static bool blk_mq_attempt_merge(struct request_queue *q, return false; } -void blk_mq_add_timer(struct request *rq) -{ - __blk_add_timer(rq, NULL); -} - /* * Run this hardware queue, pulling any software queues mapped to it in. * Note that this function currently has various problems around ordering @@ -799,7 +824,7 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, /* * We do this early, to ensure we are on the right CPU. 
*/ - blk_mq_add_timer(rq); + blk_add_timer(rq); } void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue, @@ -1400,7 +1425,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) q->sg_reserved_size = INT_MAX; blk_queue_make_request(q, blk_mq_make_request); - blk_queue_rq_timed_out(q, set->ops->timeout); + blk_queue_rq_timed_out(q, blk_mq_rq_timed_out); if (set->timeout) blk_queue_rq_timeout(q, set->timeout); diff --git a/block/blk-mq.h b/block/blk-mq.h index 5fa14f19f752..b41a784de50d 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -51,6 +51,4 @@ void blk_mq_disable_hotplug(void); extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set); extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues); -void blk_mq_add_timer(struct request *rq); - #endif diff --git a/block/blk-timeout.c b/block/blk-timeout.c index a09e8af8186c..49988a3ca85c 100644 --- a/block/blk-timeout.c +++ b/block/blk-timeout.c @@ -96,11 +96,7 @@ static void blk_rq_timed_out(struct request *req) __blk_complete_request(req); break; case BLK_EH_RESET_TIMER: - if (q->mq_ops) - blk_mq_add_timer(req); - else - blk_add_timer(req); - + blk_add_timer(req); blk_clear_rq_complete(req); break; case BLK_EH_NOT_HANDLED: @@ -170,7 +166,8 @@ void blk_abort_request(struct request *req) } EXPORT_SYMBOL_GPL(blk_abort_request); -void __blk_add_timer(struct request *req, struct list_head *timeout_list) +static void __blk_add_timer(struct request *req, + struct list_head *timeout_list) { struct request_queue *q = req->q; unsigned long expiry; @@ -225,6 +222,11 @@ void __blk_add_timer(struct request *req, struct list_head *timeout_list) */ void blk_add_timer(struct request *req) { - __blk_add_timer(req, &req->q->timeout_list); + struct request_queue *q = req->q; + + if (q->mq_ops) + __blk_add_timer(req, NULL); + else + __blk_add_timer(req, &req->q->timeout_list); } diff --git a/block/blk.h b/block/blk.h index 1d880f1f957f..79be2cbce7fd 100644 --- a/block/blk.h +++ b/block/blk.h @@ -37,9 +37,8 @@ bool __blk_end_bidi_request(struct request *rq, int error, void blk_rq_timed_out_timer(unsigned long data); void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout, unsigned int *next_set); -void __blk_add_timer(struct request *req, struct list_head *timeout_list); +void blk_add_timer(struct request *req); void blk_delete_timer(struct request *); -void blk_add_timer(struct request *); bool bio_attempt_front_merge(struct request_queue *q, struct request *req, -- cgit v1.2.3-59-g8ed1b From 38535201633077cbaf8b32886b5e3005b36c9024 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 25 Apr 2014 02:32:53 -0700 Subject: blk-mq: respect rq_affinity The blk-mq code is using its own version of the I/O completion affinity tunables, which causes a few issues: - the rq_affinity sysfs file doesn't work for blk-mq devices, even if it still is present, thus breaking existing tuning setups. - the rq_affinity = 1 mode, which is the default for legacy request based drivers, isn't implemented at all. - blk-mq drivers don't implement any completion affinity with the default flag settings. This patch removes the blk-mq ipi_redirect flag and sysfs file, as well as the internal BLK_MQ_F_SHOULD_IPI flag, and replaces it with code that respects the queue-wide rq_affinity flags and also implements the rq_affinity = 1 mode. This means I/O completion affinity can now only be tuned block-queue wide instead of per context, which seems more sensible to me anyway.
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq-sysfs.c | 42 ------------------------------------------ block/blk-mq.c | 8 ++++++-- block/blk-mq.h | 1 - include/linux/blk-mq.h | 1 - 4 files changed, 6 insertions(+), 46 deletions(-) (limited to 'block') diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c index 9176a6984857..8145b5b25b4b 100644 --- a/block/blk-mq-sysfs.c +++ b/block/blk-mq-sysfs.c @@ -203,42 +203,6 @@ static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx, return ret; } -static ssize_t blk_mq_hw_sysfs_ipi_show(struct blk_mq_hw_ctx *hctx, char *page) -{ - ssize_t ret; - - spin_lock(&hctx->lock); - ret = sprintf(page, "%u\n", !!(hctx->flags & BLK_MQ_F_SHOULD_IPI)); - spin_unlock(&hctx->lock); - - return ret; -} - -static ssize_t blk_mq_hw_sysfs_ipi_store(struct blk_mq_hw_ctx *hctx, - const char *page, size_t len) -{ - struct blk_mq_ctx *ctx; - unsigned long ret; - unsigned int i; - - if (kstrtoul(page, 10, &ret)) { - pr_err("blk-mq-sysfs: invalid input '%s'\n", page); - return -EINVAL; - } - - spin_lock(&hctx->lock); - if (ret) - hctx->flags |= BLK_MQ_F_SHOULD_IPI; - else - hctx->flags &= ~BLK_MQ_F_SHOULD_IPI; - spin_unlock(&hctx->lock); - - hctx_for_each_ctx(hctx, ctx, i) - ctx->ipi_redirect = !!ret; - - return len; -} - static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page) { return blk_mq_tag_sysfs_show(hctx->tags, page); @@ -307,11 +271,6 @@ static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = { .attr = {.name = "pending", .mode = S_IRUGO }, .show = blk_mq_hw_sysfs_rq_list_show, }; -static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_ipi = { - .attr = {.name = "ipi_redirect", .mode = S_IRUGO | S_IWUSR}, - .show = blk_mq_hw_sysfs_ipi_show, - .store = blk_mq_hw_sysfs_ipi_store, -}; static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = { .attr = {.name = "tags", .mode = S_IRUGO }, .show = blk_mq_hw_sysfs_tags_show, @@ -326,7 +285,6 @@ static struct attribute *default_hw_ctx_attrs[] = { &blk_mq_hw_sysfs_run.attr, &blk_mq_hw_sysfs_dispatched.attr, &blk_mq_hw_sysfs_pending.attr, - &blk_mq_hw_sysfs_ipi.attr, &blk_mq_hw_sysfs_tags.attr, &blk_mq_hw_sysfs_cpus.attr, NULL, diff --git a/block/blk-mq.c b/block/blk-mq.c index a84112c94e74..f2e92eb92803 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -326,15 +326,19 @@ static void __blk_mq_complete_request_remote(void *data) void __blk_mq_complete_request(struct request *rq) { struct blk_mq_ctx *ctx = rq->mq_ctx; + bool shared = false; int cpu; - if (!ctx->ipi_redirect) { + if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) { rq->q->softirq_done_fn(rq); return; } cpu = get_cpu(); - if (cpu != ctx->cpu && cpu_online(ctx->cpu)) { + if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags)) + shared = cpus_share_cache(cpu, ctx->cpu); + + if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) { rq->csd.func = __blk_mq_complete_request_remote; rq->csd.info = rq; rq->csd.flags = 0; diff --git a/block/blk-mq.h b/block/blk-mq.h index b41a784de50d..1ae364ceaf8b 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -11,7 +11,6 @@ struct blk_mq_ctx { unsigned int cpu; unsigned int index_hw; - unsigned int ipi_redirect; /* incremented at dispatch time */ unsigned long rq_dispatched[2]; diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index ab469d525894..3b561d651a02 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -122,7 +122,6 @@ enum { BLK_MQ_F_SHOULD_MERGE = 1 << 0, BLK_MQ_F_SHOULD_SORT = 1 << 1, - 
BLK_MQ_F_SHOULD_IPI = 1 << 2, BLK_MQ_S_STOPPED = 0, -- cgit v1.2.3-59-g8ed1b From c4a634f432376a28c9639b35746e33168ab97709 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 25 Apr 2014 14:14:48 +0200 Subject: block: fold __blk_add_timer into blk_add_timer Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-timeout.c | 34 +++++++++++----------------------- 1 file changed, 11 insertions(+), 23 deletions(-) (limited to 'block') diff --git a/block/blk-timeout.c b/block/blk-timeout.c index 49988a3ca85c..448745683d28 100644 --- a/block/blk-timeout.c +++ b/block/blk-timeout.c @@ -166,8 +166,15 @@ void blk_abort_request(struct request *req) } EXPORT_SYMBOL_GPL(blk_abort_request); -static void __blk_add_timer(struct request *req, - struct list_head *timeout_list) +/** + * blk_add_timer - Start timeout timer for a single request + * @req: request that is about to start running. + * + * Notes: + * Each request has its own timer, and as it is added to the queue, we + * set up the timer. When the request completes, we cancel the timer. + */ +void blk_add_timer(struct request *req) { struct request_queue *q = req->q; unsigned long expiry; @@ -185,8 +192,8 @@ static void __blk_add_timer(struct request *req, req->timeout = q->rq_timeout; req->deadline = jiffies + req->timeout; - if (timeout_list) - list_add_tail(&req->timeout_list, timeout_list); + if (!q->mq_ops) + list_add_tail(&req->timeout_list, &req->q->timeout_list); /* * If the timer isn't already pending or this timeout is earlier @@ -211,22 +218,3 @@ static void __blk_add_timer(struct request *req, } } - -/** - * blk_add_timer - Start timeout timer for a single request - * @req: request that is about to start running. - * - * Notes: - * Each request has its own timer, and as it is added to the queue, we - * set up the timer. When the request completes, we cancel the timer. - */ -void blk_add_timer(struct request *req) -{ - struct request_queue *q = req->q; - - if (q->mq_ops) - __blk_add_timer(req, NULL); - else - __blk_add_timer(req, &req->q->timeout_list); -} - -- cgit v1.2.3-59-g8ed1b From 5810d903fa3459e703ce82a1d45136813c6afad8 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 29 Apr 2014 20:49:48 -0600 Subject: blk-mq: fix waiting for reserved tags blk_mq_wait_for_tags() is only able to wait for "normal" tags, not reserved tags. Pass in which one we should attempt to get a tag for, so that waiting for reserved tags will work. Reserved tags are used for internal commands, which are usually serialized. Hence no waiting generally takes place, but we should ensure that it actually works if users need that functionality. 
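As a hedged sketch of why this matters to drivers, an internal-command path built on a reserved tag can now simply block until one is available. Only blk_mq_alloc_reserved_request() and blk_mq_free_request() are real block-layer calls here; the mydrv_* names and struct are hypothetical stand-ins for driver code:

static int mydrv_send_internal_cmd(struct mydrv_host *host)
{
    struct request *rq;

    /*
     * GFP_KERNEL implies __GFP_WAIT, so with this fix the allocation
     * may sleep until a reserved tag is freed, instead of waiting on
     * the normal tag space by mistake.
     */
    rq = blk_mq_alloc_reserved_request(host->queue, WRITE, GFP_KERNEL);
    if (!rq)
        return -EBUSY;

    mydrv_setup_internal_cmd(rq);   /* hypothetical: fill in the command */
    mydrv_issue_and_wait(rq);       /* hypothetical: dispatch and wait */

    blk_mq_free_request(rq);
    return 0;
}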
Signed-off-by: Jens Axboe --- block/blk-mq-tag.c | 4 ++-- block/blk-mq-tag.h | 2 +- block/blk-mq.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'block') diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 7a799c46c32d..1f43d6ee956f 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -6,9 +6,9 @@ #include "blk-mq.h" #include "blk-mq-tag.h" -void blk_mq_wait_for_tags(struct blk_mq_tags *tags) +void blk_mq_wait_for_tags(struct blk_mq_tags *tags, bool reserved) { - int tag = blk_mq_get_tag(tags, __GFP_WAIT, false); + int tag = blk_mq_get_tag(tags, __GFP_WAIT, reserved); blk_mq_put_tag(tags, tag); } diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index b602e3fa66ea..c8e0645ea331 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -24,7 +24,7 @@ extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int r extern void blk_mq_free_tags(struct blk_mq_tags *tags); extern unsigned int blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp, bool reserved); -extern void blk_mq_wait_for_tags(struct blk_mq_tags *tags); +extern void blk_mq_wait_for_tags(struct blk_mq_tags *tags, bool reserved); extern void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag); extern void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, void (*fn)(void *data, unsigned long *), void *data); extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags); diff --git a/block/blk-mq.c b/block/blk-mq.c index f2e92eb92803..0aee32568210 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -218,7 +218,7 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q, break; } - blk_mq_wait_for_tags(hctx->tags); + blk_mq_wait_for_tags(hctx->tags, reserved); } while (1); return rq; -- cgit v1.2.3-59-g8ed1b From 98bc1f272aba620d4222120853011d0ef026cf56 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 30 Apr 2014 13:43:08 -0600 Subject: blk-mq remove debug BUG_ON() when draining software queues It's never been of any use, lets get rid of it. Signed-off-by: Jens Axboe --- block/blk-mq.c | 1 - 1 file changed, 1 deletion(-) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index 0aee32568210..77308a84dfb2 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -599,7 +599,6 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) for_each_set_bit(bit, hctx->ctx_map, hctx->nr_ctx) { clear_bit(bit, hctx->ctx_map); ctx = hctx->ctxs[bit]; - BUG_ON(bit != ctx->index_hw); spin_lock(&ctx->lock); list_splice_tail_init(&ctx->rq_list, &rq_list); -- cgit v1.2.3-59-g8ed1b From c6d600c65ebfa10a2a10d3e9183a24527ebe2aa4 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 30 Apr 2014 13:43:56 -0600 Subject: blk-mq: refactor request insertion/merging Refactor the logic around adding a new bio to a software queue, so we nest the ctx->lock where we really need it (merge and insertion) and don't hold it when we don't (init and IO start accounting). 
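Purely as an illustration of the new lock nesting (a goto-free restatement of the hunk below, not extra code added by the patch), the tail of blk_mq_make_request() now behaves roughly like this:

    if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE)) {
        init_request_from_bio(rq, bio);         /* ctx->lock not held */
        spin_lock(&ctx->lock);
        __blk_mq_insert_request(hctx, rq, false);
        spin_unlock(&ctx->lock);
        blk_account_io_start(rq, 1);            /* ctx->lock not held */
    } else {
        spin_lock(&ctx->lock);
        if (blk_mq_attempt_merge(q, ctx, bio)) {
            spin_unlock(&ctx->lock);
            __blk_mq_free_request(hctx, ctx, rq);
        } else {
            init_request_from_bio(rq, bio);
            __blk_mq_insert_request(hctx, rq, false);
            spin_unlock(&ctx->lock);
            blk_account_io_start(rq, 1);
        }
    }

Only the merge attempt and the software-queue insertion remain under ctx->lock; request initialization and IO start accounting are done outside it.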
Signed-off-by: Jens Axboe --- block/blk-mq.c | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index 77308a84dfb2..ca51ee4aa485 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1037,17 +1037,25 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio) } } - spin_lock(&ctx->lock); + if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE)) { + init_request_from_bio(rq, bio); - if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) && - blk_mq_attempt_merge(q, ctx, bio)) - __blk_mq_free_request(hctx, ctx, rq); - else { - blk_mq_bio_to_request(rq, bio); + spin_lock(&ctx->lock); +insert_rq: __blk_mq_insert_request(hctx, rq, false); + spin_unlock(&ctx->lock); + blk_account_io_start(rq, 1); + } else { + spin_lock(&ctx->lock); + if (!blk_mq_attempt_merge(q, ctx, bio)) { + init_request_from_bio(rq, bio); + goto insert_rq; + } + + spin_unlock(&ctx->lock); + __blk_mq_free_request(hctx, ctx, rq); } - spin_unlock(&ctx->lock); /* * For a SYNC request, send it to the hardware immediately. For an -- cgit v1.2.3-59-g8ed1b From 176167ad9e44df221454e65e2ea83c3079950073 Mon Sep 17 00:00:00 2001 From: Masanari Iida Date: Mon, 28 Apr 2014 12:38:34 +0900 Subject: block: Fix format string mismatch in cfq-iosched.c Fix format string mismatch in cfq_var_show() Signed-off-by: Masanari Iida Signed-off-by: Jens Axboe --- block/cfq-iosched.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'block') diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 5063a0bd831a..22dffebc7c73 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -4460,7 +4460,7 @@ out_free: static ssize_t cfq_var_show(unsigned int var, char *page) { - return sprintf(page, "%d\n", var); + return sprintf(page, "%u\n", var); } static ssize_t -- cgit v1.2.3-59-g8ed1b From 74814b1c5569f5503727cf3052a52e2349818409 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 2 May 2014 11:24:48 -0600 Subject: blk-mq: remove extra requeue trace We already issue a blktrace requeue event in __blk_mq_requeue_request(), don't do it from the original caller as well. Signed-off-by: Jens Axboe --- block/blk-mq.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index ca51ee4aa485..0d379830a278 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -427,13 +427,9 @@ static void __blk_mq_requeue_request(struct request *rq) void blk_mq_requeue_request(struct request *rq) { - struct request_queue *q = rq->q; - __blk_mq_requeue_request(rq); blk_clear_rq_complete(rq); - trace_block_rq_requeue(q, rq); - BUG_ON(blk_queued_rq(rq)); blk_mq_insert_request(rq, true, true, false); } -- cgit v1.2.3-59-g8ed1b From 5214e33c8efee16502b8a0549a3c24e21af458f7 Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Fri, 2 May 2014 18:21:45 +0200 Subject: block/blk-iopoll.c: use iop instead of iopoll All blk_iopoll functions use iop for parent iopoll structure except blk_iopoll_complete.This also fixes one kernel-doc warning. Cc: Jens Axboe Cc: Andrew Morton Signed-off-by: Fabian Frederick Signed-off-by: Jens Axboe --- block/blk-iopoll.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'block') diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c index c11d24e379e2..d828b44a404b 100644 --- a/block/blk-iopoll.c +++ b/block/blk-iopoll.c @@ -64,12 +64,12 @@ EXPORT_SYMBOL(__blk_iopoll_complete); * iopoll handler will not be invoked again before blk_iopoll_sched_prep() * is called. 
**/ -void blk_iopoll_complete(struct blk_iopoll *iopoll) +void blk_iopoll_complete(struct blk_iopoll *iop) { unsigned long flags; local_irq_save(flags); - __blk_iopoll_complete(iopoll); + __blk_iopoll_complete(iop); local_irq_restore(flags); } EXPORT_SYMBOL(blk_iopoll_complete); -- cgit v1.2.3-59-g8ed1b From 5cf8c2277576fcc48966b105bb42782d7929fc48 Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Fri, 2 May 2014 18:28:17 +0200 Subject: block/blk-throttle.c: fix return of 0/1 with return type bool Fix 4 coccinelle warnings. Cc: Jens Axboe Cc: Andrew Morton Signed-off-by: Fabian Frederick Signed-off-by: Jens Axboe --- block/blk-throttle.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'block') diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 680a0cc116dd..9353b4683359 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -744,7 +744,7 @@ static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw, static bool throtl_slice_used(struct throtl_grp *tg, bool rw) { if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw])) - return 0; + return false; return 1; } @@ -842,7 +842,7 @@ static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio, if (tg->io_disp[rw] + 1 <= io_allowed) { if (wait) *wait = 0; - return 1; + return true; } /* Calc approx time to dispatch */ @@ -880,7 +880,7 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio, if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) { if (wait) *wait = 0; - return 1; + return true; } /* Calc approx time to dispatch */ @@ -923,7 +923,7 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio, if (tg->bps[rw] == -1 && tg->iops[rw] == -1) { if (wait) *wait = 0; - return 1; + return true; } /* -- cgit v1.2.3-59-g8ed1b From 506e931f92defdc60c1dc4aa2ff4a19a5dcd8618 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 7 May 2014 10:26:44 -0600 Subject: blk-mq: add basic round-robin of what CPU to queue workqueue work on Right now we just pick the first CPU in the mask, but that can easily overload that one. Add some basic batching and round-robin all the entries in the mask instead. Signed-off-by: Jens Axboe --- block/blk-mq.c | 45 +++++++++++++++++++++++++++++++-------------- include/linux/blk-mq.h | 4 ++++ 2 files changed, 35 insertions(+), 14 deletions(-) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index 0d379830a278..2410e0cb7aef 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -670,6 +670,30 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) } } +/* + * It'd be great if the workqueue API had a way to pass + * in a mask and had some smarts for more clever placement. + * For now we just round-robin here, switching for every + * BLK_MQ_CPU_WORK_BATCH queued items. 
+ */ +static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx) +{ + int cpu = hctx->next_cpu; + + if (--hctx->next_cpu_batch <= 0) { + int next_cpu; + + next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask); + if (next_cpu >= nr_cpu_ids) + next_cpu = cpumask_first(hctx->cpumask); + + hctx->next_cpu = next_cpu; + hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; + } + + return cpu; +} + void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) { if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state))) @@ -682,13 +706,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) else { unsigned int cpu; - /* - * It'd be great if the workqueue API had a way to pass - * in a mask and had some smarts for more clever placement - * than the first CPU. Or we could round-robin here. For now, - * just queue on the first CPU. - */ - cpu = cpumask_first(hctx->cpumask); + cpu = blk_mq_hctx_next_cpu(hctx); kblockd_schedule_delayed_work_on(cpu, &hctx->run_work, 0); } } @@ -795,13 +813,7 @@ void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs) else { unsigned int cpu; - /* - * It'd be great if the workqueue API had a way to pass - * in a mask and had some smarts for more clever placement - * than the first CPU. Or we could round-robin here. For now, - * just queue on the first CPU. - */ - cpu = cpumask_first(hctx->cpumask); + cpu = blk_mq_hctx_next_cpu(hctx); kblockd_schedule_delayed_work_on(cpu, &hctx->delay_work, tmo); } } @@ -1378,6 +1390,11 @@ static void blk_mq_map_swqueue(struct request_queue *q) ctx->index_hw = hctx->nr_ctx; hctx->ctxs[hctx->nr_ctx++] = ctx; } + + queue_for_each_hw_ctx(q, hctx, i) { + hctx->next_cpu = cpumask_first(hctx->cpumask); + hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; + } } struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 3b561d651a02..5bd677e2dcb7 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -21,6 +21,8 @@ struct blk_mq_hw_ctx { struct delayed_work run_work; struct delayed_work delay_work; cpumask_var_t cpumask; + int next_cpu; + int next_cpu_batch; unsigned long flags; /* BLK_MQ_F_* flags */ @@ -126,6 +128,8 @@ enum { BLK_MQ_S_STOPPED = 0, BLK_MQ_MAX_DEPTH = 2048, + + BLK_MQ_CPU_WORK_BATCH = 8, }; struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *); -- cgit v1.2.3-59-g8ed1b From 9fccfed8f0cad9b79575a87c45d6f5f6ee05bb66 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 8 May 2014 14:50:19 -0600 Subject: blk-mq: update a hotplug comment for grammar Signed-off-by: Jens Axboe --- block/blk-mq.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index 2410e0cb7aef..3fdb097ebe5e 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1548,10 +1548,10 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb, struct request_queue *q; /* - * Before new mapping is established, hotadded cpu might already start - * handling requests. This doesn't break anything as we map offline - * CPUs to first hardware queue. We will re-init queue below to get - * optimal settings. + * Before new mappings are established, hotadded cpu might already + * start handling requests. This doesn't break anything as we map + * offline CPUs to first hardware queue. We will re-init the queue + * below to get optimal settings. 
*/ if (action != CPU_DEAD && action != CPU_DEAD_FROZEN && action != CPU_ONLINE && action != CPU_ONLINE_FROZEN) -- cgit v1.2.3-59-g8ed1b From af76e555e5e29e08eb8ac1f7878e23dbf0d6741f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 6 May 2014 12:12:45 +0200 Subject: blk-mq: initialize struct request fields individually This allows us to avoid a non-atomic memset over ->atomic_flags as well as killing lots of duplicate initializations. Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq.c | 47 +++++++++++++++++++++++++++++++++++++++++++++-- include/linux/blkdev.h | 7 ++++--- 2 files changed, 49 insertions(+), 5 deletions(-) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index 3fdb097ebe5e..492f49f96459 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -82,9 +82,7 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx, tag = blk_mq_get_tag(hctx->tags, gfp, reserved); if (tag != BLK_MQ_TAG_FAIL) { rq = hctx->tags->rqs[tag]; - blk_rq_init(hctx->queue, rq); rq->tag = tag; - return rq; } @@ -187,10 +185,54 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx, if (blk_queue_io_stat(q)) rw_flags |= REQ_IO_STAT; + INIT_LIST_HEAD(&rq->queuelist); + /* csd/requeue_work/fifo_time is initialized before use */ + rq->q = q; rq->mq_ctx = ctx; rq->cmd_flags = rw_flags; + rq->cmd_type = 0; + /* do not touch atomic flags, it needs atomic ops against the timer */ + rq->cpu = -1; + rq->__data_len = 0; + rq->__sector = (sector_t) -1; + rq->bio = NULL; + rq->biotail = NULL; + INIT_HLIST_NODE(&rq->hash); + RB_CLEAR_NODE(&rq->rb_node); + memset(&rq->flush, 0, max(sizeof(rq->flush), sizeof(rq->elv))); + rq->rq_disk = NULL; + rq->part = NULL; rq->start_time = jiffies; +#ifdef CONFIG_BLK_CGROUP + rq->rl = NULL; set_start_time_ns(rq); + rq->io_start_time_ns = 0; +#endif + rq->nr_phys_segments = 0; +#if defined(CONFIG_BLK_DEV_INTEGRITY) + rq->nr_integrity_segments = 0; +#endif + rq->ioprio = 0; + rq->special = NULL; + /* tag was already set */ + rq->errors = 0; + memset(rq->__cmd, 0, sizeof(rq->__cmd)); + rq->cmd = rq->__cmd; + rq->cmd_len = BLK_MAX_CDB; + + rq->extra_len = 0; + rq->sense_len = 0; + rq->resid_len = 0; + rq->sense = NULL; + + rq->deadline = 0; + INIT_LIST_HEAD(&rq->timeout_list); + rq->timeout = 0; + rq->retries = 0; + rq->end_io = NULL; + rq->end_io_data = NULL; + rq->next_rq = NULL; + ctx->rq_dispatched[rw_is_sync(rw_flags)]++; } @@ -258,6 +300,7 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx, const int tag = rq->tag; struct request_queue *q = rq->q; + clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags); blk_mq_put_tag(hctx->tags, tag); blk_mq_queue_exit(q); } diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 20b26d4e53a2..94b27210641b 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -90,9 +90,10 @@ enum rq_cmd_type_bits { #define BLK_MAX_CDB 16 /* - * try to put the fields that are referenced together in the same cacheline. - * if you modify this structure, be sure to check block/blk-core.c:blk_rq_init() - * as well! + * Try to put the fields that are referenced together in the same cacheline. + * + * If you modify this structure, make sure to update blk_rq_init() and + * especially blk_mq_rq_ctx_init() to take care of the added fields. 
*/ struct request { struct list_head queuelist; -- cgit v1.2.3-59-g8ed1b From 4bb659b156996f2993dc16fad71fec9ee070153c Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 9 May 2014 09:36:49 -0600 Subject: blk-mq: implement new and more efficient tagging scheme blk-mq currently uses percpu_ida for tag allocation. But that only works well if the ratio between tag space and number of CPUs is sufficiently high. For most devices and systems, that is not the case. The end result if that we either only utilize the tag space partially, or we end up attempting to fully exhaust it and run into lots of lock contention with stealing between CPUs. This is not optimal. This new tagging scheme is a hybrid bitmap allocator. It uses two tricks to both be SMP friendly and allow full exhaustion of the space: 1) We cache the last allocated (or freed) tag on a per blk-mq software context basis. This allows us to limit the space we have to search. The key element here is not caching it in the shared tag structure, otherwise we end up dirtying more shared cache lines on each allocate/free operation. 2) The tag space is split into cache line sized groups, and each context will start off randomly in that space. Even up to full utilization of the space, this divides the tag users efficiently into cache line groups, avoiding dirtying the same one both between allocators and between allocator and freeer. This scheme shows drastically better behaviour, both on small tag spaces but on large ones as well. It has been tested extensively to show better performance for all the cases blk-mq cares about. Signed-off-by: Jens Axboe --- block/blk-mq-tag.c | 415 ++++++++++++++++++++++++++++++++++++++++--------- block/blk-mq-tag.h | 42 ++++- block/blk-mq.c | 23 ++- block/blk-mq.h | 4 +- include/linux/blk-mq.h | 6 +- 5 files changed, 391 insertions(+), 99 deletions(-) (limited to 'block') diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 1f43d6ee956f..467f3a20b355 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -1,64 +1,257 @@ #include #include +#include #include #include "blk.h" #include "blk-mq.h" #include "blk-mq-tag.h" -void blk_mq_wait_for_tags(struct blk_mq_tags *tags, bool reserved) +void blk_mq_wait_for_tags(struct blk_mq_tags *tags, struct blk_mq_hw_ctx *hctx, + bool reserved) { - int tag = blk_mq_get_tag(tags, __GFP_WAIT, reserved); - blk_mq_put_tag(tags, tag); + int tag, zero = 0; + + tag = blk_mq_get_tag(tags, hctx, &zero, __GFP_WAIT, reserved); + blk_mq_put_tag(tags, tag, &zero); +} + +static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt) +{ + int i; + + for (i = 0; i < bt->map_nr; i++) { + struct blk_mq_bitmap *bm = &bt->map[i]; + int ret; + + ret = find_first_zero_bit(&bm->word, bm->depth); + if (ret < bm->depth) + return true; + } + + return false; } bool blk_mq_has_free_tags(struct blk_mq_tags *tags) { - return !tags || - percpu_ida_free_tags(&tags->free_tags, nr_cpu_ids) != 0; + if (!tags) + return true; + + return bt_has_free_tags(&tags->bitmap_tags); +} + +static int __bt_get_word(struct blk_mq_bitmap *bm, unsigned int last_tag) +{ + int tag, org_last_tag, end; + + org_last_tag = last_tag = TAG_TO_BIT(last_tag); + end = bm->depth; + do { +restart: + tag = find_next_zero_bit(&bm->word, end, last_tag); + if (unlikely(tag >= end)) { + /* + * We started with an offset, start from 0 to + * exhaust the map. 
+ */ + if (org_last_tag && last_tag) { + end = last_tag; + last_tag = 0; + goto restart; + } + return -1; + } + last_tag = tag + 1; + } while (test_and_set_bit_lock(tag, &bm->word)); + + return tag; +} + +/* + * Straight forward bitmap tag implementation, where each bit is a tag + * (cleared == free, and set == busy). The small twist is using per-cpu + * last_tag caches, which blk-mq stores in the blk_mq_ctx software queue + * contexts. This enables us to drastically limit the space searched, + * without dirtying an extra shared cacheline like we would if we stored + * the cache value inside the shared blk_mq_bitmap_tags structure. On top + * of that, each word of tags is in a separate cacheline. This means that + * multiple users will tend to stick to different cachelines, at least + * until the map is exhausted. + */ +static int __bt_get(struct blk_mq_bitmap_tags *bt, unsigned int *tag_cache) +{ + unsigned int last_tag, org_last_tag; + int index, i, tag; + + last_tag = org_last_tag = *tag_cache; + index = TAG_TO_INDEX(last_tag); + + for (i = 0; i < bt->map_nr; i++) { + tag = __bt_get_word(&bt->map[index], last_tag); + if (tag != -1) { + tag += index * BITS_PER_LONG; + goto done; + } + + last_tag = 0; + if (++index >= bt->map_nr) + index = 0; + } + + *tag_cache = 0; + return -1; + + /* + * Only update the cache from the allocation path, if we ended + * up using the specific cached tag. + */ +done: + if (tag == org_last_tag) { + last_tag = tag + 1; + if (last_tag >= bt->depth - 1) + last_tag = 0; + + *tag_cache = last_tag; + } + + return tag; +} + +static inline void bt_index_inc(unsigned int *index) +{ + *index = (*index + 1) & (BT_WAIT_QUEUES - 1); +} + +static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt, + struct blk_mq_hw_ctx *hctx) +{ + struct bt_wait_state *bs; + + if (!hctx) + return &bt->bs[0]; + + bs = &bt->bs[hctx->wait_index]; + bt_index_inc(&hctx->wait_index); + return bs; } -static unsigned int __blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp) +static int bt_get(struct blk_mq_bitmap_tags *bt, struct blk_mq_hw_ctx *hctx, + unsigned int *last_tag, gfp_t gfp) { + struct bt_wait_state *bs; + DEFINE_WAIT(wait); int tag; - tag = percpu_ida_alloc(&tags->free_tags, (gfp & __GFP_WAIT) ? - TASK_UNINTERRUPTIBLE : TASK_RUNNING); - if (tag < 0) - return BLK_MQ_TAG_FAIL; - return tag + tags->nr_reserved_tags; + tag = __bt_get(bt, last_tag); + if (tag != -1) + return tag; + + if (!(gfp & __GFP_WAIT)) + return -1; + + bs = bt_wait_ptr(bt, hctx); + do { + bool was_empty; + + was_empty = list_empty(&wait.task_list); + prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE); + + tag = __bt_get(bt, last_tag); + if (tag != -1) + break; + + if (was_empty) + atomic_set(&bs->wait_cnt, bt->wake_cnt); + + io_schedule(); + } while (1); + + finish_wait(&bs->wait, &wait); + return tag; +} + +static unsigned int __blk_mq_get_tag(struct blk_mq_tags *tags, + struct blk_mq_hw_ctx *hctx, + unsigned int *last_tag, gfp_t gfp) +{ + int tag; + + tag = bt_get(&tags->bitmap_tags, hctx, last_tag, gfp); + if (tag >= 0) + return tag + tags->nr_reserved_tags; + + return BLK_MQ_TAG_FAIL; } static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_tags *tags, gfp_t gfp) { - int tag; + int tag, zero = 0; if (unlikely(!tags->nr_reserved_tags)) { WARN_ON_ONCE(1); return BLK_MQ_TAG_FAIL; } - tag = percpu_ida_alloc(&tags->reserved_tags, (gfp & __GFP_WAIT) ? 
- TASK_UNINTERRUPTIBLE : TASK_RUNNING); + tag = bt_get(&tags->breserved_tags, NULL, &zero, gfp); if (tag < 0) return BLK_MQ_TAG_FAIL; + return tag; } -unsigned int blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp, bool reserved) +unsigned int blk_mq_get_tag(struct blk_mq_tags *tags, + struct blk_mq_hw_ctx *hctx, unsigned int *last_tag, + gfp_t gfp, bool reserved) { if (!reserved) - return __blk_mq_get_tag(tags, gfp); + return __blk_mq_get_tag(tags, hctx, last_tag, gfp); return __blk_mq_get_reserved_tag(tags, gfp); } +static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt) +{ + int i, wake_index; + + wake_index = bt->wake_index; + for (i = 0; i < BT_WAIT_QUEUES; i++) { + struct bt_wait_state *bs = &bt->bs[wake_index]; + + if (waitqueue_active(&bs->wait)) { + if (wake_index != bt->wake_index) + bt->wake_index = wake_index; + + return bs; + } + + bt_index_inc(&wake_index); + } + + return NULL; +} + +static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag) +{ + const int index = TAG_TO_INDEX(tag); + struct bt_wait_state *bs; + + clear_bit(TAG_TO_BIT(tag), &bt->map[index].word); + + bs = bt_wake_ptr(bt); + if (bs && atomic_dec_and_test(&bs->wait_cnt)) { + smp_mb__after_clear_bit(); + atomic_set(&bs->wait_cnt, bt->wake_cnt); + bt_index_inc(&bt->wake_index); + wake_up(&bs->wait); + } +} + static void __blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag) { BUG_ON(tag >= tags->nr_tags); - percpu_ida_free(&tags->free_tags, tag - tags->nr_reserved_tags); + bt_clear_tag(&tags->bitmap_tags, tag); } static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags, @@ -66,22 +259,41 @@ static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags, { BUG_ON(tag >= tags->nr_reserved_tags); - percpu_ida_free(&tags->reserved_tags, tag); + bt_clear_tag(&tags->breserved_tags, tag); } -void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag) +void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag, + unsigned int *last_tag) { - if (tag >= tags->nr_reserved_tags) - __blk_mq_put_tag(tags, tag); - else + if (tag >= tags->nr_reserved_tags) { + const int real_tag = tag - tags->nr_reserved_tags; + + __blk_mq_put_tag(tags, real_tag); + *last_tag = real_tag; + } else __blk_mq_put_reserved_tag(tags, tag); } -static int __blk_mq_tag_iter(unsigned id, void *data) +static void bt_for_each_free(struct blk_mq_bitmap_tags *bt, + unsigned long *free_map, unsigned int off) { - unsigned long *tag_map = data; - __set_bit(id, tag_map); - return 0; + int i; + + for (i = 0; i < bt->map_nr; i++) { + struct blk_mq_bitmap *bm = &bt->map[i]; + int bit = 0; + + do { + bit = find_next_zero_bit(&bm->word, bm->depth, bit); + if (bit >= bm->depth) + break; + + __set_bit(bit + off, free_map); + bit++; + } while (1); + + off += BITS_PER_LONG; + } } void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, @@ -95,21 +307,98 @@ void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, if (!tag_map) return; - percpu_ida_for_each_free(&tags->free_tags, __blk_mq_tag_iter, tag_map); + bt_for_each_free(&tags->bitmap_tags, tag_map, tags->nr_reserved_tags); if (tags->nr_reserved_tags) - percpu_ida_for_each_free(&tags->reserved_tags, __blk_mq_tag_iter, - tag_map); + bt_for_each_free(&tags->breserved_tags, tag_map, 0); fn(data, tag_map); kfree(tag_map); } +static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt) +{ + unsigned int i, used; + + for (i = 0, used = 0; i < bt->map_nr; i++) { + struct blk_mq_bitmap *bm = &bt->map[i]; + + used += bitmap_weight(&bm->word, bm->depth); + } + + return bt->depth - 
used; +} + +static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth, + int node, bool reserved) +{ + int i; + + /* + * Depth can be zero for reserved tags, that's not a failure + * condition. + */ + if (depth) { + int nr, i, map_depth; + + nr = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG; + bt->map = kzalloc_node(nr * sizeof(struct blk_mq_bitmap), + GFP_KERNEL, node); + if (!bt->map) + return -ENOMEM; + + bt->map_nr = nr; + map_depth = depth; + for (i = 0; i < nr; i++) { + bt->map[i].depth = min(map_depth, BITS_PER_LONG); + map_depth -= BITS_PER_LONG; + } + } + + bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL); + if (!bt->bs) { + kfree(bt->map); + return -ENOMEM; + } + + for (i = 0; i < BT_WAIT_QUEUES; i++) + init_waitqueue_head(&bt->bs[i].wait); + + bt->wake_cnt = BT_WAIT_BATCH; + if (bt->wake_cnt > depth / 4) + bt->wake_cnt = max(1U, depth / 4); + + bt->depth = depth; + return 0; +} + +static void bt_free(struct blk_mq_bitmap_tags *bt) +{ + kfree(bt->map); + kfree(bt->bs); +} + +static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags, + int node) +{ + unsigned int depth = tags->nr_tags - tags->nr_reserved_tags; + + if (bt_alloc(&tags->bitmap_tags, depth, node, false)) + goto enomem; + if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true)) + goto enomem; + + return tags; +enomem: + bt_free(&tags->bitmap_tags); + kfree(tags); + return NULL; +} + struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags, unsigned int reserved_tags, int node) { unsigned int nr_tags, nr_cache; struct blk_mq_tags *tags; - int ret; if (total_tags > BLK_MQ_TAG_MAX) { pr_err("blk-mq: tag depth too large\n"); @@ -121,72 +410,46 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags, return NULL; nr_tags = total_tags - reserved_tags; - nr_cache = nr_tags / num_possible_cpus(); - - if (nr_cache < BLK_MQ_TAG_CACHE_MIN) - nr_cache = BLK_MQ_TAG_CACHE_MIN; - else if (nr_cache > BLK_MQ_TAG_CACHE_MAX) - nr_cache = BLK_MQ_TAG_CACHE_MAX; + nr_cache = nr_tags / num_online_cpus(); tags->nr_tags = total_tags; tags->nr_reserved_tags = reserved_tags; - tags->nr_max_cache = nr_cache; - tags->nr_batch_move = max(1u, nr_cache / 2); - - ret = __percpu_ida_init(&tags->free_tags, tags->nr_tags - - tags->nr_reserved_tags, - tags->nr_max_cache, - tags->nr_batch_move); - if (ret) - goto err_free_tags; - - if (reserved_tags) { - /* - * With max_cahe and batch set to 1, the allocator fallbacks to - * no cached. It's fine reserved tags allocation is slow. 
- */ - ret = __percpu_ida_init(&tags->reserved_tags, reserved_tags, - 1, 1); - if (ret) - goto err_reserved_tags; - } - return tags; - -err_reserved_tags: - percpu_ida_destroy(&tags->free_tags); -err_free_tags: - kfree(tags); - return NULL; + return blk_mq_init_bitmap_tags(tags, node); } void blk_mq_free_tags(struct blk_mq_tags *tags) { - percpu_ida_destroy(&tags->free_tags); - percpu_ida_destroy(&tags->reserved_tags); + bt_free(&tags->bitmap_tags); + bt_free(&tags->breserved_tags); kfree(tags); } +void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag) +{ + unsigned int depth = tags->nr_tags - tags->nr_reserved_tags; + + if (depth > 1) + *tag = prandom_u32() % (depth - 1); + else + *tag = 0; +} + ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page) { char *orig_page = page; - unsigned int cpu; + unsigned int free, res; if (!tags) return 0; - page += sprintf(page, "nr_tags=%u, reserved_tags=%u, batch_move=%u," - " max_cache=%u\n", tags->nr_tags, tags->nr_reserved_tags, - tags->nr_batch_move, tags->nr_max_cache); + page += sprintf(page, "nr_tags=%u, reserved_tags=%u\n", + tags->nr_tags, tags->nr_reserved_tags); - page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", - percpu_ida_free_tags(&tags->free_tags, nr_cpu_ids), - percpu_ida_free_tags(&tags->reserved_tags, nr_cpu_ids)); + free = bt_unused_tags(&tags->bitmap_tags); + res = bt_unused_tags(&tags->breserved_tags); - for_each_possible_cpu(cpu) { - page += sprintf(page, " cpu%02u: nr_free=%u\n", cpu, - percpu_ida_free_tags(&tags->free_tags, cpu)); - } + page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res); return page - orig_page; } diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index c8e0645ea331..06d4a2f0f7a0 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -1,7 +1,34 @@ #ifndef INT_BLK_MQ_TAG_H #define INT_BLK_MQ_TAG_H -#include +enum { + BT_WAIT_QUEUES = 8, + BT_WAIT_BATCH = 8, +}; + +struct bt_wait_state { + atomic_t wait_cnt; + wait_queue_head_t wait; +} ____cacheline_aligned_in_smp; + +#define TAG_TO_INDEX(tag) ((tag) / BITS_PER_LONG) +#define TAG_TO_BIT(tag) ((tag) & (BITS_PER_LONG - 1)) + +struct blk_mq_bitmap { + unsigned long word; + unsigned long depth; +} ____cacheline_aligned_in_smp; + +struct blk_mq_bitmap_tags { + unsigned int depth; + unsigned int wake_cnt; + + struct blk_mq_bitmap *map; + unsigned int map_nr; + + unsigned int wake_index; + struct bt_wait_state *bs; +}; /* * Tag address space map. 
@@ -9,11 +36,9 @@ struct blk_mq_tags { unsigned int nr_tags; unsigned int nr_reserved_tags; - unsigned int nr_batch_move; - unsigned int nr_max_cache; - struct percpu_ida free_tags; - struct percpu_ida reserved_tags; + struct blk_mq_bitmap_tags bitmap_tags; + struct blk_mq_bitmap_tags breserved_tags; struct request **rqs; struct list_head page_list; @@ -23,12 +48,13 @@ struct blk_mq_tags { extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node); extern void blk_mq_free_tags(struct blk_mq_tags *tags); -extern unsigned int blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp, bool reserved); -extern void blk_mq_wait_for_tags(struct blk_mq_tags *tags, bool reserved); -extern void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag); +extern unsigned int blk_mq_get_tag(struct blk_mq_tags *tags, struct blk_mq_hw_ctx *hctx, unsigned int *last_tag, gfp_t gfp, bool reserved); +extern void blk_mq_wait_for_tags(struct blk_mq_tags *tags, struct blk_mq_hw_ctx *hctx, bool reserved); +extern void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag, unsigned int *last_tag); extern void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, void (*fn)(void *data, unsigned long *), void *data); extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags); extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page); +extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag); enum { BLK_MQ_TAG_CACHE_MIN = 1, diff --git a/block/blk-mq.c b/block/blk-mq.c index 492f49f96459..9f07a266f7ab 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -74,12 +74,13 @@ static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, } static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx, + struct blk_mq_ctx *ctx, gfp_t gfp, bool reserved) { struct request *rq; unsigned int tag; - tag = blk_mq_get_tag(hctx->tags, gfp, reserved); + tag = blk_mq_get_tag(hctx->tags, hctx, &ctx->last_tag, gfp, reserved); if (tag != BLK_MQ_TAG_FAIL) { rq = hctx->tags->rqs[tag]; rq->tag = tag; @@ -246,7 +247,8 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q, struct blk_mq_ctx *ctx = blk_mq_get_ctx(q); struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu); - rq = __blk_mq_alloc_request(hctx, gfp & ~__GFP_WAIT, reserved); + rq = __blk_mq_alloc_request(hctx, ctx, gfp & ~__GFP_WAIT, + reserved); if (rq) { blk_mq_rq_ctx_init(q, ctx, rq, rw); break; @@ -260,7 +262,7 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q, break; } - blk_mq_wait_for_tags(hctx->tags, reserved); + blk_mq_wait_for_tags(hctx->tags, hctx, reserved); } while (1); return rq; @@ -278,6 +280,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp) blk_mq_put_ctx(rq->mq_ctx); return rq; } +EXPORT_SYMBOL(blk_mq_alloc_request); struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp) @@ -301,7 +304,7 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx, struct request_queue *q = rq->q; clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags); - blk_mq_put_tag(hctx->tags, tag); + blk_mq_put_tag(hctx->tags, tag, &ctx->last_tag); blk_mq_queue_exit(q); } @@ -677,11 +680,6 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) queued++; continue; case BLK_MQ_RQ_QUEUE_BUSY: - /* - * FIXME: we should have a mechanism to stop the queue - * like blk_stop_queue, otherwise we will waste cpu - * time - */ list_add(&rq->queuelist, &rq_list); 
__blk_mq_requeue_request(rq); break; @@ -873,6 +871,7 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, list_add(&rq->queuelist, &ctx->rq_list); else list_add_tail(&rq->queuelist, &ctx->rq_list); + blk_mq_hctx_mark_pending(hctx, ctx); /* @@ -1046,7 +1045,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio) if (is_sync) rw |= REQ_SYNC; trace_block_getrq(q, bio, rw); - rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false); + rq = __blk_mq_alloc_request(hctx, ctx, GFP_ATOMIC, false); if (likely(rq)) blk_mq_rq_ctx_init(q, ctx, rq, rw); else { @@ -1130,8 +1129,8 @@ EXPORT_SYMBOL(blk_mq_map_queue); struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *set, unsigned int hctx_index) { - return kmalloc_node(sizeof(struct blk_mq_hw_ctx), - GFP_KERNEL | __GFP_ZERO, set->numa_node); + return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, + set->numa_node); } EXPORT_SYMBOL(blk_mq_alloc_single_hw_queue); diff --git a/block/blk-mq.h b/block/blk-mq.h index 1ae364ceaf8b..97cfab9c092f 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -12,6 +12,8 @@ struct blk_mq_ctx { unsigned int cpu; unsigned int index_hw; + unsigned int last_tag ____cacheline_aligned_in_smp; + /* incremented at dispatch time */ unsigned long rq_dispatched[2]; unsigned long rq_merged; @@ -21,7 +23,7 @@ struct blk_mq_ctx { struct request_queue *queue; struct kobject kobj; -}; +} ____cacheline_aligned_in_smp; void __blk_mq_complete_request(struct request *rq); void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 5bd677e2dcb7..f83d15f6e1c1 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -31,10 +31,12 @@ struct blk_mq_hw_ctx { void *driver_data; - unsigned int nr_ctx; - struct blk_mq_ctx **ctxs; unsigned int nr_ctx_map; unsigned long *ctx_map; + unsigned int nr_ctx; + struct blk_mq_ctx **ctxs; + + unsigned int wait_index; struct blk_mq_tags *tags; -- cgit v1.2.3-59-g8ed1b From 59d13bf5f57ded658c872fa22276f75ab8f12841 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 9 May 2014 13:41:15 -0600 Subject: blk-mq: use sparser tag layout for lower queue depth For best performance, spreading tags over multiple cachelines makes the tagging more efficient on multicore systems. But since we have 8 * sizeof(unsigned long) tags per cacheline, we don't always get a nice spread. Attempt to spread the tags over at least 4 cachelines, using fewer number of bits per unsigned long if we have to. This improves tagging performance in setups with 32-128 tags. For higher depths, the spread is the same as before (BITS_PER_LONG tags per cacheline). 
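As a hedged, standalone illustration of the sizing rule in the patch below (ordinary userspace C, not kernel code; the depths are just example values), this reproduces the bits_per_word shrink loop and shows that small tag spaces now get spread over at least four words:

#include <stdio.h>

#define BITS_PER_LONG 64

int main(void)
{
    unsigned int depths[] = { 16, 32, 64, 128, 256 };

    for (unsigned int i = 0; i < 5; i++) {
        unsigned int depth = depths[i];
        unsigned int bits = 6;                  /* ilog2(BITS_PER_LONG) */
        unsigned int tags_per_word = 1 << bits;

        /* same shrink loop as bt_alloc() in the patch below */
        if (depth >= 4) {
            while (tags_per_word * 4 > depth) {
                bits--;
                tags_per_word = 1 << bits;
            }
        }
        printf("depth %3u -> %2u tags/word, %u words\n",
               depth, tags_per_word,
               (depth + tags_per_word - 1) / tags_per_word);
    }
    return 0;
}

With BITS_PER_LONG = 64 this prints, for example, "depth  32 ->  8 tags/word, 4 words": the old layout would have packed all 32 tags into a single word, and therefore a single cacheline.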
Signed-off-by: Jens Axboe --- block/blk-mq-tag.c | 45 ++++++++++++++++++++++++++++++++------------- block/blk-mq-tag.h | 7 ++++--- 2 files changed, 36 insertions(+), 16 deletions(-) (limited to 'block') diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 467f3a20b355..6c78c08865e3 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -44,7 +44,7 @@ static int __bt_get_word(struct blk_mq_bitmap *bm, unsigned int last_tag) { int tag, org_last_tag, end; - org_last_tag = last_tag = TAG_TO_BIT(last_tag); + org_last_tag = last_tag; end = bm->depth; do { restart: @@ -84,12 +84,12 @@ static int __bt_get(struct blk_mq_bitmap_tags *bt, unsigned int *tag_cache) int index, i, tag; last_tag = org_last_tag = *tag_cache; - index = TAG_TO_INDEX(last_tag); + index = TAG_TO_INDEX(bt, last_tag); for (i = 0; i < bt->map_nr; i++) { - tag = __bt_get_word(&bt->map[index], last_tag); + tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag)); if (tag != -1) { - tag += index * BITS_PER_LONG; + tag += (index << bt->bits_per_word); goto done; } @@ -233,10 +233,10 @@ static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt) static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag) { - const int index = TAG_TO_INDEX(tag); + const int index = TAG_TO_INDEX(bt, tag); struct bt_wait_state *bs; - clear_bit(TAG_TO_BIT(tag), &bt->map[index].word); + clear_bit(TAG_TO_BIT(bt, tag), &bt->map[index].word); bs = bt_wake_ptr(bt); if (bs && atomic_dec_and_test(&bs->wait_cnt)) { @@ -292,7 +292,7 @@ static void bt_for_each_free(struct blk_mq_bitmap_tags *bt, bit++; } while (1); - off += BITS_PER_LONG; + off += (1 << bt->bits_per_word); } } @@ -333,14 +333,31 @@ static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth, { int i; + bt->bits_per_word = ilog2(BITS_PER_LONG); + /* * Depth can be zero for reserved tags, that's not a failure * condition. */ if (depth) { - int nr, i, map_depth; + unsigned int nr, i, map_depth, tags_per_word; + + tags_per_word = (1 << bt->bits_per_word); + + /* + * If the tag space is small, shrink the number of tags + * per word so we spread over a few cachelines, at least. + * If less than 4 tags, just forget about it, it's not + * going to work optimally anyway. 
+ */ + if (depth >= 4) { + while (tags_per_word * 4 > depth) { + bt->bits_per_word--; + tags_per_word = (1 << bt->bits_per_word); + } + } - nr = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG; + nr = ALIGN(depth, tags_per_word) / tags_per_word; bt->map = kzalloc_node(nr * sizeof(struct blk_mq_bitmap), GFP_KERNEL, node); if (!bt->map) @@ -349,8 +366,8 @@ static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth, bt->map_nr = nr; map_depth = depth; for (i = 0; i < nr; i++) { - bt->map[i].depth = min(map_depth, BITS_PER_LONG); - map_depth -= BITS_PER_LONG; + bt->map[i].depth = min(map_depth, tags_per_word); + map_depth -= tags_per_word; } } @@ -443,8 +460,10 @@ ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page) if (!tags) return 0; - page += sprintf(page, "nr_tags=%u, reserved_tags=%u\n", - tags->nr_tags, tags->nr_reserved_tags); + page += sprintf(page, "nr_tags=%u, reserved_tags=%u, " + "bits_per_word=%u\n", + tags->nr_tags, tags->nr_reserved_tags, + tags->bitmap_tags.bits_per_word); free = bt_unused_tags(&tags->bitmap_tags); res = bt_unused_tags(&tags->breserved_tags); diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index 06d4a2f0f7a0..7aa9f0665489 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -11,8 +11,8 @@ struct bt_wait_state { wait_queue_head_t wait; } ____cacheline_aligned_in_smp; -#define TAG_TO_INDEX(tag) ((tag) / BITS_PER_LONG) -#define TAG_TO_BIT(tag) ((tag) & (BITS_PER_LONG - 1)) +#define TAG_TO_INDEX(bt, tag) ((tag) >> (bt)->bits_per_word) +#define TAG_TO_BIT(bt, tag) ((tag) & ((1 << (bt)->bits_per_word) - 1)) struct blk_mq_bitmap { unsigned long word; @@ -22,9 +22,10 @@ struct blk_mq_bitmap { struct blk_mq_bitmap_tags { unsigned int depth; unsigned int wake_cnt; + unsigned int bits_per_word; - struct blk_mq_bitmap *map; unsigned int map_nr; + struct blk_mq_bitmap *map; unsigned int wake_index; struct bt_wait_state *bs; -- cgit v1.2.3-59-g8ed1b From cf4b50afc28cc4629f82f5591bef1db2ce129fdc Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 9 May 2014 14:54:08 -0600 Subject: blk-mq: fix race in IO start accounting Commit c6d600c6 opened up a small race where we could attempt to account IO completion on a request, racing with IO start accounting. Fix this up by ensuring that we've accounted for IO start before inserting the request. Signed-off-by: Jens Axboe --- block/blk-mq.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index 9f07a266f7ab..526feee31bff 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1088,17 +1088,15 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio) } if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE)) { - init_request_from_bio(rq, bio); - + blk_mq_bio_to_request(rq, bio); spin_lock(&ctx->lock); insert_rq: __blk_mq_insert_request(hctx, rq, false); spin_unlock(&ctx->lock); - blk_account_io_start(rq, 1); } else { spin_lock(&ctx->lock); if (!blk_mq_attempt_merge(q, ctx, bio)) { - init_request_from_bio(rq, bio); + blk_mq_bio_to_request(rq, bio); goto insert_rq; } -- cgit v1.2.3-59-g8ed1b From 7276d02e241dc3595d4a18ac2456fa449e448de7 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 9 May 2014 15:48:23 -0600 Subject: block: only calculate part_in_flight() once We first check if we have inflight IO, then retrieve that same number again. Usually this isn't that costly since the chance of having the data dirtied in between is small, but there's no reason for calling part_in_flight() twice. 
Signed-off-by: Jens Axboe --- block/blk-core.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index c4269701cb4f..a6bd3e702201 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1233,12 +1233,15 @@ static void add_acct_request(struct request_queue *q, struct request *rq, static void part_round_stats_single(int cpu, struct hd_struct *part, unsigned long now) { + int inflight; + if (now == part->stamp) return; - if (part_in_flight(part)) { + inflight = part_in_flight(part); + if (inflight) { __part_stat_add(cpu, part, time_in_queue, - part_in_flight(part) * (now - part->stamp)); + inflight * (now - part->stamp)); __part_stat_add(cpu, part, io_ticks, (now - part->stamp)); } part->stamp = now; -- cgit v1.2.3-59-g8ed1b From 0289b2e110b7824b2f76d194ad6f8f0844e270ad Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Sun, 11 May 2014 01:01:48 +0800 Subject: blk-mq: bitmap tag: use clear_bit_unlock in bt_clear_tag() The unlock memory barrier need to order access to req in free path and clearing tag bit, otherwise either request free path may see a allocated request, or initialized request in allocate path might be modified by the ongoing free path. Signed-off-by: Ming Lei Signed-off-by: Jens Axboe --- block/blk-mq-tag.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'block') diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 6c78c08865e3..a81b138e89fe 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -236,7 +236,11 @@ static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag) const int index = TAG_TO_INDEX(bt, tag); struct bt_wait_state *bs; - clear_bit(TAG_TO_BIT(bt, tag), &bt->map[index].word); + /* + * The unlock memory barrier need to order access to req in free + * path and clearing tag bit + */ + clear_bit_unlock(TAG_TO_BIT(bt, tag), &bt->map[index].word); bs = bt_wake_ptr(bt); if (bs && atomic_dec_and_test(&bs->wait_cnt)) { -- cgit v1.2.3-59-g8ed1b From 60f2df8a29df5f2db2c87fd23122a1cebdf2011a Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Sun, 11 May 2014 01:01:49 +0800 Subject: blk-mq: bitmap tag: remove barrier in bt_clear_tag() The barrier isn't necessary because both atomic_dec_and_test() and wake_up() implicate one barrier. Signed-off-by: Ming Lei Signed-off-by: Jens Axboe --- block/blk-mq-tag.c | 1 - 1 file changed, 1 deletion(-) (limited to 'block') diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index a81b138e89fe..5a83d8e587f7 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -244,7 +244,6 @@ static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag) bs = bt_wake_ptr(bt); if (bs && atomic_dec_and_test(&bs->wait_cnt)) { - smp_mb__after_clear_bit(); atomic_set(&bs->wait_cnt, bt->wake_cnt); bt_index_inc(&bt->wake_index); wake_up(&bs->wait); -- cgit v1.2.3-59-g8ed1b From 9d3d21aeb4f194cd7ac205abe68b14b47ae736a8 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Sat, 10 May 2014 15:43:14 -0600 Subject: blk-mq: bitmap tag: select random tag betweet 0 and (depth - 1) The selected tag should be selected at random between 0 and (depth - 1) with probability 1/depth, instead between 0 and (depth - 2) with probability 1/(depth - 1). 
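A small userspace sketch (plain C, with rand() standing in for prandom_u32(); not part of the patch) makes the skew visible for an example depth of 4:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    enum { DEPTH = 4, SAMPLES = 1 << 20 };
    unsigned int old_hist[DEPTH] = { 0 }, new_hist[DEPTH] = { 0 };

    for (int i = 0; i < SAMPLES; i++) {
        unsigned int r = rand();

        old_hist[r % (DEPTH - 1)]++;    /* old: tag DEPTH-1 never chosen */
        new_hist[r % DEPTH]++;          /* new: uniform over 0..DEPTH-1 */
    }
    for (int t = 0; t < DEPTH; t++)
        printf("tag %d: old %u, new %u\n", t, old_hist[t], new_hist[t]);
    return 0;
}

The "old" histogram leaves the last tag untouched and piles its probability onto the others; the "new" one is flat across all DEPTH starting positions.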
Signed-off-by: Ming Lei Signed-off-by: Jens Axboe --- block/blk-mq-tag.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'block') diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 5a83d8e587f7..f196e60178f4 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -449,10 +449,7 @@ void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag) { unsigned int depth = tags->nr_tags - tags->nr_reserved_tags; - if (depth > 1) - *tag = prandom_u32() % (depth - 1); - else - *tag = 0; + *tag = prandom_u32() % depth; } ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page) -- cgit v1.2.3-59-g8ed1b From 1f236ab22ce3bc5d4f975aa116966c0ea7ec2013 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Sun, 11 May 2014 01:01:51 +0800 Subject: blk-mq: bitmap tag: cleanup blk_mq_init_tags Both nr_cache and nr_tags arn't needed for bitmap tag anymore. Signed-off-by: Ming Lei Signed-off-by: Jens Axboe --- block/blk-mq-tag.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'block') diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index f196e60178f4..8d526a3e02f6 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -417,7 +417,6 @@ enomem: struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags, unsigned int reserved_tags, int node) { - unsigned int nr_tags, nr_cache; struct blk_mq_tags *tags; if (total_tags > BLK_MQ_TAG_MAX) { @@ -429,9 +428,6 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags, if (!tags) return NULL; - nr_tags = total_tags - reserved_tags; - nr_cache = nr_tags / num_online_cpus(); - tags->nr_tags = total_tags; tags->nr_reserved_tags = reserved_tags; -- cgit v1.2.3-59-g8ed1b From 0d2602ca30e410e84e8bdf05c84ed5688e0a5a44 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 13 May 2014 15:10:52 -0600 Subject: blk-mq: improve support for shared tags maps This adds support for active queue tracking, meaning that the blk-mq tagging maintains a count of active users of a tag set. This allows us to maintain a notion of fairness between users, so that we can distribute the tag depth evenly without starving some users while allowing others to try unfair deep queues. If sharing of a tag set is detected, each hardware queue will track the depth of its own queue. And if this exceeds the total depth divided by the number of active queues, the user is actively throttled down. The active queue count is done lazily to avoid bouncing that data between submitter and completer. Each hardware queue gets marked active when it allocates its first tag, and gets marked inactive when 1) the last tag is cleared, and 2) the queue timeout grace period has passed. 
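To make the throttling rule concrete, here is a hedged standalone sketch (userspace C, not the kernel code; 128 is just an example depth) of the per-queue limit that hctx_may_queue() in the patch below enforces. Note that a queue only counts as active once it has actually allocated a tag, so idle sharers do not shrink anyone's share:

#include <stdio.h>

/* Mirrors the depth calculation in hctx_may_queue(): round the fair
 * share up, but always allow at least 4 tags per active queue. */
static unsigned int fair_depth(unsigned int total_depth, unsigned int users)
{
    unsigned int depth;

    if (!users)
        return total_depth;

    depth = (total_depth + users - 1) / users;
    return depth < 4 ? 4 : depth;
}

int main(void)
{
    unsigned int total_depth = 128;

    for (unsigned int users = 1; users <= 8; users *= 2)
        printf("%u active queue(s) -> at most %u tags each\n",
               users, fair_depth(total_depth, users));
    return 0;
}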
Signed-off-by: Jens Axboe --- block/blk-mq-sysfs.c | 10 +++++ block/blk-mq-tag.c | 112 +++++++++++++++++++++++++++++++++++++++------- block/blk-mq-tag.h | 27 +++++++++-- block/blk-mq.c | 85 ++++++++++++++++++++++++++++++++--- block/blk-timeout.c | 13 +++++- block/blk.h | 4 ++ include/linux/blk-mq.h | 7 +++ include/linux/blk_types.h | 2 + include/linux/blkdev.h | 3 ++ 9 files changed, 236 insertions(+), 27 deletions(-) (limited to 'block') diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c index 8145b5b25b4b..99a60a829e69 100644 --- a/block/blk-mq-sysfs.c +++ b/block/blk-mq-sysfs.c @@ -208,6 +208,11 @@ static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page) return blk_mq_tag_sysfs_show(hctx->tags, page); } +static ssize_t blk_mq_hw_sysfs_active_show(struct blk_mq_hw_ctx *hctx, char *page) +{ + return sprintf(page, "%u\n", atomic_read(&hctx->nr_active)); +} + static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page) { unsigned int i, first = 1; @@ -267,6 +272,10 @@ static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = { .attr = {.name = "dispatched", .mode = S_IRUGO }, .show = blk_mq_hw_sysfs_dispatched_show, }; +static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_active = { + .attr = {.name = "active", .mode = S_IRUGO }, + .show = blk_mq_hw_sysfs_active_show, +}; static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = { .attr = {.name = "pending", .mode = S_IRUGO }, .show = blk_mq_hw_sysfs_rq_list_show, @@ -287,6 +296,7 @@ static struct attribute *default_hw_ctx_attrs[] = { &blk_mq_hw_sysfs_pending.attr, &blk_mq_hw_sysfs_tags.attr, &blk_mq_hw_sysfs_cpus.attr, + &blk_mq_hw_sysfs_active.attr, NULL, }; diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 8d526a3e02f6..c80086c9c064 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -7,13 +7,12 @@ #include "blk-mq.h" #include "blk-mq-tag.h" -void blk_mq_wait_for_tags(struct blk_mq_tags *tags, struct blk_mq_hw_ctx *hctx, - bool reserved) +void blk_mq_wait_for_tags(struct blk_mq_hw_ctx *hctx, bool reserved) { int tag, zero = 0; - tag = blk_mq_get_tag(tags, hctx, &zero, __GFP_WAIT, reserved); - blk_mq_put_tag(tags, tag, &zero); + tag = blk_mq_get_tag(hctx, &zero, __GFP_WAIT, reserved); + blk_mq_put_tag(hctx, tag, &zero); } static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt) @@ -40,6 +39,84 @@ bool blk_mq_has_free_tags(struct blk_mq_tags *tags) return bt_has_free_tags(&tags->bitmap_tags); } +static inline void bt_index_inc(unsigned int *index) +{ + *index = (*index + 1) & (BT_WAIT_QUEUES - 1); +} + +/* + * If a previously inactive queue goes active, bump the active user count. + */ +bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) +{ + if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) && + !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) + atomic_inc(&hctx->tags->active_queues); + + return true; +} + +/* + * If a previously busy queue goes inactive, potential waiters could now + * be allowed to queue. Wake them up and check. 
+ */ +void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) +{ + struct blk_mq_tags *tags = hctx->tags; + struct blk_mq_bitmap_tags *bt; + int i, wake_index; + + if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) + return; + + atomic_dec(&tags->active_queues); + + /* + * Will only throttle depth on non-reserved tags + */ + bt = &tags->bitmap_tags; + wake_index = bt->wake_index; + for (i = 0; i < BT_WAIT_QUEUES; i++) { + struct bt_wait_state *bs = &bt->bs[wake_index]; + + if (waitqueue_active(&bs->wait)) + wake_up(&bs->wait); + + bt_index_inc(&wake_index); + } +} + +/* + * For shared tag users, we track the number of currently active users + * and attempt to provide a fair share of the tag depth for each of them. + */ +static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx, + struct blk_mq_bitmap_tags *bt) +{ + unsigned int depth, users; + + if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED)) + return true; + if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) + return true; + + /* + * Don't try dividing an ant + */ + if (bt->depth == 1) + return true; + + users = atomic_read(&hctx->tags->active_queues); + if (!users) + return true; + + /* + * Allow at least some tags + */ + depth = max((bt->depth + users - 1) / users, 4U); + return atomic_read(&hctx->nr_active) < depth; +} + static int __bt_get_word(struct blk_mq_bitmap *bm, unsigned int last_tag) { int tag, org_last_tag, end; @@ -78,11 +155,15 @@ restart: * multiple users will tend to stick to different cachelines, at least * until the map is exhausted. */ -static int __bt_get(struct blk_mq_bitmap_tags *bt, unsigned int *tag_cache) +static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt, + unsigned int *tag_cache) { unsigned int last_tag, org_last_tag; int index, i, tag; + if (!hctx_may_queue(hctx, bt)) + return -1; + last_tag = org_last_tag = *tag_cache; index = TAG_TO_INDEX(bt, last_tag); @@ -117,11 +198,6 @@ done: return tag; } -static inline void bt_index_inc(unsigned int *index) -{ - *index = (*index + 1) & (BT_WAIT_QUEUES - 1); -} - static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt, struct blk_mq_hw_ctx *hctx) { @@ -142,7 +218,7 @@ static int bt_get(struct blk_mq_bitmap_tags *bt, struct blk_mq_hw_ctx *hctx, DEFINE_WAIT(wait); int tag; - tag = __bt_get(bt, last_tag); + tag = __bt_get(hctx, bt, last_tag); if (tag != -1) return tag; @@ -156,7 +232,7 @@ static int bt_get(struct blk_mq_bitmap_tags *bt, struct blk_mq_hw_ctx *hctx, was_empty = list_empty(&wait.task_list); prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE); - tag = __bt_get(bt, last_tag); + tag = __bt_get(hctx, bt, last_tag); if (tag != -1) break; @@ -200,14 +276,13 @@ static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_tags *tags, return tag; } -unsigned int blk_mq_get_tag(struct blk_mq_tags *tags, - struct blk_mq_hw_ctx *hctx, unsigned int *last_tag, +unsigned int blk_mq_get_tag(struct blk_mq_hw_ctx *hctx, unsigned int *last_tag, gfp_t gfp, bool reserved) { if (!reserved) - return __blk_mq_get_tag(tags, hctx, last_tag, gfp); + return __blk_mq_get_tag(hctx->tags, hctx, last_tag, gfp); - return __blk_mq_get_reserved_tag(tags, gfp); + return __blk_mq_get_reserved_tag(hctx->tags, gfp); } static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt) @@ -265,9 +340,11 @@ static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags, bt_clear_tag(&tags->breserved_tags, tag); } -void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag, +void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int 
tag, unsigned int *last_tag) { + struct blk_mq_tags *tags = hctx->tags; + if (tag >= tags->nr_reserved_tags) { const int real_tag = tag - tags->nr_reserved_tags; @@ -465,6 +542,7 @@ ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page) res = bt_unused_tags(&tags->breserved_tags); page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res); + page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues)); return page - orig_page; } diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index 7aa9f0665489..0f5ec8b50ef3 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -38,6 +38,8 @@ struct blk_mq_tags { unsigned int nr_tags; unsigned int nr_reserved_tags; + atomic_t active_queues; + struct blk_mq_bitmap_tags bitmap_tags; struct blk_mq_bitmap_tags breserved_tags; @@ -49,9 +51,9 @@ struct blk_mq_tags { extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node); extern void blk_mq_free_tags(struct blk_mq_tags *tags); -extern unsigned int blk_mq_get_tag(struct blk_mq_tags *tags, struct blk_mq_hw_ctx *hctx, unsigned int *last_tag, gfp_t gfp, bool reserved); -extern void blk_mq_wait_for_tags(struct blk_mq_tags *tags, struct blk_mq_hw_ctx *hctx, bool reserved); -extern void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag, unsigned int *last_tag); +extern unsigned int blk_mq_get_tag(struct blk_mq_hw_ctx *hctx, unsigned int *last_tag, gfp_t gfp, bool reserved); +extern void blk_mq_wait_for_tags(struct blk_mq_hw_ctx *hctx, bool reserved); +extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag, unsigned int *last_tag); extern void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, void (*fn)(void *data, unsigned long *), void *data); extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags); extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page); @@ -68,4 +70,23 @@ enum { BLK_MQ_TAG_MAX = BLK_MQ_TAG_FAIL - 1, }; +extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *); +extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *); + +static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) +{ + if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) + return false; + + return __blk_mq_tag_busy(hctx); +} + +static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) +{ + if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) + return; + + __blk_mq_tag_idle(hctx); +} + #endif diff --git a/block/blk-mq.c b/block/blk-mq.c index 9f07a266f7ab..3c4f1fceef8e 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -80,9 +80,16 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx, struct request *rq; unsigned int tag; - tag = blk_mq_get_tag(hctx->tags, hctx, &ctx->last_tag, gfp, reserved); + tag = blk_mq_get_tag(hctx, &ctx->last_tag, gfp, reserved); if (tag != BLK_MQ_TAG_FAIL) { rq = hctx->tags->rqs[tag]; + + rq->cmd_flags = 0; + if (blk_mq_tag_busy(hctx)) { + rq->cmd_flags = REQ_MQ_INFLIGHT; + atomic_inc(&hctx->nr_active); + } + rq->tag = tag; return rq; } @@ -190,7 +197,7 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx, /* csd/requeue_work/fifo_time is initialized before use */ rq->q = q; rq->mq_ctx = ctx; - rq->cmd_flags = rw_flags; + rq->cmd_flags |= rw_flags; rq->cmd_type = 0; /* do not touch atomic flags, it needs atomic ops against the timer */ rq->cpu = -1; @@ -262,7 +269,7 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q, break; } - blk_mq_wait_for_tags(hctx->tags, hctx, reserved); + blk_mq_wait_for_tags(hctx, 
reserved); } while (1); return rq; @@ -303,8 +310,11 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx, const int tag = rq->tag; struct request_queue *q = rq->q; + if (rq->cmd_flags & REQ_MQ_INFLIGHT) + atomic_dec(&hctx->nr_active); + clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags); - blk_mq_put_tag(hctx->tags, tag, &ctx->last_tag); + blk_mq_put_tag(hctx, tag, &ctx->last_tag); blk_mq_queue_exit(q); } @@ -571,8 +581,13 @@ static void blk_mq_rq_timer(unsigned long data) queue_for_each_hw_ctx(q, hctx, i) blk_mq_hw_ctx_check_timeout(hctx, &next, &next_set); - if (next_set) - mod_timer(&q->timeout, round_jiffies_up(next)); + if (next_set) { + next = blk_rq_timeout(round_jiffies_up(next)); + mod_timer(&q->timeout, next); + } else { + queue_for_each_hw_ctx(q, hctx, i) + blk_mq_tag_idle(hctx); + } } /* @@ -1439,6 +1454,56 @@ static void blk_mq_map_swqueue(struct request_queue *q) } } +static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set) +{ + struct blk_mq_hw_ctx *hctx; + struct request_queue *q; + bool shared; + int i; + + if (set->tag_list.next == set->tag_list.prev) + shared = false; + else + shared = true; + + list_for_each_entry(q, &set->tag_list, tag_set_list) { + blk_mq_freeze_queue(q); + + queue_for_each_hw_ctx(q, hctx, i) { + if (shared) + hctx->flags |= BLK_MQ_F_TAG_SHARED; + else + hctx->flags &= ~BLK_MQ_F_TAG_SHARED; + } + blk_mq_unfreeze_queue(q); + } +} + +static void blk_mq_del_queue_tag_set(struct request_queue *q) +{ + struct blk_mq_tag_set *set = q->tag_set; + + blk_mq_freeze_queue(q); + + mutex_lock(&set->tag_list_lock); + list_del_init(&q->tag_set_list); + blk_mq_update_tag_set_depth(set); + mutex_unlock(&set->tag_list_lock); + + blk_mq_unfreeze_queue(q); +} + +static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set, + struct request_queue *q) +{ + q->tag_set = set; + + mutex_lock(&set->tag_list_lock); + list_add_tail(&q->tag_set_list, &set->tag_list); + blk_mq_update_tag_set_depth(set); + mutex_unlock(&set->tag_list_lock); +} + struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) { struct blk_mq_hw_ctx **hctxs; @@ -1464,6 +1529,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) if (!zalloc_cpumask_var(&hctxs[i]->cpumask, GFP_KERNEL)) goto err_hctxs; + atomic_set(&hctxs[i]->nr_active, 0); hctxs[i]->numa_node = NUMA_NO_NODE; hctxs[i]->queue_num = i; } @@ -1516,6 +1582,8 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) list_add_tail(&q->all_q_node, &all_q_list); mutex_unlock(&all_q_mutex); + blk_mq_add_queue_tag_set(set, q); + return q; err_flush_rq: @@ -1543,6 +1611,8 @@ void blk_mq_free_queue(struct request_queue *q) struct blk_mq_hw_ctx *hctx; int i; + blk_mq_del_queue_tag_set(q); + queue_for_each_hw_ctx(q, hctx, i) { kfree(hctx->ctx_map); kfree(hctx->ctxs); @@ -1635,6 +1705,9 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) goto out_unwind; } + mutex_init(&set->tag_list_lock); + INIT_LIST_HEAD(&set->tag_list); + return 0; out_unwind: diff --git a/block/blk-timeout.c b/block/blk-timeout.c index 448745683d28..43e8b515806f 100644 --- a/block/blk-timeout.c +++ b/block/blk-timeout.c @@ -166,6 +166,17 @@ void blk_abort_request(struct request *req) } EXPORT_SYMBOL_GPL(blk_abort_request); +unsigned long blk_rq_timeout(unsigned long timeout) +{ + unsigned long maxt; + + maxt = round_jiffies_up(jiffies + BLK_MAX_TIMEOUT); + if (time_after(timeout, maxt)) + timeout = maxt; + + return timeout; +} + /** * blk_add_timer - Start timeout timer for a single request * @req: request that is 
about to start running. @@ -200,7 +211,7 @@ void blk_add_timer(struct request *req) * than an existing one, modify the timer. Round up to next nearest * second. */ - expiry = round_jiffies_up(req->deadline); + expiry = blk_rq_timeout(round_jiffies_up(req->deadline)); if (!timer_pending(&q->timeout) || time_before(expiry, q->timeout.expires)) { diff --git a/block/blk.h b/block/blk.h index 79be2cbce7fd..95cab70000e3 100644 --- a/block/blk.h +++ b/block/blk.h @@ -9,6 +9,9 @@ /* Number of requests a "batching" process may submit */ #define BLK_BATCH_REQ 32 +/* Max future timer expiry for timeouts */ +#define BLK_MAX_TIMEOUT (5 * HZ) + extern struct kmem_cache *blk_requestq_cachep; extern struct kmem_cache *request_cachep; extern struct kobj_type blk_queue_ktype; @@ -37,6 +40,7 @@ bool __blk_end_bidi_request(struct request *rq, int error, void blk_rq_timed_out_timer(unsigned long data); void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout, unsigned int *next_set); +unsigned long blk_rq_timeout(unsigned long timeout); void blk_add_timer(struct request *req); void blk_delete_timer(struct request *); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index f83d15f6e1c1..379f88d5c44d 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -48,6 +48,8 @@ struct blk_mq_hw_ctx { unsigned int numa_node; unsigned int cmd_size; /* per-request extra data */ + atomic_t nr_active; + struct blk_mq_cpu_notifier cpu_notifier; struct kobject kobj; }; @@ -64,6 +66,9 @@ struct blk_mq_tag_set { void *driver_data; struct blk_mq_tags **tags; + + struct mutex tag_list_lock; + struct list_head tag_list; }; typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *); @@ -126,8 +131,10 @@ enum { BLK_MQ_F_SHOULD_MERGE = 1 << 0, BLK_MQ_F_SHOULD_SORT = 1 << 1, + BLK_MQ_F_TAG_SHARED = 1 << 2, BLK_MQ_S_STOPPED = 0, + BLK_MQ_S_TAG_ACTIVE = 1, BLK_MQ_MAX_DEPTH = 2048, diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index aa0eaa2d0bd8..d8e4cea23a25 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -190,6 +190,7 @@ enum rq_flag_bits { __REQ_PM, /* runtime pm request */ __REQ_END, /* last of chain of requests */ __REQ_HASHED, /* on IO scheduler merge hash */ + __REQ_MQ_INFLIGHT, /* track inflight for MQ */ __REQ_NR_BITS, /* stops here */ }; @@ -243,5 +244,6 @@ enum rq_flag_bits { #define REQ_PM (1ULL << __REQ_PM) #define REQ_END (1ULL << __REQ_END) #define REQ_HASHED (1ULL << __REQ_HASHED) +#define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT) #endif /* __LINUX_BLK_TYPES_H */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 94b27210641b..6bc011a09e82 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -481,6 +481,9 @@ struct request_queue { wait_queue_head_t mq_freeze_wq; struct percpu_counter mq_usage_counter; struct list_head all_q_node; + + struct blk_mq_tag_set *tag_set; + struct list_head tag_set_list; }; #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ -- cgit v1.2.3-59-g8ed1b From f9c78b2be2cac2a7a397d489275e7d9f9ae785f2 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 19 May 2014 08:16:41 -0600 Subject: block: move bio.c and bio-integrity.c from fs/ to block/ They really belong in block/, especially now since it's not in drivers/block/ anymore. Additionally, the get_maintainer script gets it wrong when in fs/. 
Suggested-by: Christoph Hellwig Acked-by: Al Viro Signed-off-by: Jens Axboe --- block/Makefile | 3 +- block/bio-integrity.c | 657 ++++++++++++++++ block/bio.c | 2038 +++++++++++++++++++++++++++++++++++++++++++++++++ fs/Makefile | 3 +- fs/bio-integrity.c | 657 ---------------- fs/bio.c | 2038 ------------------------------------------------- 6 files changed, 2698 insertions(+), 2698 deletions(-) create mode 100644 block/bio-integrity.c create mode 100644 block/bio.c delete mode 100644 fs/bio-integrity.c delete mode 100644 fs/bio.c (limited to 'block') diff --git a/block/Makefile b/block/Makefile index 20645e88fb57..506a0c570be2 100644 --- a/block/Makefile +++ b/block/Makefile @@ -2,7 +2,7 @@ # Makefile for the kernel block layer # -obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \ +obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \ blk-flush.o blk-settings.o blk-ioc.o blk-map.o \ blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \ blk-iopoll.o blk-lib.o blk-mq.o blk-mq-tag.o \ @@ -20,3 +20,4 @@ obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o obj-$(CONFIG_BLK_DEV_INTEGRITY) += blk-integrity.o obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o +obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o diff --git a/block/bio-integrity.c b/block/bio-integrity.c new file mode 100644 index 000000000000..9e241063a616 --- /dev/null +++ b/block/bio-integrity.c @@ -0,0 +1,657 @@ +/* + * bio-integrity.c - bio data integrity extensions + * + * Copyright (C) 2007, 2008, 2009 Oracle Corporation + * Written by: Martin K. Petersen + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, + * USA. + * + */ + +#include +#include +#include +#include +#include +#include + +#define BIP_INLINE_VECS 4 + +static struct kmem_cache *bip_slab; +static struct workqueue_struct *kintegrityd_wq; + +/** + * bio_integrity_alloc - Allocate integrity payload and attach it to bio + * @bio: bio to attach integrity metadata to + * @gfp_mask: Memory allocation mask + * @nr_vecs: Number of integrity metadata scatter-gather elements + * + * Description: This function prepares a bio for attaching integrity + * metadata. nr_vecs specifies the maximum number of pages containing + * integrity metadata that can be attached. 
+ */ +struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, + gfp_t gfp_mask, + unsigned int nr_vecs) +{ + struct bio_integrity_payload *bip; + struct bio_set *bs = bio->bi_pool; + unsigned long idx = BIO_POOL_NONE; + unsigned inline_vecs; + + if (!bs) { + bip = kmalloc(sizeof(struct bio_integrity_payload) + + sizeof(struct bio_vec) * nr_vecs, gfp_mask); + inline_vecs = nr_vecs; + } else { + bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask); + inline_vecs = BIP_INLINE_VECS; + } + + if (unlikely(!bip)) + return NULL; + + memset(bip, 0, sizeof(*bip)); + + if (nr_vecs > inline_vecs) { + bip->bip_vec = bvec_alloc(gfp_mask, nr_vecs, &idx, + bs->bvec_integrity_pool); + if (!bip->bip_vec) + goto err; + } else { + bip->bip_vec = bip->bip_inline_vecs; + } + + bip->bip_slab = idx; + bip->bip_bio = bio; + bio->bi_integrity = bip; + + return bip; +err: + mempool_free(bip, bs->bio_integrity_pool); + return NULL; +} +EXPORT_SYMBOL(bio_integrity_alloc); + +/** + * bio_integrity_free - Free bio integrity payload + * @bio: bio containing bip to be freed + * + * Description: Used to free the integrity portion of a bio. Usually + * called from bio_free(). + */ +void bio_integrity_free(struct bio *bio) +{ + struct bio_integrity_payload *bip = bio->bi_integrity; + struct bio_set *bs = bio->bi_pool; + + if (bip->bip_owns_buf) + kfree(bip->bip_buf); + + if (bs) { + if (bip->bip_slab != BIO_POOL_NONE) + bvec_free(bs->bvec_integrity_pool, bip->bip_vec, + bip->bip_slab); + + mempool_free(bip, bs->bio_integrity_pool); + } else { + kfree(bip); + } + + bio->bi_integrity = NULL; +} +EXPORT_SYMBOL(bio_integrity_free); + +static inline unsigned int bip_integrity_vecs(struct bio_integrity_payload *bip) +{ + if (bip->bip_slab == BIO_POOL_NONE) + return BIP_INLINE_VECS; + + return bvec_nr_vecs(bip->bip_slab); +} + +/** + * bio_integrity_add_page - Attach integrity metadata + * @bio: bio to update + * @page: page containing integrity metadata + * @len: number of bytes of integrity metadata in page + * @offset: start offset within page + * + * Description: Attach a page containing integrity metadata to bio. + */ +int bio_integrity_add_page(struct bio *bio, struct page *page, + unsigned int len, unsigned int offset) +{ + struct bio_integrity_payload *bip = bio->bi_integrity; + struct bio_vec *iv; + + if (bip->bip_vcnt >= bip_integrity_vecs(bip)) { + printk(KERN_ERR "%s: bip_vec full\n", __func__); + return 0; + } + + iv = bip->bip_vec + bip->bip_vcnt; + + iv->bv_page = page; + iv->bv_len = len; + iv->bv_offset = offset; + bip->bip_vcnt++; + + return len; +} +EXPORT_SYMBOL(bio_integrity_add_page); + +static int bdev_integrity_enabled(struct block_device *bdev, int rw) +{ + struct blk_integrity *bi = bdev_get_integrity(bdev); + + if (bi == NULL) + return 0; + + if (rw == READ && bi->verify_fn != NULL && + (bi->flags & INTEGRITY_FLAG_READ)) + return 1; + + if (rw == WRITE && bi->generate_fn != NULL && + (bi->flags & INTEGRITY_FLAG_WRITE)) + return 1; + + return 0; +} + +/** + * bio_integrity_enabled - Check whether integrity can be passed + * @bio: bio to check + * + * Description: Determines whether bio_integrity_prep() can be called + * on this bio or not. bio data direction and target device must be + * set prior to calling. The functions honors the write_generate and + * read_verify flags in sysfs. + */ +int bio_integrity_enabled(struct bio *bio) +{ + if (!bio_is_rw(bio)) + return 0; + + /* Already protected? 
*/ + if (bio_integrity(bio)) + return 0; + + return bdev_integrity_enabled(bio->bi_bdev, bio_data_dir(bio)); +} +EXPORT_SYMBOL(bio_integrity_enabled); + +/** + * bio_integrity_hw_sectors - Convert 512b sectors to hardware ditto + * @bi: blk_integrity profile for device + * @sectors: Number of 512 sectors to convert + * + * Description: The block layer calculates everything in 512 byte + * sectors but integrity metadata is done in terms of the hardware + * sector size of the storage device. Convert the block layer sectors + * to physical sectors. + */ +static inline unsigned int bio_integrity_hw_sectors(struct blk_integrity *bi, + unsigned int sectors) +{ + /* At this point there are only 512b or 4096b DIF/EPP devices */ + if (bi->sector_size == 4096) + return sectors >>= 3; + + return sectors; +} + +static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi, + unsigned int sectors) +{ + return bio_integrity_hw_sectors(bi, sectors) * bi->tuple_size; +} + +/** + * bio_integrity_tag_size - Retrieve integrity tag space + * @bio: bio to inspect + * + * Description: Returns the maximum number of tag bytes that can be + * attached to this bio. Filesystems can use this to determine how + * much metadata to attach to an I/O. + */ +unsigned int bio_integrity_tag_size(struct bio *bio) +{ + struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); + + BUG_ON(bio->bi_iter.bi_size == 0); + + return bi->tag_size * (bio->bi_iter.bi_size / bi->sector_size); +} +EXPORT_SYMBOL(bio_integrity_tag_size); + +static int bio_integrity_tag(struct bio *bio, void *tag_buf, unsigned int len, + int set) +{ + struct bio_integrity_payload *bip = bio->bi_integrity; + struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); + unsigned int nr_sectors; + + BUG_ON(bip->bip_buf == NULL); + + if (bi->tag_size == 0) + return -1; + + nr_sectors = bio_integrity_hw_sectors(bi, + DIV_ROUND_UP(len, bi->tag_size)); + + if (nr_sectors * bi->tuple_size > bip->bip_iter.bi_size) { + printk(KERN_ERR "%s: tag too big for bio: %u > %u\n", __func__, + nr_sectors * bi->tuple_size, bip->bip_iter.bi_size); + return -1; + } + + if (set) + bi->set_tag_fn(bip->bip_buf, tag_buf, nr_sectors); + else + bi->get_tag_fn(bip->bip_buf, tag_buf, nr_sectors); + + return 0; +} + +/** + * bio_integrity_set_tag - Attach a tag buffer to a bio + * @bio: bio to attach buffer to + * @tag_buf: Pointer to a buffer containing tag data + * @len: Length of the included buffer + * + * Description: Use this function to tag a bio by leveraging the extra + * space provided by devices formatted with integrity protection. The + * size of the integrity buffer must be <= to the size reported by + * bio_integrity_tag_size(). + */ +int bio_integrity_set_tag(struct bio *bio, void *tag_buf, unsigned int len) +{ + BUG_ON(bio_data_dir(bio) != WRITE); + + return bio_integrity_tag(bio, tag_buf, len, 1); +} +EXPORT_SYMBOL(bio_integrity_set_tag); + +/** + * bio_integrity_get_tag - Retrieve a tag buffer from a bio + * @bio: bio to retrieve buffer from + * @tag_buf: Pointer to a buffer for the tag data + * @len: Length of the target buffer + * + * Description: Use this function to retrieve the tag buffer from a + * completed I/O. The size of the integrity buffer must be <= to the + * size reported by bio_integrity_tag_size(). 
+ */ +int bio_integrity_get_tag(struct bio *bio, void *tag_buf, unsigned int len) +{ + BUG_ON(bio_data_dir(bio) != READ); + + return bio_integrity_tag(bio, tag_buf, len, 0); +} +EXPORT_SYMBOL(bio_integrity_get_tag); + +/** + * bio_integrity_generate_verify - Generate/verify integrity metadata for a bio + * @bio: bio to generate/verify integrity metadata for + * @operate: operate number, 1 for generate, 0 for verify + */ +static int bio_integrity_generate_verify(struct bio *bio, int operate) +{ + struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); + struct blk_integrity_exchg bix; + struct bio_vec *bv; + sector_t sector; + unsigned int sectors, ret = 0, i; + void *prot_buf = bio->bi_integrity->bip_buf; + + if (operate) + sector = bio->bi_iter.bi_sector; + else + sector = bio->bi_integrity->bip_iter.bi_sector; + + bix.disk_name = bio->bi_bdev->bd_disk->disk_name; + bix.sector_size = bi->sector_size; + + bio_for_each_segment_all(bv, bio, i) { + void *kaddr = kmap_atomic(bv->bv_page); + bix.data_buf = kaddr + bv->bv_offset; + bix.data_size = bv->bv_len; + bix.prot_buf = prot_buf; + bix.sector = sector; + + if (operate) + bi->generate_fn(&bix); + else { + ret = bi->verify_fn(&bix); + if (ret) { + kunmap_atomic(kaddr); + return ret; + } + } + + sectors = bv->bv_len / bi->sector_size; + sector += sectors; + prot_buf += sectors * bi->tuple_size; + + kunmap_atomic(kaddr); + } + return ret; +} + +/** + * bio_integrity_generate - Generate integrity metadata for a bio + * @bio: bio to generate integrity metadata for + * + * Description: Generates integrity metadata for a bio by calling the + * block device's generation callback function. The bio must have a + * bip attached with enough room to accommodate the generated + * integrity metadata. + */ +static void bio_integrity_generate(struct bio *bio) +{ + bio_integrity_generate_verify(bio, 1); +} + +static inline unsigned short blk_integrity_tuple_size(struct blk_integrity *bi) +{ + if (bi) + return bi->tuple_size; + + return 0; +} + +/** + * bio_integrity_prep - Prepare bio for integrity I/O + * @bio: bio to prepare + * + * Description: Allocates a buffer for integrity metadata, maps the + * pages and attaches them to a bio. The bio must have data + * direction, target device and start sector set priot to calling. In + * the WRITE case, integrity metadata will be generated using the + * block device's integrity function. In the READ case, the buffer + * will be prepared for DMA and a suitable end_io handler set up. 
+ */ +int bio_integrity_prep(struct bio *bio) +{ + struct bio_integrity_payload *bip; + struct blk_integrity *bi; + struct request_queue *q; + void *buf; + unsigned long start, end; + unsigned int len, nr_pages; + unsigned int bytes, offset, i; + unsigned int sectors; + + bi = bdev_get_integrity(bio->bi_bdev); + q = bdev_get_queue(bio->bi_bdev); + BUG_ON(bi == NULL); + BUG_ON(bio_integrity(bio)); + + sectors = bio_integrity_hw_sectors(bi, bio_sectors(bio)); + + /* Allocate kernel buffer for protection data */ + len = sectors * blk_integrity_tuple_size(bi); + buf = kmalloc(len, GFP_NOIO | q->bounce_gfp); + if (unlikely(buf == NULL)) { + printk(KERN_ERR "could not allocate integrity buffer\n"); + return -ENOMEM; + } + + end = (((unsigned long) buf) + len + PAGE_SIZE - 1) >> PAGE_SHIFT; + start = ((unsigned long) buf) >> PAGE_SHIFT; + nr_pages = end - start; + + /* Allocate bio integrity payload and integrity vectors */ + bip = bio_integrity_alloc(bio, GFP_NOIO, nr_pages); + if (unlikely(bip == NULL)) { + printk(KERN_ERR "could not allocate data integrity bioset\n"); + kfree(buf); + return -EIO; + } + + bip->bip_owns_buf = 1; + bip->bip_buf = buf; + bip->bip_iter.bi_size = len; + bip->bip_iter.bi_sector = bio->bi_iter.bi_sector; + + /* Map it */ + offset = offset_in_page(buf); + for (i = 0 ; i < nr_pages ; i++) { + int ret; + bytes = PAGE_SIZE - offset; + + if (len <= 0) + break; + + if (bytes > len) + bytes = len; + + ret = bio_integrity_add_page(bio, virt_to_page(buf), + bytes, offset); + + if (ret == 0) + return 0; + + if (ret < bytes) + break; + + buf += bytes; + len -= bytes; + offset = 0; + } + + /* Install custom I/O completion handler if read verify is enabled */ + if (bio_data_dir(bio) == READ) { + bip->bip_end_io = bio->bi_end_io; + bio->bi_end_io = bio_integrity_endio; + } + + /* Auto-generate integrity metadata if this is a write */ + if (bio_data_dir(bio) == WRITE) + bio_integrity_generate(bio); + + return 0; +} +EXPORT_SYMBOL(bio_integrity_prep); + +/** + * bio_integrity_verify - Verify integrity metadata for a bio + * @bio: bio to verify + * + * Description: This function is called to verify the integrity of a + * bio. The data in the bio io_vec is compared to the integrity + * metadata returned by the HBA. + */ +static int bio_integrity_verify(struct bio *bio) +{ + return bio_integrity_generate_verify(bio, 0); +} + +/** + * bio_integrity_verify_fn - Integrity I/O completion worker + * @work: Work struct stored in bio to be verified + * + * Description: This workqueue function is called to complete a READ + * request. The function verifies the transferred integrity metadata + * and then calls the original bio end_io function. + */ +static void bio_integrity_verify_fn(struct work_struct *work) +{ + struct bio_integrity_payload *bip = + container_of(work, struct bio_integrity_payload, bip_work); + struct bio *bio = bip->bip_bio; + int error; + + error = bio_integrity_verify(bio); + + /* Restore original bio completion handler */ + bio->bi_end_io = bip->bip_end_io; + bio_endio_nodec(bio, error); +} + +/** + * bio_integrity_endio - Integrity I/O completion function + * @bio: Protected bio + * @error: Pointer to errno + * + * Description: Completion for integrity I/O + * + * Normally I/O completion is done in interrupt context. However, + * verifying I/O integrity is a time-consuming task which must be run + * in process context. This function postpones completion + * accordingly. 
+ */ +void bio_integrity_endio(struct bio *bio, int error) +{ + struct bio_integrity_payload *bip = bio->bi_integrity; + + BUG_ON(bip->bip_bio != bio); + + /* In case of an I/O error there is no point in verifying the + * integrity metadata. Restore original bio end_io handler + * and run it. + */ + if (error) { + bio->bi_end_io = bip->bip_end_io; + bio_endio(bio, error); + + return; + } + + INIT_WORK(&bip->bip_work, bio_integrity_verify_fn); + queue_work(kintegrityd_wq, &bip->bip_work); +} +EXPORT_SYMBOL(bio_integrity_endio); + +/** + * bio_integrity_advance - Advance integrity vector + * @bio: bio whose integrity vector to update + * @bytes_done: number of data bytes that have been completed + * + * Description: This function calculates how many integrity bytes the + * number of completed data bytes correspond to and advances the + * integrity vector accordingly. + */ +void bio_integrity_advance(struct bio *bio, unsigned int bytes_done) +{ + struct bio_integrity_payload *bip = bio->bi_integrity; + struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); + unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9); + + bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes); +} +EXPORT_SYMBOL(bio_integrity_advance); + +/** + * bio_integrity_trim - Trim integrity vector + * @bio: bio whose integrity vector to update + * @offset: offset to first data sector + * @sectors: number of data sectors + * + * Description: Used to trim the integrity vector in a cloned bio. + * The ivec will be advanced corresponding to 'offset' data sectors + * and the length will be truncated corresponding to 'len' data + * sectors. + */ +void bio_integrity_trim(struct bio *bio, unsigned int offset, + unsigned int sectors) +{ + struct bio_integrity_payload *bip = bio->bi_integrity; + struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); + + bio_integrity_advance(bio, offset << 9); + bip->bip_iter.bi_size = bio_integrity_bytes(bi, sectors); +} +EXPORT_SYMBOL(bio_integrity_trim); + +/** + * bio_integrity_clone - Callback for cloning bios with integrity metadata + * @bio: New bio + * @bio_src: Original bio + * @gfp_mask: Memory allocation mask + * + * Description: Called to allocate a bip when cloning a bio + */ +int bio_integrity_clone(struct bio *bio, struct bio *bio_src, + gfp_t gfp_mask) +{ + struct bio_integrity_payload *bip_src = bio_src->bi_integrity; + struct bio_integrity_payload *bip; + + BUG_ON(bip_src == NULL); + + bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt); + + if (bip == NULL) + return -EIO; + + memcpy(bip->bip_vec, bip_src->bip_vec, + bip_src->bip_vcnt * sizeof(struct bio_vec)); + + bip->bip_vcnt = bip_src->bip_vcnt; + bip->bip_iter = bip_src->bip_iter; + + return 0; +} +EXPORT_SYMBOL(bio_integrity_clone); + +int bioset_integrity_create(struct bio_set *bs, int pool_size) +{ + if (bs->bio_integrity_pool) + return 0; + + bs->bio_integrity_pool = mempool_create_slab_pool(pool_size, bip_slab); + if (!bs->bio_integrity_pool) + return -1; + + bs->bvec_integrity_pool = biovec_create_pool(pool_size); + if (!bs->bvec_integrity_pool) { + mempool_destroy(bs->bio_integrity_pool); + return -1; + } + + return 0; +} +EXPORT_SYMBOL(bioset_integrity_create); + +void bioset_integrity_free(struct bio_set *bs) +{ + if (bs->bio_integrity_pool) + mempool_destroy(bs->bio_integrity_pool); + + if (bs->bvec_integrity_pool) + mempool_destroy(bs->bvec_integrity_pool); +} +EXPORT_SYMBOL(bioset_integrity_free); + +void __init bio_integrity_init(void) +{ + /* + * kintegrityd won't block much but may burn a 
lot of CPU cycles. + * Make it highpri CPU intensive wq with max concurrency of 1. + */ + kintegrityd_wq = alloc_workqueue("kintegrityd", WQ_MEM_RECLAIM | + WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1); + if (!kintegrityd_wq) + panic("Failed to create kintegrityd\n"); + + bip_slab = kmem_cache_create("bio_integrity_payload", + sizeof(struct bio_integrity_payload) + + sizeof(struct bio_vec) * BIP_INLINE_VECS, + 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); + if (!bip_slab) + panic("Failed to create slab\n"); +} diff --git a/block/bio.c b/block/bio.c new file mode 100644 index 000000000000..96d28eee8a1e --- /dev/null +++ b/block/bio.c @@ -0,0 +1,2038 @@ +/* + * Copyright (C) 2001 Jens Axboe + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public Licens + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111- + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* for struct sg_iovec */ + +#include + +/* + * Test patch to inline a certain number of bi_io_vec's inside the bio + * itself, to shrink a bio data allocation from two mempool calls to one + */ +#define BIO_INLINE_VECS 4 + +/* + * if you change this list, also change bvec_alloc or things will + * break badly! cannot be bigger than what you can fit into an + * unsigned short + */ +#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) } +static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = { + BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES), +}; +#undef BV + +/* + * fs_bio_set is the bio_set containing bio and iovec memory pools used by + * IO code that does not need private memory pools. 
+ */ +struct bio_set *fs_bio_set; +EXPORT_SYMBOL(fs_bio_set); + +/* + * Our slab pool management + */ +struct bio_slab { + struct kmem_cache *slab; + unsigned int slab_ref; + unsigned int slab_size; + char name[8]; +}; +static DEFINE_MUTEX(bio_slab_lock); +static struct bio_slab *bio_slabs; +static unsigned int bio_slab_nr, bio_slab_max; + +static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size) +{ + unsigned int sz = sizeof(struct bio) + extra_size; + struct kmem_cache *slab = NULL; + struct bio_slab *bslab, *new_bio_slabs; + unsigned int new_bio_slab_max; + unsigned int i, entry = -1; + + mutex_lock(&bio_slab_lock); + + i = 0; + while (i < bio_slab_nr) { + bslab = &bio_slabs[i]; + + if (!bslab->slab && entry == -1) + entry = i; + else if (bslab->slab_size == sz) { + slab = bslab->slab; + bslab->slab_ref++; + break; + } + i++; + } + + if (slab) + goto out_unlock; + + if (bio_slab_nr == bio_slab_max && entry == -1) { + new_bio_slab_max = bio_slab_max << 1; + new_bio_slabs = krealloc(bio_slabs, + new_bio_slab_max * sizeof(struct bio_slab), + GFP_KERNEL); + if (!new_bio_slabs) + goto out_unlock; + bio_slab_max = new_bio_slab_max; + bio_slabs = new_bio_slabs; + } + if (entry == -1) + entry = bio_slab_nr++; + + bslab = &bio_slabs[entry]; + + snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry); + slab = kmem_cache_create(bslab->name, sz, 0, SLAB_HWCACHE_ALIGN, NULL); + if (!slab) + goto out_unlock; + + bslab->slab = slab; + bslab->slab_ref = 1; + bslab->slab_size = sz; +out_unlock: + mutex_unlock(&bio_slab_lock); + return slab; +} + +static void bio_put_slab(struct bio_set *bs) +{ + struct bio_slab *bslab = NULL; + unsigned int i; + + mutex_lock(&bio_slab_lock); + + for (i = 0; i < bio_slab_nr; i++) { + if (bs->bio_slab == bio_slabs[i].slab) { + bslab = &bio_slabs[i]; + break; + } + } + + if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n")) + goto out; + + WARN_ON(!bslab->slab_ref); + + if (--bslab->slab_ref) + goto out; + + kmem_cache_destroy(bslab->slab); + bslab->slab = NULL; + +out: + mutex_unlock(&bio_slab_lock); +} + +unsigned int bvec_nr_vecs(unsigned short idx) +{ + return bvec_slabs[idx].nr_vecs; +} + +void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx) +{ + BIO_BUG_ON(idx >= BIOVEC_NR_POOLS); + + if (idx == BIOVEC_MAX_IDX) + mempool_free(bv, pool); + else { + struct biovec_slab *bvs = bvec_slabs + idx; + + kmem_cache_free(bvs->slab, bv); + } +} + +struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx, + mempool_t *pool) +{ + struct bio_vec *bvl; + + /* + * see comment near bvec_array define! + */ + switch (nr) { + case 1: + *idx = 0; + break; + case 2 ... 4: + *idx = 1; + break; + case 5 ... 16: + *idx = 2; + break; + case 17 ... 64: + *idx = 3; + break; + case 65 ... 128: + *idx = 4; + break; + case 129 ... BIO_MAX_PAGES: + *idx = 5; + break; + default: + return NULL; + } + + /* + * idx now points to the pool we want to allocate from. only the + * 1-vec entry pool is mempool backed. + */ + if (*idx == BIOVEC_MAX_IDX) { +fallback: + bvl = mempool_alloc(pool, gfp_mask); + } else { + struct biovec_slab *bvs = bvec_slabs + *idx; + gfp_t __gfp_mask = gfp_mask & ~(__GFP_WAIT | __GFP_IO); + + /* + * Make this allocation restricted and don't dump info on + * allocation failures, since we'll fallback to the mempool + * in case of failure. + */ + __gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN; + + /* + * Try a slab allocation. 
If this fails and __GFP_WAIT + * is set, retry with the 1-entry mempool + */ + bvl = kmem_cache_alloc(bvs->slab, __gfp_mask); + if (unlikely(!bvl && (gfp_mask & __GFP_WAIT))) { + *idx = BIOVEC_MAX_IDX; + goto fallback; + } + } + + return bvl; +} + +static void __bio_free(struct bio *bio) +{ + bio_disassociate_task(bio); + + if (bio_integrity(bio)) + bio_integrity_free(bio); +} + +static void bio_free(struct bio *bio) +{ + struct bio_set *bs = bio->bi_pool; + void *p; + + __bio_free(bio); + + if (bs) { + if (bio_flagged(bio, BIO_OWNS_VEC)) + bvec_free(bs->bvec_pool, bio->bi_io_vec, BIO_POOL_IDX(bio)); + + /* + * If we have front padding, adjust the bio pointer before freeing + */ + p = bio; + p -= bs->front_pad; + + mempool_free(p, bs->bio_pool); + } else { + /* Bio was allocated by bio_kmalloc() */ + kfree(bio); + } +} + +void bio_init(struct bio *bio) +{ + memset(bio, 0, sizeof(*bio)); + bio->bi_flags = 1 << BIO_UPTODATE; + atomic_set(&bio->bi_remaining, 1); + atomic_set(&bio->bi_cnt, 1); +} +EXPORT_SYMBOL(bio_init); + +/** + * bio_reset - reinitialize a bio + * @bio: bio to reset + * + * Description: + * After calling bio_reset(), @bio will be in the same state as a freshly + * allocated bio returned bio bio_alloc_bioset() - the only fields that are + * preserved are the ones that are initialized by bio_alloc_bioset(). See + * comment in struct bio. + */ +void bio_reset(struct bio *bio) +{ + unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS); + + __bio_free(bio); + + memset(bio, 0, BIO_RESET_BYTES); + bio->bi_flags = flags|(1 << BIO_UPTODATE); + atomic_set(&bio->bi_remaining, 1); +} +EXPORT_SYMBOL(bio_reset); + +static void bio_chain_endio(struct bio *bio, int error) +{ + bio_endio(bio->bi_private, error); + bio_put(bio); +} + +/** + * bio_chain - chain bio completions + * @bio: the target bio + * @parent: the @bio's parent bio + * + * The caller won't have a bi_end_io called when @bio completes - instead, + * @parent's bi_end_io won't be called until both @parent and @bio have + * completed; the chained bio will also be freed when it completes. + * + * The caller must not set bi_private or bi_end_io in @bio. + */ +void bio_chain(struct bio *bio, struct bio *parent) +{ + BUG_ON(bio->bi_private || bio->bi_end_io); + + bio->bi_private = parent; + bio->bi_end_io = bio_chain_endio; + atomic_inc(&parent->bi_remaining); +} +EXPORT_SYMBOL(bio_chain); + +static void bio_alloc_rescue(struct work_struct *work) +{ + struct bio_set *bs = container_of(work, struct bio_set, rescue_work); + struct bio *bio; + + while (1) { + spin_lock(&bs->rescue_lock); + bio = bio_list_pop(&bs->rescue_list); + spin_unlock(&bs->rescue_lock); + + if (!bio) + break; + + generic_make_request(bio); + } +} + +static void punt_bios_to_rescuer(struct bio_set *bs) +{ + struct bio_list punt, nopunt; + struct bio *bio; + + /* + * In order to guarantee forward progress we must punt only bios that + * were allocated from this bio_set; otherwise, if there was a bio on + * there for a stacking driver higher up in the stack, processing it + * could require allocating bios from this bio_set, and doing that from + * our own rescuer would be bad. + * + * Since bio lists are singly linked, pop them all instead of trying to + * remove from the middle of the list: + */ + + bio_list_init(&punt); + bio_list_init(&nopunt); + + while ((bio = bio_list_pop(current->bio_list))) + bio_list_add(bio->bi_pool == bs ? 
&punt : &nopunt, bio); + + *current->bio_list = nopunt; + + spin_lock(&bs->rescue_lock); + bio_list_merge(&bs->rescue_list, &punt); + spin_unlock(&bs->rescue_lock); + + queue_work(bs->rescue_workqueue, &bs->rescue_work); +} + +/** + * bio_alloc_bioset - allocate a bio for I/O + * @gfp_mask: the GFP_ mask given to the slab allocator + * @nr_iovecs: number of iovecs to pre-allocate + * @bs: the bio_set to allocate from. + * + * Description: + * If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is + * backed by the @bs's mempool. + * + * When @bs is not NULL, if %__GFP_WAIT is set then bio_alloc will always be + * able to allocate a bio. This is due to the mempool guarantees. To make this + * work, callers must never allocate more than 1 bio at a time from this pool. + * Callers that need to allocate more than 1 bio must always submit the + * previously allocated bio for IO before attempting to allocate a new one. + * Failure to do so can cause deadlocks under memory pressure. + * + * Note that when running under generic_make_request() (i.e. any block + * driver), bios are not submitted until after you return - see the code in + * generic_make_request() that converts recursion into iteration, to prevent + * stack overflows. + * + * This would normally mean allocating multiple bios under + * generic_make_request() would be susceptible to deadlocks, but we have + * deadlock avoidance code that resubmits any blocked bios from a rescuer + * thread. + * + * However, we do not guarantee forward progress for allocations from other + * mempools. Doing multiple allocations from the same mempool under + * generic_make_request() should be avoided - instead, use bio_set's front_pad + * for per bio allocations. + * + * RETURNS: + * Pointer to new bio on success, NULL on failure. + */ +struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs) +{ + gfp_t saved_gfp = gfp_mask; + unsigned front_pad; + unsigned inline_vecs; + unsigned long idx = BIO_POOL_NONE; + struct bio_vec *bvl = NULL; + struct bio *bio; + void *p; + + if (!bs) { + if (nr_iovecs > UIO_MAXIOV) + return NULL; + + p = kmalloc(sizeof(struct bio) + + nr_iovecs * sizeof(struct bio_vec), + gfp_mask); + front_pad = 0; + inline_vecs = nr_iovecs; + } else { + /* + * generic_make_request() converts recursion to iteration; this + * means if we're running beneath it, any bios we allocate and + * submit will not be submitted (and thus freed) until after we + * return. + * + * This exposes us to a potential deadlock if we allocate + * multiple bios from the same bio_set() while running + * underneath generic_make_request(). If we were to allocate + * multiple bios (say a stacking block driver that was splitting + * bios), we would deadlock if we exhausted the mempool's + * reserve. + * + * We solve this, and guarantee forward progress, with a rescuer + * workqueue per bio_set. If we go to allocate and there are + * bios on current->bio_list, we first try the allocation + * without __GFP_WAIT; if that fails, we punt those bios we + * would be blocking to the rescuer workqueue before we retry + * with the original gfp_flags. 
+ */ + + if (current->bio_list && !bio_list_empty(current->bio_list)) + gfp_mask &= ~__GFP_WAIT; + + p = mempool_alloc(bs->bio_pool, gfp_mask); + if (!p && gfp_mask != saved_gfp) { + punt_bios_to_rescuer(bs); + gfp_mask = saved_gfp; + p = mempool_alloc(bs->bio_pool, gfp_mask); + } + + front_pad = bs->front_pad; + inline_vecs = BIO_INLINE_VECS; + } + + if (unlikely(!p)) + return NULL; + + bio = p + front_pad; + bio_init(bio); + + if (nr_iovecs > inline_vecs) { + bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool); + if (!bvl && gfp_mask != saved_gfp) { + punt_bios_to_rescuer(bs); + gfp_mask = saved_gfp; + bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool); + } + + if (unlikely(!bvl)) + goto err_free; + + bio->bi_flags |= 1 << BIO_OWNS_VEC; + } else if (nr_iovecs) { + bvl = bio->bi_inline_vecs; + } + + bio->bi_pool = bs; + bio->bi_flags |= idx << BIO_POOL_OFFSET; + bio->bi_max_vecs = nr_iovecs; + bio->bi_io_vec = bvl; + return bio; + +err_free: + mempool_free(p, bs->bio_pool); + return NULL; +} +EXPORT_SYMBOL(bio_alloc_bioset); + +void zero_fill_bio(struct bio *bio) +{ + unsigned long flags; + struct bio_vec bv; + struct bvec_iter iter; + + bio_for_each_segment(bv, bio, iter) { + char *data = bvec_kmap_irq(&bv, &flags); + memset(data, 0, bv.bv_len); + flush_dcache_page(bv.bv_page); + bvec_kunmap_irq(data, &flags); + } +} +EXPORT_SYMBOL(zero_fill_bio); + +/** + * bio_put - release a reference to a bio + * @bio: bio to release reference to + * + * Description: + * Put a reference to a &struct bio, either one you have gotten with + * bio_alloc, bio_get or bio_clone. The last put of a bio will free it. + **/ +void bio_put(struct bio *bio) +{ + BIO_BUG_ON(!atomic_read(&bio->bi_cnt)); + + /* + * last put frees it + */ + if (atomic_dec_and_test(&bio->bi_cnt)) + bio_free(bio); +} +EXPORT_SYMBOL(bio_put); + +inline int bio_phys_segments(struct request_queue *q, struct bio *bio) +{ + if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) + blk_recount_segments(q, bio); + + return bio->bi_phys_segments; +} +EXPORT_SYMBOL(bio_phys_segments); + +/** + * __bio_clone_fast - clone a bio that shares the original bio's biovec + * @bio: destination bio + * @bio_src: bio to clone + * + * Clone a &bio. Caller will own the returned bio, but not + * the actual data it points to. Reference count of returned + * bio will be one. + * + * Caller must ensure that @bio_src is not freed before @bio. 
+ */ +void __bio_clone_fast(struct bio *bio, struct bio *bio_src) +{ + BUG_ON(bio->bi_pool && BIO_POOL_IDX(bio) != BIO_POOL_NONE); + + /* + * most users will be overriding ->bi_bdev with a new target, + * so we don't set nor calculate new physical/hw segment counts here + */ + bio->bi_bdev = bio_src->bi_bdev; + bio->bi_flags |= 1 << BIO_CLONED; + bio->bi_rw = bio_src->bi_rw; + bio->bi_iter = bio_src->bi_iter; + bio->bi_io_vec = bio_src->bi_io_vec; +} +EXPORT_SYMBOL(__bio_clone_fast); + +/** + * bio_clone_fast - clone a bio that shares the original bio's biovec + * @bio: bio to clone + * @gfp_mask: allocation priority + * @bs: bio_set to allocate from + * + * Like __bio_clone_fast, only also allocates the returned bio + */ +struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs) +{ + struct bio *b; + + b = bio_alloc_bioset(gfp_mask, 0, bs); + if (!b) + return NULL; + + __bio_clone_fast(b, bio); + + if (bio_integrity(bio)) { + int ret; + + ret = bio_integrity_clone(b, bio, gfp_mask); + + if (ret < 0) { + bio_put(b); + return NULL; + } + } + + return b; +} +EXPORT_SYMBOL(bio_clone_fast); + +/** + * bio_clone_bioset - clone a bio + * @bio_src: bio to clone + * @gfp_mask: allocation priority + * @bs: bio_set to allocate from + * + * Clone bio. Caller will own the returned bio, but not the actual data it + * points to. Reference count of returned bio will be one. + */ +struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask, + struct bio_set *bs) +{ + struct bvec_iter iter; + struct bio_vec bv; + struct bio *bio; + + /* + * Pre immutable biovecs, __bio_clone() used to just do a memcpy from + * bio_src->bi_io_vec to bio->bi_io_vec. + * + * We can't do that anymore, because: + * + * - The point of cloning the biovec is to produce a bio with a biovec + * the caller can modify: bi_idx and bi_bvec_done should be 0. + * + * - The original bio could've had more than BIO_MAX_PAGES biovecs; if + * we tried to clone the whole thing bio_alloc_bioset() would fail. + * But the clone should succeed as long as the number of biovecs we + * actually need to allocate is fewer than BIO_MAX_PAGES. + * + * - Lastly, bi_vcnt should not be looked at or relied upon by code + * that does not own the bio - reason being drivers don't use it for + * iterating over the biovec anymore, so expecting it to be kept up + * to date (i.e. for clones that share the parent biovec) is just + * asking for trouble and would force extra work on + * __bio_clone_fast() anyways. + */ + + bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs); + if (!bio) + return NULL; + + bio->bi_bdev = bio_src->bi_bdev; + bio->bi_rw = bio_src->bi_rw; + bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector; + bio->bi_iter.bi_size = bio_src->bi_iter.bi_size; + + if (bio->bi_rw & REQ_DISCARD) + goto integrity_clone; + + if (bio->bi_rw & REQ_WRITE_SAME) { + bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0]; + goto integrity_clone; + } + + bio_for_each_segment(bv, bio_src, iter) + bio->bi_io_vec[bio->bi_vcnt++] = bv; + +integrity_clone: + if (bio_integrity(bio_src)) { + int ret; + + ret = bio_integrity_clone(bio, bio_src, gfp_mask); + if (ret < 0) { + bio_put(bio); + return NULL; + } + } + + return bio; +} +EXPORT_SYMBOL(bio_clone_bioset); + +/** + * bio_get_nr_vecs - return approx number of vecs + * @bdev: I/O target + * + * Return the approximate number of pages we can send to this target. 
+ * There's no guarantee that you will be able to fit this number of pages + * into a bio, it does not account for dynamic restrictions that vary + * on offset. + */ +int bio_get_nr_vecs(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + int nr_pages; + + nr_pages = min_t(unsigned, + queue_max_segments(q), + queue_max_sectors(q) / (PAGE_SIZE >> 9) + 1); + + return min_t(unsigned, nr_pages, BIO_MAX_PAGES); + +} +EXPORT_SYMBOL(bio_get_nr_vecs); + +static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page + *page, unsigned int len, unsigned int offset, + unsigned int max_sectors) +{ + int retried_segments = 0; + struct bio_vec *bvec; + + /* + * cloned bio must not modify vec list + */ + if (unlikely(bio_flagged(bio, BIO_CLONED))) + return 0; + + if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors) + return 0; + + /* + * For filesystems with a blocksize smaller than the pagesize + * we will often be called with the same page as last time and + * a consecutive offset. Optimize this special case. + */ + if (bio->bi_vcnt > 0) { + struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1]; + + if (page == prev->bv_page && + offset == prev->bv_offset + prev->bv_len) { + unsigned int prev_bv_len = prev->bv_len; + prev->bv_len += len; + + if (q->merge_bvec_fn) { + struct bvec_merge_data bvm = { + /* prev_bvec is already charged in + bi_size, discharge it in order to + simulate merging updated prev_bvec + as new bvec. */ + .bi_bdev = bio->bi_bdev, + .bi_sector = bio->bi_iter.bi_sector, + .bi_size = bio->bi_iter.bi_size - + prev_bv_len, + .bi_rw = bio->bi_rw, + }; + + if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len) { + prev->bv_len -= len; + return 0; + } + } + + goto done; + } + } + + if (bio->bi_vcnt >= bio->bi_max_vecs) + return 0; + + /* + * we might lose a segment or two here, but rather that than + * make this too complex. + */ + + while (bio->bi_phys_segments >= queue_max_segments(q)) { + + if (retried_segments) + return 0; + + retried_segments = 1; + blk_recount_segments(q, bio); + } + + /* + * setup the new entry, we might clear it again later if we + * cannot add the page + */ + bvec = &bio->bi_io_vec[bio->bi_vcnt]; + bvec->bv_page = page; + bvec->bv_len = len; + bvec->bv_offset = offset; + + /* + * if queue has other restrictions (eg varying max sector size + * depending on offset), it can specify a merge_bvec_fn in the + * queue to get further control + */ + if (q->merge_bvec_fn) { + struct bvec_merge_data bvm = { + .bi_bdev = bio->bi_bdev, + .bi_sector = bio->bi_iter.bi_sector, + .bi_size = bio->bi_iter.bi_size, + .bi_rw = bio->bi_rw, + }; + + /* + * merge_bvec_fn() returns number of bytes it can accept + * at this offset + */ + if (q->merge_bvec_fn(q, &bvm, bvec) < bvec->bv_len) { + bvec->bv_page = NULL; + bvec->bv_len = 0; + bvec->bv_offset = 0; + return 0; + } + } + + /* If we may be able to merge these biovecs, force a recount */ + if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec))) + bio->bi_flags &= ~(1 << BIO_SEG_VALID); + + bio->bi_vcnt++; + bio->bi_phys_segments++; + done: + bio->bi_iter.bi_size += len; + return len; +} + +/** + * bio_add_pc_page - attempt to add page to bio + * @q: the target queue + * @bio: destination bio + * @page: page to add + * @len: vec entry length + * @offset: vec entry offset + * + * Attempt to add a page to the bio_vec maplist. This can fail for a + * number of reasons, such as the bio being full or target block device + * limitations. 
The target block device must allow bio's up to PAGE_SIZE, + * so it is always possible to add a single page to an empty bio. + * + * This should only be used by REQ_PC bios. + */ +int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page, + unsigned int len, unsigned int offset) +{ + return __bio_add_page(q, bio, page, len, offset, + queue_max_hw_sectors(q)); +} +EXPORT_SYMBOL(bio_add_pc_page); + +/** + * bio_add_page - attempt to add page to bio + * @bio: destination bio + * @page: page to add + * @len: vec entry length + * @offset: vec entry offset + * + * Attempt to add a page to the bio_vec maplist. This can fail for a + * number of reasons, such as the bio being full or target block device + * limitations. The target block device must allow bio's up to PAGE_SIZE, + * so it is always possible to add a single page to an empty bio. + */ +int bio_add_page(struct bio *bio, struct page *page, unsigned int len, + unsigned int offset) +{ + struct request_queue *q = bdev_get_queue(bio->bi_bdev); + return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q)); +} +EXPORT_SYMBOL(bio_add_page); + +struct submit_bio_ret { + struct completion event; + int error; +}; + +static void submit_bio_wait_endio(struct bio *bio, int error) +{ + struct submit_bio_ret *ret = bio->bi_private; + + ret->error = error; + complete(&ret->event); +} + +/** + * submit_bio_wait - submit a bio, and wait until it completes + * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead) + * @bio: The &struct bio which describes the I/O + * + * Simple wrapper around submit_bio(). Returns 0 on success, or the error from + * bio_endio() on failure. + */ +int submit_bio_wait(int rw, struct bio *bio) +{ + struct submit_bio_ret ret; + + rw |= REQ_SYNC; + init_completion(&ret.event); + bio->bi_private = &ret; + bio->bi_end_io = submit_bio_wait_endio; + submit_bio(rw, bio); + wait_for_completion(&ret.event); + + return ret.error; +} +EXPORT_SYMBOL(submit_bio_wait); + +/** + * bio_advance - increment/complete a bio by some number of bytes + * @bio: bio to advance + * @bytes: number of bytes to complete + * + * This updates bi_sector, bi_size and bi_idx; if the number of bytes to + * complete doesn't align with a bvec boundary, then bv_len and bv_offset will + * be updated on the last bvec as well. + * + * @bio will then represent the remaining, uncompleted portion of the io. + */ +void bio_advance(struct bio *bio, unsigned bytes) +{ + if (bio_integrity(bio)) + bio_integrity_advance(bio, bytes); + + bio_advance_iter(bio, &bio->bi_iter, bytes); +} +EXPORT_SYMBOL(bio_advance); + +/** + * bio_alloc_pages - allocates a single page for each bvec in a bio + * @bio: bio to allocate pages for + * @gfp_mask: flags for allocation + * + * Allocates pages up to @bio->bi_vcnt. + * + * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages are + * freed. + */ +int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask) +{ + int i; + struct bio_vec *bv; + + bio_for_each_segment_all(bv, bio, i) { + bv->bv_page = alloc_page(gfp_mask); + if (!bv->bv_page) { + while (--bv >= bio->bi_io_vec) + __free_page(bv->bv_page); + return -ENOMEM; + } + } + + return 0; +} +EXPORT_SYMBOL(bio_alloc_pages); + +/** + * bio_copy_data - copy contents of data buffers from one chain of bios to + * another + * @src: source bio list + * @dst: destination bio list + * + * If @src and @dst are single bios, bi_next must be NULL - otherwise, treats + * @src and @dst as linked lists of bios. 
+ * + * Stops when it reaches the end of either @src or @dst - that is, copies + * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios). + */ +void bio_copy_data(struct bio *dst, struct bio *src) +{ + struct bvec_iter src_iter, dst_iter; + struct bio_vec src_bv, dst_bv; + void *src_p, *dst_p; + unsigned bytes; + + src_iter = src->bi_iter; + dst_iter = dst->bi_iter; + + while (1) { + if (!src_iter.bi_size) { + src = src->bi_next; + if (!src) + break; + + src_iter = src->bi_iter; + } + + if (!dst_iter.bi_size) { + dst = dst->bi_next; + if (!dst) + break; + + dst_iter = dst->bi_iter; + } + + src_bv = bio_iter_iovec(src, src_iter); + dst_bv = bio_iter_iovec(dst, dst_iter); + + bytes = min(src_bv.bv_len, dst_bv.bv_len); + + src_p = kmap_atomic(src_bv.bv_page); + dst_p = kmap_atomic(dst_bv.bv_page); + + memcpy(dst_p + dst_bv.bv_offset, + src_p + src_bv.bv_offset, + bytes); + + kunmap_atomic(dst_p); + kunmap_atomic(src_p); + + bio_advance_iter(src, &src_iter, bytes); + bio_advance_iter(dst, &dst_iter, bytes); + } +} +EXPORT_SYMBOL(bio_copy_data); + +struct bio_map_data { + int nr_sgvecs; + int is_our_pages; + struct sg_iovec sgvecs[]; +}; + +static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio, + const struct sg_iovec *iov, int iov_count, + int is_our_pages) +{ + memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count); + bmd->nr_sgvecs = iov_count; + bmd->is_our_pages = is_our_pages; + bio->bi_private = bmd; +} + +static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count, + gfp_t gfp_mask) +{ + if (iov_count > UIO_MAXIOV) + return NULL; + + return kmalloc(sizeof(struct bio_map_data) + + sizeof(struct sg_iovec) * iov_count, gfp_mask); +} + +static int __bio_copy_iov(struct bio *bio, const struct sg_iovec *iov, int iov_count, + int to_user, int from_user, int do_free_page) +{ + int ret = 0, i; + struct bio_vec *bvec; + int iov_idx = 0; + unsigned int iov_off = 0; + + bio_for_each_segment_all(bvec, bio, i) { + char *bv_addr = page_address(bvec->bv_page); + unsigned int bv_len = bvec->bv_len; + + while (bv_len && iov_idx < iov_count) { + unsigned int bytes; + char __user *iov_addr; + + bytes = min_t(unsigned int, + iov[iov_idx].iov_len - iov_off, bv_len); + iov_addr = iov[iov_idx].iov_base + iov_off; + + if (!ret) { + if (to_user) + ret = copy_to_user(iov_addr, bv_addr, + bytes); + + if (from_user) + ret = copy_from_user(bv_addr, iov_addr, + bytes); + + if (ret) + ret = -EFAULT; + } + + bv_len -= bytes; + bv_addr += bytes; + iov_addr += bytes; + iov_off += bytes; + + if (iov[iov_idx].iov_len == iov_off) { + iov_idx++; + iov_off = 0; + } + } + + if (do_free_page) + __free_page(bvec->bv_page); + } + + return ret; +} + +/** + * bio_uncopy_user - finish previously mapped bio + * @bio: bio being terminated + * + * Free pages allocated from bio_copy_user() and write back data + * to user space in case of a read. + */ +int bio_uncopy_user(struct bio *bio) +{ + struct bio_map_data *bmd = bio->bi_private; + struct bio_vec *bvec; + int ret = 0, i; + + if (!bio_flagged(bio, BIO_NULL_MAPPED)) { + /* + * if we're in a workqueue, the request is orphaned, so + * don't copy into a random user address space, just free. 
+ */ + if (current->mm) + ret = __bio_copy_iov(bio, bmd->sgvecs, bmd->nr_sgvecs, + bio_data_dir(bio) == READ, + 0, bmd->is_our_pages); + else if (bmd->is_our_pages) + bio_for_each_segment_all(bvec, bio, i) + __free_page(bvec->bv_page); + } + kfree(bmd); + bio_put(bio); + return ret; +} +EXPORT_SYMBOL(bio_uncopy_user); + +/** + * bio_copy_user_iov - copy user data to bio + * @q: destination block queue + * @map_data: pointer to the rq_map_data holding pages (if necessary) + * @iov: the iovec. + * @iov_count: number of elements in the iovec + * @write_to_vm: bool indicating writing to pages or not + * @gfp_mask: memory allocation flags + * + * Prepares and returns a bio for indirect user io, bouncing data + * to/from kernel pages as necessary. Must be paired with + * call bio_uncopy_user() on io completion. + */ +struct bio *bio_copy_user_iov(struct request_queue *q, + struct rq_map_data *map_data, + const struct sg_iovec *iov, int iov_count, + int write_to_vm, gfp_t gfp_mask) +{ + struct bio_map_data *bmd; + struct bio_vec *bvec; + struct page *page; + struct bio *bio; + int i, ret; + int nr_pages = 0; + unsigned int len = 0; + unsigned int offset = map_data ? map_data->offset & ~PAGE_MASK : 0; + + for (i = 0; i < iov_count; i++) { + unsigned long uaddr; + unsigned long end; + unsigned long start; + + uaddr = (unsigned long)iov[i].iov_base; + end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT; + start = uaddr >> PAGE_SHIFT; + + /* + * Overflow, abort + */ + if (end < start) + return ERR_PTR(-EINVAL); + + nr_pages += end - start; + len += iov[i].iov_len; + } + + if (offset) + nr_pages++; + + bmd = bio_alloc_map_data(iov_count, gfp_mask); + if (!bmd) + return ERR_PTR(-ENOMEM); + + ret = -ENOMEM; + bio = bio_kmalloc(gfp_mask, nr_pages); + if (!bio) + goto out_bmd; + + if (!write_to_vm) + bio->bi_rw |= REQ_WRITE; + + ret = 0; + + if (map_data) { + nr_pages = 1 << map_data->page_order; + i = map_data->offset / PAGE_SIZE; + } + while (len) { + unsigned int bytes = PAGE_SIZE; + + bytes -= offset; + + if (bytes > len) + bytes = len; + + if (map_data) { + if (i == map_data->nr_entries * nr_pages) { + ret = -ENOMEM; + break; + } + + page = map_data->pages[i / nr_pages]; + page += (i % nr_pages); + + i++; + } else { + page = alloc_page(q->bounce_gfp | gfp_mask); + if (!page) { + ret = -ENOMEM; + break; + } + } + + if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) + break; + + len -= bytes; + offset = 0; + } + + if (ret) + goto cleanup; + + /* + * success + */ + if ((!write_to_vm && (!map_data || !map_data->null_mapped)) || + (map_data && map_data->from_user)) { + ret = __bio_copy_iov(bio, iov, iov_count, 0, 1, 0); + if (ret) + goto cleanup; + } + + bio_set_map_data(bmd, bio, iov, iov_count, map_data ? 0 : 1); + return bio; +cleanup: + if (!map_data) + bio_for_each_segment_all(bvec, bio, i) + __free_page(bvec->bv_page); + + bio_put(bio); +out_bmd: + kfree(bmd); + return ERR_PTR(ret); +} + +/** + * bio_copy_user - copy user data to bio + * @q: destination block queue + * @map_data: pointer to the rq_map_data holding pages (if necessary) + * @uaddr: start of user address + * @len: length in bytes + * @write_to_vm: bool indicating writing to pages or not + * @gfp_mask: memory allocation flags + * + * Prepares and returns a bio for indirect user io, bouncing data + * to/from kernel pages as necessary. Must be paired with + * call bio_uncopy_user() on io completion. 
+ */ +struct bio *bio_copy_user(struct request_queue *q, struct rq_map_data *map_data, + unsigned long uaddr, unsigned int len, + int write_to_vm, gfp_t gfp_mask) +{ + struct sg_iovec iov; + + iov.iov_base = (void __user *)uaddr; + iov.iov_len = len; + + return bio_copy_user_iov(q, map_data, &iov, 1, write_to_vm, gfp_mask); +} +EXPORT_SYMBOL(bio_copy_user); + +static struct bio *__bio_map_user_iov(struct request_queue *q, + struct block_device *bdev, + const struct sg_iovec *iov, int iov_count, + int write_to_vm, gfp_t gfp_mask) +{ + int i, j; + int nr_pages = 0; + struct page **pages; + struct bio *bio; + int cur_page = 0; + int ret, offset; + + for (i = 0; i < iov_count; i++) { + unsigned long uaddr = (unsigned long)iov[i].iov_base; + unsigned long len = iov[i].iov_len; + unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; + unsigned long start = uaddr >> PAGE_SHIFT; + + /* + * Overflow, abort + */ + if (end < start) + return ERR_PTR(-EINVAL); + + nr_pages += end - start; + /* + * buffer must be aligned to at least hardsector size for now + */ + if (uaddr & queue_dma_alignment(q)) + return ERR_PTR(-EINVAL); + } + + if (!nr_pages) + return ERR_PTR(-EINVAL); + + bio = bio_kmalloc(gfp_mask, nr_pages); + if (!bio) + return ERR_PTR(-ENOMEM); + + ret = -ENOMEM; + pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask); + if (!pages) + goto out; + + for (i = 0; i < iov_count; i++) { + unsigned long uaddr = (unsigned long)iov[i].iov_base; + unsigned long len = iov[i].iov_len; + unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; + unsigned long start = uaddr >> PAGE_SHIFT; + const int local_nr_pages = end - start; + const int page_limit = cur_page + local_nr_pages; + + ret = get_user_pages_fast(uaddr, local_nr_pages, + write_to_vm, &pages[cur_page]); + if (ret < local_nr_pages) { + ret = -EFAULT; + goto out_unmap; + } + + offset = uaddr & ~PAGE_MASK; + for (j = cur_page; j < page_limit; j++) { + unsigned int bytes = PAGE_SIZE - offset; + + if (len <= 0) + break; + + if (bytes > len) + bytes = len; + + /* + * sorry... + */ + if (bio_add_pc_page(q, bio, pages[j], bytes, offset) < + bytes) + break; + + len -= bytes; + offset = 0; + } + + cur_page = j; + /* + * release the pages we didn't map into the bio, if any + */ + while (j < page_limit) + page_cache_release(pages[j++]); + } + + kfree(pages); + + /* + * set data direction, and check if mapped pages need bouncing + */ + if (!write_to_vm) + bio->bi_rw |= REQ_WRITE; + + bio->bi_bdev = bdev; + bio->bi_flags |= (1 << BIO_USER_MAPPED); + return bio; + + out_unmap: + for (i = 0; i < nr_pages; i++) { + if(!pages[i]) + break; + page_cache_release(pages[i]); + } + out: + kfree(pages); + bio_put(bio); + return ERR_PTR(ret); +} + +/** + * bio_map_user - map user address into bio + * @q: the struct request_queue for the bio + * @bdev: destination block device + * @uaddr: start of user address + * @len: length in bytes + * @write_to_vm: bool indicating writing to pages or not + * @gfp_mask: memory allocation flags + * + * Map the user space address into a bio suitable for io to a block + * device. Returns an error pointer in case of error. 
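A rough sketch of the bio_map_user()/bio_unmap_user() lifecycle the documentation above describes; q, bdev, uaddr and len are assumed to come from the caller (an SG_IO-style ioctl path, for instance), and the submission step is elided:

	struct bio *bio;

	bio = bio_map_user(q, bdev, uaddr, len, 1 /* device-to-user */, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* ... attach the bio to a request, submit it and wait ... */

	bio_unmap_user(bio);	/* dirties READ pages and drops the pinned references */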
+ */ +struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev, + unsigned long uaddr, unsigned int len, int write_to_vm, + gfp_t gfp_mask) +{ + struct sg_iovec iov; + + iov.iov_base = (void __user *)uaddr; + iov.iov_len = len; + + return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm, gfp_mask); +} +EXPORT_SYMBOL(bio_map_user); + +/** + * bio_map_user_iov - map user sg_iovec table into bio + * @q: the struct request_queue for the bio + * @bdev: destination block device + * @iov: the iovec. + * @iov_count: number of elements in the iovec + * @write_to_vm: bool indicating writing to pages or not + * @gfp_mask: memory allocation flags + * + * Map the user space address into a bio suitable for io to a block + * device. Returns an error pointer in case of error. + */ +struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev, + const struct sg_iovec *iov, int iov_count, + int write_to_vm, gfp_t gfp_mask) +{ + struct bio *bio; + + bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm, + gfp_mask); + if (IS_ERR(bio)) + return bio; + + /* + * subtle -- if __bio_map_user() ended up bouncing a bio, + * it would normally disappear when its bi_end_io is run. + * however, we need it for the unmap, so grab an extra + * reference to it + */ + bio_get(bio); + + return bio; +} + +static void __bio_unmap_user(struct bio *bio) +{ + struct bio_vec *bvec; + int i; + + /* + * make sure we dirty pages we wrote to + */ + bio_for_each_segment_all(bvec, bio, i) { + if (bio_data_dir(bio) == READ) + set_page_dirty_lock(bvec->bv_page); + + page_cache_release(bvec->bv_page); + } + + bio_put(bio); +} + +/** + * bio_unmap_user - unmap a bio + * @bio: the bio being unmapped + * + * Unmap a bio previously mapped by bio_map_user(). Must be called with + * a process context. + * + * bio_unmap_user() may sleep. + */ +void bio_unmap_user(struct bio *bio) +{ + __bio_unmap_user(bio); + bio_put(bio); +} +EXPORT_SYMBOL(bio_unmap_user); + +static void bio_map_kern_endio(struct bio *bio, int err) +{ + bio_put(bio); +} + +static struct bio *__bio_map_kern(struct request_queue *q, void *data, + unsigned int len, gfp_t gfp_mask) +{ + unsigned long kaddr = (unsigned long)data; + unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; + unsigned long start = kaddr >> PAGE_SHIFT; + const int nr_pages = end - start; + int offset, i; + struct bio *bio; + + bio = bio_kmalloc(gfp_mask, nr_pages); + if (!bio) + return ERR_PTR(-ENOMEM); + + offset = offset_in_page(kaddr); + for (i = 0; i < nr_pages; i++) { + unsigned int bytes = PAGE_SIZE - offset; + + if (len <= 0) + break; + + if (bytes > len) + bytes = len; + + if (bio_add_pc_page(q, bio, virt_to_page(data), bytes, + offset) < bytes) + break; + + data += bytes; + len -= bytes; + offset = 0; + } + + bio->bi_end_io = bio_map_kern_endio; + return bio; +} + +/** + * bio_map_kern - map kernel address into bio + * @q: the struct request_queue for the bio + * @data: pointer to buffer to map + * @len: length in bytes + * @gfp_mask: allocation flags for bio allocation + * + * Map the kernel address into a bio suitable for io to a block + * device. Returns an error pointer in case of error. + */ +struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len, + gfp_t gfp_mask) +{ + struct bio *bio; + + bio = __bio_map_kern(q, data, len, gfp_mask); + if (IS_ERR(bio)) + return bio; + + if (bio->bi_iter.bi_size == len) + return bio; + + /* + * Don't support partial mappings. 
+ */ + bio_put(bio); + return ERR_PTR(-EINVAL); +} +EXPORT_SYMBOL(bio_map_kern); + +static void bio_copy_kern_endio(struct bio *bio, int err) +{ + struct bio_vec *bvec; + const int read = bio_data_dir(bio) == READ; + struct bio_map_data *bmd = bio->bi_private; + int i; + char *p = bmd->sgvecs[0].iov_base; + + bio_for_each_segment_all(bvec, bio, i) { + char *addr = page_address(bvec->bv_page); + + if (read) + memcpy(p, addr, bvec->bv_len); + + __free_page(bvec->bv_page); + p += bvec->bv_len; + } + + kfree(bmd); + bio_put(bio); +} + +/** + * bio_copy_kern - copy kernel address into bio + * @q: the struct request_queue for the bio + * @data: pointer to buffer to copy + * @len: length in bytes + * @gfp_mask: allocation flags for bio and page allocation + * @reading: data direction is READ + * + * copy the kernel address into a bio suitable for io to a block + * device. Returns an error pointer in case of error. + */ +struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len, + gfp_t gfp_mask, int reading) +{ + struct bio *bio; + struct bio_vec *bvec; + int i; + + bio = bio_copy_user(q, NULL, (unsigned long)data, len, 1, gfp_mask); + if (IS_ERR(bio)) + return bio; + + if (!reading) { + void *p = data; + + bio_for_each_segment_all(bvec, bio, i) { + char *addr = page_address(bvec->bv_page); + + memcpy(addr, p, bvec->bv_len); + p += bvec->bv_len; + } + } + + bio->bi_end_io = bio_copy_kern_endio; + + return bio; +} +EXPORT_SYMBOL(bio_copy_kern); + +/* + * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions + * for performing direct-IO in BIOs. + * + * The problem is that we cannot run set_page_dirty() from interrupt context + * because the required locks are not interrupt-safe. So what we can do is to + * mark the pages dirty _before_ performing IO. And in interrupt context, + * check that the pages are still dirty. If so, fine. If not, redirty them + * in process context. + * + * We special-case compound pages here: normally this means reads into hugetlb + * pages. The logic in here doesn't really work right for compound pages + * because the VM does not uniformly chase down the head page in all cases. + * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't + * handle them at all. So we skip compound pages here at an early stage. + * + * Note that this code is very hard to test under normal circumstances because + * direct-io pins the pages with get_user_pages(). This makes + * is_page_cache_freeable return false, and the VM will not clean the pages. + * But other code (eg, flusher threads) could clean the pages if they are mapped + * pagecache. + * + * Simply disabling the call to bio_set_pages_dirty() is a good way to test the + * deferred bio dirtying paths. + */ + +/* + * bio_set_pages_dirty() will mark all the bio's pages as dirty. + */ +void bio_set_pages_dirty(struct bio *bio) +{ + struct bio_vec *bvec; + int i; + + bio_for_each_segment_all(bvec, bio, i) { + struct page *page = bvec->bv_page; + + if (page && !PageCompound(page)) + set_page_dirty_lock(page); + } +} + +static void bio_release_pages(struct bio *bio) +{ + struct bio_vec *bvec; + int i; + + bio_for_each_segment_all(bvec, bio, i) { + struct page *page = bvec->bv_page; + + if (page) + put_page(page); + } +} + +/* + * bio_check_pages_dirty() will check that all the BIO's pages are still dirty. + * If they are, then fine. If, however, some pages are clean then they must + * have been written out during the direct-IO read. 
So we take another ref on + * the BIO and the offending pages and re-dirty the pages in process context. + * + * It is expected that bio_check_pages_dirty() will wholly own the BIO from + * here on. It will run one page_cache_release() against each page and will + * run one bio_put() against the BIO. + */ + +static void bio_dirty_fn(struct work_struct *work); + +static DECLARE_WORK(bio_dirty_work, bio_dirty_fn); +static DEFINE_SPINLOCK(bio_dirty_lock); +static struct bio *bio_dirty_list; + +/* + * This runs in process context + */ +static void bio_dirty_fn(struct work_struct *work) +{ + unsigned long flags; + struct bio *bio; + + spin_lock_irqsave(&bio_dirty_lock, flags); + bio = bio_dirty_list; + bio_dirty_list = NULL; + spin_unlock_irqrestore(&bio_dirty_lock, flags); + + while (bio) { + struct bio *next = bio->bi_private; + + bio_set_pages_dirty(bio); + bio_release_pages(bio); + bio_put(bio); + bio = next; + } +} + +void bio_check_pages_dirty(struct bio *bio) +{ + struct bio_vec *bvec; + int nr_clean_pages = 0; + int i; + + bio_for_each_segment_all(bvec, bio, i) { + struct page *page = bvec->bv_page; + + if (PageDirty(page) || PageCompound(page)) { + page_cache_release(page); + bvec->bv_page = NULL; + } else { + nr_clean_pages++; + } + } + + if (nr_clean_pages) { + unsigned long flags; + + spin_lock_irqsave(&bio_dirty_lock, flags); + bio->bi_private = bio_dirty_list; + bio_dirty_list = bio; + spin_unlock_irqrestore(&bio_dirty_lock, flags); + schedule_work(&bio_dirty_work); + } else { + bio_put(bio); + } +} + +#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE +void bio_flush_dcache_pages(struct bio *bi) +{ + struct bio_vec bvec; + struct bvec_iter iter; + + bio_for_each_segment(bvec, bi, iter) + flush_dcache_page(bvec.bv_page); +} +EXPORT_SYMBOL(bio_flush_dcache_pages); +#endif + +/** + * bio_endio - end I/O on a bio + * @bio: bio + * @error: error, if any + * + * Description: + * bio_endio() will end I/O on the whole bio. bio_endio() is the + * preferred way to end I/O on a bio, it takes care of clearing + * BIO_UPTODATE on error. @error is 0 on success, and and one of the + * established -Exxxx (-EIO, for instance) error values in case + * something went wrong. No one should call bi_end_io() directly on a + * bio unless they own it and thus know that it has an end_io + * function. + **/ +void bio_endio(struct bio *bio, int error) +{ + while (bio) { + BUG_ON(atomic_read(&bio->bi_remaining) <= 0); + + if (error) + clear_bit(BIO_UPTODATE, &bio->bi_flags); + else if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) + error = -EIO; + + if (!atomic_dec_and_test(&bio->bi_remaining)) + return; + + /* + * Need to have a real endio function for chained bios, + * otherwise various corner cases will break (like stacking + * block devices that save/restore bi_end_io) - however, we want + * to avoid unbounded recursion and blowing the stack. Tail call + * optimization would handle this, but compiling with frame + * pointers also disables gcc's sibling call optimization. + */ + if (bio->bi_end_io == bio_chain_endio) { + struct bio *parent = bio->bi_private; + bio_put(bio); + bio = parent; + } else { + if (bio->bi_end_io) + bio->bi_end_io(bio, error); + bio = NULL; + } + } +} +EXPORT_SYMBOL(bio_endio); + +/** + * bio_endio_nodec - end I/O on a bio, without decrementing bi_remaining + * @bio: bio + * @error: error, if any + * + * For code that has saved and restored bi_end_io; thing hard before using this + * function, probably you should've cloned the entire bio. 
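The set/check pair described in the comment above is used roughly as follows by a direct-IO style caller; the split between submission and completion context is the point of the exercise, and the surrounding driver code is only implied:

	/* submission, process context, user pages already pinned: */
	bio_set_pages_dirty(bio);
	submit_bio(READ, bio);

	/* completion, possibly interrupt context; bio_check_pages_dirty()
	 * re-dirties anything the VM cleaned meanwhile, releases the pages
	 * and owns the final bio_put(): */
	bio_check_pages_dirty(bio);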
+ **/ +void bio_endio_nodec(struct bio *bio, int error) +{ + atomic_inc(&bio->bi_remaining); + bio_endio(bio, error); +} +EXPORT_SYMBOL(bio_endio_nodec); + +/** + * bio_split - split a bio + * @bio: bio to split + * @sectors: number of sectors to split from the front of @bio + * @gfp: gfp mask + * @bs: bio set to allocate from + * + * Allocates and returns a new bio which represents @sectors from the start of + * @bio, and updates @bio to represent the remaining sectors. + * + * The newly allocated bio will point to @bio's bi_io_vec; it is the caller's + * responsibility to ensure that @bio is not freed before the split. + */ +struct bio *bio_split(struct bio *bio, int sectors, + gfp_t gfp, struct bio_set *bs) +{ + struct bio *split = NULL; + + BUG_ON(sectors <= 0); + BUG_ON(sectors >= bio_sectors(bio)); + + split = bio_clone_fast(bio, gfp, bs); + if (!split) + return NULL; + + split->bi_iter.bi_size = sectors << 9; + + if (bio_integrity(split)) + bio_integrity_trim(split, 0, sectors); + + bio_advance(bio, split->bi_iter.bi_size); + + return split; +} +EXPORT_SYMBOL(bio_split); + +/** + * bio_trim - trim a bio + * @bio: bio to trim + * @offset: number of sectors to trim from the front of @bio + * @size: size we want to trim @bio to, in sectors + */ +void bio_trim(struct bio *bio, int offset, int size) +{ + /* 'bio' is a cloned bio which we need to trim to match + * the given offset and size. + */ + + size <<= 9; + if (offset == 0 && size == bio->bi_iter.bi_size) + return; + + clear_bit(BIO_SEG_VALID, &bio->bi_flags); + + bio_advance(bio, offset << 9); + + bio->bi_iter.bi_size = size; +} +EXPORT_SYMBOL_GPL(bio_trim); + +/* + * create memory pools for biovec's in a bio_set. + * use the global biovec slabs created for general use. + */ +mempool_t *biovec_create_pool(int pool_entries) +{ + struct biovec_slab *bp = bvec_slabs + BIOVEC_MAX_IDX; + + return mempool_create_slab_pool(pool_entries, bp->slab); +} + +void bioset_free(struct bio_set *bs) +{ + if (bs->rescue_workqueue) + destroy_workqueue(bs->rescue_workqueue); + + if (bs->bio_pool) + mempool_destroy(bs->bio_pool); + + if (bs->bvec_pool) + mempool_destroy(bs->bvec_pool); + + bioset_integrity_free(bs); + bio_put_slab(bs); + + kfree(bs); +} +EXPORT_SYMBOL(bioset_free); + +/** + * bioset_create - Create a bio_set + * @pool_size: Number of bio and bio_vecs to cache in the mempool + * @front_pad: Number of bytes to allocate in front of the returned bio + * + * Description: + * Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller + * to ask for a number of bytes to be allocated in front of the bio. + * Front pad allocation is useful for embedding the bio inside + * another structure, to avoid allocating extra data to go with the bio. + * Note that the bio must be embedded at the END of that structure always, + * or things will break badly. 
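The front-pad rule spelled out above is easiest to read as a sketch; struct my_clone_bio and its priv member are made-up names rather than types from this patch, and error handling is omitted for brevity:

	struct my_clone_bio {
		struct my_private	*priv;	/* per-bio driver data, lives in the front pad */
		struct bio		bio;	/* must remain the last member */
	};

	struct bio_set *bs = bioset_create(BIO_POOL_SIZE,
					   offsetof(struct my_clone_bio, bio));
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, bs);
	struct my_clone_bio *pb = container_of(bio, struct my_clone_bio, bio);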
+ */ +struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad) +{ + unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec); + struct bio_set *bs; + + bs = kzalloc(sizeof(*bs), GFP_KERNEL); + if (!bs) + return NULL; + + bs->front_pad = front_pad; + + spin_lock_init(&bs->rescue_lock); + bio_list_init(&bs->rescue_list); + INIT_WORK(&bs->rescue_work, bio_alloc_rescue); + + bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad); + if (!bs->bio_slab) { + kfree(bs); + return NULL; + } + + bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab); + if (!bs->bio_pool) + goto bad; + + bs->bvec_pool = biovec_create_pool(pool_size); + if (!bs->bvec_pool) + goto bad; + + bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0); + if (!bs->rescue_workqueue) + goto bad; + + return bs; +bad: + bioset_free(bs); + return NULL; +} +EXPORT_SYMBOL(bioset_create); + +#ifdef CONFIG_BLK_CGROUP +/** + * bio_associate_current - associate a bio with %current + * @bio: target bio + * + * Associate @bio with %current if it hasn't been associated yet. Block + * layer will treat @bio as if it were issued by %current no matter which + * task actually issues it. + * + * This function takes an extra reference of @task's io_context and blkcg + * which will be put when @bio is released. The caller must own @bio, + * ensure %current->io_context exists, and is responsible for synchronizing + * calls to this function. + */ +int bio_associate_current(struct bio *bio) +{ + struct io_context *ioc; + struct cgroup_subsys_state *css; + + if (bio->bi_ioc) + return -EBUSY; + + ioc = current->io_context; + if (!ioc) + return -ENOENT; + + /* acquire active ref on @ioc and associate */ + get_io_context_active(ioc); + bio->bi_ioc = ioc; + + /* associate blkcg if exists */ + rcu_read_lock(); + css = task_css(current, blkio_cgrp_id); + if (css && css_tryget(css)) + bio->bi_css = css; + rcu_read_unlock(); + + return 0; +} + +/** + * bio_disassociate_task - undo bio_associate_current() + * @bio: target bio + */ +void bio_disassociate_task(struct bio *bio) +{ + if (bio->bi_ioc) { + put_io_context(bio->bi_ioc); + bio->bi_ioc = NULL; + } + if (bio->bi_css) { + css_put(bio->bi_css); + bio->bi_css = NULL; + } +} + +#endif /* CONFIG_BLK_CGROUP */ + +static void __init biovec_init_slabs(void) +{ + int i; + + for (i = 0; i < BIOVEC_NR_POOLS; i++) { + int size; + struct biovec_slab *bvs = bvec_slabs + i; + + if (bvs->nr_vecs <= BIO_INLINE_VECS) { + bvs->slab = NULL; + continue; + } + + size = bvs->nr_vecs * sizeof(struct bio_vec); + bvs->slab = kmem_cache_create(bvs->name, size, 0, + SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); + } +} + +static int __init init_bio(void) +{ + bio_slab_max = 2; + bio_slab_nr = 0; + bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL); + if (!bio_slabs) + panic("bio: can't allocate bios\n"); + + bio_integrity_init(); + biovec_init_slabs(); + + fs_bio_set = bioset_create(BIO_POOL_SIZE, 0); + if (!fs_bio_set) + panic("bio: can't allocate bios\n"); + + if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE)) + panic("bio: can't create integrity pool\n"); + + return 0; +} +subsys_initcall(init_bio); diff --git a/fs/Makefile b/fs/Makefile index f9cb9876e466..1ed9eab5e0a9 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -14,14 +14,13 @@ obj-y := open.o read_write.o file_table.o super.o \ stack.o fs_struct.o statfs.o ifeq ($(CONFIG_BLOCK),y) -obj-y += buffer.o bio.o block_dev.o direct-io.o mpage.o ioprio.o +obj-y += buffer.o block_dev.o direct-io.o mpage.o ioprio.o 
else obj-y += no-block.o endif obj-$(CONFIG_PROC_FS) += proc_namespace.o -obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o obj-y += notify/ obj-$(CONFIG_EPOLL) += eventpoll.o obj-$(CONFIG_ANON_INODES) += anon_inodes.o diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c deleted file mode 100644 index 9e241063a616..000000000000 --- a/fs/bio-integrity.c +++ /dev/null @@ -1,657 +0,0 @@ -/* - * bio-integrity.c - bio data integrity extensions - * - * Copyright (C) 2007, 2008, 2009 Oracle Corporation - * Written by: Martin K. Petersen - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; see the file COPYING. If not, write to - * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, - * USA. - * - */ - -#include -#include -#include -#include -#include -#include - -#define BIP_INLINE_VECS 4 - -static struct kmem_cache *bip_slab; -static struct workqueue_struct *kintegrityd_wq; - -/** - * bio_integrity_alloc - Allocate integrity payload and attach it to bio - * @bio: bio to attach integrity metadata to - * @gfp_mask: Memory allocation mask - * @nr_vecs: Number of integrity metadata scatter-gather elements - * - * Description: This function prepares a bio for attaching integrity - * metadata. nr_vecs specifies the maximum number of pages containing - * integrity metadata that can be attached. - */ -struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, - gfp_t gfp_mask, - unsigned int nr_vecs) -{ - struct bio_integrity_payload *bip; - struct bio_set *bs = bio->bi_pool; - unsigned long idx = BIO_POOL_NONE; - unsigned inline_vecs; - - if (!bs) { - bip = kmalloc(sizeof(struct bio_integrity_payload) + - sizeof(struct bio_vec) * nr_vecs, gfp_mask); - inline_vecs = nr_vecs; - } else { - bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask); - inline_vecs = BIP_INLINE_VECS; - } - - if (unlikely(!bip)) - return NULL; - - memset(bip, 0, sizeof(*bip)); - - if (nr_vecs > inline_vecs) { - bip->bip_vec = bvec_alloc(gfp_mask, nr_vecs, &idx, - bs->bvec_integrity_pool); - if (!bip->bip_vec) - goto err; - } else { - bip->bip_vec = bip->bip_inline_vecs; - } - - bip->bip_slab = idx; - bip->bip_bio = bio; - bio->bi_integrity = bip; - - return bip; -err: - mempool_free(bip, bs->bio_integrity_pool); - return NULL; -} -EXPORT_SYMBOL(bio_integrity_alloc); - -/** - * bio_integrity_free - Free bio integrity payload - * @bio: bio containing bip to be freed - * - * Description: Used to free the integrity portion of a bio. Usually - * called from bio_free(). 
- */ -void bio_integrity_free(struct bio *bio) -{ - struct bio_integrity_payload *bip = bio->bi_integrity; - struct bio_set *bs = bio->bi_pool; - - if (bip->bip_owns_buf) - kfree(bip->bip_buf); - - if (bs) { - if (bip->bip_slab != BIO_POOL_NONE) - bvec_free(bs->bvec_integrity_pool, bip->bip_vec, - bip->bip_slab); - - mempool_free(bip, bs->bio_integrity_pool); - } else { - kfree(bip); - } - - bio->bi_integrity = NULL; -} -EXPORT_SYMBOL(bio_integrity_free); - -static inline unsigned int bip_integrity_vecs(struct bio_integrity_payload *bip) -{ - if (bip->bip_slab == BIO_POOL_NONE) - return BIP_INLINE_VECS; - - return bvec_nr_vecs(bip->bip_slab); -} - -/** - * bio_integrity_add_page - Attach integrity metadata - * @bio: bio to update - * @page: page containing integrity metadata - * @len: number of bytes of integrity metadata in page - * @offset: start offset within page - * - * Description: Attach a page containing integrity metadata to bio. - */ -int bio_integrity_add_page(struct bio *bio, struct page *page, - unsigned int len, unsigned int offset) -{ - struct bio_integrity_payload *bip = bio->bi_integrity; - struct bio_vec *iv; - - if (bip->bip_vcnt >= bip_integrity_vecs(bip)) { - printk(KERN_ERR "%s: bip_vec full\n", __func__); - return 0; - } - - iv = bip->bip_vec + bip->bip_vcnt; - - iv->bv_page = page; - iv->bv_len = len; - iv->bv_offset = offset; - bip->bip_vcnt++; - - return len; -} -EXPORT_SYMBOL(bio_integrity_add_page); - -static int bdev_integrity_enabled(struct block_device *bdev, int rw) -{ - struct blk_integrity *bi = bdev_get_integrity(bdev); - - if (bi == NULL) - return 0; - - if (rw == READ && bi->verify_fn != NULL && - (bi->flags & INTEGRITY_FLAG_READ)) - return 1; - - if (rw == WRITE && bi->generate_fn != NULL && - (bi->flags & INTEGRITY_FLAG_WRITE)) - return 1; - - return 0; -} - -/** - * bio_integrity_enabled - Check whether integrity can be passed - * @bio: bio to check - * - * Description: Determines whether bio_integrity_prep() can be called - * on this bio or not. bio data direction and target device must be - * set prior to calling. The functions honors the write_generate and - * read_verify flags in sysfs. - */ -int bio_integrity_enabled(struct bio *bio) -{ - if (!bio_is_rw(bio)) - return 0; - - /* Already protected? */ - if (bio_integrity(bio)) - return 0; - - return bdev_integrity_enabled(bio->bi_bdev, bio_data_dir(bio)); -} -EXPORT_SYMBOL(bio_integrity_enabled); - -/** - * bio_integrity_hw_sectors - Convert 512b sectors to hardware ditto - * @bi: blk_integrity profile for device - * @sectors: Number of 512 sectors to convert - * - * Description: The block layer calculates everything in 512 byte - * sectors but integrity metadata is done in terms of the hardware - * sector size of the storage device. Convert the block layer sectors - * to physical sectors. - */ -static inline unsigned int bio_integrity_hw_sectors(struct blk_integrity *bi, - unsigned int sectors) -{ - /* At this point there are only 512b or 4096b DIF/EPP devices */ - if (bi->sector_size == 4096) - return sectors >>= 3; - - return sectors; -} - -static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi, - unsigned int sectors) -{ - return bio_integrity_hw_sectors(bi, sectors) * bi->tuple_size; -} - -/** - * bio_integrity_tag_size - Retrieve integrity tag space - * @bio: bio to inspect - * - * Description: Returns the maximum number of tag bytes that can be - * attached to this bio. Filesystems can use this to determine how - * much metadata to attach to an I/O. 
- */ -unsigned int bio_integrity_tag_size(struct bio *bio) -{ - struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); - - BUG_ON(bio->bi_iter.bi_size == 0); - - return bi->tag_size * (bio->bi_iter.bi_size / bi->sector_size); -} -EXPORT_SYMBOL(bio_integrity_tag_size); - -static int bio_integrity_tag(struct bio *bio, void *tag_buf, unsigned int len, - int set) -{ - struct bio_integrity_payload *bip = bio->bi_integrity; - struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); - unsigned int nr_sectors; - - BUG_ON(bip->bip_buf == NULL); - - if (bi->tag_size == 0) - return -1; - - nr_sectors = bio_integrity_hw_sectors(bi, - DIV_ROUND_UP(len, bi->tag_size)); - - if (nr_sectors * bi->tuple_size > bip->bip_iter.bi_size) { - printk(KERN_ERR "%s: tag too big for bio: %u > %u\n", __func__, - nr_sectors * bi->tuple_size, bip->bip_iter.bi_size); - return -1; - } - - if (set) - bi->set_tag_fn(bip->bip_buf, tag_buf, nr_sectors); - else - bi->get_tag_fn(bip->bip_buf, tag_buf, nr_sectors); - - return 0; -} - -/** - * bio_integrity_set_tag - Attach a tag buffer to a bio - * @bio: bio to attach buffer to - * @tag_buf: Pointer to a buffer containing tag data - * @len: Length of the included buffer - * - * Description: Use this function to tag a bio by leveraging the extra - * space provided by devices formatted with integrity protection. The - * size of the integrity buffer must be <= to the size reported by - * bio_integrity_tag_size(). - */ -int bio_integrity_set_tag(struct bio *bio, void *tag_buf, unsigned int len) -{ - BUG_ON(bio_data_dir(bio) != WRITE); - - return bio_integrity_tag(bio, tag_buf, len, 1); -} -EXPORT_SYMBOL(bio_integrity_set_tag); - -/** - * bio_integrity_get_tag - Retrieve a tag buffer from a bio - * @bio: bio to retrieve buffer from - * @tag_buf: Pointer to a buffer for the tag data - * @len: Length of the target buffer - * - * Description: Use this function to retrieve the tag buffer from a - * completed I/O. The size of the integrity buffer must be <= to the - * size reported by bio_integrity_tag_size(). 
- */ -int bio_integrity_get_tag(struct bio *bio, void *tag_buf, unsigned int len) -{ - BUG_ON(bio_data_dir(bio) != READ); - - return bio_integrity_tag(bio, tag_buf, len, 0); -} -EXPORT_SYMBOL(bio_integrity_get_tag); - -/** - * bio_integrity_generate_verify - Generate/verify integrity metadata for a bio - * @bio: bio to generate/verify integrity metadata for - * @operate: operate number, 1 for generate, 0 for verify - */ -static int bio_integrity_generate_verify(struct bio *bio, int operate) -{ - struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); - struct blk_integrity_exchg bix; - struct bio_vec *bv; - sector_t sector; - unsigned int sectors, ret = 0, i; - void *prot_buf = bio->bi_integrity->bip_buf; - - if (operate) - sector = bio->bi_iter.bi_sector; - else - sector = bio->bi_integrity->bip_iter.bi_sector; - - bix.disk_name = bio->bi_bdev->bd_disk->disk_name; - bix.sector_size = bi->sector_size; - - bio_for_each_segment_all(bv, bio, i) { - void *kaddr = kmap_atomic(bv->bv_page); - bix.data_buf = kaddr + bv->bv_offset; - bix.data_size = bv->bv_len; - bix.prot_buf = prot_buf; - bix.sector = sector; - - if (operate) - bi->generate_fn(&bix); - else { - ret = bi->verify_fn(&bix); - if (ret) { - kunmap_atomic(kaddr); - return ret; - } - } - - sectors = bv->bv_len / bi->sector_size; - sector += sectors; - prot_buf += sectors * bi->tuple_size; - - kunmap_atomic(kaddr); - } - return ret; -} - -/** - * bio_integrity_generate - Generate integrity metadata for a bio - * @bio: bio to generate integrity metadata for - * - * Description: Generates integrity metadata for a bio by calling the - * block device's generation callback function. The bio must have a - * bip attached with enough room to accommodate the generated - * integrity metadata. - */ -static void bio_integrity_generate(struct bio *bio) -{ - bio_integrity_generate_verify(bio, 1); -} - -static inline unsigned short blk_integrity_tuple_size(struct blk_integrity *bi) -{ - if (bi) - return bi->tuple_size; - - return 0; -} - -/** - * bio_integrity_prep - Prepare bio for integrity I/O - * @bio: bio to prepare - * - * Description: Allocates a buffer for integrity metadata, maps the - * pages and attaches them to a bio. The bio must have data - * direction, target device and start sector set priot to calling. In - * the WRITE case, integrity metadata will be generated using the - * block device's integrity function. In the READ case, the buffer - * will be prepared for DMA and a suitable end_io handler set up. 
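For context, a submission path typically gates on both of the helpers documented here; a condensed sketch of that check (the enclosing function is not shown and is not part of this patch):

	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
		bio_endio(bio, -EIO);	/* could not allocate or attach the metadata */
		return;
	}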
- */ -int bio_integrity_prep(struct bio *bio) -{ - struct bio_integrity_payload *bip; - struct blk_integrity *bi; - struct request_queue *q; - void *buf; - unsigned long start, end; - unsigned int len, nr_pages; - unsigned int bytes, offset, i; - unsigned int sectors; - - bi = bdev_get_integrity(bio->bi_bdev); - q = bdev_get_queue(bio->bi_bdev); - BUG_ON(bi == NULL); - BUG_ON(bio_integrity(bio)); - - sectors = bio_integrity_hw_sectors(bi, bio_sectors(bio)); - - /* Allocate kernel buffer for protection data */ - len = sectors * blk_integrity_tuple_size(bi); - buf = kmalloc(len, GFP_NOIO | q->bounce_gfp); - if (unlikely(buf == NULL)) { - printk(KERN_ERR "could not allocate integrity buffer\n"); - return -ENOMEM; - } - - end = (((unsigned long) buf) + len + PAGE_SIZE - 1) >> PAGE_SHIFT; - start = ((unsigned long) buf) >> PAGE_SHIFT; - nr_pages = end - start; - - /* Allocate bio integrity payload and integrity vectors */ - bip = bio_integrity_alloc(bio, GFP_NOIO, nr_pages); - if (unlikely(bip == NULL)) { - printk(KERN_ERR "could not allocate data integrity bioset\n"); - kfree(buf); - return -EIO; - } - - bip->bip_owns_buf = 1; - bip->bip_buf = buf; - bip->bip_iter.bi_size = len; - bip->bip_iter.bi_sector = bio->bi_iter.bi_sector; - - /* Map it */ - offset = offset_in_page(buf); - for (i = 0 ; i < nr_pages ; i++) { - int ret; - bytes = PAGE_SIZE - offset; - - if (len <= 0) - break; - - if (bytes > len) - bytes = len; - - ret = bio_integrity_add_page(bio, virt_to_page(buf), - bytes, offset); - - if (ret == 0) - return 0; - - if (ret < bytes) - break; - - buf += bytes; - len -= bytes; - offset = 0; - } - - /* Install custom I/O completion handler if read verify is enabled */ - if (bio_data_dir(bio) == READ) { - bip->bip_end_io = bio->bi_end_io; - bio->bi_end_io = bio_integrity_endio; - } - - /* Auto-generate integrity metadata if this is a write */ - if (bio_data_dir(bio) == WRITE) - bio_integrity_generate(bio); - - return 0; -} -EXPORT_SYMBOL(bio_integrity_prep); - -/** - * bio_integrity_verify - Verify integrity metadata for a bio - * @bio: bio to verify - * - * Description: This function is called to verify the integrity of a - * bio. The data in the bio io_vec is compared to the integrity - * metadata returned by the HBA. - */ -static int bio_integrity_verify(struct bio *bio) -{ - return bio_integrity_generate_verify(bio, 0); -} - -/** - * bio_integrity_verify_fn - Integrity I/O completion worker - * @work: Work struct stored in bio to be verified - * - * Description: This workqueue function is called to complete a READ - * request. The function verifies the transferred integrity metadata - * and then calls the original bio end_io function. - */ -static void bio_integrity_verify_fn(struct work_struct *work) -{ - struct bio_integrity_payload *bip = - container_of(work, struct bio_integrity_payload, bip_work); - struct bio *bio = bip->bip_bio; - int error; - - error = bio_integrity_verify(bio); - - /* Restore original bio completion handler */ - bio->bi_end_io = bip->bip_end_io; - bio_endio_nodec(bio, error); -} - -/** - * bio_integrity_endio - Integrity I/O completion function - * @bio: Protected bio - * @error: Pointer to errno - * - * Description: Completion for integrity I/O - * - * Normally I/O completion is done in interrupt context. However, - * verifying I/O integrity is a time-consuming task which must be run - * in process context. This function postpones completion - * accordingly. 
- */ -void bio_integrity_endio(struct bio *bio, int error) -{ - struct bio_integrity_payload *bip = bio->bi_integrity; - - BUG_ON(bip->bip_bio != bio); - - /* In case of an I/O error there is no point in verifying the - * integrity metadata. Restore original bio end_io handler - * and run it. - */ - if (error) { - bio->bi_end_io = bip->bip_end_io; - bio_endio(bio, error); - - return; - } - - INIT_WORK(&bip->bip_work, bio_integrity_verify_fn); - queue_work(kintegrityd_wq, &bip->bip_work); -} -EXPORT_SYMBOL(bio_integrity_endio); - -/** - * bio_integrity_advance - Advance integrity vector - * @bio: bio whose integrity vector to update - * @bytes_done: number of data bytes that have been completed - * - * Description: This function calculates how many integrity bytes the - * number of completed data bytes correspond to and advances the - * integrity vector accordingly. - */ -void bio_integrity_advance(struct bio *bio, unsigned int bytes_done) -{ - struct bio_integrity_payload *bip = bio->bi_integrity; - struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); - unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9); - - bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes); -} -EXPORT_SYMBOL(bio_integrity_advance); - -/** - * bio_integrity_trim - Trim integrity vector - * @bio: bio whose integrity vector to update - * @offset: offset to first data sector - * @sectors: number of data sectors - * - * Description: Used to trim the integrity vector in a cloned bio. - * The ivec will be advanced corresponding to 'offset' data sectors - * and the length will be truncated corresponding to 'len' data - * sectors. - */ -void bio_integrity_trim(struct bio *bio, unsigned int offset, - unsigned int sectors) -{ - struct bio_integrity_payload *bip = bio->bi_integrity; - struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); - - bio_integrity_advance(bio, offset << 9); - bip->bip_iter.bi_size = bio_integrity_bytes(bi, sectors); -} -EXPORT_SYMBOL(bio_integrity_trim); - -/** - * bio_integrity_clone - Callback for cloning bios with integrity metadata - * @bio: New bio - * @bio_src: Original bio - * @gfp_mask: Memory allocation mask - * - * Description: Called to allocate a bip when cloning a bio - */ -int bio_integrity_clone(struct bio *bio, struct bio *bio_src, - gfp_t gfp_mask) -{ - struct bio_integrity_payload *bip_src = bio_src->bi_integrity; - struct bio_integrity_payload *bip; - - BUG_ON(bip_src == NULL); - - bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt); - - if (bip == NULL) - return -EIO; - - memcpy(bip->bip_vec, bip_src->bip_vec, - bip_src->bip_vcnt * sizeof(struct bio_vec)); - - bip->bip_vcnt = bip_src->bip_vcnt; - bip->bip_iter = bip_src->bip_iter; - - return 0; -} -EXPORT_SYMBOL(bio_integrity_clone); - -int bioset_integrity_create(struct bio_set *bs, int pool_size) -{ - if (bs->bio_integrity_pool) - return 0; - - bs->bio_integrity_pool = mempool_create_slab_pool(pool_size, bip_slab); - if (!bs->bio_integrity_pool) - return -1; - - bs->bvec_integrity_pool = biovec_create_pool(pool_size); - if (!bs->bvec_integrity_pool) { - mempool_destroy(bs->bio_integrity_pool); - return -1; - } - - return 0; -} -EXPORT_SYMBOL(bioset_integrity_create); - -void bioset_integrity_free(struct bio_set *bs) -{ - if (bs->bio_integrity_pool) - mempool_destroy(bs->bio_integrity_pool); - - if (bs->bvec_integrity_pool) - mempool_destroy(bs->bvec_integrity_pool); -} -EXPORT_SYMBOL(bioset_integrity_free); - -void __init bio_integrity_init(void) -{ - /* - * kintegrityd won't block much but may burn a 
lot of CPU cycles. - * Make it highpri CPU intensive wq with max concurrency of 1. - */ - kintegrityd_wq = alloc_workqueue("kintegrityd", WQ_MEM_RECLAIM | - WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1); - if (!kintegrityd_wq) - panic("Failed to create kintegrityd\n"); - - bip_slab = kmem_cache_create("bio_integrity_payload", - sizeof(struct bio_integrity_payload) + - sizeof(struct bio_vec) * BIP_INLINE_VECS, - 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); - if (!bip_slab) - panic("Failed to create slab\n"); -} diff --git a/fs/bio.c b/fs/bio.c deleted file mode 100644 index 96d28eee8a1e..000000000000 --- a/fs/bio.c +++ /dev/null @@ -1,2038 +0,0 @@ -/* - * Copyright (C) 2001 Jens Axboe - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public Licens - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111- - * - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include /* for struct sg_iovec */ - -#include - -/* - * Test patch to inline a certain number of bi_io_vec's inside the bio - * itself, to shrink a bio data allocation from two mempool calls to one - */ -#define BIO_INLINE_VECS 4 - -/* - * if you change this list, also change bvec_alloc or things will - * break badly! cannot be bigger than what you can fit into an - * unsigned short - */ -#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) } -static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = { - BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES), -}; -#undef BV - -/* - * fs_bio_set is the bio_set containing bio and iovec memory pools used by - * IO code that does not need private memory pools. 
- */ -struct bio_set *fs_bio_set; -EXPORT_SYMBOL(fs_bio_set); - -/* - * Our slab pool management - */ -struct bio_slab { - struct kmem_cache *slab; - unsigned int slab_ref; - unsigned int slab_size; - char name[8]; -}; -static DEFINE_MUTEX(bio_slab_lock); -static struct bio_slab *bio_slabs; -static unsigned int bio_slab_nr, bio_slab_max; - -static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size) -{ - unsigned int sz = sizeof(struct bio) + extra_size; - struct kmem_cache *slab = NULL; - struct bio_slab *bslab, *new_bio_slabs; - unsigned int new_bio_slab_max; - unsigned int i, entry = -1; - - mutex_lock(&bio_slab_lock); - - i = 0; - while (i < bio_slab_nr) { - bslab = &bio_slabs[i]; - - if (!bslab->slab && entry == -1) - entry = i; - else if (bslab->slab_size == sz) { - slab = bslab->slab; - bslab->slab_ref++; - break; - } - i++; - } - - if (slab) - goto out_unlock; - - if (bio_slab_nr == bio_slab_max && entry == -1) { - new_bio_slab_max = bio_slab_max << 1; - new_bio_slabs = krealloc(bio_slabs, - new_bio_slab_max * sizeof(struct bio_slab), - GFP_KERNEL); - if (!new_bio_slabs) - goto out_unlock; - bio_slab_max = new_bio_slab_max; - bio_slabs = new_bio_slabs; - } - if (entry == -1) - entry = bio_slab_nr++; - - bslab = &bio_slabs[entry]; - - snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry); - slab = kmem_cache_create(bslab->name, sz, 0, SLAB_HWCACHE_ALIGN, NULL); - if (!slab) - goto out_unlock; - - bslab->slab = slab; - bslab->slab_ref = 1; - bslab->slab_size = sz; -out_unlock: - mutex_unlock(&bio_slab_lock); - return slab; -} - -static void bio_put_slab(struct bio_set *bs) -{ - struct bio_slab *bslab = NULL; - unsigned int i; - - mutex_lock(&bio_slab_lock); - - for (i = 0; i < bio_slab_nr; i++) { - if (bs->bio_slab == bio_slabs[i].slab) { - bslab = &bio_slabs[i]; - break; - } - } - - if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n")) - goto out; - - WARN_ON(!bslab->slab_ref); - - if (--bslab->slab_ref) - goto out; - - kmem_cache_destroy(bslab->slab); - bslab->slab = NULL; - -out: - mutex_unlock(&bio_slab_lock); -} - -unsigned int bvec_nr_vecs(unsigned short idx) -{ - return bvec_slabs[idx].nr_vecs; -} - -void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx) -{ - BIO_BUG_ON(idx >= BIOVEC_NR_POOLS); - - if (idx == BIOVEC_MAX_IDX) - mempool_free(bv, pool); - else { - struct biovec_slab *bvs = bvec_slabs + idx; - - kmem_cache_free(bvs->slab, bv); - } -} - -struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx, - mempool_t *pool) -{ - struct bio_vec *bvl; - - /* - * see comment near bvec_array define! - */ - switch (nr) { - case 1: - *idx = 0; - break; - case 2 ... 4: - *idx = 1; - break; - case 5 ... 16: - *idx = 2; - break; - case 17 ... 64: - *idx = 3; - break; - case 65 ... 128: - *idx = 4; - break; - case 129 ... BIO_MAX_PAGES: - *idx = 5; - break; - default: - return NULL; - } - - /* - * idx now points to the pool we want to allocate from. only the - * 1-vec entry pool is mempool backed. - */ - if (*idx == BIOVEC_MAX_IDX) { -fallback: - bvl = mempool_alloc(pool, gfp_mask); - } else { - struct biovec_slab *bvs = bvec_slabs + *idx; - gfp_t __gfp_mask = gfp_mask & ~(__GFP_WAIT | __GFP_IO); - - /* - * Make this allocation restricted and don't dump info on - * allocation failures, since we'll fallback to the mempool - * in case of failure. - */ - __gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN; - - /* - * Try a slab allocation. 
If this fails and __GFP_WAIT - * is set, retry with the 1-entry mempool - */ - bvl = kmem_cache_alloc(bvs->slab, __gfp_mask); - if (unlikely(!bvl && (gfp_mask & __GFP_WAIT))) { - *idx = BIOVEC_MAX_IDX; - goto fallback; - } - } - - return bvl; -} - -static void __bio_free(struct bio *bio) -{ - bio_disassociate_task(bio); - - if (bio_integrity(bio)) - bio_integrity_free(bio); -} - -static void bio_free(struct bio *bio) -{ - struct bio_set *bs = bio->bi_pool; - void *p; - - __bio_free(bio); - - if (bs) { - if (bio_flagged(bio, BIO_OWNS_VEC)) - bvec_free(bs->bvec_pool, bio->bi_io_vec, BIO_POOL_IDX(bio)); - - /* - * If we have front padding, adjust the bio pointer before freeing - */ - p = bio; - p -= bs->front_pad; - - mempool_free(p, bs->bio_pool); - } else { - /* Bio was allocated by bio_kmalloc() */ - kfree(bio); - } -} - -void bio_init(struct bio *bio) -{ - memset(bio, 0, sizeof(*bio)); - bio->bi_flags = 1 << BIO_UPTODATE; - atomic_set(&bio->bi_remaining, 1); - atomic_set(&bio->bi_cnt, 1); -} -EXPORT_SYMBOL(bio_init); - -/** - * bio_reset - reinitialize a bio - * @bio: bio to reset - * - * Description: - * After calling bio_reset(), @bio will be in the same state as a freshly - * allocated bio returned bio bio_alloc_bioset() - the only fields that are - * preserved are the ones that are initialized by bio_alloc_bioset(). See - * comment in struct bio. - */ -void bio_reset(struct bio *bio) -{ - unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS); - - __bio_free(bio); - - memset(bio, 0, BIO_RESET_BYTES); - bio->bi_flags = flags|(1 << BIO_UPTODATE); - atomic_set(&bio->bi_remaining, 1); -} -EXPORT_SYMBOL(bio_reset); - -static void bio_chain_endio(struct bio *bio, int error) -{ - bio_endio(bio->bi_private, error); - bio_put(bio); -} - -/** - * bio_chain - chain bio completions - * @bio: the target bio - * @parent: the @bio's parent bio - * - * The caller won't have a bi_end_io called when @bio completes - instead, - * @parent's bi_end_io won't be called until both @parent and @bio have - * completed; the chained bio will also be freed when it completes. - * - * The caller must not set bi_private or bi_end_io in @bio. - */ -void bio_chain(struct bio *bio, struct bio *parent) -{ - BUG_ON(bio->bi_private || bio->bi_end_io); - - bio->bi_private = parent; - bio->bi_end_io = bio_chain_endio; - atomic_inc(&parent->bi_remaining); -} -EXPORT_SYMBOL(bio_chain); - -static void bio_alloc_rescue(struct work_struct *work) -{ - struct bio_set *bs = container_of(work, struct bio_set, rescue_work); - struct bio *bio; - - while (1) { - spin_lock(&bs->rescue_lock); - bio = bio_list_pop(&bs->rescue_list); - spin_unlock(&bs->rescue_lock); - - if (!bio) - break; - - generic_make_request(bio); - } -} - -static void punt_bios_to_rescuer(struct bio_set *bs) -{ - struct bio_list punt, nopunt; - struct bio *bio; - - /* - * In order to guarantee forward progress we must punt only bios that - * were allocated from this bio_set; otherwise, if there was a bio on - * there for a stacking driver higher up in the stack, processing it - * could require allocating bios from this bio_set, and doing that from - * our own rescuer would be bad. - * - * Since bio lists are singly linked, pop them all instead of trying to - * remove from the middle of the list: - */ - - bio_list_init(&punt); - bio_list_init(&nopunt); - - while ((bio = bio_list_pop(current->bio_list))) - bio_list_add(bio->bi_pool == bs ? 
&punt : &nopunt, bio); - - *current->bio_list = nopunt; - - spin_lock(&bs->rescue_lock); - bio_list_merge(&bs->rescue_list, &punt); - spin_unlock(&bs->rescue_lock); - - queue_work(bs->rescue_workqueue, &bs->rescue_work); -} - -/** - * bio_alloc_bioset - allocate a bio for I/O - * @gfp_mask: the GFP_ mask given to the slab allocator - * @nr_iovecs: number of iovecs to pre-allocate - * @bs: the bio_set to allocate from. - * - * Description: - * If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is - * backed by the @bs's mempool. - * - * When @bs is not NULL, if %__GFP_WAIT is set then bio_alloc will always be - * able to allocate a bio. This is due to the mempool guarantees. To make this - * work, callers must never allocate more than 1 bio at a time from this pool. - * Callers that need to allocate more than 1 bio must always submit the - * previously allocated bio for IO before attempting to allocate a new one. - * Failure to do so can cause deadlocks under memory pressure. - * - * Note that when running under generic_make_request() (i.e. any block - * driver), bios are not submitted until after you return - see the code in - * generic_make_request() that converts recursion into iteration, to prevent - * stack overflows. - * - * This would normally mean allocating multiple bios under - * generic_make_request() would be susceptible to deadlocks, but we have - * deadlock avoidance code that resubmits any blocked bios from a rescuer - * thread. - * - * However, we do not guarantee forward progress for allocations from other - * mempools. Doing multiple allocations from the same mempool under - * generic_make_request() should be avoided - instead, use bio_set's front_pad - * for per bio allocations. - * - * RETURNS: - * Pointer to new bio on success, NULL on failure. - */ -struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs) -{ - gfp_t saved_gfp = gfp_mask; - unsigned front_pad; - unsigned inline_vecs; - unsigned long idx = BIO_POOL_NONE; - struct bio_vec *bvl = NULL; - struct bio *bio; - void *p; - - if (!bs) { - if (nr_iovecs > UIO_MAXIOV) - return NULL; - - p = kmalloc(sizeof(struct bio) + - nr_iovecs * sizeof(struct bio_vec), - gfp_mask); - front_pad = 0; - inline_vecs = nr_iovecs; - } else { - /* - * generic_make_request() converts recursion to iteration; this - * means if we're running beneath it, any bios we allocate and - * submit will not be submitted (and thus freed) until after we - * return. - * - * This exposes us to a potential deadlock if we allocate - * multiple bios from the same bio_set() while running - * underneath generic_make_request(). If we were to allocate - * multiple bios (say a stacking block driver that was splitting - * bios), we would deadlock if we exhausted the mempool's - * reserve. - * - * We solve this, and guarantee forward progress, with a rescuer - * workqueue per bio_set. If we go to allocate and there are - * bios on current->bio_list, we first try the allocation - * without __GFP_WAIT; if that fails, we punt those bios we - * would be blocking to the rescuer workqueue before we retry - * with the original gfp_flags. 
- */ - - if (current->bio_list && !bio_list_empty(current->bio_list)) - gfp_mask &= ~__GFP_WAIT; - - p = mempool_alloc(bs->bio_pool, gfp_mask); - if (!p && gfp_mask != saved_gfp) { - punt_bios_to_rescuer(bs); - gfp_mask = saved_gfp; - p = mempool_alloc(bs->bio_pool, gfp_mask); - } - - front_pad = bs->front_pad; - inline_vecs = BIO_INLINE_VECS; - } - - if (unlikely(!p)) - return NULL; - - bio = p + front_pad; - bio_init(bio); - - if (nr_iovecs > inline_vecs) { - bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool); - if (!bvl && gfp_mask != saved_gfp) { - punt_bios_to_rescuer(bs); - gfp_mask = saved_gfp; - bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool); - } - - if (unlikely(!bvl)) - goto err_free; - - bio->bi_flags |= 1 << BIO_OWNS_VEC; - } else if (nr_iovecs) { - bvl = bio->bi_inline_vecs; - } - - bio->bi_pool = bs; - bio->bi_flags |= idx << BIO_POOL_OFFSET; - bio->bi_max_vecs = nr_iovecs; - bio->bi_io_vec = bvl; - return bio; - -err_free: - mempool_free(p, bs->bio_pool); - return NULL; -} -EXPORT_SYMBOL(bio_alloc_bioset); - -void zero_fill_bio(struct bio *bio) -{ - unsigned long flags; - struct bio_vec bv; - struct bvec_iter iter; - - bio_for_each_segment(bv, bio, iter) { - char *data = bvec_kmap_irq(&bv, &flags); - memset(data, 0, bv.bv_len); - flush_dcache_page(bv.bv_page); - bvec_kunmap_irq(data, &flags); - } -} -EXPORT_SYMBOL(zero_fill_bio); - -/** - * bio_put - release a reference to a bio - * @bio: bio to release reference to - * - * Description: - * Put a reference to a &struct bio, either one you have gotten with - * bio_alloc, bio_get or bio_clone. The last put of a bio will free it. - **/ -void bio_put(struct bio *bio) -{ - BIO_BUG_ON(!atomic_read(&bio->bi_cnt)); - - /* - * last put frees it - */ - if (atomic_dec_and_test(&bio->bi_cnt)) - bio_free(bio); -} -EXPORT_SYMBOL(bio_put); - -inline int bio_phys_segments(struct request_queue *q, struct bio *bio) -{ - if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) - blk_recount_segments(q, bio); - - return bio->bi_phys_segments; -} -EXPORT_SYMBOL(bio_phys_segments); - -/** - * __bio_clone_fast - clone a bio that shares the original bio's biovec - * @bio: destination bio - * @bio_src: bio to clone - * - * Clone a &bio. Caller will own the returned bio, but not - * the actual data it points to. Reference count of returned - * bio will be one. - * - * Caller must ensure that @bio_src is not freed before @bio. 
- */ -void __bio_clone_fast(struct bio *bio, struct bio *bio_src) -{ - BUG_ON(bio->bi_pool && BIO_POOL_IDX(bio) != BIO_POOL_NONE); - - /* - * most users will be overriding ->bi_bdev with a new target, - * so we don't set nor calculate new physical/hw segment counts here - */ - bio->bi_bdev = bio_src->bi_bdev; - bio->bi_flags |= 1 << BIO_CLONED; - bio->bi_rw = bio_src->bi_rw; - bio->bi_iter = bio_src->bi_iter; - bio->bi_io_vec = bio_src->bi_io_vec; -} -EXPORT_SYMBOL(__bio_clone_fast); - -/** - * bio_clone_fast - clone a bio that shares the original bio's biovec - * @bio: bio to clone - * @gfp_mask: allocation priority - * @bs: bio_set to allocate from - * - * Like __bio_clone_fast, only also allocates the returned bio - */ -struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs) -{ - struct bio *b; - - b = bio_alloc_bioset(gfp_mask, 0, bs); - if (!b) - return NULL; - - __bio_clone_fast(b, bio); - - if (bio_integrity(bio)) { - int ret; - - ret = bio_integrity_clone(b, bio, gfp_mask); - - if (ret < 0) { - bio_put(b); - return NULL; - } - } - - return b; -} -EXPORT_SYMBOL(bio_clone_fast); - -/** - * bio_clone_bioset - clone a bio - * @bio_src: bio to clone - * @gfp_mask: allocation priority - * @bs: bio_set to allocate from - * - * Clone bio. Caller will own the returned bio, but not the actual data it - * points to. Reference count of returned bio will be one. - */ -struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask, - struct bio_set *bs) -{ - struct bvec_iter iter; - struct bio_vec bv; - struct bio *bio; - - /* - * Pre immutable biovecs, __bio_clone() used to just do a memcpy from - * bio_src->bi_io_vec to bio->bi_io_vec. - * - * We can't do that anymore, because: - * - * - The point of cloning the biovec is to produce a bio with a biovec - * the caller can modify: bi_idx and bi_bvec_done should be 0. - * - * - The original bio could've had more than BIO_MAX_PAGES biovecs; if - * we tried to clone the whole thing bio_alloc_bioset() would fail. - * But the clone should succeed as long as the number of biovecs we - * actually need to allocate is fewer than BIO_MAX_PAGES. - * - * - Lastly, bi_vcnt should not be looked at or relied upon by code - * that does not own the bio - reason being drivers don't use it for - * iterating over the biovec anymore, so expecting it to be kept up - * to date (i.e. for clones that share the parent biovec) is just - * asking for trouble and would force extra work on - * __bio_clone_fast() anyways. - */ - - bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs); - if (!bio) - return NULL; - - bio->bi_bdev = bio_src->bi_bdev; - bio->bi_rw = bio_src->bi_rw; - bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector; - bio->bi_iter.bi_size = bio_src->bi_iter.bi_size; - - if (bio->bi_rw & REQ_DISCARD) - goto integrity_clone; - - if (bio->bi_rw & REQ_WRITE_SAME) { - bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0]; - goto integrity_clone; - } - - bio_for_each_segment(bv, bio_src, iter) - bio->bi_io_vec[bio->bi_vcnt++] = bv; - -integrity_clone: - if (bio_integrity(bio_src)) { - int ret; - - ret = bio_integrity_clone(bio, bio_src, gfp_mask); - if (ret < 0) { - bio_put(bio); - return NULL; - } - } - - return bio; -} -EXPORT_SYMBOL(bio_clone_bioset); - -/** - * bio_get_nr_vecs - return approx number of vecs - * @bdev: I/O target - * - * Return the approximate number of pages we can send to this target. 
- * There's no guarantee that you will be able to fit this number of pages - * into a bio, it does not account for dynamic restrictions that vary - * on offset. - */ -int bio_get_nr_vecs(struct block_device *bdev) -{ - struct request_queue *q = bdev_get_queue(bdev); - int nr_pages; - - nr_pages = min_t(unsigned, - queue_max_segments(q), - queue_max_sectors(q) / (PAGE_SIZE >> 9) + 1); - - return min_t(unsigned, nr_pages, BIO_MAX_PAGES); - -} -EXPORT_SYMBOL(bio_get_nr_vecs); - -static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page - *page, unsigned int len, unsigned int offset, - unsigned int max_sectors) -{ - int retried_segments = 0; - struct bio_vec *bvec; - - /* - * cloned bio must not modify vec list - */ - if (unlikely(bio_flagged(bio, BIO_CLONED))) - return 0; - - if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors) - return 0; - - /* - * For filesystems with a blocksize smaller than the pagesize - * we will often be called with the same page as last time and - * a consecutive offset. Optimize this special case. - */ - if (bio->bi_vcnt > 0) { - struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1]; - - if (page == prev->bv_page && - offset == prev->bv_offset + prev->bv_len) { - unsigned int prev_bv_len = prev->bv_len; - prev->bv_len += len; - - if (q->merge_bvec_fn) { - struct bvec_merge_data bvm = { - /* prev_bvec is already charged in - bi_size, discharge it in order to - simulate merging updated prev_bvec - as new bvec. */ - .bi_bdev = bio->bi_bdev, - .bi_sector = bio->bi_iter.bi_sector, - .bi_size = bio->bi_iter.bi_size - - prev_bv_len, - .bi_rw = bio->bi_rw, - }; - - if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len) { - prev->bv_len -= len; - return 0; - } - } - - goto done; - } - } - - if (bio->bi_vcnt >= bio->bi_max_vecs) - return 0; - - /* - * we might lose a segment or two here, but rather that than - * make this too complex. - */ - - while (bio->bi_phys_segments >= queue_max_segments(q)) { - - if (retried_segments) - return 0; - - retried_segments = 1; - blk_recount_segments(q, bio); - } - - /* - * setup the new entry, we might clear it again later if we - * cannot add the page - */ - bvec = &bio->bi_io_vec[bio->bi_vcnt]; - bvec->bv_page = page; - bvec->bv_len = len; - bvec->bv_offset = offset; - - /* - * if queue has other restrictions (eg varying max sector size - * depending on offset), it can specify a merge_bvec_fn in the - * queue to get further control - */ - if (q->merge_bvec_fn) { - struct bvec_merge_data bvm = { - .bi_bdev = bio->bi_bdev, - .bi_sector = bio->bi_iter.bi_sector, - .bi_size = bio->bi_iter.bi_size, - .bi_rw = bio->bi_rw, - }; - - /* - * merge_bvec_fn() returns number of bytes it can accept - * at this offset - */ - if (q->merge_bvec_fn(q, &bvm, bvec) < bvec->bv_len) { - bvec->bv_page = NULL; - bvec->bv_len = 0; - bvec->bv_offset = 0; - return 0; - } - } - - /* If we may be able to merge these biovecs, force a recount */ - if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec))) - bio->bi_flags &= ~(1 << BIO_SEG_VALID); - - bio->bi_vcnt++; - bio->bi_phys_segments++; - done: - bio->bi_iter.bi_size += len; - return len; -} - -/** - * bio_add_pc_page - attempt to add page to bio - * @q: the target queue - * @bio: destination bio - * @page: page to add - * @len: vec entry length - * @offset: vec entry offset - * - * Attempt to add a page to the bio_vec maplist. This can fail for a - * number of reasons, such as the bio being full or target block device - * limitations. 
The target block device must allow bio's up to PAGE_SIZE, - * so it is always possible to add a single page to an empty bio. - * - * This should only be used by REQ_PC bios. - */ -int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page, - unsigned int len, unsigned int offset) -{ - return __bio_add_page(q, bio, page, len, offset, - queue_max_hw_sectors(q)); -} -EXPORT_SYMBOL(bio_add_pc_page); - -/** - * bio_add_page - attempt to add page to bio - * @bio: destination bio - * @page: page to add - * @len: vec entry length - * @offset: vec entry offset - * - * Attempt to add a page to the bio_vec maplist. This can fail for a - * number of reasons, such as the bio being full or target block device - * limitations. The target block device must allow bio's up to PAGE_SIZE, - * so it is always possible to add a single page to an empty bio. - */ -int bio_add_page(struct bio *bio, struct page *page, unsigned int len, - unsigned int offset) -{ - struct request_queue *q = bdev_get_queue(bio->bi_bdev); - return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q)); -} -EXPORT_SYMBOL(bio_add_page); - -struct submit_bio_ret { - struct completion event; - int error; -}; - -static void submit_bio_wait_endio(struct bio *bio, int error) -{ - struct submit_bio_ret *ret = bio->bi_private; - - ret->error = error; - complete(&ret->event); -} - -/** - * submit_bio_wait - submit a bio, and wait until it completes - * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead) - * @bio: The &struct bio which describes the I/O - * - * Simple wrapper around submit_bio(). Returns 0 on success, or the error from - * bio_endio() on failure. - */ -int submit_bio_wait(int rw, struct bio *bio) -{ - struct submit_bio_ret ret; - - rw |= REQ_SYNC; - init_completion(&ret.event); - bio->bi_private = &ret; - bio->bi_end_io = submit_bio_wait_endio; - submit_bio(rw, bio); - wait_for_completion(&ret.event); - - return ret.error; -} -EXPORT_SYMBOL(submit_bio_wait); - -/** - * bio_advance - increment/complete a bio by some number of bytes - * @bio: bio to advance - * @bytes: number of bytes to complete - * - * This updates bi_sector, bi_size and bi_idx; if the number of bytes to - * complete doesn't align with a bvec boundary, then bv_len and bv_offset will - * be updated on the last bvec as well. - * - * @bio will then represent the remaining, uncompleted portion of the io. - */ -void bio_advance(struct bio *bio, unsigned bytes) -{ - if (bio_integrity(bio)) - bio_integrity_advance(bio, bytes); - - bio_advance_iter(bio, &bio->bi_iter, bytes); -} -EXPORT_SYMBOL(bio_advance); - -/** - * bio_alloc_pages - allocates a single page for each bvec in a bio - * @bio: bio to allocate pages for - * @gfp_mask: flags for allocation - * - * Allocates pages up to @bio->bi_vcnt. - * - * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages are - * freed. - */ -int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask) -{ - int i; - struct bio_vec *bv; - - bio_for_each_segment_all(bv, bio, i) { - bv->bv_page = alloc_page(gfp_mask); - if (!bv->bv_page) { - while (--bv >= bio->bi_io_vec) - __free_page(bv->bv_page); - return -ENOMEM; - } - } - - return 0; -} -EXPORT_SYMBOL(bio_alloc_pages); - -/** - * bio_copy_data - copy contents of data buffers from one chain of bios to - * another - * @src: source bio list - * @dst: destination bio list - * - * If @src and @dst are single bios, bi_next must be NULL - otherwise, treats - * @src and @dst as linked lists of bios. 
- * - * Stops when it reaches the end of either @src or @dst - that is, copies - * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios). - */ -void bio_copy_data(struct bio *dst, struct bio *src) -{ - struct bvec_iter src_iter, dst_iter; - struct bio_vec src_bv, dst_bv; - void *src_p, *dst_p; - unsigned bytes; - - src_iter = src->bi_iter; - dst_iter = dst->bi_iter; - - while (1) { - if (!src_iter.bi_size) { - src = src->bi_next; - if (!src) - break; - - src_iter = src->bi_iter; - } - - if (!dst_iter.bi_size) { - dst = dst->bi_next; - if (!dst) - break; - - dst_iter = dst->bi_iter; - } - - src_bv = bio_iter_iovec(src, src_iter); - dst_bv = bio_iter_iovec(dst, dst_iter); - - bytes = min(src_bv.bv_len, dst_bv.bv_len); - - src_p = kmap_atomic(src_bv.bv_page); - dst_p = kmap_atomic(dst_bv.bv_page); - - memcpy(dst_p + dst_bv.bv_offset, - src_p + src_bv.bv_offset, - bytes); - - kunmap_atomic(dst_p); - kunmap_atomic(src_p); - - bio_advance_iter(src, &src_iter, bytes); - bio_advance_iter(dst, &dst_iter, bytes); - } -} -EXPORT_SYMBOL(bio_copy_data); - -struct bio_map_data { - int nr_sgvecs; - int is_our_pages; - struct sg_iovec sgvecs[]; -}; - -static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio, - const struct sg_iovec *iov, int iov_count, - int is_our_pages) -{ - memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count); - bmd->nr_sgvecs = iov_count; - bmd->is_our_pages = is_our_pages; - bio->bi_private = bmd; -} - -static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count, - gfp_t gfp_mask) -{ - if (iov_count > UIO_MAXIOV) - return NULL; - - return kmalloc(sizeof(struct bio_map_data) + - sizeof(struct sg_iovec) * iov_count, gfp_mask); -} - -static int __bio_copy_iov(struct bio *bio, const struct sg_iovec *iov, int iov_count, - int to_user, int from_user, int do_free_page) -{ - int ret = 0, i; - struct bio_vec *bvec; - int iov_idx = 0; - unsigned int iov_off = 0; - - bio_for_each_segment_all(bvec, bio, i) { - char *bv_addr = page_address(bvec->bv_page); - unsigned int bv_len = bvec->bv_len; - - while (bv_len && iov_idx < iov_count) { - unsigned int bytes; - char __user *iov_addr; - - bytes = min_t(unsigned int, - iov[iov_idx].iov_len - iov_off, bv_len); - iov_addr = iov[iov_idx].iov_base + iov_off; - - if (!ret) { - if (to_user) - ret = copy_to_user(iov_addr, bv_addr, - bytes); - - if (from_user) - ret = copy_from_user(bv_addr, iov_addr, - bytes); - - if (ret) - ret = -EFAULT; - } - - bv_len -= bytes; - bv_addr += bytes; - iov_addr += bytes; - iov_off += bytes; - - if (iov[iov_idx].iov_len == iov_off) { - iov_idx++; - iov_off = 0; - } - } - - if (do_free_page) - __free_page(bvec->bv_page); - } - - return ret; -} - -/** - * bio_uncopy_user - finish previously mapped bio - * @bio: bio being terminated - * - * Free pages allocated from bio_copy_user() and write back data - * to user space in case of a read. - */ -int bio_uncopy_user(struct bio *bio) -{ - struct bio_map_data *bmd = bio->bi_private; - struct bio_vec *bvec; - int ret = 0, i; - - if (!bio_flagged(bio, BIO_NULL_MAPPED)) { - /* - * if we're in a workqueue, the request is orphaned, so - * don't copy into a random user address space, just free. 
- */ - if (current->mm) - ret = __bio_copy_iov(bio, bmd->sgvecs, bmd->nr_sgvecs, - bio_data_dir(bio) == READ, - 0, bmd->is_our_pages); - else if (bmd->is_our_pages) - bio_for_each_segment_all(bvec, bio, i) - __free_page(bvec->bv_page); - } - kfree(bmd); - bio_put(bio); - return ret; -} -EXPORT_SYMBOL(bio_uncopy_user); - -/** - * bio_copy_user_iov - copy user data to bio - * @q: destination block queue - * @map_data: pointer to the rq_map_data holding pages (if necessary) - * @iov: the iovec. - * @iov_count: number of elements in the iovec - * @write_to_vm: bool indicating writing to pages or not - * @gfp_mask: memory allocation flags - * - * Prepares and returns a bio for indirect user io, bouncing data - * to/from kernel pages as necessary. Must be paired with - * call bio_uncopy_user() on io completion. - */ -struct bio *bio_copy_user_iov(struct request_queue *q, - struct rq_map_data *map_data, - const struct sg_iovec *iov, int iov_count, - int write_to_vm, gfp_t gfp_mask) -{ - struct bio_map_data *bmd; - struct bio_vec *bvec; - struct page *page; - struct bio *bio; - int i, ret; - int nr_pages = 0; - unsigned int len = 0; - unsigned int offset = map_data ? map_data->offset & ~PAGE_MASK : 0; - - for (i = 0; i < iov_count; i++) { - unsigned long uaddr; - unsigned long end; - unsigned long start; - - uaddr = (unsigned long)iov[i].iov_base; - end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT; - start = uaddr >> PAGE_SHIFT; - - /* - * Overflow, abort - */ - if (end < start) - return ERR_PTR(-EINVAL); - - nr_pages += end - start; - len += iov[i].iov_len; - } - - if (offset) - nr_pages++; - - bmd = bio_alloc_map_data(iov_count, gfp_mask); - if (!bmd) - return ERR_PTR(-ENOMEM); - - ret = -ENOMEM; - bio = bio_kmalloc(gfp_mask, nr_pages); - if (!bio) - goto out_bmd; - - if (!write_to_vm) - bio->bi_rw |= REQ_WRITE; - - ret = 0; - - if (map_data) { - nr_pages = 1 << map_data->page_order; - i = map_data->offset / PAGE_SIZE; - } - while (len) { - unsigned int bytes = PAGE_SIZE; - - bytes -= offset; - - if (bytes > len) - bytes = len; - - if (map_data) { - if (i == map_data->nr_entries * nr_pages) { - ret = -ENOMEM; - break; - } - - page = map_data->pages[i / nr_pages]; - page += (i % nr_pages); - - i++; - } else { - page = alloc_page(q->bounce_gfp | gfp_mask); - if (!page) { - ret = -ENOMEM; - break; - } - } - - if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) - break; - - len -= bytes; - offset = 0; - } - - if (ret) - goto cleanup; - - /* - * success - */ - if ((!write_to_vm && (!map_data || !map_data->null_mapped)) || - (map_data && map_data->from_user)) { - ret = __bio_copy_iov(bio, iov, iov_count, 0, 1, 0); - if (ret) - goto cleanup; - } - - bio_set_map_data(bmd, bio, iov, iov_count, map_data ? 0 : 1); - return bio; -cleanup: - if (!map_data) - bio_for_each_segment_all(bvec, bio, i) - __free_page(bvec->bv_page); - - bio_put(bio); -out_bmd: - kfree(bmd); - return ERR_PTR(ret); -} - -/** - * bio_copy_user - copy user data to bio - * @q: destination block queue - * @map_data: pointer to the rq_map_data holding pages (if necessary) - * @uaddr: start of user address - * @len: length in bytes - * @write_to_vm: bool indicating writing to pages or not - * @gfp_mask: memory allocation flags - * - * Prepares and returns a bio for indirect user io, bouncing data - * to/from kernel pages as necessary. Must be paired with - * call bio_uncopy_user() on io completion. 
- */ -struct bio *bio_copy_user(struct request_queue *q, struct rq_map_data *map_data, - unsigned long uaddr, unsigned int len, - int write_to_vm, gfp_t gfp_mask) -{ - struct sg_iovec iov; - - iov.iov_base = (void __user *)uaddr; - iov.iov_len = len; - - return bio_copy_user_iov(q, map_data, &iov, 1, write_to_vm, gfp_mask); -} -EXPORT_SYMBOL(bio_copy_user); - -static struct bio *__bio_map_user_iov(struct request_queue *q, - struct block_device *bdev, - const struct sg_iovec *iov, int iov_count, - int write_to_vm, gfp_t gfp_mask) -{ - int i, j; - int nr_pages = 0; - struct page **pages; - struct bio *bio; - int cur_page = 0; - int ret, offset; - - for (i = 0; i < iov_count; i++) { - unsigned long uaddr = (unsigned long)iov[i].iov_base; - unsigned long len = iov[i].iov_len; - unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; - unsigned long start = uaddr >> PAGE_SHIFT; - - /* - * Overflow, abort - */ - if (end < start) - return ERR_PTR(-EINVAL); - - nr_pages += end - start; - /* - * buffer must be aligned to at least hardsector size for now - */ - if (uaddr & queue_dma_alignment(q)) - return ERR_PTR(-EINVAL); - } - - if (!nr_pages) - return ERR_PTR(-EINVAL); - - bio = bio_kmalloc(gfp_mask, nr_pages); - if (!bio) - return ERR_PTR(-ENOMEM); - - ret = -ENOMEM; - pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask); - if (!pages) - goto out; - - for (i = 0; i < iov_count; i++) { - unsigned long uaddr = (unsigned long)iov[i].iov_base; - unsigned long len = iov[i].iov_len; - unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; - unsigned long start = uaddr >> PAGE_SHIFT; - const int local_nr_pages = end - start; - const int page_limit = cur_page + local_nr_pages; - - ret = get_user_pages_fast(uaddr, local_nr_pages, - write_to_vm, &pages[cur_page]); - if (ret < local_nr_pages) { - ret = -EFAULT; - goto out_unmap; - } - - offset = uaddr & ~PAGE_MASK; - for (j = cur_page; j < page_limit; j++) { - unsigned int bytes = PAGE_SIZE - offset; - - if (len <= 0) - break; - - if (bytes > len) - bytes = len; - - /* - * sorry... - */ - if (bio_add_pc_page(q, bio, pages[j], bytes, offset) < - bytes) - break; - - len -= bytes; - offset = 0; - } - - cur_page = j; - /* - * release the pages we didn't map into the bio, if any - */ - while (j < page_limit) - page_cache_release(pages[j++]); - } - - kfree(pages); - - /* - * set data direction, and check if mapped pages need bouncing - */ - if (!write_to_vm) - bio->bi_rw |= REQ_WRITE; - - bio->bi_bdev = bdev; - bio->bi_flags |= (1 << BIO_USER_MAPPED); - return bio; - - out_unmap: - for (i = 0; i < nr_pages; i++) { - if(!pages[i]) - break; - page_cache_release(pages[i]); - } - out: - kfree(pages); - bio_put(bio); - return ERR_PTR(ret); -} - -/** - * bio_map_user - map user address into bio - * @q: the struct request_queue for the bio - * @bdev: destination block device - * @uaddr: start of user address - * @len: length in bytes - * @write_to_vm: bool indicating writing to pages or not - * @gfp_mask: memory allocation flags - * - * Map the user space address into a bio suitable for io to a block - * device. Returns an error pointer in case of error. 
- */ -struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev, - unsigned long uaddr, unsigned int len, int write_to_vm, - gfp_t gfp_mask) -{ - struct sg_iovec iov; - - iov.iov_base = (void __user *)uaddr; - iov.iov_len = len; - - return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm, gfp_mask); -} -EXPORT_SYMBOL(bio_map_user); - -/** - * bio_map_user_iov - map user sg_iovec table into bio - * @q: the struct request_queue for the bio - * @bdev: destination block device - * @iov: the iovec. - * @iov_count: number of elements in the iovec - * @write_to_vm: bool indicating writing to pages or not - * @gfp_mask: memory allocation flags - * - * Map the user space address into a bio suitable for io to a block - * device. Returns an error pointer in case of error. - */ -struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev, - const struct sg_iovec *iov, int iov_count, - int write_to_vm, gfp_t gfp_mask) -{ - struct bio *bio; - - bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm, - gfp_mask); - if (IS_ERR(bio)) - return bio; - - /* - * subtle -- if __bio_map_user() ended up bouncing a bio, - * it would normally disappear when its bi_end_io is run. - * however, we need it for the unmap, so grab an extra - * reference to it - */ - bio_get(bio); - - return bio; -} - -static void __bio_unmap_user(struct bio *bio) -{ - struct bio_vec *bvec; - int i; - - /* - * make sure we dirty pages we wrote to - */ - bio_for_each_segment_all(bvec, bio, i) { - if (bio_data_dir(bio) == READ) - set_page_dirty_lock(bvec->bv_page); - - page_cache_release(bvec->bv_page); - } - - bio_put(bio); -} - -/** - * bio_unmap_user - unmap a bio - * @bio: the bio being unmapped - * - * Unmap a bio previously mapped by bio_map_user(). Must be called with - * a process context. - * - * bio_unmap_user() may sleep. - */ -void bio_unmap_user(struct bio *bio) -{ - __bio_unmap_user(bio); - bio_put(bio); -} -EXPORT_SYMBOL(bio_unmap_user); - -static void bio_map_kern_endio(struct bio *bio, int err) -{ - bio_put(bio); -} - -static struct bio *__bio_map_kern(struct request_queue *q, void *data, - unsigned int len, gfp_t gfp_mask) -{ - unsigned long kaddr = (unsigned long)data; - unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; - unsigned long start = kaddr >> PAGE_SHIFT; - const int nr_pages = end - start; - int offset, i; - struct bio *bio; - - bio = bio_kmalloc(gfp_mask, nr_pages); - if (!bio) - return ERR_PTR(-ENOMEM); - - offset = offset_in_page(kaddr); - for (i = 0; i < nr_pages; i++) { - unsigned int bytes = PAGE_SIZE - offset; - - if (len <= 0) - break; - - if (bytes > len) - bytes = len; - - if (bio_add_pc_page(q, bio, virt_to_page(data), bytes, - offset) < bytes) - break; - - data += bytes; - len -= bytes; - offset = 0; - } - - bio->bi_end_io = bio_map_kern_endio; - return bio; -} - -/** - * bio_map_kern - map kernel address into bio - * @q: the struct request_queue for the bio - * @data: pointer to buffer to map - * @len: length in bytes - * @gfp_mask: allocation flags for bio allocation - * - * Map the kernel address into a bio suitable for io to a block - * device. Returns an error pointer in case of error. - */ -struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len, - gfp_t gfp_mask) -{ - struct bio *bio; - - bio = __bio_map_kern(q, data, len, gfp_mask); - if (IS_ERR(bio)) - return bio; - - if (bio->bi_iter.bi_size == len) - return bio; - - /* - * Don't support partial mappings. 
- */ - bio_put(bio); - return ERR_PTR(-EINVAL); -} -EXPORT_SYMBOL(bio_map_kern); - -static void bio_copy_kern_endio(struct bio *bio, int err) -{ - struct bio_vec *bvec; - const int read = bio_data_dir(bio) == READ; - struct bio_map_data *bmd = bio->bi_private; - int i; - char *p = bmd->sgvecs[0].iov_base; - - bio_for_each_segment_all(bvec, bio, i) { - char *addr = page_address(bvec->bv_page); - - if (read) - memcpy(p, addr, bvec->bv_len); - - __free_page(bvec->bv_page); - p += bvec->bv_len; - } - - kfree(bmd); - bio_put(bio); -} - -/** - * bio_copy_kern - copy kernel address into bio - * @q: the struct request_queue for the bio - * @data: pointer to buffer to copy - * @len: length in bytes - * @gfp_mask: allocation flags for bio and page allocation - * @reading: data direction is READ - * - * copy the kernel address into a bio suitable for io to a block - * device. Returns an error pointer in case of error. - */ -struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len, - gfp_t gfp_mask, int reading) -{ - struct bio *bio; - struct bio_vec *bvec; - int i; - - bio = bio_copy_user(q, NULL, (unsigned long)data, len, 1, gfp_mask); - if (IS_ERR(bio)) - return bio; - - if (!reading) { - void *p = data; - - bio_for_each_segment_all(bvec, bio, i) { - char *addr = page_address(bvec->bv_page); - - memcpy(addr, p, bvec->bv_len); - p += bvec->bv_len; - } - } - - bio->bi_end_io = bio_copy_kern_endio; - - return bio; -} -EXPORT_SYMBOL(bio_copy_kern); - -/* - * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions - * for performing direct-IO in BIOs. - * - * The problem is that we cannot run set_page_dirty() from interrupt context - * because the required locks are not interrupt-safe. So what we can do is to - * mark the pages dirty _before_ performing IO. And in interrupt context, - * check that the pages are still dirty. If so, fine. If not, redirty them - * in process context. - * - * We special-case compound pages here: normally this means reads into hugetlb - * pages. The logic in here doesn't really work right for compound pages - * because the VM does not uniformly chase down the head page in all cases. - * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't - * handle them at all. So we skip compound pages here at an early stage. - * - * Note that this code is very hard to test under normal circumstances because - * direct-io pins the pages with get_user_pages(). This makes - * is_page_cache_freeable return false, and the VM will not clean the pages. - * But other code (eg, flusher threads) could clean the pages if they are mapped - * pagecache. - * - * Simply disabling the call to bio_set_pages_dirty() is a good way to test the - * deferred bio dirtying paths. - */ - -/* - * bio_set_pages_dirty() will mark all the bio's pages as dirty. - */ -void bio_set_pages_dirty(struct bio *bio) -{ - struct bio_vec *bvec; - int i; - - bio_for_each_segment_all(bvec, bio, i) { - struct page *page = bvec->bv_page; - - if (page && !PageCompound(page)) - set_page_dirty_lock(page); - } -} - -static void bio_release_pages(struct bio *bio) -{ - struct bio_vec *bvec; - int i; - - bio_for_each_segment_all(bvec, bio, i) { - struct page *page = bvec->bv_page; - - if (page) - put_page(page); - } -} - -/* - * bio_check_pages_dirty() will check that all the BIO's pages are still dirty. - * If they are, then fine. If, however, some pages are clean then they must - * have been written out during the direct-IO read. 
So we take another ref on - * the BIO and the offending pages and re-dirty the pages in process context. - * - * It is expected that bio_check_pages_dirty() will wholly own the BIO from - * here on. It will run one page_cache_release() against each page and will - * run one bio_put() against the BIO. - */ - -static void bio_dirty_fn(struct work_struct *work); - -static DECLARE_WORK(bio_dirty_work, bio_dirty_fn); -static DEFINE_SPINLOCK(bio_dirty_lock); -static struct bio *bio_dirty_list; - -/* - * This runs in process context - */ -static void bio_dirty_fn(struct work_struct *work) -{ - unsigned long flags; - struct bio *bio; - - spin_lock_irqsave(&bio_dirty_lock, flags); - bio = bio_dirty_list; - bio_dirty_list = NULL; - spin_unlock_irqrestore(&bio_dirty_lock, flags); - - while (bio) { - struct bio *next = bio->bi_private; - - bio_set_pages_dirty(bio); - bio_release_pages(bio); - bio_put(bio); - bio = next; - } -} - -void bio_check_pages_dirty(struct bio *bio) -{ - struct bio_vec *bvec; - int nr_clean_pages = 0; - int i; - - bio_for_each_segment_all(bvec, bio, i) { - struct page *page = bvec->bv_page; - - if (PageDirty(page) || PageCompound(page)) { - page_cache_release(page); - bvec->bv_page = NULL; - } else { - nr_clean_pages++; - } - } - - if (nr_clean_pages) { - unsigned long flags; - - spin_lock_irqsave(&bio_dirty_lock, flags); - bio->bi_private = bio_dirty_list; - bio_dirty_list = bio; - spin_unlock_irqrestore(&bio_dirty_lock, flags); - schedule_work(&bio_dirty_work); - } else { - bio_put(bio); - } -} - -#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE -void bio_flush_dcache_pages(struct bio *bi) -{ - struct bio_vec bvec; - struct bvec_iter iter; - - bio_for_each_segment(bvec, bi, iter) - flush_dcache_page(bvec.bv_page); -} -EXPORT_SYMBOL(bio_flush_dcache_pages); -#endif - -/** - * bio_endio - end I/O on a bio - * @bio: bio - * @error: error, if any - * - * Description: - * bio_endio() will end I/O on the whole bio. bio_endio() is the - * preferred way to end I/O on a bio, it takes care of clearing - * BIO_UPTODATE on error. @error is 0 on success, and and one of the - * established -Exxxx (-EIO, for instance) error values in case - * something went wrong. No one should call bi_end_io() directly on a - * bio unless they own it and thus know that it has an end_io - * function. - **/ -void bio_endio(struct bio *bio, int error) -{ - while (bio) { - BUG_ON(atomic_read(&bio->bi_remaining) <= 0); - - if (error) - clear_bit(BIO_UPTODATE, &bio->bi_flags); - else if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) - error = -EIO; - - if (!atomic_dec_and_test(&bio->bi_remaining)) - return; - - /* - * Need to have a real endio function for chained bios, - * otherwise various corner cases will break (like stacking - * block devices that save/restore bi_end_io) - however, we want - * to avoid unbounded recursion and blowing the stack. Tail call - * optimization would handle this, but compiling with frame - * pointers also disables gcc's sibling call optimization. - */ - if (bio->bi_end_io == bio_chain_endio) { - struct bio *parent = bio->bi_private; - bio_put(bio); - bio = parent; - } else { - if (bio->bi_end_io) - bio->bi_end_io(bio, error); - bio = NULL; - } - } -} -EXPORT_SYMBOL(bio_endio); - -/** - * bio_endio_nodec - end I/O on a bio, without decrementing bi_remaining - * @bio: bio - * @error: error, if any - * - * For code that has saved and restored bi_end_io; thing hard before using this - * function, probably you should've cloned the entire bio. 
- **/ -void bio_endio_nodec(struct bio *bio, int error) -{ - atomic_inc(&bio->bi_remaining); - bio_endio(bio, error); -} -EXPORT_SYMBOL(bio_endio_nodec); - -/** - * bio_split - split a bio - * @bio: bio to split - * @sectors: number of sectors to split from the front of @bio - * @gfp: gfp mask - * @bs: bio set to allocate from - * - * Allocates and returns a new bio which represents @sectors from the start of - * @bio, and updates @bio to represent the remaining sectors. - * - * The newly allocated bio will point to @bio's bi_io_vec; it is the caller's - * responsibility to ensure that @bio is not freed before the split. - */ -struct bio *bio_split(struct bio *bio, int sectors, - gfp_t gfp, struct bio_set *bs) -{ - struct bio *split = NULL; - - BUG_ON(sectors <= 0); - BUG_ON(sectors >= bio_sectors(bio)); - - split = bio_clone_fast(bio, gfp, bs); - if (!split) - return NULL; - - split->bi_iter.bi_size = sectors << 9; - - if (bio_integrity(split)) - bio_integrity_trim(split, 0, sectors); - - bio_advance(bio, split->bi_iter.bi_size); - - return split; -} -EXPORT_SYMBOL(bio_split); - -/** - * bio_trim - trim a bio - * @bio: bio to trim - * @offset: number of sectors to trim from the front of @bio - * @size: size we want to trim @bio to, in sectors - */ -void bio_trim(struct bio *bio, int offset, int size) -{ - /* 'bio' is a cloned bio which we need to trim to match - * the given offset and size. - */ - - size <<= 9; - if (offset == 0 && size == bio->bi_iter.bi_size) - return; - - clear_bit(BIO_SEG_VALID, &bio->bi_flags); - - bio_advance(bio, offset << 9); - - bio->bi_iter.bi_size = size; -} -EXPORT_SYMBOL_GPL(bio_trim); - -/* - * create memory pools for biovec's in a bio_set. - * use the global biovec slabs created for general use. - */ -mempool_t *biovec_create_pool(int pool_entries) -{ - struct biovec_slab *bp = bvec_slabs + BIOVEC_MAX_IDX; - - return mempool_create_slab_pool(pool_entries, bp->slab); -} - -void bioset_free(struct bio_set *bs) -{ - if (bs->rescue_workqueue) - destroy_workqueue(bs->rescue_workqueue); - - if (bs->bio_pool) - mempool_destroy(bs->bio_pool); - - if (bs->bvec_pool) - mempool_destroy(bs->bvec_pool); - - bioset_integrity_free(bs); - bio_put_slab(bs); - - kfree(bs); -} -EXPORT_SYMBOL(bioset_free); - -/** - * bioset_create - Create a bio_set - * @pool_size: Number of bio and bio_vecs to cache in the mempool - * @front_pad: Number of bytes to allocate in front of the returned bio - * - * Description: - * Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller - * to ask for a number of bytes to be allocated in front of the bio. - * Front pad allocation is useful for embedding the bio inside - * another structure, to avoid allocating extra data to go with the bio. - * Note that the bio must be embedded at the END of that structure always, - * or things will break badly. 
- */ -struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad) -{ - unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec); - struct bio_set *bs; - - bs = kzalloc(sizeof(*bs), GFP_KERNEL); - if (!bs) - return NULL; - - bs->front_pad = front_pad; - - spin_lock_init(&bs->rescue_lock); - bio_list_init(&bs->rescue_list); - INIT_WORK(&bs->rescue_work, bio_alloc_rescue); - - bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad); - if (!bs->bio_slab) { - kfree(bs); - return NULL; - } - - bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab); - if (!bs->bio_pool) - goto bad; - - bs->bvec_pool = biovec_create_pool(pool_size); - if (!bs->bvec_pool) - goto bad; - - bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0); - if (!bs->rescue_workqueue) - goto bad; - - return bs; -bad: - bioset_free(bs); - return NULL; -} -EXPORT_SYMBOL(bioset_create); - -#ifdef CONFIG_BLK_CGROUP -/** - * bio_associate_current - associate a bio with %current - * @bio: target bio - * - * Associate @bio with %current if it hasn't been associated yet. Block - * layer will treat @bio as if it were issued by %current no matter which - * task actually issues it. - * - * This function takes an extra reference of @task's io_context and blkcg - * which will be put when @bio is released. The caller must own @bio, - * ensure %current->io_context exists, and is responsible for synchronizing - * calls to this function. - */ -int bio_associate_current(struct bio *bio) -{ - struct io_context *ioc; - struct cgroup_subsys_state *css; - - if (bio->bi_ioc) - return -EBUSY; - - ioc = current->io_context; - if (!ioc) - return -ENOENT; - - /* acquire active ref on @ioc and associate */ - get_io_context_active(ioc); - bio->bi_ioc = ioc; - - /* associate blkcg if exists */ - rcu_read_lock(); - css = task_css(current, blkio_cgrp_id); - if (css && css_tryget(css)) - bio->bi_css = css; - rcu_read_unlock(); - - return 0; -} - -/** - * bio_disassociate_task - undo bio_associate_current() - * @bio: target bio - */ -void bio_disassociate_task(struct bio *bio) -{ - if (bio->bi_ioc) { - put_io_context(bio->bi_ioc); - bio->bi_ioc = NULL; - } - if (bio->bi_css) { - css_put(bio->bi_css); - bio->bi_css = NULL; - } -} - -#endif /* CONFIG_BLK_CGROUP */ - -static void __init biovec_init_slabs(void) -{ - int i; - - for (i = 0; i < BIOVEC_NR_POOLS; i++) { - int size; - struct biovec_slab *bvs = bvec_slabs + i; - - if (bvs->nr_vecs <= BIO_INLINE_VECS) { - bvs->slab = NULL; - continue; - } - - size = bvs->nr_vecs * sizeof(struct bio_vec); - bvs->slab = kmem_cache_create(bvs->name, size, 0, - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); - } -} - -static int __init init_bio(void) -{ - bio_slab_max = 2; - bio_slab_nr = 0; - bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL); - if (!bio_slabs) - panic("bio: can't allocate bios\n"); - - bio_integrity_init(); - biovec_init_slabs(); - - fs_bio_set = bioset_create(BIO_POOL_SIZE, 0); - if (!fs_bio_set) - panic("bio: can't allocate bios\n"); - - if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE)) - panic("bio: can't create integrity pool\n"); - - return 0; -} -subsys_initcall(init_bio); -- cgit v1.2.3-59-g8ed1b From 2667bcbbd5ed71f29b78ba69f059dbc450e07faf Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 19 May 2014 11:02:18 -0600 Subject: block: move ioprio.c from fs/ to block/ Like commit f9c78b2b, move this block related file outside of fs/ and into the core block directory, block/. 
Signed-off-by: Jens Axboe --- block/Makefile | 3 +- block/ioprio.c | 241 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ fs/Makefile | 2 +- fs/ioprio.c | 241 --------------------------------------------------------- 4 files changed, 244 insertions(+), 243 deletions(-) create mode 100644 block/ioprio.c delete mode 100644 fs/ioprio.c (limited to 'block') diff --git a/block/Makefile b/block/Makefile index 506a0c570be2..b4c4d3b99a2d 100644 --- a/block/Makefile +++ b/block/Makefile @@ -7,7 +7,8 @@ obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \ blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \ blk-iopoll.o blk-lib.o blk-mq.o blk-mq-tag.o \ blk-mq-sysfs.o blk-mq-cpu.o blk-mq-cpumap.o ioctl.o \ - genhd.o scsi_ioctl.o partition-generic.o partitions/ + genhd.o scsi_ioctl.o partition-generic.o ioprio.o \ + partitions/ obj-$(CONFIG_BLK_DEV_BSG) += bsg.o obj-$(CONFIG_BLK_DEV_BSGLIB) += bsg-lib.o diff --git a/block/ioprio.c b/block/ioprio.c new file mode 100644 index 000000000000..e50170ca7c33 --- /dev/null +++ b/block/ioprio.c @@ -0,0 +1,241 @@ +/* + * fs/ioprio.c + * + * Copyright (C) 2004 Jens Axboe + * + * Helper functions for setting/querying io priorities of processes. The + * system calls closely mimmick getpriority/setpriority, see the man page for + * those. The prio argument is a composite of prio class and prio data, where + * the data argument has meaning within that class. The standard scheduling + * classes have 8 distinct prio levels, with 0 being the highest prio and 7 + * being the lowest. + * + * IOW, setting BE scheduling class with prio 2 is done ala: + * + * unsigned int prio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 2; + * + * ioprio_set(PRIO_PROCESS, pid, prio); + * + * See also Documentation/block/ioprio.txt + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int set_task_ioprio(struct task_struct *task, int ioprio) +{ + int err; + struct io_context *ioc; + const struct cred *cred = current_cred(), *tcred; + + rcu_read_lock(); + tcred = __task_cred(task); + if (!uid_eq(tcred->uid, cred->euid) && + !uid_eq(tcred->uid, cred->uid) && !capable(CAP_SYS_NICE)) { + rcu_read_unlock(); + return -EPERM; + } + rcu_read_unlock(); + + err = security_task_setioprio(task, ioprio); + if (err) + return err; + + ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE); + if (ioc) { + ioc->ioprio = ioprio; + put_io_context(ioc); + } + + return err; +} +EXPORT_SYMBOL_GPL(set_task_ioprio); + +SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio) +{ + int class = IOPRIO_PRIO_CLASS(ioprio); + int data = IOPRIO_PRIO_DATA(ioprio); + struct task_struct *p, *g; + struct user_struct *user; + struct pid *pgrp; + kuid_t uid; + int ret; + + switch (class) { + case IOPRIO_CLASS_RT: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + /* fall through, rt has prio field too */ + case IOPRIO_CLASS_BE: + if (data >= IOPRIO_BE_NR || data < 0) + return -EINVAL; + + break; + case IOPRIO_CLASS_IDLE: + break; + case IOPRIO_CLASS_NONE: + if (data) + return -EINVAL; + break; + default: + return -EINVAL; + } + + ret = -ESRCH; + rcu_read_lock(); + switch (which) { + case IOPRIO_WHO_PROCESS: + if (!who) + p = current; + else + p = find_task_by_vpid(who); + if (p) + ret = set_task_ioprio(p, ioprio); + break; + case IOPRIO_WHO_PGRP: + if (!who) + pgrp = task_pgrp(current); + else + pgrp = find_vpid(who); + do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { + ret = set_task_ioprio(p, ioprio); + if (ret) + break; + } 
while_each_pid_thread(pgrp, PIDTYPE_PGID, p); + break; + case IOPRIO_WHO_USER: + uid = make_kuid(current_user_ns(), who); + if (!uid_valid(uid)) + break; + if (!who) + user = current_user(); + else + user = find_user(uid); + + if (!user) + break; + + do_each_thread(g, p) { + if (!uid_eq(task_uid(p), uid)) + continue; + ret = set_task_ioprio(p, ioprio); + if (ret) + goto free_uid; + } while_each_thread(g, p); +free_uid: + if (who) + free_uid(user); + break; + default: + ret = -EINVAL; + } + + rcu_read_unlock(); + return ret; +} + +static int get_task_ioprio(struct task_struct *p) +{ + int ret; + + ret = security_task_getioprio(p); + if (ret) + goto out; + ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM); + if (p->io_context) + ret = p->io_context->ioprio; +out: + return ret; +} + +int ioprio_best(unsigned short aprio, unsigned short bprio) +{ + unsigned short aclass = IOPRIO_PRIO_CLASS(aprio); + unsigned short bclass = IOPRIO_PRIO_CLASS(bprio); + + if (aclass == IOPRIO_CLASS_NONE) + aclass = IOPRIO_CLASS_BE; + if (bclass == IOPRIO_CLASS_NONE) + bclass = IOPRIO_CLASS_BE; + + if (aclass == bclass) + return min(aprio, bprio); + if (aclass > bclass) + return bprio; + else + return aprio; +} + +SYSCALL_DEFINE2(ioprio_get, int, which, int, who) +{ + struct task_struct *g, *p; + struct user_struct *user; + struct pid *pgrp; + kuid_t uid; + int ret = -ESRCH; + int tmpio; + + rcu_read_lock(); + switch (which) { + case IOPRIO_WHO_PROCESS: + if (!who) + p = current; + else + p = find_task_by_vpid(who); + if (p) + ret = get_task_ioprio(p); + break; + case IOPRIO_WHO_PGRP: + if (!who) + pgrp = task_pgrp(current); + else + pgrp = find_vpid(who); + do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { + tmpio = get_task_ioprio(p); + if (tmpio < 0) + continue; + if (ret == -ESRCH) + ret = tmpio; + else + ret = ioprio_best(ret, tmpio); + } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); + break; + case IOPRIO_WHO_USER: + uid = make_kuid(current_user_ns(), who); + if (!who) + user = current_user(); + else + user = find_user(uid); + + if (!user) + break; + + do_each_thread(g, p) { + if (!uid_eq(task_uid(p), user->uid)) + continue; + tmpio = get_task_ioprio(p); + if (tmpio < 0) + continue; + if (ret == -ESRCH) + ret = tmpio; + else + ret = ioprio_best(ret, tmpio); + } while_each_thread(g, p); + + if (who) + free_uid(user); + break; + default: + ret = -EINVAL; + } + + rcu_read_unlock(); + return ret; +} diff --git a/fs/Makefile b/fs/Makefile index 1ed9eab5e0a9..4030cbfbc9af 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -14,7 +14,7 @@ obj-y := open.o read_write.o file_table.o super.o \ stack.o fs_struct.o statfs.o ifeq ($(CONFIG_BLOCK),y) -obj-y += buffer.o block_dev.o direct-io.o mpage.o ioprio.o +obj-y += buffer.o block_dev.o direct-io.o mpage.o else obj-y += no-block.o endif diff --git a/fs/ioprio.c b/fs/ioprio.c deleted file mode 100644 index e50170ca7c33..000000000000 --- a/fs/ioprio.c +++ /dev/null @@ -1,241 +0,0 @@ -/* - * fs/ioprio.c - * - * Copyright (C) 2004 Jens Axboe - * - * Helper functions for setting/querying io priorities of processes. The - * system calls closely mimmick getpriority/setpriority, see the man page for - * those. The prio argument is a composite of prio class and prio data, where - * the data argument has meaning within that class. The standard scheduling - * classes have 8 distinct prio levels, with 0 being the highest prio and 7 - * being the lowest. 
- * - * IOW, setting BE scheduling class with prio 2 is done ala: - * - * unsigned int prio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 2; - * - * ioprio_set(PRIO_PROCESS, pid, prio); - * - * See also Documentation/block/ioprio.txt - * - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include - -int set_task_ioprio(struct task_struct *task, int ioprio) -{ - int err; - struct io_context *ioc; - const struct cred *cred = current_cred(), *tcred; - - rcu_read_lock(); - tcred = __task_cred(task); - if (!uid_eq(tcred->uid, cred->euid) && - !uid_eq(tcred->uid, cred->uid) && !capable(CAP_SYS_NICE)) { - rcu_read_unlock(); - return -EPERM; - } - rcu_read_unlock(); - - err = security_task_setioprio(task, ioprio); - if (err) - return err; - - ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE); - if (ioc) { - ioc->ioprio = ioprio; - put_io_context(ioc); - } - - return err; -} -EXPORT_SYMBOL_GPL(set_task_ioprio); - -SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio) -{ - int class = IOPRIO_PRIO_CLASS(ioprio); - int data = IOPRIO_PRIO_DATA(ioprio); - struct task_struct *p, *g; - struct user_struct *user; - struct pid *pgrp; - kuid_t uid; - int ret; - - switch (class) { - case IOPRIO_CLASS_RT: - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - /* fall through, rt has prio field too */ - case IOPRIO_CLASS_BE: - if (data >= IOPRIO_BE_NR || data < 0) - return -EINVAL; - - break; - case IOPRIO_CLASS_IDLE: - break; - case IOPRIO_CLASS_NONE: - if (data) - return -EINVAL; - break; - default: - return -EINVAL; - } - - ret = -ESRCH; - rcu_read_lock(); - switch (which) { - case IOPRIO_WHO_PROCESS: - if (!who) - p = current; - else - p = find_task_by_vpid(who); - if (p) - ret = set_task_ioprio(p, ioprio); - break; - case IOPRIO_WHO_PGRP: - if (!who) - pgrp = task_pgrp(current); - else - pgrp = find_vpid(who); - do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { - ret = set_task_ioprio(p, ioprio); - if (ret) - break; - } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); - break; - case IOPRIO_WHO_USER: - uid = make_kuid(current_user_ns(), who); - if (!uid_valid(uid)) - break; - if (!who) - user = current_user(); - else - user = find_user(uid); - - if (!user) - break; - - do_each_thread(g, p) { - if (!uid_eq(task_uid(p), uid)) - continue; - ret = set_task_ioprio(p, ioprio); - if (ret) - goto free_uid; - } while_each_thread(g, p); -free_uid: - if (who) - free_uid(user); - break; - default: - ret = -EINVAL; - } - - rcu_read_unlock(); - return ret; -} - -static int get_task_ioprio(struct task_struct *p) -{ - int ret; - - ret = security_task_getioprio(p); - if (ret) - goto out; - ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM); - if (p->io_context) - ret = p->io_context->ioprio; -out: - return ret; -} - -int ioprio_best(unsigned short aprio, unsigned short bprio) -{ - unsigned short aclass = IOPRIO_PRIO_CLASS(aprio); - unsigned short bclass = IOPRIO_PRIO_CLASS(bprio); - - if (aclass == IOPRIO_CLASS_NONE) - aclass = IOPRIO_CLASS_BE; - if (bclass == IOPRIO_CLASS_NONE) - bclass = IOPRIO_CLASS_BE; - - if (aclass == bclass) - return min(aprio, bprio); - if (aclass > bclass) - return bprio; - else - return aprio; -} - -SYSCALL_DEFINE2(ioprio_get, int, which, int, who) -{ - struct task_struct *g, *p; - struct user_struct *user; - struct pid *pgrp; - kuid_t uid; - int ret = -ESRCH; - int tmpio; - - rcu_read_lock(); - switch (which) { - case IOPRIO_WHO_PROCESS: - if (!who) - p = current; - else - p = find_task_by_vpid(who); - if (p) - ret = get_task_ioprio(p); - break; - case 
IOPRIO_WHO_PGRP: - if (!who) - pgrp = task_pgrp(current); - else - pgrp = find_vpid(who); - do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { - tmpio = get_task_ioprio(p); - if (tmpio < 0) - continue; - if (ret == -ESRCH) - ret = tmpio; - else - ret = ioprio_best(ret, tmpio); - } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); - break; - case IOPRIO_WHO_USER: - uid = make_kuid(current_user_ns(), who); - if (!who) - user = current_user(); - else - user = find_user(uid); - - if (!user) - break; - - do_each_thread(g, p) { - if (!uid_eq(task_uid(p), user->uid)) - continue; - tmpio = get_task_ioprio(p); - if (tmpio < 0) - continue; - if (ret == -ESRCH) - ret = tmpio; - else - ret = ioprio_best(ret, tmpio); - } while_each_thread(g, p); - - if (who) - free_uid(user); - break; - default: - ret = -EINVAL; - } - - rcu_read_unlock(); - return ret; -} -- cgit v1.2.3-59-g8ed1b From e93ecf602beb8439f0bdcc1fa2cbc1f31fdfb8e2 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 19 May 2014 09:17:48 -0600 Subject: blk-mq: move the cache friendly bitmap type of out blk-mq-tag We will use it for the pending list in blk-mq core as well. Signed-off-by: Jens Axboe --- block/blk-mq-tag.c | 10 +++++----- block/blk-mq-tag.h | 9 +++------ block/blk-mq.h | 9 +++++++++ 3 files changed, 17 insertions(+), 11 deletions(-) (limited to 'block') diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 8d526a3e02f6..03ce6a11ba79 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -21,7 +21,7 @@ static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt) int i; for (i = 0; i < bt->map_nr; i++) { - struct blk_mq_bitmap *bm = &bt->map[i]; + struct blk_align_bitmap *bm = &bt->map[i]; int ret; ret = find_first_zero_bit(&bm->word, bm->depth); @@ -40,7 +40,7 @@ bool blk_mq_has_free_tags(struct blk_mq_tags *tags) return bt_has_free_tags(&tags->bitmap_tags); } -static int __bt_get_word(struct blk_mq_bitmap *bm, unsigned int last_tag) +static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag) { int tag, org_last_tag, end; @@ -283,7 +283,7 @@ static void bt_for_each_free(struct blk_mq_bitmap_tags *bt, int i; for (i = 0; i < bt->map_nr; i++) { - struct blk_mq_bitmap *bm = &bt->map[i]; + struct blk_align_bitmap *bm = &bt->map[i]; int bit = 0; do { @@ -323,7 +323,7 @@ static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt) unsigned int i, used; for (i = 0, used = 0; i < bt->map_nr; i++) { - struct blk_mq_bitmap *bm = &bt->map[i]; + struct blk_align_bitmap *bm = &bt->map[i]; used += bitmap_weight(&bm->word, bm->depth); } @@ -361,7 +361,7 @@ static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth, } nr = ALIGN(depth, tags_per_word) / tags_per_word; - bt->map = kzalloc_node(nr * sizeof(struct blk_mq_bitmap), + bt->map = kzalloc_node(nr * sizeof(struct blk_align_bitmap), GFP_KERNEL, node); if (!bt->map) return -ENOMEM; diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index 7aa9f0665489..9014269f3910 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -1,6 +1,8 @@ #ifndef INT_BLK_MQ_TAG_H #define INT_BLK_MQ_TAG_H +#include "blk-mq.h" + enum { BT_WAIT_QUEUES = 8, BT_WAIT_BATCH = 8, @@ -14,18 +16,13 @@ struct bt_wait_state { #define TAG_TO_INDEX(bt, tag) ((tag) >> (bt)->bits_per_word) #define TAG_TO_BIT(bt, tag) ((tag) & ((1 << (bt)->bits_per_word) - 1)) -struct blk_mq_bitmap { - unsigned long word; - unsigned long depth; -} ____cacheline_aligned_in_smp; - struct blk_mq_bitmap_tags { unsigned int depth; unsigned int wake_cnt; unsigned int bits_per_word; unsigned int map_nr; - struct 
blk_mq_bitmap *map; + struct blk_align_bitmap *map; unsigned int wake_index; struct bt_wait_state *bs; diff --git a/block/blk-mq.h b/block/blk-mq.h index 97cfab9c092f..5e5a378962b7 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -52,4 +52,13 @@ void blk_mq_disable_hotplug(void); extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set); extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues); +/* + * Basic implementation of sparser bitmap, allowing the user to spread + * the bits over more cachelines. + */ +struct blk_align_bitmap { + unsigned long word; + unsigned long depth; +} ____cacheline_aligned_in_smp; + #endif -- cgit v1.2.3-59-g8ed1b From 1429d7c9467e1e3de0b0ff91d7e4d67c1a92f8a3 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 19 May 2014 09:23:55 -0600 Subject: blk-mq: switch ctx pending map to the sparser blk_align_bitmap Each hardware queue has a bitmap of software queues with pending requests. When new IO is queued on a software queue, the bit is set, and when IO is pruned on a hardware queue run, the bit is cleared. This causes a lot of traffic. Switch this from the regular BITS_PER_LONG bitmap to a sparser layout, similarly to what was done for blk-mq tagging. 20% performance increase was observed for single threaded IO, and about 15% performanc increase on multiple threads driving the same device. Signed-off-by: Jens Axboe --- block/blk-mq.c | 119 +++++++++++++++++++++++++++++++++++++------------ include/linux/blk-mq.h | 10 ++++- 2 files changed, 99 insertions(+), 30 deletions(-) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index 526feee31bff..e862c4408427 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -56,21 +56,40 @@ static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx) { unsigned int i; - for (i = 0; i < hctx->nr_ctx_map; i++) - if (hctx->ctx_map[i]) + for (i = 0; i < hctx->ctx_map.map_size; i++) + if (hctx->ctx_map.map[i].word) return true; return false; } +static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx, + struct blk_mq_ctx *ctx) +{ + return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word]; +} + +#define CTX_TO_BIT(hctx, ctx) \ + ((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1)) + /* * Mark this ctx as having pending work in this hardware queue */ static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx) { - if (!test_bit(ctx->index_hw, hctx->ctx_map)) - set_bit(ctx->index_hw, hctx->ctx_map); + struct blk_align_bitmap *bm = get_bm(hctx, ctx); + + if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word)) + set_bit(CTX_TO_BIT(hctx, ctx), &bm->word); +} + +static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx, + struct blk_mq_ctx *ctx) +{ + struct blk_align_bitmap *bm = get_bm(hctx, ctx); + + clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word); } static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx, @@ -614,6 +633,40 @@ static bool blk_mq_attempt_merge(struct request_queue *q, return false; } +/* + * Process software queues that have been marked busy, splicing them + * to the for-dispatch + */ +static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list) +{ + struct blk_mq_ctx *ctx; + int i; + + for (i = 0; i < hctx->ctx_map.map_size; i++) { + struct blk_align_bitmap *bm = &hctx->ctx_map.map[i]; + unsigned int off, bit; + + if (!bm->word) + continue; + + bit = 0; + off = i * hctx->ctx_map.bits_per_word; + do { + bit = find_next_bit(&bm->word, bm->depth, bit); + if (bit >= bm->depth) + 
break; + + ctx = hctx->ctxs[bit + off]; + clear_bit(bit, &bm->word); + spin_lock(&ctx->lock); + list_splice_tail_init(&ctx->rq_list, list); + spin_unlock(&ctx->lock); + + bit++; + } while (1); + } +} + /* * Run this hardware queue, pulling any software queues mapped to it in. * Note that this function currently has various problems around ordering @@ -623,10 +676,9 @@ static bool blk_mq_attempt_merge(struct request_queue *q, static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) { struct request_queue *q = hctx->queue; - struct blk_mq_ctx *ctx; struct request *rq; LIST_HEAD(rq_list); - int bit, queued; + int queued; WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)); @@ -638,14 +690,7 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) /* * Touch any software queue that has pending entries. */ - for_each_set_bit(bit, hctx->ctx_map, hctx->nr_ctx) { - clear_bit(bit, hctx->ctx_map); - ctx = hctx->ctxs[bit]; - - spin_lock(&ctx->lock); - list_splice_tail_init(&ctx->rq_list, &rq_list); - spin_unlock(&ctx->lock); - } + flush_busy_ctxs(hctx, &rq_list); /* * If we have previous entries on our dispatch list, grab them @@ -658,14 +703,10 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) spin_unlock(&hctx->lock); } - /* - * Delete and return all entries from our dispatch list - */ - queued = 0; - /* * Now process all the entries, sending them to the driver. */ + queued = 0; while (!list_empty(&rq_list)) { int ret; @@ -1158,7 +1199,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action, spin_lock(&ctx->lock); if (!list_empty(&ctx->rq_list)) { list_splice_init(&ctx->rq_list, &tmp); - clear_bit(ctx->index_hw, hctx->ctx_map); + blk_mq_hctx_clear_pending(hctx, ctx); } spin_unlock(&ctx->lock); @@ -1298,6 +1339,34 @@ fail: return NULL; } +static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap) +{ + kfree(bitmap->map); +} + +static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node) +{ + unsigned int bpw = 8, total, num_maps, i; + + bitmap->bits_per_word = bpw; + + num_maps = ALIGN(nr_cpu_ids, bpw) / bpw; + bitmap->map = kzalloc_node(num_maps * sizeof(struct blk_align_bitmap), + GFP_KERNEL, node); + if (!bitmap->map) + return -ENOMEM; + + bitmap->map_size = num_maps; + + total = nr_cpu_ids; + for (i = 0; i < num_maps; i++) { + bitmap->map[i].depth = min(total, bitmap->bits_per_word); + total -= bitmap->map[i].depth; + } + + return 0; +} + static int blk_mq_init_hw_queues(struct request_queue *q, struct blk_mq_tag_set *set) { @@ -1308,7 +1377,6 @@ static int blk_mq_init_hw_queues(struct request_queue *q, * Initialize hardware queues */ queue_for_each_hw_ctx(q, hctx, i) { - unsigned int num_maps; int node; node = hctx->numa_node; @@ -1339,13 +1407,9 @@ static int blk_mq_init_hw_queues(struct request_queue *q, if (!hctx->ctxs) break; - num_maps = ALIGN(nr_cpu_ids, BITS_PER_LONG) / BITS_PER_LONG; - hctx->ctx_map = kzalloc_node(num_maps * sizeof(unsigned long), - GFP_KERNEL, node); - if (!hctx->ctx_map) + if (blk_mq_alloc_bitmap(&hctx->ctx_map, node)) break; - hctx->nr_ctx_map = num_maps; hctx->nr_ctx = 0; if (set->ops->init_hctx && @@ -1368,7 +1432,7 @@ static int blk_mq_init_hw_queues(struct request_queue *q, blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); kfree(hctx->ctxs); - kfree(hctx->ctx_map); + blk_mq_free_bitmap(&hctx->ctx_map); } return 1; @@ -1542,7 +1606,6 @@ void blk_mq_free_queue(struct request_queue *q) int i; queue_for_each_hw_ctx(q, hctx, i) { - kfree(hctx->ctx_map); kfree(hctx->ctxs); 
blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); if (q->mq_ops->exit_hctx) diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index f83d15f6e1c1..952e558ee598 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -11,6 +11,12 @@ struct blk_mq_cpu_notifier { void (*notify)(void *data, unsigned long action, unsigned int cpu); }; +struct blk_mq_ctxmap { + unsigned int map_size; + unsigned int bits_per_word; + struct blk_align_bitmap *map; +}; + struct blk_mq_hw_ctx { struct { spinlock_t lock; @@ -31,8 +37,8 @@ struct blk_mq_hw_ctx { void *driver_data; - unsigned int nr_ctx_map; - unsigned long *ctx_map; + struct blk_mq_ctxmap ctx_map; + unsigned int nr_ctx; struct blk_mq_ctx **ctxs; -- cgit v1.2.3-59-g8ed1b From 719c555f4424b194905aa3512a754c0444f27ce8 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 19 May 2014 20:01:52 -0600 Subject: block: move mm/bounce.c to block/ Continue moving some of the block files that are scattered around. bounce.c contains only code for bouncing the contents of a bio. It's block proper code, not mm code. Suggested-by: Ming Lei Signed-off-by: Jens Axboe --- block/Makefile | 1 + block/bounce.c | 287 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ mm/Makefile | 1 - mm/bounce.c | 287 --------------------------------------------------------- 4 files changed, 288 insertions(+), 288 deletions(-) create mode 100644 block/bounce.c delete mode 100644 mm/bounce.c (limited to 'block') diff --git a/block/Makefile b/block/Makefile index b4c4d3b99a2d..a2ce6ac935ec 100644 --- a/block/Makefile +++ b/block/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \ genhd.o scsi_ioctl.o partition-generic.o ioprio.o \ partitions/ +obj-$(CONFIG_BOUNCE) += bounce.o obj-$(CONFIG_BLK_DEV_BSG) += bsg.o obj-$(CONFIG_BLK_DEV_BSGLIB) += bsg-lib.o obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o diff --git a/block/bounce.c b/block/bounce.c new file mode 100644 index 000000000000..523918b8c6dc --- /dev/null +++ b/block/bounce.c @@ -0,0 +1,287 @@ +/* bounce buffer handling for block devices + * + * - Split from highmem.c + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define POOL_SIZE 64 +#define ISA_POOL_SIZE 16 + +static mempool_t *page_pool, *isa_page_pool; + +#if defined(CONFIG_HIGHMEM) || defined(CONFIG_NEED_BOUNCE_POOL) +static __init int init_emergency_pool(void) +{ +#if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG) + if (max_pfn <= max_low_pfn) + return 0; +#endif + + page_pool = mempool_create_page_pool(POOL_SIZE, 0); + BUG_ON(!page_pool); + printk("bounce pool size: %d pages\n", POOL_SIZE); + + return 0; +} + +__initcall(init_emergency_pool); +#endif + +#ifdef CONFIG_HIGHMEM +/* + * highmem version, map in to vec + */ +static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom) +{ + unsigned long flags; + unsigned char *vto; + + local_irq_save(flags); + vto = kmap_atomic(to->bv_page); + memcpy(vto + to->bv_offset, vfrom, to->bv_len); + kunmap_atomic(vto); + local_irq_restore(flags); +} + +#else /* CONFIG_HIGHMEM */ + +#define bounce_copy_vec(to, vfrom) \ + memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len) + +#endif /* CONFIG_HIGHMEM */ + +/* + * allocate pages in the DMA region for the ISA pool + */ +static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data) +{ + return mempool_alloc_pages(gfp_mask | GFP_DMA, data); +} + +/* + * gets called "every" time 
someone init's a queue with BLK_BOUNCE_ISA + * as the max address, so check if the pool has already been created. + */ +int init_emergency_isa_pool(void) +{ + if (isa_page_pool) + return 0; + + isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa, + mempool_free_pages, (void *) 0); + BUG_ON(!isa_page_pool); + + printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE); + return 0; +} + +/* + * Simple bounce buffer support for highmem pages. Depending on the + * queue gfp mask set, *to may or may not be a highmem page. kmap it + * always, it will do the Right Thing + */ +static void copy_to_high_bio_irq(struct bio *to, struct bio *from) +{ + unsigned char *vfrom; + struct bio_vec tovec, *fromvec = from->bi_io_vec; + struct bvec_iter iter; + + bio_for_each_segment(tovec, to, iter) { + if (tovec.bv_page != fromvec->bv_page) { + /* + * fromvec->bv_offset and fromvec->bv_len might have + * been modified by the block layer, so use the original + * copy, bounce_copy_vec already uses tovec->bv_len + */ + vfrom = page_address(fromvec->bv_page) + + tovec.bv_offset; + + bounce_copy_vec(&tovec, vfrom); + flush_dcache_page(tovec.bv_page); + } + + fromvec++; + } +} + +static void bounce_end_io(struct bio *bio, mempool_t *pool, int err) +{ + struct bio *bio_orig = bio->bi_private; + struct bio_vec *bvec, *org_vec; + int i; + + if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags)) + set_bit(BIO_EOPNOTSUPP, &bio_orig->bi_flags); + + /* + * free up bounce indirect pages used + */ + bio_for_each_segment_all(bvec, bio, i) { + org_vec = bio_orig->bi_io_vec + i; + if (bvec->bv_page == org_vec->bv_page) + continue; + + dec_zone_page_state(bvec->bv_page, NR_BOUNCE); + mempool_free(bvec->bv_page, pool); + } + + bio_endio(bio_orig, err); + bio_put(bio); +} + +static void bounce_end_io_write(struct bio *bio, int err) +{ + bounce_end_io(bio, page_pool, err); +} + +static void bounce_end_io_write_isa(struct bio *bio, int err) +{ + + bounce_end_io(bio, isa_page_pool, err); +} + +static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err) +{ + struct bio *bio_orig = bio->bi_private; + + if (test_bit(BIO_UPTODATE, &bio->bi_flags)) + copy_to_high_bio_irq(bio_orig, bio); + + bounce_end_io(bio, pool, err); +} + +static void bounce_end_io_read(struct bio *bio, int err) +{ + __bounce_end_io_read(bio, page_pool, err); +} + +static void bounce_end_io_read_isa(struct bio *bio, int err) +{ + __bounce_end_io_read(bio, isa_page_pool, err); +} + +#ifdef CONFIG_NEED_BOUNCE_POOL +static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio) +{ + if (bio_data_dir(bio) != WRITE) + return 0; + + if (!bdi_cap_stable_pages_required(&q->backing_dev_info)) + return 0; + + return test_bit(BIO_SNAP_STABLE, &bio->bi_flags); +} +#else +static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio) +{ + return 0; +} +#endif /* CONFIG_NEED_BOUNCE_POOL */ + +static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig, + mempool_t *pool, int force) +{ + struct bio *bio; + int rw = bio_data_dir(*bio_orig); + struct bio_vec *to, from; + struct bvec_iter iter; + unsigned i; + + if (force) + goto bounce; + bio_for_each_segment(from, *bio_orig, iter) + if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q)) + goto bounce; + + return; +bounce: + bio = bio_clone_bioset(*bio_orig, GFP_NOIO, fs_bio_set); + + bio_for_each_segment_all(to, bio, i) { + struct page *page = to->bv_page; + + if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force) + continue; + + 
inc_zone_page_state(to->bv_page, NR_BOUNCE); + to->bv_page = mempool_alloc(pool, q->bounce_gfp); + + if (rw == WRITE) { + char *vto, *vfrom; + + flush_dcache_page(page); + + vto = page_address(to->bv_page) + to->bv_offset; + vfrom = kmap_atomic(page) + to->bv_offset; + memcpy(vto, vfrom, to->bv_len); + kunmap_atomic(vfrom); + } + } + + trace_block_bio_bounce(q, *bio_orig); + + bio->bi_flags |= (1 << BIO_BOUNCED); + + if (pool == page_pool) { + bio->bi_end_io = bounce_end_io_write; + if (rw == READ) + bio->bi_end_io = bounce_end_io_read; + } else { + bio->bi_end_io = bounce_end_io_write_isa; + if (rw == READ) + bio->bi_end_io = bounce_end_io_read_isa; + } + + bio->bi_private = *bio_orig; + *bio_orig = bio; +} + +void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig) +{ + int must_bounce; + mempool_t *pool; + + /* + * Data-less bio, nothing to bounce + */ + if (!bio_has_data(*bio_orig)) + return; + + must_bounce = must_snapshot_stable_pages(q, *bio_orig); + + /* + * for non-isa bounce case, just check if the bounce pfn is equal + * to or bigger than the highest pfn in the system -- in that case, + * don't waste time iterating over bio segments + */ + if (!(q->bounce_gfp & GFP_DMA)) { + if (queue_bounce_pfn(q) >= blk_max_pfn && !must_bounce) + return; + pool = page_pool; + } else { + BUG_ON(!isa_page_pool); + pool = isa_page_pool; + } + + /* + * slow path + */ + __blk_queue_bounce(q, bio_orig, pool, must_bounce); +} + +EXPORT_SYMBOL(blk_queue_bounce); diff --git a/mm/Makefile b/mm/Makefile index b484452dac57..0173940407f6 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -30,7 +30,6 @@ endif obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o -obj-$(CONFIG_BOUNCE) += bounce.o obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o obj-$(CONFIG_FRONTSWAP) += frontswap.o obj-$(CONFIG_ZSWAP) += zswap.o diff --git a/mm/bounce.c b/mm/bounce.c deleted file mode 100644 index 523918b8c6dc..000000000000 --- a/mm/bounce.c +++ /dev/null @@ -1,287 +0,0 @@ -/* bounce buffer handling for block devices - * - * - Split from highmem.c - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#define POOL_SIZE 64 -#define ISA_POOL_SIZE 16 - -static mempool_t *page_pool, *isa_page_pool; - -#if defined(CONFIG_HIGHMEM) || defined(CONFIG_NEED_BOUNCE_POOL) -static __init int init_emergency_pool(void) -{ -#if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG) - if (max_pfn <= max_low_pfn) - return 0; -#endif - - page_pool = mempool_create_page_pool(POOL_SIZE, 0); - BUG_ON(!page_pool); - printk("bounce pool size: %d pages\n", POOL_SIZE); - - return 0; -} - -__initcall(init_emergency_pool); -#endif - -#ifdef CONFIG_HIGHMEM -/* - * highmem version, map in to vec - */ -static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom) -{ - unsigned long flags; - unsigned char *vto; - - local_irq_save(flags); - vto = kmap_atomic(to->bv_page); - memcpy(vto + to->bv_offset, vfrom, to->bv_len); - kunmap_atomic(vto); - local_irq_restore(flags); -} - -#else /* CONFIG_HIGHMEM */ - -#define bounce_copy_vec(to, vfrom) \ - memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len) - -#endif /* CONFIG_HIGHMEM */ - -/* - * allocate pages in the DMA region for the ISA pool - */ -static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data) -{ - return mempool_alloc_pages(gfp_mask | GFP_DMA, data); -} - -/* - * gets called "every" time someone init's a queue with BLK_BOUNCE_ISA - * as the max address, so check 
if the pool has already been created. - */ -int init_emergency_isa_pool(void) -{ - if (isa_page_pool) - return 0; - - isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa, - mempool_free_pages, (void *) 0); - BUG_ON(!isa_page_pool); - - printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE); - return 0; -} - -/* - * Simple bounce buffer support for highmem pages. Depending on the - * queue gfp mask set, *to may or may not be a highmem page. kmap it - * always, it will do the Right Thing - */ -static void copy_to_high_bio_irq(struct bio *to, struct bio *from) -{ - unsigned char *vfrom; - struct bio_vec tovec, *fromvec = from->bi_io_vec; - struct bvec_iter iter; - - bio_for_each_segment(tovec, to, iter) { - if (tovec.bv_page != fromvec->bv_page) { - /* - * fromvec->bv_offset and fromvec->bv_len might have - * been modified by the block layer, so use the original - * copy, bounce_copy_vec already uses tovec->bv_len - */ - vfrom = page_address(fromvec->bv_page) + - tovec.bv_offset; - - bounce_copy_vec(&tovec, vfrom); - flush_dcache_page(tovec.bv_page); - } - - fromvec++; - } -} - -static void bounce_end_io(struct bio *bio, mempool_t *pool, int err) -{ - struct bio *bio_orig = bio->bi_private; - struct bio_vec *bvec, *org_vec; - int i; - - if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags)) - set_bit(BIO_EOPNOTSUPP, &bio_orig->bi_flags); - - /* - * free up bounce indirect pages used - */ - bio_for_each_segment_all(bvec, bio, i) { - org_vec = bio_orig->bi_io_vec + i; - if (bvec->bv_page == org_vec->bv_page) - continue; - - dec_zone_page_state(bvec->bv_page, NR_BOUNCE); - mempool_free(bvec->bv_page, pool); - } - - bio_endio(bio_orig, err); - bio_put(bio); -} - -static void bounce_end_io_write(struct bio *bio, int err) -{ - bounce_end_io(bio, page_pool, err); -} - -static void bounce_end_io_write_isa(struct bio *bio, int err) -{ - - bounce_end_io(bio, isa_page_pool, err); -} - -static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err) -{ - struct bio *bio_orig = bio->bi_private; - - if (test_bit(BIO_UPTODATE, &bio->bi_flags)) - copy_to_high_bio_irq(bio_orig, bio); - - bounce_end_io(bio, pool, err); -} - -static void bounce_end_io_read(struct bio *bio, int err) -{ - __bounce_end_io_read(bio, page_pool, err); -} - -static void bounce_end_io_read_isa(struct bio *bio, int err) -{ - __bounce_end_io_read(bio, isa_page_pool, err); -} - -#ifdef CONFIG_NEED_BOUNCE_POOL -static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio) -{ - if (bio_data_dir(bio) != WRITE) - return 0; - - if (!bdi_cap_stable_pages_required(&q->backing_dev_info)) - return 0; - - return test_bit(BIO_SNAP_STABLE, &bio->bi_flags); -} -#else -static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio) -{ - return 0; -} -#endif /* CONFIG_NEED_BOUNCE_POOL */ - -static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig, - mempool_t *pool, int force) -{ - struct bio *bio; - int rw = bio_data_dir(*bio_orig); - struct bio_vec *to, from; - struct bvec_iter iter; - unsigned i; - - if (force) - goto bounce; - bio_for_each_segment(from, *bio_orig, iter) - if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q)) - goto bounce; - - return; -bounce: - bio = bio_clone_bioset(*bio_orig, GFP_NOIO, fs_bio_set); - - bio_for_each_segment_all(to, bio, i) { - struct page *page = to->bv_page; - - if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force) - continue; - - inc_zone_page_state(to->bv_page, NR_BOUNCE); - to->bv_page = mempool_alloc(pool, q->bounce_gfp); - - if 
(rw == WRITE) { - char *vto, *vfrom; - - flush_dcache_page(page); - - vto = page_address(to->bv_page) + to->bv_offset; - vfrom = kmap_atomic(page) + to->bv_offset; - memcpy(vto, vfrom, to->bv_len); - kunmap_atomic(vfrom); - } - } - - trace_block_bio_bounce(q, *bio_orig); - - bio->bi_flags |= (1 << BIO_BOUNCED); - - if (pool == page_pool) { - bio->bi_end_io = bounce_end_io_write; - if (rw == READ) - bio->bi_end_io = bounce_end_io_read; - } else { - bio->bi_end_io = bounce_end_io_write_isa; - if (rw == READ) - bio->bi_end_io = bounce_end_io_read_isa; - } - - bio->bi_private = *bio_orig; - *bio_orig = bio; -} - -void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig) -{ - int must_bounce; - mempool_t *pool; - - /* - * Data-less bio, nothing to bounce - */ - if (!bio_has_data(*bio_orig)) - return; - - must_bounce = must_snapshot_stable_pages(q, *bio_orig); - - /* - * for non-isa bounce case, just check if the bounce pfn is equal - * to or bigger than the highest pfn in the system -- in that case, - * don't waste time iterating over bio segments - */ - if (!(q->bounce_gfp & GFP_DMA)) { - if (queue_bounce_pfn(q) >= blk_max_pfn && !must_bounce) - return; - pool = page_pool; - } else { - BUG_ON(!isa_page_pool); - pool = isa_page_pool; - } - - /* - * slow path - */ - __blk_queue_bounce(q, bio_orig, pool, must_bounce); -} - -EXPORT_SYMBOL(blk_queue_bounce); -- cgit v1.2.3-59-g8ed1b From e3a2b3f931f59d5284abd13faf8bded726884ffd Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 20 May 2014 11:49:02 -0600 Subject: blk-mq: allow changing of queue depth through sysfs For request_fn based devices, the block layer exports a 'nr_requests' file through sysfs to allow adjusting of queue depth on the fly. Currently this returns -EINVAL for blk-mq, since it's not wired up. Wire this up for blk-mq, so that it now also always dynamic adjustments of the allowed queue depth for any given block device managed by blk-mq. 
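For illustration only (not part of the change itself): once this is wired up, the same attribute that request_fn devices already expose can resize a blk-mq managed device from user space. The device name below is just an example, and a value above the tag_set depth is rejected with EINVAL.

    /* hypothetical user-space sketch; nullb0 is only an example device */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        const char *attr = "/sys/block/nullb0/queue/nr_requests";
        char buf[16] = "";
        ssize_t n;
        int fd;

        fd = open(attr, O_RDWR);
        if (fd < 0)
            return 1;

        n = read(fd, buf, sizeof(buf) - 1);
        if (n > 0)
            printf("current depth: %.*s", (int)n, buf);

        lseek(fd, 0, SEEK_SET);
        if (write(fd, "64", 2) < 0)     /* ask for a shallower depth */
            perror("write");

        close(fd);
        return 0;
    }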
Signed-off-by: Jens Axboe --- block/blk-core.c | 41 ++++++++++++++++++++++++++ block/blk-mq-tag.c | 80 +++++++++++++++++++++++++++++++++++--------------- block/blk-mq-tag.h | 1 + block/blk-mq.c | 22 ++++++++++++++ block/blk-mq.h | 1 + block/blk-sysfs.c | 45 ++++++---------------------- block/blk.h | 2 ++ include/linux/blk-mq.h | 2 +- 8 files changed, 134 insertions(+), 60 deletions(-) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index a6bd3e702201..fe81e19099a1 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -848,6 +848,47 @@ static void freed_request(struct request_list *rl, unsigned int flags) __freed_request(rl, sync ^ 1); } +int blk_update_nr_requests(struct request_queue *q, unsigned int nr) +{ + struct request_list *rl; + + spin_lock_irq(q->queue_lock); + q->nr_requests = nr; + blk_queue_congestion_threshold(q); + + /* congestion isn't cgroup aware and follows root blkcg for now */ + rl = &q->root_rl; + + if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q)) + blk_set_queue_congested(q, BLK_RW_SYNC); + else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q)) + blk_clear_queue_congested(q, BLK_RW_SYNC); + + if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q)) + blk_set_queue_congested(q, BLK_RW_ASYNC); + else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q)) + blk_clear_queue_congested(q, BLK_RW_ASYNC); + + blk_queue_for_each_rl(rl, q) { + if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { + blk_set_rl_full(rl, BLK_RW_SYNC); + } else { + blk_clear_rl_full(rl, BLK_RW_SYNC); + wake_up(&rl->wait[BLK_RW_SYNC]); + } + + if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { + blk_set_rl_full(rl, BLK_RW_ASYNC); + } else { + blk_clear_rl_full(rl, BLK_RW_ASYNC); + wake_up(&rl->wait[BLK_RW_ASYNC]); + } + } + + spin_unlock_irq(q->queue_lock); + return 0; +} + /* * Determine if elevator data should be initialized when allocating the * request associated with @bio. diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index e6b3fbae9862..f6dea968b710 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -57,23 +57,13 @@ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) } /* - * If a previously busy queue goes inactive, potential waiters could now - * be allowed to queue. Wake them up and check. + * Wakeup all potentially sleeping on normal (non-reserved) tags */ -void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) +static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags) { - struct blk_mq_tags *tags = hctx->tags; struct blk_mq_bitmap_tags *bt; int i, wake_index; - if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) - return; - - atomic_dec(&tags->active_queues); - - /* - * Will only throttle depth on non-reserved tags - */ bt = &tags->bitmap_tags; wake_index = bt->wake_index; for (i = 0; i < BT_WAIT_QUEUES; i++) { @@ -86,6 +76,22 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) } } +/* + * If a previously busy queue goes inactive, potential waiters could now + * be allowed to queue. Wake them up and check. + */ +void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) +{ + struct blk_mq_tags *tags = hctx->tags; + + if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) + return; + + atomic_dec(&tags->active_queues); + + blk_mq_tag_wakeup_all(tags); +} + /* * For shared tag users, we track the number of currently active users * and attempt to provide a fair share of the tag depth for each of them. 
@@ -408,6 +414,28 @@ static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt) return bt->depth - used; } +static void bt_update_count(struct blk_mq_bitmap_tags *bt, + unsigned int depth) +{ + unsigned int tags_per_word = 1U << bt->bits_per_word; + unsigned int map_depth = depth; + + if (depth) { + int i; + + for (i = 0; i < bt->map_nr; i++) { + bt->map[i].depth = min(map_depth, tags_per_word); + map_depth -= bt->map[i].depth; + } + } + + bt->wake_cnt = BT_WAIT_BATCH; + if (bt->wake_cnt > depth / 4) + bt->wake_cnt = max(1U, depth / 4); + + bt->depth = depth; +} + static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth, int node, bool reserved) { @@ -420,7 +448,7 @@ static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth, * condition. */ if (depth) { - unsigned int nr, i, map_depth, tags_per_word; + unsigned int nr, tags_per_word; tags_per_word = (1 << bt->bits_per_word); @@ -444,11 +472,6 @@ static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth, return -ENOMEM; bt->map_nr = nr; - map_depth = depth; - for (i = 0; i < nr; i++) { - bt->map[i].depth = min(map_depth, tags_per_word); - map_depth -= tags_per_word; - } } bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL); @@ -460,11 +483,7 @@ static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth, for (i = 0; i < BT_WAIT_QUEUES; i++) init_waitqueue_head(&bt->bs[i].wait); - bt->wake_cnt = BT_WAIT_BATCH; - if (bt->wake_cnt > depth / 4) - bt->wake_cnt = max(1U, depth / 4); - - bt->depth = depth; + bt_update_count(bt, depth); return 0; } @@ -525,6 +544,21 @@ void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag) *tag = prandom_u32() % depth; } +int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth) +{ + tdepth -= tags->nr_reserved_tags; + if (tdepth > tags->nr_tags) + return -EINVAL; + + /* + * Don't need (or can't) update reserved tags here, they remain + * static and should never need resizing. 
+ */ + bt_update_count(&tags->bitmap_tags, tdepth); + blk_mq_tag_wakeup_all(tags); + return 0; +} + ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page) { char *orig_page = page; diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index e144f68ec45f..e7ff5ceeeb97 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -55,6 +55,7 @@ extern void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, void (*fn)(void *data extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags); extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page); extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag); +extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth); enum { BLK_MQ_TAG_CACHE_MIN = 1, diff --git a/block/blk-mq.c b/block/blk-mq.c index 0fbef7e9bef1..7b71ab1b1536 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1789,6 +1789,28 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set) } EXPORT_SYMBOL(blk_mq_free_tag_set); +int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) +{ + struct blk_mq_tag_set *set = q->tag_set; + struct blk_mq_hw_ctx *hctx; + int i, ret; + + if (!set || nr > set->queue_depth) + return -EINVAL; + + ret = 0; + queue_for_each_hw_ctx(q, hctx, i) { + ret = blk_mq_tag_update_depth(hctx->tags, nr); + if (ret) + break; + } + + if (!ret) + q->nr_requests = nr; + + return ret; +} + void blk_mq_disable_hotplug(void) { mutex_lock(&all_q_mutex); diff --git a/block/blk-mq.h b/block/blk-mq.h index 5e5a378962b7..7db4fe4bd002 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -32,6 +32,7 @@ void blk_mq_drain_queue(struct request_queue *q); void blk_mq_free_queue(struct request_queue *q); void blk_mq_clone_flush_request(struct request *flush_rq, struct request *orig_rq); +int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); /* * CPU hotplug helpers diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 7500f876dae4..4d6811ac13fd 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -48,11 +48,10 @@ static ssize_t queue_requests_show(struct request_queue *q, char *page) static ssize_t queue_requests_store(struct request_queue *q, const char *page, size_t count) { - struct request_list *rl; unsigned long nr; - int ret; + int ret, err; - if (!q->request_fn) + if (!q->request_fn && !q->mq_ops) return -EINVAL; ret = queue_var_store(&nr, page, count); @@ -62,40 +61,14 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count) if (nr < BLKDEV_MIN_RQ) nr = BLKDEV_MIN_RQ; - spin_lock_irq(q->queue_lock); - q->nr_requests = nr; - blk_queue_congestion_threshold(q); - - /* congestion isn't cgroup aware and follows root blkcg for now */ - rl = &q->root_rl; - - if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q)) - blk_set_queue_congested(q, BLK_RW_SYNC); - else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q)) - blk_clear_queue_congested(q, BLK_RW_SYNC); - - if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q)) - blk_set_queue_congested(q, BLK_RW_ASYNC); - else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q)) - blk_clear_queue_congested(q, BLK_RW_ASYNC); - - blk_queue_for_each_rl(rl, q) { - if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { - blk_set_rl_full(rl, BLK_RW_SYNC); - } else { - blk_clear_rl_full(rl, BLK_RW_SYNC); - wake_up(&rl->wait[BLK_RW_SYNC]); - } - - if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { - blk_set_rl_full(rl, BLK_RW_ASYNC); - } else { - blk_clear_rl_full(rl, BLK_RW_ASYNC); - 
wake_up(&rl->wait[BLK_RW_ASYNC]); - } - } + if (q->request_fn) + err = blk_update_nr_requests(q, nr); + else + err = blk_mq_update_nr_requests(q, nr); + + if (err) + return err; - spin_unlock_irq(q->queue_lock); return ret; } diff --git a/block/blk.h b/block/blk.h index 95cab70000e3..45385e9abf6f 100644 --- a/block/blk.h +++ b/block/blk.h @@ -188,6 +188,8 @@ static inline int queue_congestion_off_threshold(struct request_queue *q) return q->nr_congestion_off; } +extern int blk_update_nr_requests(struct request_queue *, unsigned int); + /* * Contribute to IO statistics IFF: * diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index a06ca7b5ea05..f45424453338 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -63,7 +63,7 @@ struct blk_mq_hw_ctx { struct blk_mq_tag_set { struct blk_mq_ops *ops; unsigned int nr_hw_queues; - unsigned int queue_depth; + unsigned int queue_depth; /* max hw supported */ unsigned int reserved_tags; unsigned int cmd_size; /* per-request extra data */ int numa_node; -- cgit v1.2.3-59-g8ed1b From eba7176826ddab1d04c51bb2d5f2bbf22865444c Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 20 May 2014 15:17:27 -0600 Subject: blk-mq: initialize q->nr_requests after calling blk_queue_make_request() blk_queue_make_requests() overwrites our set value for q->nr_requests, turning it into the default of 128. Set this appropriately after initializing queue values in blk_queue_make_request(). Signed-off-by: Jens Axboe --- block/blk-mq.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index 7b71ab1b1536..fec8fcc4f8a4 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1623,6 +1623,11 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) if (set->timeout) blk_queue_rq_timeout(q, set->timeout); + /* + * Do this after blk_queue_make_request() overrides it... + */ + q->nr_requests = set->queue_depth; + if (set->ops->complete) blk_queue_softirq_done(q, set->ops->complete); -- cgit v1.2.3-59-g8ed1b From da41a589f52464e24ddefe76814ee35bfb07950c Mon Sep 17 00:00:00 2001 From: Robert Elliott Date: Tue, 20 May 2014 16:46:26 -0500 Subject: blk-mq: Micro-optimize blk_queue_nomerges() check In blk_mq_make_request(), do the blk_queue_nomerges() check outside the call to blk_attempt_plug_merge() to eliminate function call overhead when nomerges=2 (disabled) Signed-off-by: Robert Elliott Signed-off-by: Jens Axboe --- block/blk-core.c | 8 ++++---- block/blk-mq.c | 3 ++- 2 files changed, 6 insertions(+), 5 deletions(-) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index fe81e19099a1..5b6f768a7c01 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1471,6 +1471,8 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req, * added on the elevator at this point. In addition, we don't have * reliable access to the elevator outside queue lock. Only check basic * merging parameters without querying the elevator. + * + * Caller must ensure !blk_queue_nomerges(q) beforehand. */ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, unsigned int *request_count) @@ -1480,9 +1482,6 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, bool ret = false; struct list_head *plug_list; - if (blk_queue_nomerges(q)) - goto out; - plug = current->plug; if (!plug) goto out; @@ -1561,7 +1560,8 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio) * Check if we can merge with the plugged list before grabbing * any locks. 
*/ - if (blk_attempt_plug_merge(q, bio, &request_count)) + if (!blk_queue_nomerges(q) && + blk_attempt_plug_merge(q, bio, &request_count)) return; spin_lock_irq(q->queue_lock); diff --git a/block/blk-mq.c b/block/blk-mq.c index fec8fcc4f8a4..ef7ed5e95d6d 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1087,7 +1087,8 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio) return; } - if (use_plug && blk_attempt_plug_merge(q, bio, &request_count)) + if (use_plug && !blk_queue_nomerges(q) && + blk_attempt_plug_merge(q, bio, &request_count)) return; if (blk_mq_queue_enter(q)) { -- cgit v1.2.3-59-g8ed1b From e814e71ba4a6e1d7509b0f4b1928365ea650cace Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 21 May 2014 13:59:08 -0600 Subject: blk-mq: allow the hctx cpu hotplug notifier to return errors Prepare this for the next patch which adds more smarts in the plugging logic, so that we can save some memory. Signed-off-by: Jens Axboe --- block/blk-mq-cpu.c | 12 ++++++++---- block/blk-mq.c | 9 +++++---- block/blk-mq.h | 2 +- include/linux/blk-mq.h | 2 +- 4 files changed, 15 insertions(+), 10 deletions(-) (limited to 'block') diff --git a/block/blk-mq-cpu.c b/block/blk-mq-cpu.c index 136ef8643bba..d2c253f71b86 100644 --- a/block/blk-mq-cpu.c +++ b/block/blk-mq-cpu.c @@ -18,14 +18,18 @@ static int blk_mq_main_cpu_notify(struct notifier_block *self, { unsigned int cpu = (unsigned long) hcpu; struct blk_mq_cpu_notifier *notify; + int ret = NOTIFY_OK; raw_spin_lock(&blk_mq_cpu_notify_lock); - list_for_each_entry(notify, &blk_mq_cpu_notify_list, list) - notify->notify(notify->data, action, cpu); + list_for_each_entry(notify, &blk_mq_cpu_notify_list, list) { + ret = notify->notify(notify->data, action, cpu); + if (ret != NOTIFY_OK) + break; + } raw_spin_unlock(&blk_mq_cpu_notify_lock); - return NOTIFY_OK; + return ret; } void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier) @@ -45,7 +49,7 @@ void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier) } void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier, - void (*fn)(void *, unsigned long, unsigned int), + int (*fn)(void *, unsigned long, unsigned int), void *data) { notifier->notify = fn; diff --git a/block/blk-mq.c b/block/blk-mq.c index ef7ed5e95d6d..5a3683fc5bdb 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1196,8 +1196,8 @@ void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *hctx, } EXPORT_SYMBOL(blk_mq_free_single_hw_queue); -static void blk_mq_hctx_notify(void *data, unsigned long action, - unsigned int cpu) +static int blk_mq_hctx_notify(void *data, unsigned long action, + unsigned int cpu) { struct blk_mq_hw_ctx *hctx = data; struct request_queue *q = hctx->queue; @@ -1205,7 +1205,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action, LIST_HEAD(tmp); if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) - return; + return NOTIFY_OK; /* * Move ctx entries to new CPU, if this one is going away. 
@@ -1220,7 +1220,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action, spin_unlock(&ctx->lock); if (list_empty(&tmp)) - return; + return NOTIFY_OK; ctx = blk_mq_get_ctx(q); spin_lock(&ctx->lock); @@ -1240,6 +1240,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action, blk_mq_run_hw_queue(hctx, true); blk_mq_put_ctx(ctx); + return NOTIFY_OK; } static void blk_mq_free_rq_map(struct blk_mq_tag_set *set, diff --git a/block/blk-mq.h b/block/blk-mq.h index 7db4fe4bd002..491dbd4e93f5 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -39,7 +39,7 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); */ struct blk_mq_cpu_notifier; void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier, - void (*fn)(void *, unsigned long, unsigned int), + int (*fn)(void *, unsigned long, unsigned int), void *data); void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier); void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index f45424453338..4d2800567aad 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -8,7 +8,7 @@ struct blk_mq_tags; struct blk_mq_cpu_notifier { struct list_head list; void *data; - void (*notify)(void *data, unsigned long action, unsigned int cpu); + int (*notify)(void *data, unsigned long action, unsigned int cpu); }; struct blk_mq_ctxmap { -- cgit v1.2.3-59-g8ed1b From 484b4061e6683e0e6a09c7455f80781128dc8a6b Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 21 May 2014 14:01:15 -0600 Subject: blk-mq: save memory by freeing requests on unused hardware queues Depending on the topology of the machine and the number of queues exposed by a device, we can end up in a situation where some of the hardware queues are unused (as in, they don't map to any software queues). For this case, free up the memory used by the request map, as we will not use it. This can be a substantial amount of memory, depending on the number of queues vs CPUs and the queue depth of the device. Signed-off-by: Jens Axboe --- block/blk-mq.c | 157 ++++++++++++++++++++++++++++++++++++++------------------- 1 file changed, 105 insertions(+), 52 deletions(-) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index 5a3683fc5bdb..103aa1dbc000 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -597,8 +597,16 @@ static void blk_mq_rq_timer(unsigned long data) unsigned long next = 0; int i, next_set = 0; - queue_for_each_hw_ctx(q, hctx, i) + queue_for_each_hw_ctx(q, hctx, i) { + /* + * If not software queues are currently mapped to this + * hardware queue, there's nothing to check + */ + if (!hctx->nr_ctx || !hctx->tags) + continue; + blk_mq_hw_ctx_check_timeout(hctx, &next, &next_set); + } if (next_set) { next = blk_rq_timeout(round_jiffies_up(next)); @@ -1196,53 +1204,6 @@ void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *hctx, } EXPORT_SYMBOL(blk_mq_free_single_hw_queue); -static int blk_mq_hctx_notify(void *data, unsigned long action, - unsigned int cpu) -{ - struct blk_mq_hw_ctx *hctx = data; - struct request_queue *q = hctx->queue; - struct blk_mq_ctx *ctx; - LIST_HEAD(tmp); - - if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) - return NOTIFY_OK; - - /* - * Move ctx entries to new CPU, if this one is going away. 
- */ - ctx = __blk_mq_get_ctx(q, cpu); - - spin_lock(&ctx->lock); - if (!list_empty(&ctx->rq_list)) { - list_splice_init(&ctx->rq_list, &tmp); - blk_mq_hctx_clear_pending(hctx, ctx); - } - spin_unlock(&ctx->lock); - - if (list_empty(&tmp)) - return NOTIFY_OK; - - ctx = blk_mq_get_ctx(q); - spin_lock(&ctx->lock); - - while (!list_empty(&tmp)) { - struct request *rq; - - rq = list_first_entry(&tmp, struct request, queuelist); - rq->mq_ctx = ctx; - list_move_tail(&rq->queuelist, &ctx->rq_list); - } - - hctx = q->mq_ops->map_queue(q, ctx->cpu); - blk_mq_hctx_mark_pending(hctx, ctx); - - spin_unlock(&ctx->lock); - - blk_mq_run_hw_queue(hctx, true); - blk_mq_put_ctx(ctx); - return NOTIFY_OK; -} - static void blk_mq_free_rq_map(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, unsigned int hctx_idx) { @@ -1384,6 +1345,77 @@ static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node) return 0; } +static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu) +{ + struct request_queue *q = hctx->queue; + struct blk_mq_ctx *ctx; + LIST_HEAD(tmp); + + /* + * Move ctx entries to new CPU, if this one is going away. + */ + ctx = __blk_mq_get_ctx(q, cpu); + + spin_lock(&ctx->lock); + if (!list_empty(&ctx->rq_list)) { + list_splice_init(&ctx->rq_list, &tmp); + blk_mq_hctx_clear_pending(hctx, ctx); + } + spin_unlock(&ctx->lock); + + if (list_empty(&tmp)) + return NOTIFY_OK; + + ctx = blk_mq_get_ctx(q); + spin_lock(&ctx->lock); + + while (!list_empty(&tmp)) { + struct request *rq; + + rq = list_first_entry(&tmp, struct request, queuelist); + rq->mq_ctx = ctx; + list_move_tail(&rq->queuelist, &ctx->rq_list); + } + + hctx = q->mq_ops->map_queue(q, ctx->cpu); + blk_mq_hctx_mark_pending(hctx, ctx); + + spin_unlock(&ctx->lock); + + blk_mq_run_hw_queue(hctx, true); + blk_mq_put_ctx(ctx); + return NOTIFY_OK; +} + +static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu) +{ + struct request_queue *q = hctx->queue; + struct blk_mq_tag_set *set = q->tag_set; + + if (set->tags[hctx->queue_num]) + return NOTIFY_OK; + + set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num); + if (!set->tags[hctx->queue_num]) + return NOTIFY_STOP; + + hctx->tags = set->tags[hctx->queue_num]; + return NOTIFY_OK; +} + +static int blk_mq_hctx_notify(void *data, unsigned long action, + unsigned int cpu) +{ + struct blk_mq_hw_ctx *hctx = data; + + if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) + return blk_mq_hctx_cpu_offline(hctx, cpu); + else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) + return blk_mq_hctx_cpu_online(hctx, cpu); + + return NOTIFY_OK; +} + static int blk_mq_init_hw_queues(struct request_queue *q, struct blk_mq_tag_set *set) { @@ -1513,6 +1545,24 @@ static void blk_mq_map_swqueue(struct request_queue *q) } queue_for_each_hw_ctx(q, hctx, i) { + /* + * If not software queues are mapped to this hardware queue, + * disable it and free the request entries + */ + if (!hctx->nr_ctx) { + struct blk_mq_tag_set *set = q->tag_set; + + if (set->tags[i]) { + blk_mq_free_rq_map(set, set->tags[i], i); + set->tags[i] = NULL; + hctx->tags = NULL; + } + continue; + } + + /* + * Initialize batch roundrobin counts + */ hctx->next_cpu = cpumask_first(hctx->cpumask); hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; } @@ -1645,14 +1695,14 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) if (blk_mq_init_hw_queues(q, set)) goto err_flush_rq; - blk_mq_map_swqueue(q); - mutex_lock(&all_q_mutex); list_add_tail(&q->all_q_node, &all_q_list); 
mutex_unlock(&all_q_mutex); blk_mq_add_queue_tag_set(set, q); + blk_mq_map_swqueue(q); + return q; err_flush_rq: @@ -1790,8 +1840,11 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set) { int i; - for (i = 0; i < set->nr_hw_queues; i++) - blk_mq_free_rq_map(set, set->tags[i], i); + for (i = 0; i < set->nr_hw_queues; i++) { + if (set->tags[i]) + blk_mq_free_rq_map(set, set->tags[i], i); + } + kfree(set->tags); } EXPORT_SYMBOL(blk_mq_free_tag_set); -- cgit v1.2.3-59-g8ed1b From 07068d5b8ed8fa6759b2826ba9197e49b69a1fc3 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 22 May 2014 10:40:51 -0600 Subject: blk-mq: split make request handler for multi and single queue We want slightly different behavior from them: - On single queue devices, we currently use the per-process plug for deferred IO and for merging. - On multi queue devices, we don't use the per-process plug, but we want to go straight to hardware for SYNC IO. Split blk_mq_make_request() into a blk_sq_make_request() for single queue devices, and retain blk_mq_make_request() for multi queue devices. Then we don't need multiple checks for q->nr_hw_queues in the request mapping. Signed-off-by: Jens Axboe --- block/blk-mq.c | 207 +++++++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 157 insertions(+), 50 deletions(-) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index 103aa1dbc000..54e78863c083 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1072,43 +1072,57 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio) blk_account_io_start(rq, 1); } -static void blk_mq_make_request(struct request_queue *q, struct bio *bio) +static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx, + struct blk_mq_ctx *ctx, + struct request *rq, struct bio *bio) { - struct blk_mq_hw_ctx *hctx; - struct blk_mq_ctx *ctx; - const int is_sync = rw_is_sync(bio->bi_rw); - const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA); - int rw = bio_data_dir(bio); - struct request *rq; - unsigned int use_plug, request_count = 0; - - /* - * If we have multiple hardware queues, just go directly to - * one of those for sync IO. 
- */ - use_plug = !is_flush_fua && ((q->nr_hw_queues == 1) || !is_sync); + struct request_queue *q = hctx->queue; - blk_queue_bounce(q, &bio); + if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE)) { + blk_mq_bio_to_request(rq, bio); + spin_lock(&ctx->lock); +insert_rq: + __blk_mq_insert_request(hctx, rq, false); + spin_unlock(&ctx->lock); + return false; + } else { + spin_lock(&ctx->lock); + if (!blk_mq_attempt_merge(q, ctx, bio)) { + blk_mq_bio_to_request(rq, bio); + goto insert_rq; + } - if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { - bio_endio(bio, -EIO); - return; + spin_unlock(&ctx->lock); + __blk_mq_free_request(hctx, ctx, rq); + return true; } +} - if (use_plug && !blk_queue_nomerges(q) && - blk_attempt_plug_merge(q, bio, &request_count)) - return; +struct blk_map_ctx { + struct blk_mq_hw_ctx *hctx; + struct blk_mq_ctx *ctx; +}; + +static struct request *blk_mq_map_request(struct request_queue *q, + struct bio *bio, + struct blk_map_ctx *data) +{ + struct blk_mq_hw_ctx *hctx; + struct blk_mq_ctx *ctx; + struct request *rq; + int rw = bio_data_dir(bio); - if (blk_mq_queue_enter(q)) { + if (unlikely(blk_mq_queue_enter(q))) { bio_endio(bio, -EIO); - return; + return NULL; } ctx = blk_mq_get_ctx(q); hctx = q->mq_ops->map_queue(q, ctx->cpu); - if (is_sync) + if (rw_is_sync(bio->bi_rw)) rw |= REQ_SYNC; + trace_block_getrq(q, bio, rw); rq = __blk_mq_alloc_request(hctx, ctx, GFP_ATOMIC, false); if (likely(rq)) @@ -1123,6 +1137,109 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio) } hctx->queued++; + data->hctx = hctx; + data->ctx = ctx; + return rq; +} + +/* + * Multiple hardware queue variant. This will not use per-process plugs, + * but will attempt to bypass the hctx queueing if we can go straight to + * hardware for SYNC IO. + */ +static void blk_mq_make_request(struct request_queue *q, struct bio *bio) +{ + const int is_sync = rw_is_sync(bio->bi_rw); + const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA); + struct blk_map_ctx data; + struct request *rq; + + blk_queue_bounce(q, &bio); + + if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { + bio_endio(bio, -EIO); + return; + } + + rq = blk_mq_map_request(q, bio, &data); + if (unlikely(!rq)) + return; + + if (unlikely(is_flush_fua)) { + blk_mq_bio_to_request(rq, bio); + blk_insert_flush(rq); + goto run_queue; + } + + if (is_sync) { + int ret; + + blk_mq_bio_to_request(rq, bio); + blk_mq_start_request(rq, true); + + /* + * For OK queue, we are done. For error, kill it. Any other + * error (busy), just add it to our list as we previously + * would have done + */ + ret = q->mq_ops->queue_rq(data.hctx, rq); + if (ret == BLK_MQ_RQ_QUEUE_OK) + goto done; + else { + __blk_mq_requeue_request(rq); + + if (ret == BLK_MQ_RQ_QUEUE_ERROR) { + rq->errors = -EIO; + blk_mq_end_io(rq, rq->errors); + goto done; + } + } + } + + if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) { + /* + * For a SYNC request, send it to the hardware immediately. For + * an ASYNC request, just ensure that we run it later on. The + * latter allows for merging opportunities and more efficient + * dispatching. + */ +run_queue: + blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua); + } +done: + blk_mq_put_ctx(data.ctx); +} + +/* + * Single hardware queue variant. This will attempt to use any per-process + * plug for merging and IO deferral. 
+ */ +static void blk_sq_make_request(struct request_queue *q, struct bio *bio) +{ + const int is_sync = rw_is_sync(bio->bi_rw); + const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA); + unsigned int use_plug, request_count = 0; + struct blk_map_ctx data; + struct request *rq; + + /* + * If we have multiple hardware queues, just go directly to + * one of those for sync IO. + */ + use_plug = !is_flush_fua && !is_sync; + + blk_queue_bounce(q, &bio); + + if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { + bio_endio(bio, -EIO); + return; + } + + if (use_plug && !blk_queue_nomerges(q) && + blk_attempt_plug_merge(q, bio, &request_count)) + return; + + rq = blk_mq_map_request(q, bio, &data); if (unlikely(is_flush_fua)) { blk_mq_bio_to_request(rq, bio); @@ -1147,37 +1264,23 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio) trace_block_plug(q); } list_add_tail(&rq->queuelist, &plug->mq_list); - blk_mq_put_ctx(ctx); + blk_mq_put_ctx(data.ctx); return; } } - if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE)) { - blk_mq_bio_to_request(rq, bio); - spin_lock(&ctx->lock); -insert_rq: - __blk_mq_insert_request(hctx, rq, false); - spin_unlock(&ctx->lock); - } else { - spin_lock(&ctx->lock); - if (!blk_mq_attempt_merge(q, ctx, bio)) { - blk_mq_bio_to_request(rq, bio); - goto insert_rq; - } - - spin_unlock(&ctx->lock); - __blk_mq_free_request(hctx, ctx, rq); + if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) { + /* + * For a SYNC request, send it to the hardware immediately. For + * an ASYNC request, just ensure that we run it later on. The + * latter allows for merging opportunities and more efficient + * dispatching. + */ +run_queue: + blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua); } - - /* - * For a SYNC request, send it to the hardware immediately. For an - * ASYNC request, just ensure that we run it later on. The latter - * allows for merging opportunities and more efficient dispatching. - */ -run_queue: - blk_mq_run_hw_queue(hctx, !is_sync || is_flush_fua); - blk_mq_put_ctx(ctx); + blk_mq_put_ctx(data.ctx); } /* @@ -1670,7 +1773,11 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) q->sg_reserved_size = INT_MAX; - blk_queue_make_request(q, blk_mq_make_request); + if (q->nr_hw_queues > 1) + blk_queue_make_request(q, blk_mq_make_request); + else + blk_queue_make_request(q, blk_sq_make_request); + blk_queue_rq_timed_out(q, blk_mq_rq_timed_out); if (set->timeout) blk_queue_rq_timeout(q, set->timeout); -- cgit v1.2.3-59-g8ed1b From edf866b3805c5651bf7d035b72dc0190cb6ff4a7 Mon Sep 17 00:00:00 2001 From: Sam Bradshaw Date: Fri, 23 May 2014 13:30:16 -0600 Subject: blk-mq: export blk_mq_tag_busy_iter MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Export the blk-mq in-flight tag iterator for driver consumption. This is particularly useful in exception paths or SRSI where in-flight IOs need to be cancelled and/or reissued. The NVMe driver conversion will use this. 
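A rough sketch of how a driver exception path might consume the exported iterator. Only the blk_mq_tag_busy_iter() call and its callback signature come from this patch; the device structure, the depth bound and the abort helper are made-up names for illustration.

    #include <linux/bitops.h>
    #include <linux/blk-mq.h>

    /* hypothetical driver bits, for illustration only */
    struct my_dev {
        struct blk_mq_hw_ctx *hctx;
        unsigned int queue_depth;
    };

    static void my_abort_command(struct my_dev *dev, unsigned int tag)
    {
        /* driver-specific abort/requeue of the command occupying 'tag' */
    }

    /* the iterator invokes this once with a bitmap of in-flight tags */
    static void my_cancel_inflight(void *data, unsigned long *tag_map)
    {
        struct my_dev *dev = data;
        unsigned int tag;

        for_each_set_bit(tag, tag_map, dev->queue_depth)
            my_abort_command(dev, tag);
    }

    static void my_reset_path(struct my_dev *dev)
    {
        blk_mq_tag_busy_iter(dev->hctx->tags, my_cancel_inflight, dev);
    }

The driver gets one callback per iteration with the bitmap of busy tags, and can then cancel or reissue each outstanding command from its own teardown or reset path.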
Signed-off-by: Sam Bradshaw Signed-off-by: Matias Bjørling Signed-off-by: Jens Axboe --- block/blk-mq-tag.c | 1 + block/blk-mq-tag.h | 1 - include/linux/blk-mq.h | 1 + 3 files changed, 2 insertions(+), 1 deletion(-) (limited to 'block') diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index f6dea968b710..05e2baf4fa0d 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -400,6 +400,7 @@ void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, fn(data, tag_map); kfree(tag_map); } +EXPORT_SYMBOL(blk_mq_tag_busy_iter); static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt) { diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index e7ff5ceeeb97..2e5e6872d089 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -51,7 +51,6 @@ extern void blk_mq_free_tags(struct blk_mq_tags *tags); extern unsigned int blk_mq_get_tag(struct blk_mq_hw_ctx *hctx, unsigned int *last_tag, gfp_t gfp, bool reserved); extern void blk_mq_wait_for_tags(struct blk_mq_hw_ctx *hctx, bool reserved); extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag, unsigned int *last_tag); -extern void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, void (*fn)(void *data, unsigned long *), void *data); extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags); extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page); extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 4d2800567aad..f76bb18350af 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -181,6 +181,7 @@ void blk_mq_stop_hw_queues(struct request_queue *q); void blk_mq_start_hw_queues(struct request_queue *q); void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); +void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, void (*fn)(void *data, unsigned long *), void *data); /* * Driver command data is immediately after the request. So subtract request -- cgit v1.2.3-59-g8ed1b From c22d9d8a60646a1521ae12a2176f58da19afa186 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 23 May 2014 14:14:57 -0600 Subject: blk-mq: allow setting of per-request timeouts Currently blk-mq uses the queue timeout for all requests. But for some commands, drivers may want to set a specific timeout for special requests. Allow this to be passed in through request->timeout, and use it if set. Signed-off-by: Jens Axboe --- block/blk-mq.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index 54e78863c083..9bc075335d06 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -454,9 +454,13 @@ static void blk_mq_start_request(struct request *rq, bool last) /* * Just mark start time and set the started bit. Due to memory * ordering, we know we'll see the correct deadline as long as - * REQ_ATOMIC_STARTED is seen. + * REQ_ATOMIC_STARTED is seen. Use the default queue timeout, + * unless one has been set in the request. */ - rq->deadline = jiffies + q->rq_timeout; + if (!rq->timeout) + rq->deadline = jiffies + q->rq_timeout; + else + rq->deadline = jiffies + rq->timeout; /* * Mark us as started and clear complete. 
Complete might have been -- cgit v1.2.3-59-g8ed1b From 19c5d84f14d2b01fe4c24e1444142f69e6dc08f6 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 26 May 2014 11:45:02 +0200 Subject: blk-mq: idle all hardware contexts before freeing a queue Without this we can leak the active_queues reference if a command is freed while it is considered active. Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq.c | 1 + 1 file changed, 1 insertion(+) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index 9bc075335d06..62082c5daae7 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1844,6 +1844,7 @@ void blk_mq_free_queue(struct request_queue *q) blk_mq_del_queue_tag_set(q); queue_for_each_hw_ctx(q, hctx, i) { + blk_mq_tag_idle(hctx); kfree(hctx->ctxs); blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); if (q->mq_ops->exit_hctx) -- cgit v1.2.3-59-g8ed1b From 35086784caec571be185f643eb1b045a275d60b3 Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Mon, 26 May 2014 22:19:14 +0200 Subject: block/blk-lib.c: make __blkdev_issue_zeroout static __blkdev_issue_zeroout is only used in blk-lib.c Cc: Jens Axboe Cc: Andrew Morton Signed-off-by: Fabian Frederick Signed-off-by: Jens Axboe --- block/blk-lib.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'block') diff --git a/block/blk-lib.c b/block/blk-lib.c index 97a733cf3d5f..8411be3c19d3 100644 --- a/block/blk-lib.c +++ b/block/blk-lib.c @@ -226,8 +226,8 @@ EXPORT_SYMBOL(blkdev_issue_write_same); * Generate and issue number of bios with zerofiled pages. */ -int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, - sector_t nr_sects, gfp_t gfp_mask) +static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, + sector_t nr_sects, gfp_t gfp_mask) { int ret; struct bio *bio; -- cgit v1.2.3-59-g8ed1b From 1f9f07e917f43af420f8cb3afc2b3fa703cea6e9 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Tue, 27 May 2014 08:34:45 -0600 Subject: blk-mq: fix leak of hctx->ctx_map hctx->ctx_map should have been freed inside blk_mq_free_queue(). Signed-off-by: Ming Lei Signed-off-by: Jens Axboe --- block/blk-mq.c | 1 + 1 file changed, 1 insertion(+) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index 62082c5daae7..dad22a9abe49 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1846,6 +1846,7 @@ void blk_mq_free_queue(struct request_queue *q) queue_for_each_hw_ctx(q, hctx, i) { blk_mq_tag_idle(hctx); kfree(hctx->ctxs); + blk_mq_free_bitmap(&hctx->ctx_map); blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); if (q->mq_ops->exit_hctx) q->mq_ops->exit_hctx(hctx, i); -- cgit v1.2.3-59-g8ed1b From 624dbe47541643b72868a59b2c0059bb53dc923f Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Tue, 27 May 2014 23:35:13 +0800 Subject: blk-mq: avoid code duplication blk_mq_exit_hw_queues() and blk_mq_free_hw_queues() are introduced to avoid code duplication. 
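The subtle part is the nr_queue argument: the exit helper only tears down the queues that were actually initialized, so one function serves both the init error-unwind and the final teardown. A minimal stand-alone sketch of that shape (hypothetical resource names, plain user-space C for brevity, not additional kernel code):

    #include <stdlib.h>

    struct res { void *buf; };

    static void exit_resources(struct res *r, int nr_init)
    {
        int i;

        for (i = 0; i < nr_init; i++)
            free(r[i].buf);
    }

    static int init_resources(struct res *r, int nr)
    {
        int i;

        for (i = 0; i < nr; i++) {
            r[i].buf = malloc(64);
            if (!r[i].buf) {
                exit_resources(r, i);   /* unwind only what succeeded */
                return -1;
            }
        }
        return 0;                       /* teardown: exit_resources(r, nr) */
    }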
Signed-off-by: Ming Lei Signed-off-by: Jens Axboe --- block/blk-mq.c | 61 +++++++++++++++++++++++++++++++++++----------------------- 1 file changed, 37 insertions(+), 24 deletions(-) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index dad22a9abe49..07851753a049 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1523,11 +1523,43 @@ static int blk_mq_hctx_notify(void *data, unsigned long action, return NOTIFY_OK; } +static void blk_mq_exit_hw_queues(struct request_queue *q, + struct blk_mq_tag_set *set, int nr_queue) +{ + struct blk_mq_hw_ctx *hctx; + unsigned int i; + + queue_for_each_hw_ctx(q, hctx, i) { + if (i == nr_queue) + break; + + if (set->ops->exit_hctx) + set->ops->exit_hctx(hctx, i); + + blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); + kfree(hctx->ctxs); + blk_mq_free_bitmap(&hctx->ctx_map); + } + +} + +static void blk_mq_free_hw_queues(struct request_queue *q, + struct blk_mq_tag_set *set) +{ + struct blk_mq_hw_ctx *hctx; + unsigned int i; + + queue_for_each_hw_ctx(q, hctx, i) { + free_cpumask_var(hctx->cpumask); + set->ops->free_hctx(hctx, i); + } +} + static int blk_mq_init_hw_queues(struct request_queue *q, struct blk_mq_tag_set *set) { struct blk_mq_hw_ctx *hctx; - unsigned int i, j; + unsigned int i; /* * Initialize hardware queues @@ -1579,17 +1611,7 @@ static int blk_mq_init_hw_queues(struct request_queue *q, /* * Init failed */ - queue_for_each_hw_ctx(q, hctx, j) { - if (i == j) - break; - - if (set->ops->exit_hctx) - set->ops->exit_hctx(hctx, j); - - blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); - kfree(hctx->ctxs); - blk_mq_free_bitmap(&hctx->ctx_map); - } + blk_mq_exit_hw_queues(q, set, i); return 1; } @@ -1838,21 +1860,12 @@ EXPORT_SYMBOL(blk_mq_init_queue); void blk_mq_free_queue(struct request_queue *q) { - struct blk_mq_hw_ctx *hctx; - int i; + struct blk_mq_tag_set *set = q->tag_set; blk_mq_del_queue_tag_set(q); - queue_for_each_hw_ctx(q, hctx, i) { - blk_mq_tag_idle(hctx); - kfree(hctx->ctxs); - blk_mq_free_bitmap(&hctx->ctx_map); - blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); - if (q->mq_ops->exit_hctx) - q->mq_ops->exit_hctx(hctx, i); - free_cpumask_var(hctx->cpumask); - q->mq_ops->free_hctx(hctx, i); - } + blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); + blk_mq_free_hw_queues(q, set); free_percpu(q->queue_ctx); kfree(q->queue_hw_ctx); -- cgit v1.2.3-59-g8ed1b From 3d2936f457a847d9d88a9cc127e0eb7a0ebba0ff Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Tue, 27 May 2014 23:35:14 +0800 Subject: block: only allocate/free mq_usage_counter in blk-mq The percpu counter is only used for blk-mq, so move its allocation and free inside blk-mq, and don't allocate it for legacy queue device. 
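For background, the counter in question is a percpu_counter, so keeping it blk-mq only also spares legacy queues a pointless per-cpu allocation. A minimal sketch of the API pattern involved (helper names made up for illustration; the two-argument init matches the call sites touched below):

    #include <linux/percpu_counter.h>

    static struct percpu_counter usage;

    static int usage_setup(void)
    {
        return percpu_counter_init(&usage, 0);  /* allocates the per-cpu slots */
    }

    static void usage_enter(void)
    {
        percpu_counter_inc(&usage);             /* cheap: usually per-cpu only */
    }

    static void usage_exit(void)
    {
        percpu_counter_dec(&usage);
    }

    static s64 usage_snapshot(void)
    {
        return percpu_counter_sum(&usage);      /* exact: folds all CPUs */
    }

    static void usage_teardown(void)
    {
        percpu_counter_destroy(&usage);         /* pairs with a successful init */
    }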
Signed-off-by: Ming Lei Signed-off-by: Jens Axboe --- block/blk-core.c | 7 +------ block/blk-mq.c | 5 +++++ block/blk-sysfs.c | 2 -- 3 files changed, 6 insertions(+), 8 deletions(-) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index 5b6f768a7c01..29d5fbafd94a 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -576,12 +576,9 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) if (!q) return NULL; - if (percpu_counter_init(&q->mq_usage_counter, 0)) - goto fail_q; - q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask); if (q->id < 0) - goto fail_c; + goto fail_q; q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; @@ -639,8 +636,6 @@ fail_bdi: bdi_destroy(&q->backing_dev_info); fail_id: ida_simple_remove(&blk_queue_ida, q->id); -fail_c: - percpu_counter_destroy(&q->mq_usage_counter); fail_q: kmem_cache_free(blk_requestq_cachep, q); return NULL; diff --git a/block/blk-mq.c b/block/blk-mq.c index 07851753a049..e8b5f74dc1a1 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1781,6 +1781,9 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) if (!q) goto err_hctxs; + if (percpu_counter_init(&q->mq_usage_counter, 0)) + goto err_map; + q->mq_map = blk_mq_make_queue_map(set); if (!q->mq_map) goto err_map; @@ -1867,6 +1870,8 @@ void blk_mq_free_queue(struct request_queue *q) blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); blk_mq_free_hw_queues(q, set); + percpu_counter_destroy(&q->mq_usage_counter); + free_percpu(q->queue_ctx); kfree(q->queue_hw_ctx); kfree(q->mq_map); diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 4d6811ac13fd..23321fbab293 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -517,8 +517,6 @@ static void blk_release_queue(struct kobject *kobj) if (q->queue_tags) __blk_queue_free_tags(q); - percpu_counter_destroy(&q->mq_usage_counter); - if (q->mq_ops) blk_mq_free_queue(q); -- cgit v1.2.3-59-g8ed1b From f14bbe77a96bb979dc539d8308ee18a9363a544f Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 27 May 2014 12:06:53 -0600 Subject: blk-mq: pass in suggested NUMA node to ->alloc_hctx() Drivers currently have to figure this out on their own, and they are missing information to do it properly. The ones that did attempt to do it, do it wrong. So just pass in the suggested node directly to the alloc function. Signed-off-by: Jens Axboe --- block/blk-mq-cpumap.c | 16 ++++++++++++++++ block/blk-mq.c | 26 +++++++++++++++----------- block/blk-mq.h | 1 + drivers/block/null_blk.c | 35 +++-------------------------------- include/linux/blk-mq.h | 4 ++-- 5 files changed, 37 insertions(+), 45 deletions(-) (limited to 'block') diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c index 5d0f93cf358c..0daacb927be1 100644 --- a/block/blk-mq-cpumap.c +++ b/block/blk-mq-cpumap.c @@ -96,3 +96,19 @@ unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set) kfree(map); return NULL; } + +/* + * We have no quick way of doing reverse lookups. This is only used at + * queue init time, so runtime isn't important. 
+ */ +int blk_mq_hw_queue_to_node(unsigned int *mq_map, unsigned int index) +{ + int i; + + for_each_possible_cpu(i) { + if (index == mq_map[i]) + return cpu_to_node(i); + } + + return NUMA_NO_NODE; +} diff --git a/block/blk-mq.c b/block/blk-mq.c index e8b5f74dc1a1..30bad930e661 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1297,10 +1297,10 @@ struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu) EXPORT_SYMBOL(blk_mq_map_queue); struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *set, - unsigned int hctx_index) + unsigned int hctx_index, + int node) { - return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, - set->numa_node); + return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, node); } EXPORT_SYMBOL(blk_mq_alloc_single_hw_queue); @@ -1752,6 +1752,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) struct blk_mq_hw_ctx **hctxs; struct blk_mq_ctx *ctx; struct request_queue *q; + unsigned int *map; int i; ctx = alloc_percpu(struct blk_mq_ctx); @@ -1764,8 +1765,14 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) if (!hctxs) goto err_percpu; + map = blk_mq_make_queue_map(set); + if (!map) + goto err_map; + for (i = 0; i < set->nr_hw_queues; i++) { - hctxs[i] = set->ops->alloc_hctx(set, i); + int node = blk_mq_hw_queue_to_node(map, i); + + hctxs[i] = set->ops->alloc_hctx(set, i, node); if (!hctxs[i]) goto err_hctxs; @@ -1773,7 +1780,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) goto err_hctxs; atomic_set(&hctxs[i]->nr_active, 0); - hctxs[i]->numa_node = NUMA_NO_NODE; + hctxs[i]->numa_node = node; hctxs[i]->queue_num = i; } @@ -1784,15 +1791,12 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) if (percpu_counter_init(&q->mq_usage_counter, 0)) goto err_map; - q->mq_map = blk_mq_make_queue_map(set); - if (!q->mq_map) - goto err_map; - setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q); blk_queue_rq_timeout(q, 30000); q->nr_queues = nr_cpu_ids; q->nr_hw_queues = set->nr_hw_queues; + q->mq_map = map; q->queue_ctx = ctx; q->queue_hw_ctx = hctxs; @@ -1844,16 +1848,16 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) err_flush_rq: kfree(q->flush_rq); err_hw: - kfree(q->mq_map); -err_map: blk_cleanup_queue(q); err_hctxs: + kfree(map); for (i = 0; i < set->nr_hw_queues; i++) { if (!hctxs[i]) break; free_cpumask_var(hctxs[i]->cpumask); set->ops->free_hctx(hctxs[i], i); } +err_map: kfree(hctxs); err_percpu: free_percpu(ctx); diff --git a/block/blk-mq.h b/block/blk-mq.h index 491dbd4e93f5..ff5e6bf0f691 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -52,6 +52,7 @@ void blk_mq_disable_hotplug(void); */ extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set); extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues); +extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int); /* * Basic implementation of sparser bitmap, allowing the user to spread diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 8e7e3a0b0d24..4d33c8c25fbf 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -322,39 +322,10 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq) } static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_tag_set *set, - unsigned int hctx_index) + unsigned int hctx_index, + int node) { - int b_size = DIV_ROUND_UP(set->nr_hw_queues, nr_online_nodes); - int tip = (set->nr_hw_queues % nr_online_nodes); - int node = 
0, i, n; - - /* - * Split submit queues evenly wrt to the number of nodes. If uneven, - * fill the first buckets with one extra, until the rest is filled with - * no extra. - */ - for (i = 0, n = 1; i < hctx_index; i++, n++) { - if (n % b_size == 0) { - n = 0; - node++; - - tip--; - if (!tip) - b_size = set->nr_hw_queues / nr_online_nodes; - } - } - - /* - * A node might not be online, therefore map the relative node id to the - * real node id. - */ - for_each_online_node(n) { - if (!node) - break; - node--; - } - - return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, n); + return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, node); } static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index) diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index f76bb18350af..afeb93496907 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -80,7 +80,7 @@ struct blk_mq_tag_set { typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *); typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int); typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_tag_set *, - unsigned int); + unsigned int, int); typedef void (free_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int); typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int); typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int); @@ -165,7 +165,7 @@ struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, g struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag); struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); -struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int); +struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int); void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int); void blk_mq_end_io(struct request *rq, int error); -- cgit v1.2.3-59-g8ed1b From 95f096849932fe5eaa7bfec887530cf556744a76 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 27 May 2014 17:46:48 -0600 Subject: blk-mq: allow non-softirq completions Right now we export two ways of completing a request: 1) blk_mq_complete_request(). This uses an IPI (if needed) and completes through q->softirq_done_fn(). It also works with timeouts. 2) blk_mq_end_io(). This completes inline, and ignores any timeout state of the request. Let blk_mq_complete_request() handle non-softirq_done_fn completions as well, by just completing inline. If a driver has enough completion ports to place completions correctly, it need not define a mq_ops->complete() and we can avoid an indirect function call by doing the completion inline. 
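As an illustrative sketch only (the driver names are hypothetical, not from this series): a driver whose interrupt already fires on the submitting CPU can now skip defining mq_ops->complete and rely on the inline fallback when it calls blk_mq_complete_request().

static irqreturn_t mydrv_irq(int irq, void *data)
{
	/* mydrv_pop_completed() is a stand-in for however the hardware
	 * reports a finished command. */
	struct request *rq = mydrv_pop_completed(data);

	rq->errors = 0;
	/* With no mq_ops->complete defined, this now ends the request
	 * inline via blk_mq_end_io() instead of raising an IPI/softirq. */
	blk_mq_complete_request(rq);
	return IRQ_HANDLED;
}
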
Signed-off-by: Jens Axboe --- block/blk-mq.c | 12 +++++++++--- include/linux/blk-mq.h | 4 ++++ 2 files changed, 13 insertions(+), 3 deletions(-) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index 30bad930e661..010b878d53b3 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -434,10 +434,16 @@ void __blk_mq_complete_request(struct request *rq) **/ void blk_mq_complete_request(struct request *rq) { - if (unlikely(blk_should_fake_timeout(rq->q))) + struct request_queue *q = rq->q; + + if (unlikely(blk_should_fake_timeout(q))) return; - if (!blk_mark_rq_complete(rq)) - __blk_mq_complete_request(rq); + if (!blk_mark_rq_complete(rq)) { + if (q->softirq_done_fn) + __blk_mq_complete_request(rq); + else + blk_mq_end_io(rq, rq->errors); + } } EXPORT_SYMBOL(blk_mq_complete_request); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index afeb93496907..1dfeb1529a61 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -173,6 +173,10 @@ void __blk_mq_end_io(struct request *rq, int error); void blk_mq_requeue_request(struct request *rq); +/* + * Complete request through potential IPI for right placement. Driver must + * have defined a mq_ops->complete() hook for this. + */ void blk_mq_complete_request(struct request *rq); void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); -- cgit v1.2.3-59-g8ed1b From 6fca6a611c27f1f0d90fbe1cc3c229dbf8c09e48 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 28 May 2014 08:08:02 -0600 Subject: blk-mq: add helper to insert requests from irq context Both the cache flush state machine and the SCSI midlayer want to submit requests from irq context, and the current per-request requeue_work unfortunately causes corruption due to sharing with the csd field for flushes. Replace them with a per-request_queue list of requests to be requeued. Based on an earlier test by Ming Lei. 
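A hedged sketch of the resulting caller-side pattern (the flush machinery in the diff below is the real in-tree user; the wrapper name here is made up): requests can now be parked and re-kicked safely from irq context because no per-request work item or csd field is involved.

static void mydrv_requeue_from_irq(struct request *rq)
{
	/* Park the request on the queue-wide requeue list... */
	blk_mq_add_to_requeue_list(rq, false);		/* tail insert */
	/* ...and let kblockd reinsert it from process context. */
	blk_mq_kick_requeue_list(rq->q);
}
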
Signed-off-by: Christoph Hellwig Reported-by: Ming Lei Tested-by: Ming Lei Signed-off-by: Jens Axboe --- block/blk-flush.c | 16 ++++--------- block/blk-mq.c | 64 +++++++++++++++++++++++++++++++++++++++++++++++++- include/linux/blk-mq.h | 2 ++ include/linux/blkdev.h | 5 +++- 4 files changed, 73 insertions(+), 14 deletions(-) (limited to 'block') diff --git a/block/blk-flush.c b/block/blk-flush.c index ec7a224d6733..ef608b35d9be 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -130,21 +130,13 @@ static void blk_flush_restore_request(struct request *rq) blk_clear_rq_complete(rq); } -static void mq_flush_run(struct work_struct *work) -{ - struct request *rq; - - rq = container_of(work, struct request, requeue_work); - - memset(&rq->csd, 0, sizeof(rq->csd)); - blk_mq_insert_request(rq, false, true, false); -} - static bool blk_flush_queue_rq(struct request *rq, bool add_front) { if (rq->q->mq_ops) { - INIT_WORK(&rq->requeue_work, mq_flush_run); - kblockd_schedule_work(&rq->requeue_work); + struct request_queue *q = rq->q; + + blk_mq_add_to_requeue_list(rq, add_front); + blk_mq_kick_requeue_list(q); return false; } else { if (add_front) diff --git a/block/blk-mq.c b/block/blk-mq.c index 010b878d53b3..67066ecc79c0 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -516,10 +516,68 @@ void blk_mq_requeue_request(struct request *rq) blk_clear_rq_complete(rq); BUG_ON(blk_queued_rq(rq)); - blk_mq_insert_request(rq, true, true, false); + blk_mq_add_to_requeue_list(rq, true); } EXPORT_SYMBOL(blk_mq_requeue_request); +static void blk_mq_requeue_work(struct work_struct *work) +{ + struct request_queue *q = + container_of(work, struct request_queue, requeue_work); + LIST_HEAD(rq_list); + struct request *rq, *next; + unsigned long flags; + + spin_lock_irqsave(&q->requeue_lock, flags); + list_splice_init(&q->requeue_list, &rq_list); + spin_unlock_irqrestore(&q->requeue_lock, flags); + + list_for_each_entry_safe(rq, next, &rq_list, queuelist) { + if (!(rq->cmd_flags & REQ_SOFTBARRIER)) + continue; + + rq->cmd_flags &= ~REQ_SOFTBARRIER; + list_del_init(&rq->queuelist); + blk_mq_insert_request(rq, true, false, false); + } + + while (!list_empty(&rq_list)) { + rq = list_entry(rq_list.next, struct request, queuelist); + list_del_init(&rq->queuelist); + blk_mq_insert_request(rq, false, false, false); + } + + blk_mq_run_queues(q, false); +} + +void blk_mq_add_to_requeue_list(struct request *rq, bool at_head) +{ + struct request_queue *q = rq->q; + unsigned long flags; + + /* + * We abuse this flag that is otherwise used by the I/O scheduler to + * request head insertation from the workqueue. 
+ */ + BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER); + + spin_lock_irqsave(&q->requeue_lock, flags); + if (at_head) { + rq->cmd_flags |= REQ_SOFTBARRIER; + list_add(&rq->queuelist, &q->requeue_list); + } else { + list_add_tail(&rq->queuelist, &q->requeue_list); + } + spin_unlock_irqrestore(&q->requeue_lock, flags); +} +EXPORT_SYMBOL(blk_mq_add_to_requeue_list); + +void blk_mq_kick_requeue_list(struct request_queue *q) +{ + kblockd_schedule_work(&q->requeue_work); +} +EXPORT_SYMBOL(blk_mq_kick_requeue_list); + struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag) { return tags->rqs[tag]; @@ -1812,6 +1870,10 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) q->sg_reserved_size = INT_MAX; + INIT_WORK(&q->requeue_work, blk_mq_requeue_work); + INIT_LIST_HEAD(&q->requeue_list); + spin_lock_init(&q->requeue_lock); + if (q->nr_hw_queues > 1) blk_queue_make_request(q, blk_mq_make_request); else diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 5b171fbe95c5..b9a74a386dbc 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -172,6 +172,8 @@ void blk_mq_end_io(struct request *rq, int error); void __blk_mq_end_io(struct request *rq, int error); void blk_mq_requeue_request(struct request *rq); +void blk_mq_add_to_requeue_list(struct request *rq, bool at_head); +void blk_mq_kick_requeue_list(struct request_queue *q); void blk_mq_complete_request(struct request *rq); void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 6bc011a09e82..913f1c2d3be0 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -99,7 +99,6 @@ struct request { struct list_head queuelist; union { struct call_single_data csd; - struct work_struct requeue_work; unsigned long fifo_time; }; @@ -463,6 +462,10 @@ struct request_queue { struct request *flush_rq; spinlock_t mq_flush_lock; + struct list_head requeue_list; + spinlock_t requeue_lock; + struct work_struct requeue_work; + struct mutex sysfs_lock; int bypass_depth; -- cgit v1.2.3-59-g8ed1b From 4ce01dd1a07d9cf3eaf44fbf4ea9a61b11badccc Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 27 May 2014 20:59:46 +0200 Subject: blk-mq: merge blk_mq_alloc_reserved_request into blk_mq_alloc_request Instead of having two almost identical copies of the same code just let the callers pass in the reserved flag directly. 
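An illustrative call site under the merged interface (the helper name is hypothetical); the final argument now selects the reserved tag pool that previously required the separate function.

static struct request *mydrv_get_internal_rq(struct request_queue *q)
{
	/* Last argument: false = normal tag pool, true = the reserved
	 * pool set aside when the tag set was created. */
	return blk_mq_alloc_request(q, READ, GFP_KERNEL, true);
}
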
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-core.c | 2 +- block/blk-mq.c | 20 +++----------------- include/linux/blk-mq.h | 4 ++-- 3 files changed, 6 insertions(+), 20 deletions(-) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index 29d5fbafd94a..d87be5b4e554 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1173,7 +1173,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw, struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) { if (q->mq_ops) - return blk_mq_alloc_request(q, rw, gfp_mask); + return blk_mq_alloc_request(q, rw, gfp_mask, false); else return blk_old_get_request(q, rw, gfp_mask); } diff --git a/block/blk-mq.c b/block/blk-mq.c index 67066ecc79c0..63d581d72a70 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -294,35 +294,21 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q, return rq; } -struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp) +struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, + bool reserved) { struct request *rq; if (blk_mq_queue_enter(q)) return NULL; - rq = blk_mq_alloc_request_pinned(q, rw, gfp, false); + rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved); if (rq) blk_mq_put_ctx(rq->mq_ctx); return rq; } EXPORT_SYMBOL(blk_mq_alloc_request); -struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, - gfp_t gfp) -{ - struct request *rq; - - if (blk_mq_queue_enter(q)) - return NULL; - - rq = blk_mq_alloc_request_pinned(q, rw, gfp, true); - if (rq) - blk_mq_put_ctx(rq->mq_ctx); - return rq; -} -EXPORT_SYMBOL(blk_mq_alloc_reserved_request); - static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, struct request *rq) { diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index b9a74a386dbc..2bd82f399128 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -160,8 +160,8 @@ void blk_mq_insert_request(struct request *, bool, bool, bool); void blk_mq_run_queues(struct request_queue *q, bool async); void blk_mq_free_request(struct request *rq); bool blk_mq_can_queue(struct blk_mq_hw_ctx *); -struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp); -struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp); +struct request *blk_mq_alloc_request(struct request_queue *q, int rw, + gfp_t gfp, bool reserved); struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag); struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); -- cgit v1.2.3-59-g8ed1b From 5dee857720db15e2c8ef0c03f7eeac00c4c63cb2 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 27 May 2014 20:59:47 +0200 Subject: blk-mq: initialize request in __blk_mq_alloc_request Both callers if __blk_mq_alloc_request want to initialize the request, so lift it into the common path. 
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq.c | 62 ++++++++++++++++++++++++++++------------------------------ 1 file changed, 30 insertions(+), 32 deletions(-) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index 63d581d72a70..04ef7ecb3c7f 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -92,30 +92,6 @@ static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx, clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word); } -static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx, - struct blk_mq_ctx *ctx, - gfp_t gfp, bool reserved) -{ - struct request *rq; - unsigned int tag; - - tag = blk_mq_get_tag(hctx, &ctx->last_tag, gfp, reserved); - if (tag != BLK_MQ_TAG_FAIL) { - rq = hctx->tags->rqs[tag]; - - rq->cmd_flags = 0; - if (blk_mq_tag_busy(hctx)) { - rq->cmd_flags = REQ_MQ_INFLIGHT; - atomic_inc(&hctx->nr_active); - } - - rq->tag = tag; - return rq; - } - - return NULL; -} - static int blk_mq_queue_enter(struct request_queue *q) { int ret; @@ -263,6 +239,32 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx, ctx->rq_dispatched[rw_is_sync(rw_flags)]++; } +static struct request * +__blk_mq_alloc_request(struct request_queue *q, struct blk_mq_hw_ctx *hctx, + struct blk_mq_ctx *ctx, int rw, gfp_t gfp, bool reserved) +{ + struct request *rq; + unsigned int tag; + + tag = blk_mq_get_tag(hctx, &ctx->last_tag, gfp, reserved); + if (tag != BLK_MQ_TAG_FAIL) { + rq = hctx->tags->rqs[tag]; + + rq->cmd_flags = 0; + if (blk_mq_tag_busy(hctx)) { + rq->cmd_flags = REQ_MQ_INFLIGHT; + atomic_inc(&hctx->nr_active); + } + + rq->tag = tag; + blk_mq_rq_ctx_init(q, ctx, rq, rw); + return rq; + } + + return NULL; +} + + static struct request *blk_mq_alloc_request_pinned(struct request_queue *q, int rw, gfp_t gfp, bool reserved) @@ -273,12 +275,10 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q, struct blk_mq_ctx *ctx = blk_mq_get_ctx(q); struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu); - rq = __blk_mq_alloc_request(hctx, ctx, gfp & ~__GFP_WAIT, + rq = __blk_mq_alloc_request(q, hctx, ctx, rw, gfp & ~__GFP_WAIT, reserved); - if (rq) { - blk_mq_rq_ctx_init(q, ctx, rq, rw); + if (rq) break; - } if (gfp & __GFP_WAIT) { __blk_mq_run_hw_queue(hctx); @@ -1178,10 +1178,8 @@ static struct request *blk_mq_map_request(struct request_queue *q, rw |= REQ_SYNC; trace_block_getrq(q, bio, rw); - rq = __blk_mq_alloc_request(hctx, ctx, GFP_ATOMIC, false); - if (likely(rq)) - blk_mq_rq_ctx_init(q, ctx, rq, rw); - else { + rq = __blk_mq_alloc_request(q, hctx, ctx, rw, GFP_ATOMIC, false); + if (unlikely(!rq)) { blk_mq_put_ctx(ctx); trace_block_sleeprq(q, bio, rw); rq = blk_mq_alloc_request_pinned(q, rw, __GFP_WAIT|GFP_ATOMIC, -- cgit v1.2.3-59-g8ed1b From a3bd77567cae6af700dcd245148befc73fc89a50 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 27 May 2014 20:59:48 +0200 Subject: blk-mq: remove blk_mq_wait_for_tags The current logic for blocking tag allocation is rather confusing, as we first allocated and then free again a tag in blk_mq_wait_for_tags, just to attempt a non-blocking allocation and then repeat if someone else managed to grab the tag before us. Instead change blk_mq_alloc_request_pinned to simply do a blocking tag allocation itself and use the request we get back from it. 
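The control-flow difference is easier to see outside the kernel. Below is a small userspace model (a counting semaphore standing in for the tag pool, names invented) of why the old grab-release-retry dance could keep losing the race while the new blocking allocation cannot:

#include <semaphore.h>

static sem_t tag_pool;

/* Old shape: wait until a tag was freed, immediately give it back, then
 * retry a non-blocking grab; another thread can win that race, so the
 * caller has to loop. */
static void get_tag_old(void)
{
	while (sem_trywait(&tag_pool) != 0) {
		sem_wait(&tag_pool);	/* a tag became free... */
		sem_post(&tag_pool);	/* ...hand it straight back and race */
	}
}

/* New shape: block until we actually own a tag, and keep it. */
static void get_tag_new(void)
{
	sem_wait(&tag_pool);
}

int main(void)
{
	sem_init(&tag_pool, 0, 1);
	get_tag_new();
	sem_post(&tag_pool);
	get_tag_old();
	sem_post(&tag_pool);
	sem_destroy(&tag_pool);
	return 0;
}
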
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq-tag.c | 8 -------- block/blk-mq-tag.h | 1 - block/blk-mq.c | 13 ++++++------- 3 files changed, 6 insertions(+), 16 deletions(-) (limited to 'block') diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 05e2baf4fa0d..0d0640d38a06 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -7,14 +7,6 @@ #include "blk-mq.h" #include "blk-mq-tag.h" -void blk_mq_wait_for_tags(struct blk_mq_hw_ctx *hctx, bool reserved) -{ - int tag, zero = 0; - - tag = blk_mq_get_tag(hctx, &zero, __GFP_WAIT, reserved); - blk_mq_put_tag(hctx, tag, &zero); -} - static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt) { int i; diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index 2e5e6872d089..c959de58d2a5 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -49,7 +49,6 @@ extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int r extern void blk_mq_free_tags(struct blk_mq_tags *tags); extern unsigned int blk_mq_get_tag(struct blk_mq_hw_ctx *hctx, unsigned int *last_tag, gfp_t gfp, bool reserved); -extern void blk_mq_wait_for_tags(struct blk_mq_hw_ctx *hctx, bool reserved); extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag, unsigned int *last_tag); extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags); extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page); diff --git a/block/blk-mq.c b/block/blk-mq.c index 04ef7ecb3c7f..3224888d329a 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -264,31 +264,30 @@ __blk_mq_alloc_request(struct request_queue *q, struct blk_mq_hw_ctx *hctx, return NULL; } - static struct request *blk_mq_alloc_request_pinned(struct request_queue *q, int rw, gfp_t gfp, bool reserved) { + bool gfp_mask = gfp & ~__GFP_WAIT; struct request *rq; do { struct blk_mq_ctx *ctx = blk_mq_get_ctx(q); struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu); - rq = __blk_mq_alloc_request(q, hctx, ctx, rw, gfp & ~__GFP_WAIT, + rq = __blk_mq_alloc_request(q, hctx, ctx, rw, gfp_mask, reserved); if (rq) break; - if (gfp & __GFP_WAIT) { - __blk_mq_run_hw_queue(hctx); - blk_mq_put_ctx(ctx); - } else { + if (!(gfp & __GFP_WAIT)) { blk_mq_put_ctx(ctx); break; } - blk_mq_wait_for_tags(hctx, reserved); + __blk_mq_run_hw_queue(hctx); + blk_mq_put_ctx(ctx); + gfp_mask = gfp; } while (1); return rq; -- cgit v1.2.3-59-g8ed1b From 793597a6a95675f4f85671cf747c1d92e7dbc295 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 27 May 2014 20:59:49 +0200 Subject: blk-mq: do not use blk_mq_alloc_request_pinned in blk_mq_map_request We already do a non-blocking allocation in blk_mq_map_request, no need to repeat it. Just call __blk_mq_alloc_request to wait directly. 
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index 3224888d329a..43f0c8ffa92a 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1179,12 +1179,14 @@ static struct request *blk_mq_map_request(struct request_queue *q, trace_block_getrq(q, bio, rw); rq = __blk_mq_alloc_request(q, hctx, ctx, rw, GFP_ATOMIC, false); if (unlikely(!rq)) { + __blk_mq_run_hw_queue(hctx); blk_mq_put_ctx(ctx); trace_block_sleeprq(q, bio, rw); - rq = blk_mq_alloc_request_pinned(q, rw, __GFP_WAIT|GFP_ATOMIC, - false); - ctx = rq->mq_ctx; + + ctx = blk_mq_get_ctx(q); hctx = q->mq_ops->map_queue(q, ctx->cpu); + rq = __blk_mq_alloc_request(q, hctx, ctx, rw, + __GFP_WAIT|GFP_ATOMIC, false); } hctx->queued++; -- cgit v1.2.3-59-g8ed1b From d852564f8c88b0604490234fdeeb6fb47e4bcc7a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 27 May 2014 20:59:50 +0200 Subject: blk-mq: remove blk_mq_alloc_request_pinned We now only have one caller left and can open code it there in a cleaner way. Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq.c | 48 ++++++++++++++++-------------------------------- 1 file changed, 16 insertions(+), 32 deletions(-) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index 43f0c8ffa92a..ae14749b530c 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -264,46 +264,30 @@ __blk_mq_alloc_request(struct request_queue *q, struct blk_mq_hw_ctx *hctx, return NULL; } -static struct request *blk_mq_alloc_request_pinned(struct request_queue *q, - int rw, gfp_t gfp, - bool reserved) -{ - bool gfp_mask = gfp & ~__GFP_WAIT; - struct request *rq; - - do { - struct blk_mq_ctx *ctx = blk_mq_get_ctx(q); - struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu); - - rq = __blk_mq_alloc_request(q, hctx, ctx, rw, gfp_mask, - reserved); - if (rq) - break; - - if (!(gfp & __GFP_WAIT)) { - blk_mq_put_ctx(ctx); - break; - } - - __blk_mq_run_hw_queue(hctx); - blk_mq_put_ctx(ctx); - gfp_mask = gfp; - } while (1); - - return rq; -} - struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, bool reserved) { + struct blk_mq_ctx *ctx; + struct blk_mq_hw_ctx *hctx; struct request *rq; if (blk_mq_queue_enter(q)) return NULL; - rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved); - if (rq) - blk_mq_put_ctx(rq->mq_ctx); + ctx = blk_mq_get_ctx(q); + hctx = q->mq_ops->map_queue(q, ctx->cpu); + + rq = __blk_mq_alloc_request(q, hctx, ctx, rw, gfp & ~__GFP_WAIT, + reserved); + if (!rq && (gfp & __GFP_WAIT)) { + __blk_mq_run_hw_queue(hctx); + blk_mq_put_ctx(ctx); + + ctx = blk_mq_get_ctx(q); + hctx = q->mq_ops->map_queue(q, ctx->cpu); + rq = __blk_mq_alloc_request(q, hctx, ctx, rw, gfp, reserved); + } + blk_mq_put_ctx(ctx); return rq; } EXPORT_SYMBOL(blk_mq_alloc_request); -- cgit v1.2.3-59-g8ed1b From 75bb4625bb78d6a2d879dcb6a7d482861295765b Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 28 May 2014 10:15:41 -0600 Subject: blk-mq: add file comments and update copyright notices None of the blk-mq files have an explanatory comment at the top for what that particular file does. Add that and add appropriate copyright notices as well. 
Signed-off-by: Jens Axboe --- block/blk-mq-cpu.c | 5 +++++ block/blk-mq-cpumap.c | 5 +++++ block/blk-mq-tag.c | 12 ++++++++++++ block/blk-mq.c | 6 ++++++ 4 files changed, 28 insertions(+) (limited to 'block') diff --git a/block/blk-mq-cpu.c b/block/blk-mq-cpu.c index d2c253f71b86..bb3ed488f7b5 100644 --- a/block/blk-mq-cpu.c +++ b/block/blk-mq-cpu.c @@ -1,3 +1,8 @@ +/* + * CPU notifier helper code for blk-mq + * + * Copyright (C) 2013-2014 Jens Axboe + */ #include #include #include diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c index 0daacb927be1..1065d7c65fa1 100644 --- a/block/blk-mq-cpumap.c +++ b/block/blk-mq-cpumap.c @@ -1,3 +1,8 @@ +/* + * CPU <-> hardware queue mapping helpers + * + * Copyright (C) 2013-2014 Jens Axboe + */ #include #include #include diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 0d0640d38a06..d90c4aeb7dd3 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -1,3 +1,15 @@ +/* + * Fast and scalable bitmap tagging variant. Uses sparser bitmaps spread + * over multiple cachelines to avoid ping-pong between multiple submitters + * or submitter and completer. Uses rolling wakeups to avoid falling of + * the scaling cliff when we run out of tags and have to start putting + * submitters to sleep. + * + * Uses active queue tracking to support fairer distribution of tags + * between multiple submitters when a shared tag map is used. + * + * Copyright (C) 2013-2014 Jens Axboe + */ #include #include #include diff --git a/block/blk-mq.c b/block/blk-mq.c index ae14749b530c..5cc4b871cb11 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1,3 +1,9 @@ +/* + * Block multiqueue core code + * + * Copyright (C) 2013-2014 Jens Axboe + * Copyright (C) 2013-2014 Christoph Hellwig + */ #include #include #include -- cgit v1.2.3-59-g8ed1b From cdef54dd85ad66e77262ea57796a3e81683dd5d6 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 28 May 2014 18:11:06 +0200 Subject: blk-mq: remove alloc_hctx and free_hctx methods There is no need for drivers to control hardware context allocation now that we do the context to node mapping in common code. 
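After this change the only hooks a driver must supply are queue_rq and map_queue (the blk_mq_alloc_tag_set() check in the diff below enforces exactly that); an illustrative minimal ops table, with a hypothetical queue_rq:

static struct blk_mq_ops mydrv_mq_ops = {
	.queue_rq	= mydrv_queue_rq,	/* hypothetical driver hook */
	.map_queue	= blk_mq_map_queue,	/* stock CPU -> hw queue mapping */
	/* no .alloc_hctx / .free_hctx any more: the core kzalloc_node()s
	 * each hctx on the node returned by blk_mq_hw_queue_to_node() */
};
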
Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq.c | 26 +++++--------------------- drivers/block/null_blk.c | 28 +--------------------------- drivers/block/virtio_blk.c | 2 -- include/linux/blk-mq.h | 10 ---------- 4 files changed, 6 insertions(+), 60 deletions(-) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index 5cc4b871cb11..f27fe44230c2 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1335,21 +1335,6 @@ struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu) } EXPORT_SYMBOL(blk_mq_map_queue); -struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *set, - unsigned int hctx_index, - int node) -{ - return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, node); -} -EXPORT_SYMBOL(blk_mq_alloc_single_hw_queue); - -void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *hctx, - unsigned int hctx_index) -{ - kfree(hctx); -} -EXPORT_SYMBOL(blk_mq_free_single_hw_queue); - static void blk_mq_free_rq_map(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, unsigned int hctx_idx) { @@ -1590,7 +1575,7 @@ static void blk_mq_free_hw_queues(struct request_queue *q, queue_for_each_hw_ctx(q, hctx, i) { free_cpumask_var(hctx->cpumask); - set->ops->free_hctx(hctx, i); + kfree(hctx); } } @@ -1811,7 +1796,8 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) for (i = 0; i < set->nr_hw_queues; i++) { int node = blk_mq_hw_queue_to_node(map, i); - hctxs[i] = set->ops->alloc_hctx(set, i, node); + hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx), + GFP_KERNEL, node); if (!hctxs[i]) goto err_hctxs; @@ -1898,7 +1884,7 @@ err_hctxs: if (!hctxs[i]) break; free_cpumask_var(hctxs[i]->cpumask); - set->ops->free_hctx(hctxs[i], i); + kfree(hctxs[i]); } err_map: kfree(hctxs); @@ -1983,9 +1969,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) return -EINVAL; - if (!set->nr_hw_queues || - !set->ops->queue_rq || !set->ops->map_queue || - !set->ops->alloc_hctx || !set->ops->free_hctx) + if (!set->nr_hw_queues || !set->ops->queue_rq || !set->ops->map_queue) return -EINVAL; diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 4d33c8c25fbf..b40af63a5476 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -321,18 +321,6 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq) return BLK_MQ_RQ_QUEUE_OK; } -static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_tag_set *set, - unsigned int hctx_index, - int node) -{ - return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, node); -} - -static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index) -{ - kfree(hctx); -} - static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq) { BUG_ON(!nullb); @@ -360,17 +348,6 @@ static struct blk_mq_ops null_mq_ops = { .map_queue = blk_mq_map_queue, .init_hctx = null_init_hctx, .complete = null_softirq_done_fn, - .alloc_hctx = blk_mq_alloc_single_hw_queue, - .free_hctx = blk_mq_free_single_hw_queue, -}; - -static struct blk_mq_ops null_mq_ops_pernode = { - .queue_rq = null_queue_rq, - .map_queue = blk_mq_map_queue, - .init_hctx = null_init_hctx, - .complete = null_softirq_done_fn, - .alloc_hctx = null_alloc_hctx, - .free_hctx = null_free_hctx, }; static void null_del_dev(struct nullb *nullb) @@ -496,10 +473,7 @@ static int null_add_dev(void) goto out_free_nullb; if (queue_mode == NULL_Q_MQ) { - if (use_per_node_hctx) - nullb->tag_set.ops = &null_mq_ops_pernode; 
- else - nullb->tag_set.ops = &null_mq_ops; + nullb->tag_set.ops = &null_mq_ops; nullb->tag_set.nr_hw_queues = submit_queues; nullb->tag_set.queue_depth = hw_queue_depth; nullb->tag_set.numa_node = home_node; diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 7a51f065edcd..16c21c0cb14d 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -497,8 +497,6 @@ static int virtblk_init_request(void *data, struct request *rq, static struct blk_mq_ops virtio_mq_ops = { .queue_rq = virtio_queue_rq, .map_queue = blk_mq_map_queue, - .alloc_hctx = blk_mq_alloc_single_hw_queue, - .free_hctx = blk_mq_free_single_hw_queue, .complete = virtblk_request_done, .init_request = virtblk_init_request, }; diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 2bd82f399128..91dfb75ce39f 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -79,9 +79,6 @@ struct blk_mq_tag_set { typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *); typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int); -typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_tag_set *, - unsigned int, int); -typedef void (free_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int); typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int); typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int); typedef int (init_request_fn)(void *, struct request *, unsigned int, @@ -107,12 +104,6 @@ struct blk_mq_ops { softirq_done_fn *complete; - /* - * Override for hctx allocations (should probably go) - */ - alloc_hctx_fn *alloc_hctx; - free_hctx_fn *free_hctx; - /* * Called when the block layer side of a hardware queue has been * set up, allowing the driver to allocate/init matching structures. @@ -166,7 +157,6 @@ struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag); struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int); -void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int); void blk_mq_end_io(struct request *rq, int error); void __blk_mq_end_io(struct request *rq, int error); -- cgit v1.2.3-59-g8ed1b From 4d92a9beb39d80a7d8ff7c04ae12a10290105ae5 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 29 May 2014 08:09:00 -0600 Subject: block: remove 'magic' from struct blk_plug I don't think we've ever caught any bugs with this, and there's the list poisoning for the plug lists to catch uninitialized cases. So remove the magic member and save 8 bytes in the struct. 
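For context (illustrative usage, not part of this patch): the structure lives on the submitter's stack for the duration of a batch, which is why both the 8-byte saving and the reliance on list poisoning are essentially free.

struct blk_plug plug;

blk_start_plug(&plug);
/* ... submit a batch of bios/requests; they collect on plug->list and
 *     plug->mq_list instead of hitting the driver one by one ... */
blk_finish_plug(&plug);	/* flushes the batched requests to the device(s) */
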
Signed-off-by: Jens Axboe --- block/blk-core.c | 5 ----- include/linux/blkdev.h | 1 - 2 files changed, 6 deletions(-) (limited to 'block') diff --git a/block/blk-core.c b/block/blk-core.c index d87be5b4e554..40d654861c33 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -2957,8 +2957,6 @@ int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, } EXPORT_SYMBOL(kblockd_schedule_delayed_work_on); -#define PLUG_MAGIC 0x91827364 - /** * blk_start_plug - initialize blk_plug and track it inside the task_struct * @plug: The &struct blk_plug that needs to be initialized @@ -2977,7 +2975,6 @@ void blk_start_plug(struct blk_plug *plug) { struct task_struct *tsk = current; - plug->magic = PLUG_MAGIC; INIT_LIST_HEAD(&plug->list); INIT_LIST_HEAD(&plug->mq_list); INIT_LIST_HEAD(&plug->cb_list); @@ -3074,8 +3071,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) LIST_HEAD(list); unsigned int depth; - BUG_ON(plug->magic != PLUG_MAGIC); - flush_plug_callbacks(plug, from_schedule); if (!list_empty(&plug->mq_list)) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 913f1c2d3be0..098304576d51 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1060,7 +1060,6 @@ static inline void blk_post_runtime_resume(struct request_queue *q, int err) {} * schedule() where blk_schedule_flush_plug() is called. */ struct blk_plug { - unsigned long magic; /* detect uninitialized use-cases */ struct list_head list; /* requests */ struct list_head mq_list; /* blk-mq requests */ struct list_head cb_list; /* md requires an unplug callback */ -- cgit v1.2.3-59-g8ed1b From 05f1dd5315217398fc8d122bdee80f96a9f21274 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 29 May 2014 09:53:32 -0600 Subject: block: add queue flag for disabling SG merging If devices are not SG starved, we waste a lot of time potentially collapsing SG segments. Enough that 1.5% of the CPU time goes to this, at only 400K IOPS. Add a queue flag, QUEUE_FLAG_NO_SG_MERGE, which just returns the number of vectors in a bio instead of looping over all segments and checking for collapsible ones. Add a BLK_MQ_F_SG_MERGE flag so that drivers can opt-in on the sg merging, if they so desire. Signed-off-by: Jens Axboe --- block/blk-merge.c | 28 +++++++++++++++++++++------- block/blk-mq.c | 3 +++ include/linux/blk-mq.h | 1 + include/linux/blkdev.h | 1 + 4 files changed, 26 insertions(+), 7 deletions(-) (limited to 'block') diff --git a/block/blk-merge.c b/block/blk-merge.c index 6c583f9c5b65..b3bf0df0f4c2 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -13,7 +13,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q, struct bio *bio) { struct bio_vec bv, bvprv = { NULL }; - int cluster, high, highprv = 1; + int cluster, high, highprv = 1, no_sg_merge; unsigned int seg_size, nr_phys_segs; struct bio *fbio, *bbio; struct bvec_iter iter; @@ -35,12 +35,21 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q, cluster = blk_queue_cluster(q); seg_size = 0; nr_phys_segs = 0; + no_sg_merge = test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags); + high = 0; for_each_bio(bio) { bio_for_each_segment(bv, bio, iter) { + /* + * If SG merging is disabled, each bio vector is + * a segment + */ + if (no_sg_merge) + goto new_segment; + /* * the trick here is making sure that a high page is - * never considered part of another segment, since that - * might change with the bounce page. 
+ * never considered part of another segment, since + * that might change with the bounce page. */ high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q); if (!high && !highprv && cluster) { @@ -84,11 +93,16 @@ void blk_recalc_rq_segments(struct request *rq) void blk_recount_segments(struct request_queue *q, struct bio *bio) { - struct bio *nxt = bio->bi_next; + if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags)) + bio->bi_phys_segments = bio->bi_vcnt; + else { + struct bio *nxt = bio->bi_next; + + bio->bi_next = NULL; + bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio); + bio->bi_next = nxt; + } - bio->bi_next = NULL; - bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio); - bio->bi_next = nxt; bio->bi_flags |= (1 << BIO_SEG_VALID); } EXPORT_SYMBOL(blk_recount_segments); diff --git a/block/blk-mq.c b/block/blk-mq.c index f27fe44230c2..f98d977fd150 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1829,6 +1829,9 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) q->mq_ops = set->ops; q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; + if (!(set->flags & BLK_MQ_F_SG_MERGE)) + q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE; + q->sg_reserved_size = INT_MAX; INIT_WORK(&q->requeue_work, blk_mq_requeue_work); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 91dfb75ce39f..95de239444d2 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -129,6 +129,7 @@ enum { BLK_MQ_F_SHOULD_MERGE = 1 << 0, BLK_MQ_F_SHOULD_SORT = 1 << 1, BLK_MQ_F_TAG_SHARED = 1 << 2, + BLK_MQ_F_SG_MERGE = 1 << 3, BLK_MQ_S_STOPPED = 0, BLK_MQ_S_TAG_ACTIVE = 1, diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 098304576d51..695b9fd41efe 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -510,6 +510,7 @@ struct request_queue { #define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */ #define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */ #define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */ +#define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/ #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ (1 << QUEUE_FLAG_STACKABLE) | \ -- cgit v1.2.3-59-g8ed1b From 4b570521be54666e6ad7e5f47af92fd609fbd8b5 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 29 May 2014 11:00:11 -0600 Subject: blk-mq: request initialization optimizations We currently clear a lot more than we need to, so make that a bit more clever. Make some of the init dependent on features, like only setting start_time if we are going to use it. 
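The second half of the change (the blk_mq_start_request() hunk below) is a common micro-optimization: an unconditional set_bit()/clear_bit() is an atomic read-modify-write that dirties the cacheline even when the bit already holds the desired value, so testing first keeps the common case read-only. A userspace model of the idea (relaxed ordering chosen only for the illustration):

#include <stdatomic.h>
#include <stdio.h>

static void set_bit_if_needed(atomic_uint *flags, unsigned int bit)
{
	/* Plain load in the common case; atomic RMW only when the bit
	 * actually has to change. */
	if (!(atomic_load_explicit(flags, memory_order_relaxed) & (1u << bit)))
		atomic_fetch_or(flags, 1u << bit);
}

int main(void)
{
	atomic_uint flags = 0;

	set_bit_if_needed(&flags, 0);	/* performs the RMW */
	set_bit_if_needed(&flags, 0);	/* already set: read only */
	printf("flags=%u\n", atomic_load(&flags));
	return 0;
}
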
Signed-off-by: Jens Axboe --- block/blk-mq.c | 26 +++++++++----------------- 1 file changed, 9 insertions(+), 17 deletions(-) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index f98d977fd150..6160128085fc 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -199,19 +199,12 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx, rq->q = q; rq->mq_ctx = ctx; rq->cmd_flags |= rw_flags; - rq->cmd_type = 0; /* do not touch atomic flags, it needs atomic ops against the timer */ rq->cpu = -1; - rq->__data_len = 0; - rq->__sector = (sector_t) -1; - rq->bio = NULL; - rq->biotail = NULL; INIT_HLIST_NODE(&rq->hash); RB_CLEAR_NODE(&rq->rb_node); - memset(&rq->flush, 0, max(sizeof(rq->flush), sizeof(rq->elv))); rq->rq_disk = NULL; rq->part = NULL; - rq->start_time = jiffies; #ifdef CONFIG_BLK_CGROUP rq->rl = NULL; set_start_time_ns(rq); @@ -221,23 +214,16 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx, #if defined(CONFIG_BLK_DEV_INTEGRITY) rq->nr_integrity_segments = 0; #endif - rq->ioprio = 0; rq->special = NULL; /* tag was already set */ rq->errors = 0; - memset(rq->__cmd, 0, sizeof(rq->__cmd)); - rq->cmd = rq->__cmd; - rq->cmd_len = BLK_MAX_CDB; rq->extra_len = 0; rq->sense_len = 0; rq->resid_len = 0; rq->sense = NULL; - rq->deadline = 0; INIT_LIST_HEAD(&rq->timeout_list); - rq->timeout = 0; - rq->retries = 0; rq->end_io = NULL; rq->end_io_data = NULL; rq->next_rq = NULL; @@ -449,8 +435,10 @@ static void blk_mq_start_request(struct request *rq, bool last) * complete. So be sure to clear complete again when we start * the request, otherwise we'll ignore the completion event. */ - set_bit(REQ_ATOM_STARTED, &rq->atomic_flags); - clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags); + if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) + set_bit(REQ_ATOM_STARTED, &rq->atomic_flags); + if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags)) + clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags); if (q->dma_drain_size && blk_rq_bytes(rq)) { /* @@ -1112,7 +1100,11 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) static void blk_mq_bio_to_request(struct request *rq, struct bio *bio) { init_request_from_bio(rq, bio); - blk_account_io_start(rq, 1); + + if (blk_do_io_stat(rq)) { + rq->start_time = jiffies; + blk_account_io_start(rq, 1); + } } static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx, -- cgit v1.2.3-59-g8ed1b From da52f22fa924b4a21d8e11fbfd3eeebd7a90a366 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Thu, 29 May 2014 15:11:30 -0400 Subject: block: remove dead code in scsi_ioctl:blk_verify_command filter gets assigned the address of blk_default_cmd_filter on entry to this function, so the !filter condition can never be true. 
Signed-off-by: Dave Jones Signed-off-by: Jens Axboe --- block/scsi_ioctl.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'block') diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c index 26487972ac54..9c28a5b38042 100644 --- a/block/scsi_ioctl.c +++ b/block/scsi_ioctl.c @@ -205,10 +205,6 @@ int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm) if (capable(CAP_SYS_RAWIO)) return 0; - /* if there's no filter set, assume we're filtering everything out */ - if (!filter) - return -EPERM; - /* Anybody who can open the device can do a read-safe command */ if (test_bit(cmd[0], filter->read_ok)) return 0; -- cgit v1.2.3-59-g8ed1b From 2230237500821aedfcf2bba2a79d9cbca389233c Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Fri, 30 May 2014 08:06:42 -0600 Subject: blk-mq: blk_mq_tag_to_rq should handle flush request flush request is special, which borrows the tag from the parent request. Hence blk_mq_tag_to_rq needs special handling to return the flush request from the tag. Signed-off-by: Shaohua Li Signed-off-by: Jens Axboe --- block/blk-flush.c | 4 +++- block/blk-mq.c | 12 +++++++++--- include/linux/blk-mq.h | 2 +- 3 files changed, 13 insertions(+), 5 deletions(-) (limited to 'block') diff --git a/block/blk-flush.c b/block/blk-flush.c index ef608b35d9be..ff87c664b7df 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -223,8 +223,10 @@ static void flush_end_io(struct request *flush_rq, int error) struct request *rq, *n; unsigned long flags = 0; - if (q->mq_ops) + if (q->mq_ops) { spin_lock_irqsave(&q->mq_flush_lock, flags); + q->flush_rq->cmd_flags = 0; + } running = &q->flush_queue[q->flush_running_idx]; BUG_ON(q->flush_pending_idx == q->flush_running_idx); diff --git a/block/blk-mq.c b/block/blk-mq.c index 6160128085fc..21f952ab3581 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -541,9 +541,15 @@ void blk_mq_kick_requeue_list(struct request_queue *q) } EXPORT_SYMBOL(blk_mq_kick_requeue_list); -struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag) +struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx, unsigned int tag) { - return tags->rqs[tag]; + struct request_queue *q = hctx->queue; + + if ((q->flush_rq->cmd_flags & REQ_FLUSH_SEQ) && + q->flush_rq->tag == tag) + return q->flush_rq; + + return hctx->tags->rqs[tag]; } EXPORT_SYMBOL(blk_mq_tag_to_rq); @@ -572,7 +578,7 @@ static void blk_mq_timeout_check(void *__data, unsigned long *free_tags) if (tag >= hctx->tags->nr_tags) break; - rq = blk_mq_tag_to_rq(hctx->tags, tag++); + rq = blk_mq_tag_to_rq(hctx, tag++); if (rq->q != hctx->queue) continue; if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 95de239444d2..ad3adb73cc70 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -154,7 +154,7 @@ void blk_mq_free_request(struct request *rq); bool blk_mq_can_queue(struct blk_mq_hw_ctx *); struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, bool reserved); -struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag); +struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx, unsigned int tag); struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int); -- cgit v1.2.3-59-g8ed1b From 67aec14ce87fe25bdfff7dbf468556333df11c4e Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 30 May 2014 08:25:36 -0600 Subject: blk-mq: make the sysfs mq/ layout 
reflect current mappings Currently blk-mq registers all the hardware queues in sysfs, regardless of whether it uses them (e.g. they have CPU mappings) or not. The unused hardware queues lack the cpux/ directories, and the other sysfs entries (like active, pending, etc) are all zeroes. Change this so that sysfs correctly reflects the current mappings of the hardware queues. Signed-off-by: Jens Axboe --- block/blk-mq-sysfs.c | 102 ++++++++++++++++++++++++++++++++++++++++--------- block/blk-mq.c | 4 ++ block/blk-mq.h | 6 +++ include/linux/blk-mq.h | 1 + 4 files changed, 94 insertions(+), 19 deletions(-) (limited to 'block') diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c index 99a60a829e69..e5f575ff0bf9 100644 --- a/block/blk-mq-sysfs.c +++ b/block/blk-mq-sysfs.c @@ -327,6 +327,42 @@ static struct kobj_type blk_mq_hw_ktype = { .release = blk_mq_sysfs_release, }; +void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx) +{ + struct blk_mq_ctx *ctx; + int i; + + if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP)) + return; + + hctx_for_each_ctx(hctx, ctx, i) + kobject_del(&ctx->kobj); + + kobject_del(&hctx->kobj); +} + +int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx) +{ + struct request_queue *q = hctx->queue; + struct blk_mq_ctx *ctx; + int i, ret; + + if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP)) + return 0; + + ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num); + if (ret) + return ret; + + hctx_for_each_ctx(hctx, ctx, i) { + ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu); + if (ret) + break; + } + + return ret; +} + void blk_mq_unregister_disk(struct gendisk *disk) { struct request_queue *q = disk->queue; @@ -335,11 +371,11 @@ void blk_mq_unregister_disk(struct gendisk *disk) int i, j; queue_for_each_hw_ctx(q, hctx, i) { - hctx_for_each_ctx(hctx, ctx, j) { - kobject_del(&ctx->kobj); + blk_mq_unregister_hctx(hctx); + + hctx_for_each_ctx(hctx, ctx, j) kobject_put(&ctx->kobj); - } - kobject_del(&hctx->kobj); + kobject_put(&hctx->kobj); } @@ -350,15 +386,30 @@ void blk_mq_unregister_disk(struct gendisk *disk) kobject_put(&disk_to_dev(disk)->kobj); } +static void blk_mq_sysfs_init(struct request_queue *q) +{ + struct blk_mq_hw_ctx *hctx; + struct blk_mq_ctx *ctx; + int i, j; + + kobject_init(&q->mq_kobj, &blk_mq_ktype); + + queue_for_each_hw_ctx(q, hctx, i) { + kobject_init(&hctx->kobj, &blk_mq_hw_ktype); + + hctx_for_each_ctx(hctx, ctx, j) + kobject_init(&ctx->kobj, &blk_mq_ctx_ktype); + } +} + int blk_mq_register_disk(struct gendisk *disk) { struct device *dev = disk_to_dev(disk); struct request_queue *q = disk->queue; struct blk_mq_hw_ctx *hctx; - struct blk_mq_ctx *ctx; - int ret, i, j; + int ret, i; - kobject_init(&q->mq_kobj, &blk_mq_ktype); + blk_mq_sysfs_init(q); ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq"); if (ret < 0) @@ -367,20 +418,10 @@ int blk_mq_register_disk(struct gendisk *disk) kobject_uevent(&q->mq_kobj, KOBJ_ADD); queue_for_each_hw_ctx(q, hctx, i) { - kobject_init(&hctx->kobj, &blk_mq_hw_ktype); - ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", i); + hctx->flags |= BLK_MQ_F_SYSFS_UP; + ret = blk_mq_register_hctx(hctx); if (ret) break; - - if (!hctx->nr_ctx) - continue; - - hctx_for_each_ctx(hctx, ctx, j) { - kobject_init(&ctx->kobj, &blk_mq_ctx_ktype); - ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu); - if (ret) - break; - } } if (ret) { @@ -390,3 +431,26 @@ int blk_mq_register_disk(struct gendisk *disk) return 0; } + +void blk_mq_sysfs_unregister(struct request_queue *q) +{ + 
struct blk_mq_hw_ctx *hctx; + int i; + + queue_for_each_hw_ctx(q, hctx, i) + blk_mq_unregister_hctx(hctx); +} + +int blk_mq_sysfs_register(struct request_queue *q) +{ + struct blk_mq_hw_ctx *hctx; + int i, ret = 0; + + queue_for_each_hw_ctx(q, hctx, i) { + ret = blk_mq_register_hctx(hctx); + if (ret) + break; + } + + return ret; +} diff --git a/block/blk-mq.c b/block/blk-mq.c index 21f952ab3581..71f564e8812e 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1924,6 +1924,8 @@ static void blk_mq_queue_reinit(struct request_queue *q) { blk_mq_freeze_queue(q); + blk_mq_sysfs_unregister(q); + blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues); /* @@ -1934,6 +1936,8 @@ static void blk_mq_queue_reinit(struct request_queue *q) blk_mq_map_swqueue(q); + blk_mq_sysfs_register(q); + blk_mq_unfreeze_queue(q); } diff --git a/block/blk-mq.h b/block/blk-mq.h index ff5e6bf0f691..de7b3bbd5bd6 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -54,6 +54,12 @@ extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set); extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues); extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int); +/* + * sysfs helpers + */ +extern int blk_mq_sysfs_register(struct request_queue *q); +extern void blk_mq_sysfs_unregister(struct request_queue *q); + /* * Basic implementation of sparser bitmap, allowing the user to spread * the bits over more cachelines. diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index ad3adb73cc70..c15128833100 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -130,6 +130,7 @@ enum { BLK_MQ_F_SHOULD_SORT = 1 << 1, BLK_MQ_F_TAG_SHARED = 1 << 2, BLK_MQ_F_SG_MERGE = 1 << 3, + BLK_MQ_F_SYSFS_UP = 1 << 4, BLK_MQ_S_STOPPED = 0, BLK_MQ_S_TAG_ACTIVE = 1, -- cgit v1.2.3-59-g8ed1b From ee3c5db0896d85187b5f31b5482ed8fd308d31ee Mon Sep 17 00:00:00 2001 From: Fengguang Wu Date: Fri, 30 May 2014 10:31:13 -0600 Subject: blk-mq: blk_mq_unregister_hctx() can be static CC: Jens Axboe Signed-off-by: Fengguang Wu Signed-off-by: Jens Axboe --- block/blk-mq-sysfs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'block') diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c index e5f575ff0bf9..ed5217867555 100644 --- a/block/blk-mq-sysfs.c +++ b/block/blk-mq-sysfs.c @@ -327,7 +327,7 @@ static struct kobj_type blk_mq_hw_ktype = { .release = blk_mq_sysfs_release, }; -void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx) +static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx) { struct blk_mq_ctx *ctx; int i; @@ -341,7 +341,7 @@ void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx) kobject_del(&hctx->kobj); } -int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx) +static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx) { struct request_queue *q = hctx->queue; struct blk_mq_ctx *ctx; -- cgit v1.2.3-59-g8ed1b From c7bca4183f73f9d013ea8ae572528b48b5e1ee42 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 30 May 2014 15:41:39 -0600 Subject: block: ensure that the timer is always added Commit f793aa537866 relaxed the timer addition a little too much. If the timer isn't pending, we always need to add it. 
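Put as a predicate (illustrative only), the fixed test reads: always arm the queue timeout timer when nothing is pending, and otherwise only re-arm when the deadline moves by at least HZ/2 jiffies; the regression was dropping the first half of that disjunction.

/* Mirrors the fixed test in blk_add_timer(); 'diff' is the distance between
 * the currently programmed expiry and the new one, in jiffies. */
static bool should_rearm(bool pending, unsigned long diff, unsigned long hz)
{
	return !pending || diff >= hz / 2;
}
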
Signed-off-by: Jens Axboe --- block/blk-timeout.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'block') diff --git a/block/blk-timeout.c b/block/blk-timeout.c index 43e8b515806f..95a09590ccfd 100644 --- a/block/blk-timeout.c +++ b/block/blk-timeout.c @@ -224,7 +224,7 @@ void blk_add_timer(struct request *req) * modifying the timer because expires for value X * will be X + something. */ - if (diff >= HZ / 2) + if (!timer_pending(&q->timeout) || (diff >= HZ / 2)) mod_timer(&q->timeout, expiry); } -- cgit v1.2.3-59-g8ed1b From feff6894128e4bb3c903ce247edc5bd3a1b09fc9 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 30 May 2014 15:42:56 -0600 Subject: blk-mq: remember to start timeout handler for direct queue Commit 07068d5b8e added a direct-to-hw-queue mode, but this mode needs to remember to add the request timeout handler as well. Without it, we don't track timeouts for these requests. Signed-off-by: Jens Axboe --- block/blk-mq.c | 1 + 1 file changed, 1 insertion(+) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index 71f564e8812e..5443aea36542 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1217,6 +1217,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio) blk_mq_bio_to_request(rq, bio); blk_mq_start_request(rq, true); + blk_add_timer(rq); /* * For OK queue, we are done. For error, kill it. Any other -- cgit v1.2.3-59-g8ed1b From ed851860b4552fc8963ecf71eab9f6f7a5c19d74 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 30 May 2014 21:20:50 -0600 Subject: blk-mq: push IPI or local end_io decision to __blk_mq_complete_request() We have callers outside of the blk-mq proper (like timeouts) that want to call __blk_mq_complete_request(), so rename the function and put the decision code for whether to use ->softirq_done_fn or blk_mq_endio() into __blk_mq_complete_request(). This also makes the interface more logical again. blk_mq_complete_request() attempts to atomically mark the request completed, and calls __blk_mq_complete_request() if successful. __blk_mq_complete_request() then just ends the request. Signed-off-by: Jens Axboe --- block/blk-mq.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) (limited to 'block') diff --git a/block/blk-mq.c b/block/blk-mq.c index 5443aea36542..0f5879c42dcd 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -359,7 +359,7 @@ static void __blk_mq_complete_request_remote(void *data) rq->q->softirq_done_fn(rq); } -void __blk_mq_complete_request(struct request *rq) +static void blk_mq_ipi_complete_request(struct request *rq) { struct blk_mq_ctx *ctx = rq->mq_ctx; bool shared = false; @@ -385,6 +385,16 @@ void __blk_mq_complete_request(struct request *rq) put_cpu(); } +void __blk_mq_complete_request(struct request *rq) +{ + struct request_queue *q = rq->q; + + if (!q->softirq_done_fn) + blk_mq_end_io(rq, rq->errors); + else + blk_mq_ipi_complete_request(rq); +} + /** * blk_mq_complete_request - end I/O on a request * @rq: the request being processed @@ -399,12 +409,8 @@ void blk_mq_complete_request(struct request *rq) if (unlikely(blk_should_fake_timeout(q))) return; - if (!blk_mark_rq_complete(rq)) { - if (q->softirq_done_fn) - __blk_mq_complete_request(rq); - else - blk_mq_end_io(rq, rq->errors); - } + if (!blk_mark_rq_complete(rq)) + __blk_mq_complete_request(rq); } EXPORT_SYMBOL(blk_mq_complete_request); -- cgit v1.2.3-59-g8ed1b
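Behind the renamed helpers above sits the same claim-then-complete idiom: blk_mq_complete_request() races the timeout path to atomically mark the request done, and only the winner proceeds to __blk_mq_complete_request(). A small runnable userspace model of that idempotence (names invented):

#include <stdatomic.h>
#include <stdio.h>

struct req {
	atomic_int completed;
};

/* Whoever flips 'completed' from 0 to 1 first runs the completion work;
 * the loser (normal irq path vs. timeout handler) simply backs off. */
static void complete_once(struct req *r, const char *who)
{
	if (atomic_exchange(&r->completed, 1) == 0)
		printf("%s ends the request\n", who);
	else
		printf("%s lost the race, nothing to do\n", who);
}

int main(void)
{
	struct req r = { .completed = 0 };

	complete_once(&r, "irq completion");	/* e.g. blk_mq_complete_request() */
	complete_once(&r, "timeout handler");	/* arrives later, does nothing */
	return 0;
}
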