Diffstat (limited to 'block')
-rw-r--r--  block/bfq-cgroup.c      |  6
-rw-r--r--  block/blk-cgroup.c      |  2
-rw-r--r--  block/blk-core.c        | 94
-rw-r--r--  block/blk-mq-cpumap.c   | 10
-rw-r--r--  block/blk-mq-pci.c      |  2
-rw-r--r--  block/blk-mq-rdma.c     |  4
-rw-r--r--  block/blk-mq-sched.c    | 30
-rw-r--r--  block/blk-mq-sched.h    |  1
-rw-r--r--  block/blk-mq-virtio.c   |  4
-rw-r--r--  block/blk-mq.c          |  5
-rw-r--r--  block/blk-rq-qos.c      |  7
-rw-r--r--  block/blk-sysfs.c       | 47
-rw-r--r--  block/blk-throttle.c    |  2
-rw-r--r--  block/blk.h             | 11
-rw-r--r--  block/bsg-lib.c         |  1
-rw-r--r--  block/elevator.c        |  2
-rw-r--r--  block/genhd.c           |  4
-rw-r--r--  block/partitions/ldm.c  |  2
18 files changed, 124 insertions(+), 110 deletions(-)
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index b3796a40a61a..59f46904cb11 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -1046,7 +1046,8 @@ struct blkcg_policy blkcg_policy_bfq = {
struct cftype bfq_blkcg_legacy_files[] = {
{
.name = "bfq.weight",
- .flags = CFTYPE_NOT_ON_ROOT,
+ .link_name = "weight",
+ .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_SYMLINKED,
.seq_show = bfq_io_show_weight,
.write_u64 = bfq_io_set_weight_legacy,
},
@@ -1166,7 +1167,8 @@ struct cftype bfq_blkcg_legacy_files[] = {
struct cftype bfq_blkg_files[] = {
{
.name = "bfq.weight",
- .flags = CFTYPE_NOT_ON_ROOT,
+ .link_name = "weight",
+ .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_SYMLINKED,
.seq_show = bfq_io_show_weight,
.write = bfq_io_set_weight,
},
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b97b479e4f64..1f7127b03490 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -881,7 +881,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
blkg_free(new_blkg);
} else {
blkg = blkg_create(pos, q, new_blkg);
- if (unlikely(IS_ERR(blkg))) {
+ if (IS_ERR(blkg)) {
ret = PTR_ERR(blkg);
goto fail_unlock;
}
diff --git a/block/blk-core.c b/block/blk-core.c
index 1bf83a0df0f6..8340f69670d8 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -282,35 +282,6 @@ void blk_set_queue_dying(struct request_queue *q)
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);
-/* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
-void blk_exit_queue(struct request_queue *q)
-{
- /*
- * Since the I/O scheduler exit code may access cgroup information,
- * perform I/O scheduler exit before disassociating from the block
- * cgroup controller.
- */
- if (q->elevator) {
- ioc_clear_queue(q);
- elevator_exit(q, q->elevator);
- q->elevator = NULL;
- }
-
- /*
- * Remove all references to @q from the block cgroup controller before
- * restoring @q->queue_lock to avoid that restoring this pointer causes
- * e.g. blkcg_print_blkgs() to crash.
- */
- blkcg_exit_queue(q);
-
- /*
- * Since the cgroup code may dereference the @q->backing_dev_info
- * pointer, only decrease its reference count after having removed the
- * association with the block cgroup controller.
- */
- bdi_put(q->backing_dev_info);
-}
-
/**
* blk_cleanup_queue - shutdown a request queue
* @q: request queue to shutdown
@@ -346,17 +317,22 @@ void blk_cleanup_queue(struct request_queue *q)
del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
blk_sync_queue(q);
- /*
- * I/O scheduler exit is only safe after the sysfs scheduler attribute
- * has been removed.
- */
- WARN_ON_ONCE(q->kobj.state_in_sysfs);
-
- blk_exit_queue(q);
-
if (queue_is_mq(q))
blk_mq_exit_queue(q);
+ /*
+ * In theory, request pool of sched_tags belongs to request queue.
+ * However, the current implementation requires tag_set for freeing
+ * requests, so free the pool now.
+ *
+ * Queue has become frozen, there can't be any in-queue requests, so
+ * it is safe to free requests now.
+ */
+ mutex_lock(&q->sysfs_lock);
+ if (q->elevator)
+ blk_mq_sched_free_requests(q);
+ mutex_unlock(&q->sysfs_lock);
+
percpu_ref_exit(&q->q_usage_counter);
/* @q is and will stay empty, shutdown and put */
@@ -994,22 +970,8 @@ blk_qc_t generic_make_request(struct bio *bio)
* yet.
*/
struct bio_list bio_list_on_stack[2];
- blk_mq_req_flags_t flags = 0;
- struct request_queue *q = bio->bi_disk->queue;
blk_qc_t ret = BLK_QC_T_NONE;
- if (bio->bi_opf & REQ_NOWAIT)
- flags = BLK_MQ_REQ_NOWAIT;
- if (bio_flagged(bio, BIO_QUEUE_ENTERED))
- blk_queue_enter_live(q);
- else if (blk_queue_enter(q, flags) < 0) {
- if (!blk_queue_dying(q) && (bio->bi_opf & REQ_NOWAIT))
- bio_wouldblock_error(bio);
- else
- bio_io_error(bio);
- return ret;
- }
-
if (!generic_make_request_checks(bio))
goto out;
@@ -1046,22 +1008,11 @@ blk_qc_t generic_make_request(struct bio *bio)
bio_list_init(&bio_list_on_stack[0]);
current->bio_list = bio_list_on_stack;
do {
- bool enter_succeeded = true;
-
- if (unlikely(q != bio->bi_disk->queue)) {
- if (q)
- blk_queue_exit(q);
- q = bio->bi_disk->queue;
- flags = 0;
- if (bio->bi_opf & REQ_NOWAIT)
- flags = BLK_MQ_REQ_NOWAIT;
- if (blk_queue_enter(q, flags) < 0) {
- enter_succeeded = false;
- q = NULL;
- }
- }
+ struct request_queue *q = bio->bi_disk->queue;
+ blk_mq_req_flags_t flags = bio->bi_opf & REQ_NOWAIT ?
+ BLK_MQ_REQ_NOWAIT : 0;
- if (enter_succeeded) {
+ if (likely(blk_queue_enter(q, flags) == 0)) {
struct bio_list lower, same;
/* Create a fresh bio_list for all subordinate requests */
@@ -1069,6 +1020,8 @@ blk_qc_t generic_make_request(struct bio *bio)
bio_list_init(&bio_list_on_stack[0]);
ret = q->make_request_fn(q, bio);
+ blk_queue_exit(q);
+
/* sort new bios into those for a lower level
* and those for the same level
*/
@@ -1095,8 +1048,6 @@ blk_qc_t generic_make_request(struct bio *bio)
current->bio_list = NULL; /* deactivate */
out:
- if (q)
- blk_queue_exit(q);
return ret;
}
EXPORT_SYMBOL(generic_make_request);
@@ -1200,7 +1151,9 @@ static int blk_cloned_rq_check_limits(struct request_queue *q,
struct request *rq)
{
if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) {
- printk(KERN_ERR "%s: over max size limit.\n", __func__);
+ printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
+ __func__, blk_rq_sectors(rq),
+ blk_queue_get_max_sectors(q, req_op(rq)));
return -EIO;
}
@@ -1212,7 +1165,8 @@ static int blk_cloned_rq_check_limits(struct request_queue *q,
*/
blk_recalc_rq_segments(rq);
if (rq->nr_phys_segments > queue_max_segments(q)) {
- printk(KERN_ERR "%s: over max segments limit.\n", __func__);
+ printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
+ __func__, rq->nr_phys_segments, queue_max_segments(q));
return -EIO;
}
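
A minimal sketch of the reworked dispatch loop after the blk-core.c hunks above (not taken from the patch itself): each iteration now enters and exits the queue of the bio it is about to dispatch instead of carrying one reference across the whole loop. The failure branch below is an assumption modelled on the REQ_NOWAIT/dying handling that was removed from the top of generic_make_request(); the list handling is simplified.

	/*
	 * Sketch only; uses the kernel APIs referenced in the hunks above,
	 * but is not a quote of the resulting code.
	 */
	do {
		struct request_queue *q = bio->bi_disk->queue;
		blk_mq_req_flags_t flags = bio->bi_opf & REQ_NOWAIT ?
			BLK_MQ_REQ_NOWAIT : 0;

		if (likely(blk_queue_enter(q, flags) == 0)) {
			ret = q->make_request_fn(q, bio);  /* may queue further bios */
			blk_queue_exit(q);                 /* drop this bio's queue reference */
		} else if (!blk_queue_dying(q) && (bio->bi_opf & REQ_NOWAIT)) {
			bio_wouldblock_error(bio);         /* REQ_NOWAIT and the queue is frozen */
		} else {
			bio_io_error(bio);                 /* queue is dying */
		}
		/* simplified: the real loop first sorts new bios into lower/same levels */
		bio = bio_list_pop(&bio_list_on_stack[0]);
	} while (bio);
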
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index 48bebf00a5f3..f945621a0e8f 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -42,8 +42,8 @@ int blk_mq_map_queues(struct blk_mq_queue_map *qmap)
/*
* First do sequential mapping between CPUs and queues.
* In case we still have CPUs to map, and we have some number of
- * threads per cores then map sibling threads to the same queue for
- * performace optimizations.
+ * threads per cores then map sibling threads to the same queue
+ * for performance optimizations.
*/
if (cpu < nr_queues) {
map[cpu] = cpu_to_queue_index(qmap, nr_queues, cpu);
@@ -60,7 +60,11 @@ int blk_mq_map_queues(struct blk_mq_queue_map *qmap)
}
EXPORT_SYMBOL_GPL(blk_mq_map_queues);
-/*
+/**
+ * blk_mq_hw_queue_to_node - Look up the memory node for a hardware queue index
+ * @qmap: CPU to hardware queue map.
+ * @index: hardware queue index.
+ *
* We have no quick way of doing reverse lookups. This is only used at
* queue init time, so runtime isn't important.
*/
diff --git a/block/blk-mq-pci.c b/block/blk-mq-pci.c
index ad4545a2a98b..b595a94c4d16 100644
--- a/block/blk-mq-pci.c
+++ b/block/blk-mq-pci.c
@@ -13,7 +13,7 @@
/**
* blk_mq_pci_map_queues - provide a default queue mapping for PCI device
- * @set: tagset to provide the mapping for
+ * @qmap: CPU to hardware queue map.
* @pdev: PCI device associated with @set.
* @offset: Offset to use for the pci irq vector
*
diff --git a/block/blk-mq-rdma.c b/block/blk-mq-rdma.c
index cc921e6ba709..14f968e58b8f 100644
--- a/block/blk-mq-rdma.c
+++ b/block/blk-mq-rdma.c
@@ -8,8 +8,8 @@
/**
* blk_mq_rdma_map_queues - provide a default queue mapping for rdma device
- * @set: tagset to provide the mapping for
- * @dev: rdma device associated with @set.
+ * @map: CPU to hardware queue map.
+ * @dev: rdma device to provide a mapping for.
* @first_vec: first interrupt vectors to use for queues (usually 0)
*
* This function assumes the rdma device @dev has at least as many available
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 74c6bb871f7e..500cb04901cc 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -475,14 +475,18 @@ static int blk_mq_sched_alloc_tags(struct request_queue *q,
return ret;
}
+/* called in queue's release handler, tagset has gone away */
static void blk_mq_sched_tags_teardown(struct request_queue *q)
{
- struct blk_mq_tag_set *set = q->tag_set;
struct blk_mq_hw_ctx *hctx;
int i;
- queue_for_each_hw_ctx(q, hctx, i)
- blk_mq_sched_free_tags(set, hctx, i);
+ queue_for_each_hw_ctx(q, hctx, i) {
+ if (hctx->sched_tags) {
+ blk_mq_free_rq_map(hctx->sched_tags);
+ hctx->sched_tags = NULL;
+ }
+ }
}
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
@@ -523,6 +527,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
ret = e->ops.init_hctx(hctx, i);
if (ret) {
eq = q->elevator;
+ blk_mq_sched_free_requests(q);
blk_mq_exit_sched(q, eq);
kobject_put(&eq->kobj);
return ret;
@@ -534,11 +539,30 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
return 0;
err:
+ blk_mq_sched_free_requests(q);
blk_mq_sched_tags_teardown(q);
q->elevator = NULL;
return ret;
}
+/*
+ * called in either blk_queue_cleanup or elevator_switch, tagset
+ * is required for freeing requests
+ */
+void blk_mq_sched_free_requests(struct request_queue *q)
+{
+ struct blk_mq_hw_ctx *hctx;
+ int i;
+
+ lockdep_assert_held(&q->sysfs_lock);
+ WARN_ON(!q->elevator);
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ if (hctx->sched_tags)
+ blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i);
+ }
+}
+
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
struct blk_mq_hw_ctx *hctx;
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index c7bdb52367ac..3cf92cbbd8ac 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -28,6 +28,7 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
+void blk_mq_sched_free_requests(struct request_queue *q);
static inline bool
blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
diff --git a/block/blk-mq-virtio.c b/block/blk-mq-virtio.c
index 75a52c18a8f6..488341628256 100644
--- a/block/blk-mq-virtio.c
+++ b/block/blk-mq-virtio.c
@@ -11,8 +11,8 @@
/**
* blk_mq_virtio_map_queues - provide a default queue mapping for virtio device
- * @set: tagset to provide the mapping for
- * @vdev: virtio device associated with @set.
+ * @qmap: CPU to hardware queue map.
+ * @vdev: virtio device to provide a mapping for.
* @first_vec: first interrupt vectors to use for queues (usually 0)
*
* This function assumes the virtio device @vdev has at least as many available
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 32b8ad3d341b..ce0f5f4ede70 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2865,7 +2865,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
goto err_exit;
if (blk_mq_alloc_ctxs(q))
- goto err_exit;
+ goto err_poll;
/* init q->mq_kobj and sw queues' kobjects */
blk_mq_sysfs_init(q);
@@ -2929,6 +2929,9 @@ err_hctxs:
kfree(q->queue_hw_ctx);
err_sys_init:
blk_mq_sysfs_deinit(q);
+err_poll:
+ blk_stat_free_callback(q->poll_cb);
+ q->poll_cb = NULL;
err_exit:
q->mq_ops = NULL;
return ERR_PTR(-ENOMEM);
diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
index 3f55b56f24bc..659ccb8b693f 100644
--- a/block/blk-rq-qos.c
+++ b/block/blk-rq-qos.c
@@ -209,9 +209,10 @@ static int rq_qos_wake_function(struct wait_queue_entry *curr,
/**
* rq_qos_wait - throttle on a rqw if we need to
- * @private_data - caller provided specific data
- * @acquire_inflight_cb - inc the rqw->inflight counter if we can
- * @cleanup_cb - the callback to cleanup in case we race with a waker
+ * @rqw: rqw to throttle on
+ * @private_data: caller provided specific data
+ * @acquire_inflight_cb: inc the rqw->inflight counter if we can
+ * @cleanup_cb: the callback to cleanup in case we race with a waker
*
* This provides a uniform place for the rq_qos users to do their throttling.
* Since you can end up with a lot of things sleeping at once, this manages the
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index a16a02c52a85..977c659dcd18 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -840,6 +840,36 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head)
kmem_cache_free(blk_requestq_cachep, q);
}
+/* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
+static void blk_exit_queue(struct request_queue *q)
+{
+ /*
+ * Since the I/O scheduler exit code may access cgroup information,
+ * perform I/O scheduler exit before disassociating from the block
+ * cgroup controller.
+ */
+ if (q->elevator) {
+ ioc_clear_queue(q);
+ __elevator_exit(q, q->elevator);
+ q->elevator = NULL;
+ }
+
+ /*
+ * Remove all references to @q from the block cgroup controller before
+ * restoring @q->queue_lock to avoid that restoring this pointer causes
+ * e.g. blkcg_print_blkgs() to crash.
+ */
+ blkcg_exit_queue(q);
+
+ /*
+ * Since the cgroup code may dereference the @q->backing_dev_info
+ * pointer, only decrease its reference count after having removed the
+ * association with the block cgroup controller.
+ */
+ bdi_put(q->backing_dev_info);
+}
+
+
/**
* __blk_release_queue - release a request queue
* @work: pointer to the release_work member of the request queue to be released
@@ -860,23 +890,10 @@ static void __blk_release_queue(struct work_struct *work)
blk_stat_remove_callback(q, q->poll_cb);
blk_stat_free_callback(q->poll_cb);
- if (!blk_queue_dead(q)) {
- /*
- * Last reference was dropped without having called
- * blk_cleanup_queue().
- */
- WARN_ONCE(blk_queue_init_done(q),
- "request queue %p has been registered but blk_cleanup_queue() has not been called for that queue\n",
- q);
- blk_exit_queue(q);
- }
-
- WARN(blk_queue_root_blkg(q),
- "request queue %p is being released but it has not yet been removed from the blkcg controller\n",
- q);
-
blk_free_queue_stats(q->stats);
+ blk_exit_queue(q);
+
blk_queue_free_zone_bitmaps(q);
if (queue_is_mq(q))
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 1b97a73d2fb1..9ea7c0ecad10 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1220,7 +1220,7 @@ static bool throtl_can_upgrade(struct throtl_data *td,
struct throtl_grp *this_tg);
/**
* throtl_pending_timer_fn - timer function for service_queue->pending_timer
- * @arg: the throtl_service_queue being serviced
+ * @t: the pending_timer member of the throtl_service_queue being serviced
*
* This timer is armed when a child throtl_grp with active bio's become
* pending and queued on the service_queue's pending_tree and expires when
diff --git a/block/blk.h b/block/blk.h
index e27fd1512e4b..7814aa207153 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -6,6 +6,7 @@
#include <linux/blk-mq.h>
#include <xen/xen.h>
#include "blk-mq.h"
+#include "blk-mq-sched.h"
/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT (5 * HZ)
@@ -50,7 +51,6 @@ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
int node, int cmd_size, gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);
-void blk_exit_queue(struct request_queue *q);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio);
void blk_freeze_queue(struct request_queue *q);
@@ -177,10 +177,17 @@ void blk_insert_flush(struct request *rq);
int elevator_init_mq(struct request_queue *q);
int elevator_switch_mq(struct request_queue *q,
struct elevator_type *new_e);
-void elevator_exit(struct request_queue *, struct elevator_queue *);
+void __elevator_exit(struct request_queue *, struct elevator_queue *);
int elv_register_queue(struct request_queue *q);
void elv_unregister_queue(struct request_queue *q);
+static inline void elevator_exit(struct request_queue *q,
+ struct elevator_queue *e)
+{
+ blk_mq_sched_free_requests(q);
+ __elevator_exit(q, e);
+}
+
struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);
#ifdef CONFIG_FAIL_IO_TIMEOUT
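
A minimal sketch of how a teardown path would use the new elevator_exit() wrapper (hypothetical helper, not from the patch): scheduler-owned requests are returned to the tag_set pools before __elevator_exit() tears the scheduler down, with q->sysfs_lock held since blk_mq_sched_free_requests() asserts it.

	/* Hypothetical caller, for illustration only. */
	static void example_teardown_elevator(struct request_queue *q)
	{
		mutex_lock(&q->sysfs_lock);
		if (q->elevator) {
			ioc_clear_queue(q);
			/* frees sched-owned requests, then exits the scheduler */
			elevator_exit(q, q->elevator);
			q->elevator = NULL;
		}
		mutex_unlock(&q->sysfs_lock);
	}
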
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index b898a1cdf872..785dd58947f1 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -354,6 +354,7 @@ static const struct blk_mq_ops bsg_mq_ops = {
* @dev: device to attach bsg device to
* @name: device to give bsg device
* @job_fn: bsg job handler
+ * @timeout: timeout handler function pointer
* @dd_job_size: size of LLD data needed for each job
*/
struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
diff --git a/block/elevator.c b/block/elevator.c
index ec55d5fc0b3e..2f17d66d0e61 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -178,7 +178,7 @@ static void elevator_release(struct kobject *kobj)
kfree(e);
}
-void elevator_exit(struct request_queue *q, struct elevator_queue *e)
+void __elevator_exit(struct request_queue *q, struct elevator_queue *e)
{
mutex_lock(&e->sysfs_lock);
if (e->type->ops.exit_sched)
diff --git a/block/genhd.c b/block/genhd.c
index ad6826628e79..24654e1d83e6 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -532,8 +532,8 @@ void blk_free_devt(dev_t devt)
}
}
-/**
- * We invalidate devt by assigning NULL pointer for devt in idr.
+/*
+ * We invalidate devt by assigning NULL pointer for devt in idr.
*/
void blk_invalidate_devt(dev_t devt)
{
diff --git a/block/partitions/ldm.c b/block/partitions/ldm.c
index 6db573f33219..fe5d970e2e60 100644
--- a/block/partitions/ldm.c
+++ b/block/partitions/ldm.c
@@ -19,7 +19,7 @@
#include "check.h"
#include "msdos.h"
-/**
+/*
* ldm_debug/info/error/crit - Output an error message
* @f: A printf format string containing the message
* @...: Variables to substitute into @f