Diffstat (limited to 'block')
 block/blk-core.c     | 27
 block/blk-settings.c | 43
 block/cfq-iosched.c  |  7
 3 files changed, 47 insertions(+), 30 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index d17d71c71d4f..b06cf5c2a829 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -26,7 +26,6 @@
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
-#include <linux/blktrace_api.h>
#include <linux/fault-inject.h>
#define CREATE_TRACE_POINTS
@@ -498,6 +497,11 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
q->backing_dev_info.unplug_io_data = q;
+ q->backing_dev_info.ra_pages =
+ (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
+ q->backing_dev_info.state = 0;
+ q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
+
err = bdi_init(&q->backing_dev_info);
if (err) {
kmem_cache_free(blk_requestq_cachep, q);
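
With the backing_dev_info defaults moved into blk_alloc_queue_node(), a queue carries sane readahead and capability settings straight after allocation, whether or not blk_queue_make_request() is ever called. A minimal hedged sketch of a bio-based driver's setup path under that assumption; my_make_request() is a hypothetical driver function:

	struct request_queue *q;

	q = blk_alloc_queue_node(GFP_KERNEL, -1);	/* -1: no NUMA preference */
	if (!q)
		return -ENOMEM;

	/* the BDI readahead/capability defaults are already in place here */
	blk_queue_make_request(q, my_make_request);
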
@@ -884,9 +888,10 @@ EXPORT_SYMBOL(blk_get_request);
/**
* blk_make_request - given a bio, allocate a corresponding struct request.
- *
+ * @q: target request queue
* @bio: The bio describing the memory mappings that will be submitted for IO.
* It may be a chained-bio properly constructed by the block/bio layer.
+ * @gfp_mask: gfp flags to be used for memory allocation
*
* blk_make_request is the parallel of generic_make_request for BLOCK_PC
* type commands, where the struct request needs to be further initialized by
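
A hedged sketch of the intended call pattern, assuming a pass-through (BLOCK_PC) path with an already-built bio chain; the blk_execute_rq() dispatch and the bd_disk variable are illustrative, not mandated by this interface:

	struct request *rq;

	rq = blk_make_request(q, bio, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* the caller finishes initialization: command bytes, timeout, sense */
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	blk_execute_rq(q, bd_disk, rq, 0);
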
@@ -1872,14 +1877,14 @@ EXPORT_SYMBOL(blk_fetch_request);
/**
* blk_update_request - Special helper function for request stacking drivers
- * @rq: the request being processed
+ * @req: the request being processed
* @error: %0 for success, < %0 for error
- * @nr_bytes: number of bytes to complete @rq
+ * @nr_bytes: number of bytes to complete @req
*
* Description:
- * Ends I/O on a number of bytes attached to @rq, but doesn't complete
- * the request structure even if @rq doesn't have leftover.
- * If @rq has leftover, sets it up for the next range of segments.
+ * Ends I/O on a number of bytes attached to @req, but doesn't complete
+ * the request structure even if @req doesn't have leftover.
+ * If @req has leftover, sets it up for the next range of segments.
*
* This special helper function is only for request stacking drivers
* (e.g. request-based dm) so that they can handle partial completion.
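
A hedged sketch of the partial-completion pattern this helper exists for, modeled loosely on request-based dm; the function and variable names are illustrative:

	/*
	 * Propagate @nr_bytes of a clone's completion back to the original
	 * request. blk_update_request() returns true while @orig still has
	 * leftover bytes, having re-set it up for the next segment range.
	 */
	static void stack_complete(struct request *orig, int error,
				   unsigned int nr_bytes)
	{
		if (blk_update_request(orig, error, nr_bytes))
			return;		/* partial: more segments to come */

		blk_end_request_all(orig, error); /* no leftover: finish it */
	}
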
@@ -2145,7 +2150,7 @@ EXPORT_SYMBOL_GPL(blk_end_request);
/**
* blk_end_request_all - Helper function for drivers to finish the request.
* @rq: the request to finish
- * @err: %0 for success, < %0 for error
+ * @error: %0 for success, < %0 for error
*
* Description:
* Completely finish @rq.
@@ -2166,7 +2171,7 @@ EXPORT_SYMBOL_GPL(blk_end_request_all);
/**
* blk_end_request_cur - Helper function to finish the current request chunk.
* @rq: the request to finish the current chunk for
- * @err: %0 for success, < %0 for error
+ * @error: %0 for success, < %0 for error
*
* Description:
* Complete the current consecutively mapped chunk from @rq.
@@ -2203,7 +2208,7 @@ EXPORT_SYMBOL_GPL(__blk_end_request);
/**
* __blk_end_request_all - Helper function for drivers to finish the request.
* @rq: the request to finish
- * @err: %0 for success, < %0 for error
+ * @error: %0 for success, < %0 for error
*
* Description:
* Completely finish @rq. Must be called with queue lock held.
@@ -2224,7 +2229,7 @@ EXPORT_SYMBOL_GPL(__blk_end_request_all);
/**
* __blk_end_request_cur - Helper function to finish the current request chunk.
* @rq: the request to finish the current chunk for
- * @err: %0 for success, < %0 for error
+ * @error: %0 for success, < %0 for error
*
* Description:
* Complete the current consecutively mapped chunk from @rq. Must
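
All four helpers take an errno-style status as their second parameter, which the corrected @error naming now reflects. A hedged sketch of a trivial driver finishing one chunk per hardware event; the toy_* names are hypothetical, and the __blk_* variants must run with the queue lock held:

	struct toy_dev {			/* hypothetical driver state */
		struct request_queue *queue;
		struct request *cur_rq;
	};

	static void toy_complete(struct toy_dev *dev, struct request *rq, bool ok)
	{
		int error = ok ? 0 : -EIO;

		spin_lock_irq(dev->queue->queue_lock);
		if (!__blk_end_request_cur(rq, error))
			dev->cur_rq = NULL;	/* whole request finished */
		spin_unlock_irq(dev->queue->queue_lock);
	}
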
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 1c4df9bf6813..7541ea4bf9fe 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -96,6 +96,31 @@ void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
/**
+ * blk_set_default_limits - reset limits to default values
+ * @lim: the queue_limits structure to reset
+ *
+ * Description:
+ * Returns a queue_limits struct to its default state. Can be used by
+ * stacking drivers like DM that stage table swaps and reuse an
+ * existing device queue.
+ */
+void blk_set_default_limits(struct queue_limits *lim)
+{
+ lim->max_phys_segments = MAX_PHYS_SEGMENTS;
+ lim->max_hw_segments = MAX_HW_SEGMENTS;
+ lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
+ lim->max_segment_size = MAX_SEGMENT_SIZE;
+ lim->max_sectors = lim->max_hw_sectors = SAFE_MAX_SECTORS;
+ lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
+ lim->bounce_pfn = BLK_BOUNCE_ANY;
+ lim->alignment_offset = 0;
+ lim->io_opt = 0;
+ lim->misaligned = 0;
+ lim->no_cluster = 0;
+}
+EXPORT_SYMBOL(blk_set_default_limits);
+
+/**
* blk_queue_make_request - define an alternate make_request function for a device
* @q: the request queue for the device to be affected
* @mfn: the alternate make_request function
@@ -123,18 +148,8 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
* set defaults
*/
q->nr_requests = BLKDEV_MAX_RQ;
- blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
- blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
- blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
- blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
q->make_request_fn = mfn;
- q->backing_dev_info.ra_pages =
- (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
- q->backing_dev_info.state = 0;
- q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
- blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
- blk_queue_logical_block_size(q, 512);
blk_queue_dma_alignment(q, 511);
blk_queue_congestion_threshold(q);
q->nr_batching = BLK_BATCH_REQ;
@@ -147,6 +162,8 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
q->unplug_timer.function = blk_unplug_timeout;
q->unplug_timer.data = (unsigned long)q;
+ blk_set_default_limits(&q->limits);
+
/*
* by default assume old behaviour and bounce for any highmem page
*/
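
blk_queue_make_request() now funnels its former per-field defaults through blk_set_default_limits(). A hedged sketch of the DM-style table swap the helper's description alludes to: reset a scratch queue_limits, fold in every underlying device, then publish the result. blk_stack_limits() is from the same era of blk-settings.c; the dd/table iteration is illustrative:

	struct queue_limits limits;

	blk_set_default_limits(&limits);

	/* fold in the limits of each underlying device in the new table */
	list_for_each_entry(dd, &table->devices, list)
		blk_stack_limits(&limits, &bdev_get_queue(dd->bdev)->limits, 0);

	q->limits = limits;		/* swap in the recomputed limits */
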
@@ -343,7 +360,7 @@ EXPORT_SYMBOL(blk_queue_physical_block_size);
/**
* blk_queue_alignment_offset - set physical block alignment offset
* @q: the request queue for the device
- * @alignment: alignment offset in bytes
+ * @offset: alignment offset in bytes
*
* Description:
* Some devices are naturally misaligned to compensate for things like
@@ -362,7 +379,7 @@ EXPORT_SYMBOL(blk_queue_alignment_offset);
/**
* blk_queue_io_min - set minimum request size for the queue
* @q: the request queue for the device
- * @io_min: smallest I/O size in bytes
+ * @min: smallest I/O size in bytes
*
* Description:
* Some devices have an internal block size bigger than the reported
@@ -385,7 +402,7 @@ EXPORT_SYMBOL(blk_queue_io_min);
/**
* blk_queue_io_opt - set optimal request size for the queue
* @q: the request queue for the device
- * @io_opt: optimal request size in bytes
+ * @opt: optimal request size in bytes
*
* Description:
* Drivers can call this function to set the preferred I/O request
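
Taken together with the logical/physical block size setters above, these calls describe a device's I/O topology. A hedged sketch of a driver advertising a 4KiB-physical-sector disk behind a striped array; all values are made up:

	blk_queue_logical_block_size(q, 512);	/* smallest addressable unit */
	blk_queue_physical_block_size(q, 4096);	/* internal atomic unit */
	blk_queue_alignment_offset(q, 3584);	/* first 4KiB boundary sits 3584 bytes in */
	blk_queue_io_min(q, 4096);		/* never issue below a physical block */
	blk_queue_io_opt(q, 65536);		/* e.g. the array's stripe width */
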
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index ef2f72d42434..833ec18eaa63 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -122,7 +122,6 @@ struct cfq_data {
struct cfq_queue *async_idle_cfqq;
sector_t last_position;
- unsigned long last_end_request;
/*
* tunables, see top of file
@@ -1253,7 +1252,7 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
BUG_ON(cfqd->busy_queues);
- cfq_log(cfqd, "forced_dispatch=%d\n", dispatched);
+ cfq_log(cfqd, "forced_dispatch=%d", dispatched);
return dispatched;
}
@@ -2164,9 +2163,6 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
if (cfq_cfqq_sync(cfqq))
cfqd->sync_flight--;
- if (!cfq_class_idle(cfqq))
- cfqd->last_end_request = now;
-
if (sync)
RQ_CIC(rq)->last_end_request = now;
@@ -2479,7 +2475,6 @@ static void *cfq_init_queue(struct request_queue *q)
INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
- cfqd->last_end_request = jiffies;
cfqd->cfq_quantum = cfq_quantum;
cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];