author		Linus Torvalds <torvalds@linux-foundation.org>	2012-12-17 08:27:23 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-17 08:27:23 -0800
commit		60da5bf47dd3d301a1d3bd4f0a4b9e29a184515c (patch)
tree		30de83370440aae5350d9ab3fbe6583abd439ee8 /include
parent		Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux (diff)
parent		block: export block_unplug tracepoint (diff)
Merge branch 'for-3.8/core' of git://git.kernel.dk/linux-block
Pull block layer core updates from Jens Axboe:
 "Here are the core block IO bits for 3.8.  The branch contains:

   - The final version of the surprise device removal fixups from Bart.

   - Don't hide EFI partitions under advanced partition types.  It's
     fairly wide spread these days.  This is especially dangerous for
     systems that have both msdos and efi partition tables, where you
     want to keep them in sync.

   - Cleanup of using -1 instead of the proper NUMA_NO_NODE

   - Export control of bdi flusher thread CPU mask and default to using
     the home node (if known) from Jeff.

   - Export unplug tracepoint for MD.

   - Core improvements from Shaohua.  Reinstate the recursive merge, as
     the original bug has been fixed.  Add plugging for discard and also
     fix a problem handling non pow-of-2 discard limits (illustrated
     right after the diffstat below).

  There's a trivial merge in block/blk-exec.c due to a fix that went
  into 3.7-rc at a later point than -rc4 where this is based."

* 'for-3.8/core' of git://git.kernel.dk/linux-block:
  block: export block_unplug tracepoint
  block: add plug for blkdev_issue_discard
  block: discard granularity might not be power of 2
  deadline: Allow 0ms deadline latency, increase the read speed
  partitions: enable EFI/GPT support by default
  bsg: Remove unused function bsg_goose_queue()
  block: Make blk_cleanup_queue() wait until request_fn finished
  block: Avoid scheduling delayed work on a dead queue
  block: Avoid that request_fn is invoked on a dead queue
  block: Let blk_drain_queue() caller obtain the queue lock
  block: Rename queue dead flag
  bdi: add a user-tunable cpu_list for the bdi flusher threads
  block: use NUMA_NO_NODE instead of -1
  block: recursive merge requests
  block CFQ: avoid moving request to different queue
Diffstat (limited to 'include')
-rw-r--r--	include/linux/backing-dev.h	4
-rw-r--r--	include/linux/blkdev.h		17
-rw-r--r--	include/linux/bsg-lib.h		1
3 files changed, 17 insertions, 5 deletions
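
A quick illustration (not part of the patch set) of the "non pow-of-2 discard limits" item from the pull message: the last blkdev.h hunk below rewrites queue_limit_discard_alignment(), whose old bitmask math only yields a remainder when discard_granularity is a power of two. The standalone userspace snippet works through one counterexample; the in-kernel change obtains the same true remainder from sector_div(), which divides its first argument in place and returns the remainder.

#include <stdio.h>

int main(void)
{
	/* A 3-sector (1536-byte) granularity is legal but not a power of two. */
	unsigned long long granularity = 1536;
	unsigned long long offset = 5ULL << 9;	/* request starts at sector 5 */

	/* Old math: correct only for power-of-two granularities. */
	printf("bitmask:   %llu\n", offset & (granularity - 1));	/* 0 */

	/* New math: the true remainder, as sector_div() computes in the kernel. */
	printf("remainder: %llu\n", offset % granularity);		/* 1024 */

	return 0;
}

With the mask, a request starting 1024 bytes into a granule looks perfectly aligned; the remainder-based version reports the real 1024-byte misalignment, so the computed discard alignment is correct for such devices.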
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 2a9a9abc9126..238521a19849 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -18,6 +18,7 @@
#include <linux/writeback.h>
#include <linux/atomic.h>
#include <linux/sysctl.h>
+#include <linux/mutex.h>
struct page;
struct device;
@@ -105,6 +106,9 @@ struct backing_dev_info {
struct timer_list laptop_mode_wb_timer;
+ cpumask_t *flusher_cpumask; /* used for writeback thread scheduling */
+ struct mutex flusher_cpumask_lock;
+
#ifdef CONFIG_DEBUG_FS
struct dentry *debug_dir;
struct dentry *debug_stats;
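
The two fields added to struct backing_dev_info above back the "bdi: add a user-tunable cpu_list for the bdi flusher threads" commit from the shortlog: flusher_cpumask holds the CPUs the flusher may run on, and flusher_cpumask_lock serializes updates against readers. As a hedged sketch of a consumer, not code taken from the series, the helper below (hypothetical bdi_apply_flusher_cpumask, with the flusher task supplied by the caller) rebinds a flusher thread to the configured mask while holding the new mutex:

#include <linux/backing-dev.h>
#include <linux/sched.h>
#include <linux/mutex.h>

/* Hypothetical helper; illustrates only the locking around flusher_cpumask. */
static int bdi_apply_flusher_cpumask(struct backing_dev_info *bdi,
				     struct task_struct *flusher)
{
	int ret;

	mutex_lock(&bdi->flusher_cpumask_lock);
	/* Rebind the flusher to whatever CPUs the mask currently allows. */
	ret = set_cpus_allowed_ptr(flusher, bdi->flusher_cpumask);
	mutex_unlock(&bdi->flusher_cpumask_lock);

	return ret;
}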
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1756001210d2..acb4f7bbbd32 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -378,6 +378,12 @@ struct request_queue {
unsigned int nr_sorted;
unsigned int in_flight[2];
+ /*
+ * Number of active block driver functions for which blk_drain_queue()
+ * must wait. Must be incremented around functions that unlock the
+ * queue_lock internally, e.g. scsi_request_fn().
+ */
+ unsigned int request_fn_active;
unsigned int rq_timeout;
struct timer_list timeout;
@@ -437,7 +443,7 @@ struct request_queue {
#define QUEUE_FLAG_STOPPED 2 /* queue is stopped */
#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */
-#define QUEUE_FLAG_DEAD 5 /* queue being torn down */
+#define QUEUE_FLAG_DYING 5 /* queue being torn down */
#define QUEUE_FLAG_BYPASS 6 /* act as dumb FIFO queue */
#define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */
@@ -452,6 +458,7 @@ struct request_queue {
#define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */
#define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */
#define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */
+#define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
@@ -521,6 +528,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
+#define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
@@ -1180,13 +1188,14 @@ static inline int queue_discard_alignment(struct request_queue *q)
static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
{
- unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);
+ sector_t alignment = sector << 9;
+ alignment = sector_div(alignment, lim->discard_granularity);
if (!lim->max_discard_sectors)
return 0;
- return (lim->discard_granularity + lim->discard_alignment - alignment)
- & (lim->discard_granularity - 1);
+ alignment = lim->discard_granularity + lim->discard_alignment - alignment;
+ return sector_div(alignment, lim->discard_granularity);
}
static inline int bdev_discard_alignment(struct block_device *bdev)
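
Taken together, the blkdev.h hunks above split the old single DEAD state in two: QUEUE_FLAG_DYING marks a queue whose tear-down has started, QUEUE_FLAG_DEAD one whose tear-down has finished, and blk_queue_dying() is the new test alongside blk_queue_dead(). Below is a hedged sketch, not taken from the series, of how a driver's request strategy function (hypothetical example_request_fn) might use the new predicate; request_fn is entered with q->queue_lock held, which __blk_end_request_all() requires:

#include <linux/blkdev.h>
#include <linux/errno.h>

/* Hypothetical request_fn: refuse new work once the queue starts dying. */
static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		if (blk_queue_dying(q)) {
			/* Tear-down in progress: fail instead of issuing to hardware. */
			__blk_end_request_all(rq, -ENODEV);
			continue;
		}
		/* ... hand rq to the hardware ... */
	}
}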
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h
index 4d0fb3df2f4a..a226652a5a6c 100644
--- a/include/linux/bsg-lib.h
+++ b/include/linux/bsg-lib.h
@@ -67,6 +67,5 @@ void bsg_job_done(struct bsg_job *job, int result,
int bsg_setup_queue(struct device *dev, struct request_queue *q, char *name,
bsg_job_fn *job_fn, int dd_job_size);
void bsg_request_fn(struct request_queue *q);
-void bsg_goose_queue(struct request_queue *q);
#endif