Diffstat (limited to 'block')
 block/blk-barrier.c  |  4
 block/blk-core.c     | 30
 block/blk-map.c      |  8
 block/blk-merge.c    | 21
 block/blk-settings.c |  4
 block/blk-timeout.c  | 20
 block/bsg.c          |  2
 block/compat_ioctl.c | 31
 block/elevator.c     |  7
 block/genhd.c        |  4
 block/ioctl.c        |  7
 block/scsi_ioctl.c   |  2
 12 files changed, 82 insertions(+), 58 deletions(-)
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 5c99ff8d2db8..6e72d661ae42 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -161,7 +161,7 @@ static inline struct request *start_ordered(struct request_queue *q,
/*
* Prep proxy barrier request.
*/
- blkdev_dequeue_request(rq);
+ elv_dequeue_request(q, rq);
q->orig_bar_rq = rq;
rq = &q->bar_rq;
blk_rq_init(q, rq);
@@ -219,7 +219,7 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
* This can happen when the queue switches to
* ORDERED_NONE while this request is on it.
*/
- blkdev_dequeue_request(rq);
+ elv_dequeue_request(q, rq);
if (__blk_end_request(rq, -EOPNOTSUPP,
blk_rq_bytes(rq)))
BUG();
diff --git a/block/blk-core.c b/block/blk-core.c
index c3df30cfb3fc..c36aa98fafa3 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -592,7 +592,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
1 << QUEUE_FLAG_STACKABLE);
q->queue_lock = lock;
- blk_queue_segment_boundary(q, 0xffffffff);
+ blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
blk_queue_make_request(q, __make_request);
blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
@@ -1637,6 +1637,28 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
/**
+ * blkdev_dequeue_request - dequeue request and start timeout timer
+ * @req: request to dequeue
+ *
+ * Dequeue @req and start timeout timer on it. This hands off the
+ * request to the driver.
+ *
+ * Block internal functions which don't want to start timer should
+ * call elv_dequeue_request().
+ */
+void blkdev_dequeue_request(struct request *req)
+{
+ elv_dequeue_request(req->q, req);
+
+ /*
+ * We are now handing the request to the hardware, add the
+ * timeout handler.
+ */
+ blk_add_timer(req);
+}
+EXPORT_SYMBOL(blkdev_dequeue_request);
+
+/**
* __end_that_request_first - end I/O on a request
* @req: the request being processed
* @error: %0 for success, < %0 for error
@@ -1770,17 +1792,17 @@ static void end_that_request_last(struct request *req, int error)
{
struct gendisk *disk = req->rq_disk;
- blk_delete_timer(req);
-
if (blk_rq_tagged(req))
blk_queue_end_tag(req->q, req);
if (blk_queued_rq(req))
- blkdev_dequeue_request(req);
+ elv_dequeue_request(req->q, req);
if (unlikely(laptop_mode) && blk_fs_request(req))
laptop_io_completion();
+ blk_delete_timer(req);
+
/*
* Account IO completion. bar_rq isn't accounted as a normal
* IO on queueing nor completion. Accounting the containing
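
The net effect of the blk-core.c changes: blkdev_dequeue_request() is now an exported function that dequeues a request and arms its timeout timer, while block-internal paths that must not start the timer call elv_dequeue_request() directly. A minimal sketch of the driver-side pattern this supports; example_request_fn and example_hw_submit are hypothetical names, not part of the patch:

static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		/* Hand-off point: dequeues the request and, with this
		 * patch, also starts its timeout timer. */
		blkdev_dequeue_request(rq);
		example_hw_submit(rq);		/* hypothetical submission */
	}
}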
diff --git a/block/blk-map.c b/block/blk-map.c
index 4849fa36161e..2990447f45e9 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -217,8 +217,14 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
return PTR_ERR(bio);
if (bio->bi_size != len) {
+ /*
+ * Grab an extra reference to this bio, as bio_unmap_user()
+ * expects to be able to drop it twice as it happens on the
+ * normal IO completion path
+ */
+ bio_get(bio);
bio_endio(bio, 0);
- bio_unmap_user(bio);
+ __blk_rq_unmap_user(bio);
return -EINVAL;
}
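
A sketch of the repaired error path in isolation; the reference-count reasoning in the comments is an assumption for illustration, not verified against fs/bio.c of this era:

static int example_partial_map_cleanup(struct bio *bio)
{
	/* Extra reference so the bio survives completion; the unmap
	 * helper below then drops references the same way the normal
	 * IO completion path does. */
	bio_get(bio);
	bio_endio(bio, 0);
	__blk_rq_unmap_user(bio);
	return -EINVAL;
}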
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 8681cd6f9911..b92f5b0866b0 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -222,27 +222,6 @@ new_segment:
}
EXPORT_SYMBOL(blk_rq_map_sg);
-static inline int ll_new_mergeable(struct request_queue *q,
- struct request *req,
- struct bio *bio)
-{
- int nr_phys_segs = bio_phys_segments(q, bio);
-
- if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
- req->cmd_flags |= REQ_NOMERGE;
- if (req == q->last_merge)
- q->last_merge = NULL;
- return 0;
- }
-
- /*
- * A hw segment is just getting larger, bump just the phys
- * counter.
- */
- req->nr_phys_segments += nr_phys_segs;
- return 1;
-}
-
static inline int ll_new_hw_segment(struct request_queue *q,
struct request *req,
struct bio *bio)
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 41392fbe19ff..afa55e14e278 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -125,6 +125,9 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
q->nr_requests = BLKDEV_MAX_RQ;
blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
+ blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
+ blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
+
q->make_request_fn = mfn;
q->backing_dev_info.ra_pages =
(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
@@ -314,6 +317,7 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
/* zero is "infinity" */
t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
+ t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, b->seg_boundary_mask);
t->max_phys_segments = min(t->max_phys_segments, b->max_phys_segments);
t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments);
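
The new seg_boundary_mask line uses min_not_zero(), where zero means "no limit" (hence the "zero is infinity" comment above). A semantics sketch of the helper, which really lives in include/linux/kernel.h:

static unsigned long example_min_not_zero(unsigned long a, unsigned long b)
{
	if (a == 0)
		return b;		/* a imposes no limit */
	if (b == 0)
		return a;		/* b imposes no limit */
	return a < b ? a : b;		/* otherwise the stricter wins */
}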
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 972a63f848fb..69185ea9fae2 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -75,14 +75,7 @@ void blk_delete_timer(struct request *req)
{
struct request_queue *q = req->q;
- /*
- * Nothing to detach
- */
- if (!q->rq_timed_out_fn || !req->deadline)
- return;
-
list_del_init(&req->timeout_list);
-
if (list_empty(&q->timeout_list))
del_timer(&q->timeout);
}
@@ -142,7 +135,7 @@ void blk_rq_timed_out_timer(unsigned long data)
}
if (next_set && !list_empty(&q->timeout_list))
- mod_timer(&q->timeout, round_jiffies(next));
+ mod_timer(&q->timeout, round_jiffies_up(next));
spin_unlock_irqrestore(q->queue_lock, flags);
}
@@ -198,17 +191,10 @@ void blk_add_timer(struct request *req)
/*
* If the timer isn't already pending or this timeout is earlier
- * than an existing one, modify the timer. Round to next nearest
+ * than an existing one, modify the timer. Round up to next nearest
* second.
*/
- expiry = round_jiffies(req->deadline);
-
- /*
- * We use ->deadline == 0 to detect whether a timer was added or
- * not, so just increase to next jiffy for that specific case
- */
- if (unlikely(!req->deadline))
- req->deadline = 1;
+ expiry = round_jiffies_up(req->deadline);
if (!timer_pending(&q->timeout) ||
time_before(expiry, q->timeout.expires))
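
The switch to round_jiffies_up() matters because round_jiffies() rounds to the nearest whole second and can therefore round down, letting a timeout fire before the request's deadline. An illustration, assuming HZ = 1000:

/*
 * deadline = jiffies + 30 * HZ + 400;        expires in 30.4 s
 *
 * round_jiffies(deadline)    -> may yield ~30.0 s: the timer can
 *                               fire before the deadline
 * round_jiffies_up(deadline) -> yields ~31.0 s: never early, at
 *                               most one extra second of slack
 */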
diff --git a/block/bsg.c b/block/bsg.c
index e8bd2475682a..e73e50daf3d0 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -202,6 +202,8 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
rq->timeout = q->sg_timeout;
if (!rq->timeout)
rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
+ if (rq->timeout < BLK_MIN_SG_TIMEOUT)
+ rq->timeout = BLK_MIN_SG_TIMEOUT;
return 0;
}
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 3d3e7a46f38c..67eb93cff699 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -677,6 +677,29 @@ static int compat_blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
case DVD_WRITE_STRUCT:
case DVD_AUTH:
arg = (unsigned long)compat_ptr(arg);
+ /* These interpret arg as an unsigned long, not as a pointer,
+ * so we must not do compat_ptr() conversion. */
+ case HDIO_SET_MULTCOUNT:
+ case HDIO_SET_UNMASKINTR:
+ case HDIO_SET_KEEPSETTINGS:
+ case HDIO_SET_32BIT:
+ case HDIO_SET_NOWERR:
+ case HDIO_SET_DMA:
+ case HDIO_SET_PIO_MODE:
+ case HDIO_SET_NICE:
+ case HDIO_SET_WCACHE:
+ case HDIO_SET_ACOUSTIC:
+ case HDIO_SET_BUSSTATE:
+ case HDIO_SET_ADDRESS:
+ case CDROMEJECT_SW:
+ case CDROM_SET_OPTIONS:
+ case CDROM_CLEAR_OPTIONS:
+ case CDROM_SELECT_SPEED:
+ case CDROM_SELECT_DISC:
+ case CDROM_MEDIA_CHANGED:
+ case CDROM_DRIVE_STATUS:
+ case CDROM_LOCKDOOR:
+ case CDROM_DEBUG:
break;
default:
/* unknown ioctl number */
@@ -699,8 +722,14 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
struct backing_dev_info *bdi;
loff_t size;
+ /*
+ * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have
+ * to update it before every ioctl.
+ */
if (file->f_flags & O_NDELAY)
- mode |= FMODE_NDELAY_NOW;
+ mode |= FMODE_NDELAY;
+ else
+ mode &= ~FMODE_NDELAY;
switch (cmd) {
case HDIO_GETGEO:
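
FMODE_NDELAY must be recomputed on every call because userspace can flip O_NONBLOCK (O_NDELAY) on an open descriptor at any time. A userspace sketch of the sequence the kernel now handles; the function name is hypothetical and the ioctl command is a placeholder:

#include <fcntl.h>
#include <sys/ioctl.h>

int toggle_then_ioctl(int fd, unsigned long cmd, void *argp)
{
	int flags = fcntl(fd, F_GETFL);

	/* Flip O_NONBLOCK after open(); the very next ioctl must
	 * observe the new state. */
	fcntl(fd, F_SETFL, flags | O_NONBLOCK);
	return ioctl(fd, cmd, argp);
}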
diff --git a/block/elevator.c b/block/elevator.c
index 59173a69ebdf..a6951f76ba0c 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -773,12 +773,6 @@ struct request *elv_next_request(struct request_queue *q)
*/
rq->cmd_flags |= REQ_STARTED;
blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
-
- /*
- * We are now handing the request to the hardware,
- * add the timeout handler
- */
- blk_add_timer(rq);
}
if (!q->boundary_rq || q->boundary_rq == rq) {
@@ -851,7 +845,6 @@ void elv_dequeue_request(struct request_queue *q, struct request *rq)
if (blk_account_rq(rq))
q->in_flight++;
}
-EXPORT_SYMBOL(elv_dequeue_request);
int elv_queue_empty(struct request_queue *q)
{
diff --git a/block/genhd.c b/block/genhd.c
index 4e5e7493f676..2f7feda61e35 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -768,6 +768,8 @@ static int __init genhd_device_init(void)
bdev_map = kobj_map_init(base_probe, &block_class_lock);
blk_dev_init();
+ register_blkdev(BLOCK_EXT_MAJOR, "blkext");
+
#ifndef CONFIG_SYSFS_DEPRECATED
/* create top-level block dir */
block_depr = kobject_create_and_add("block", NULL);
@@ -1100,6 +1102,7 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
kfree(disk);
return NULL;
}
+ disk->node_id = node_id;
if (disk_expand_part_tbl(disk, 0)) {
free_part_stats(&disk->part0);
kfree(disk);
@@ -1114,7 +1117,6 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
device_initialize(disk_to_dev(disk));
INIT_WORK(&disk->async_notify,
media_change_notify_thread);
- disk->node_id = node_id;
}
return disk;
}
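
The node_id assignment moves above disk_expand_part_tbl(), presumably because the partition-table allocation is NUMA-aware and reads disk->node_id. A sketch of that assumed dependency, not code from the patch:

static int example_expand_part_tbl(struct gendisk *disk, int target)
{
	/* NUMA-local allocation keyed off the field that must now be
	 * initialized before this function runs (assumption). */
	struct disk_part_tbl *ptbl =
		kzalloc_node(sizeof(*ptbl), GFP_KERNEL, disk->node_id);

	if (!ptbl)
		return -ENOMEM;
	/* ... the real code installs ptbl under the proper locking ... */
	return 0;
}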
diff --git a/block/ioctl.c b/block/ioctl.c
index c832d639b6e2..d03985b04d67 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -18,7 +18,6 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
struct disk_part_iter piter;
long long start, length;
int partno;
- int err;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -61,10 +60,10 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
disk_part_iter_exit(&piter);
/* all seems OK */
- err = add_partition(disk, partno, start, length,
- ADDPART_FLAG_NONE);
+ part = add_partition(disk, partno, start, length,
+ ADDPART_FLAG_NONE);
mutex_unlock(&bdev->bd_mutex);
- return err;
+ return IS_ERR(part) ? PTR_ERR(part) : 0;
case BLKPG_DEL_PARTITION:
part = disk_get_part(disk, partno);
if (!part)
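
add_partition() now returns a pointer or an encoded errno rather than an int, so the caller decodes it with the standard helpers from include/linux/err.h. The convention in isolation:

static long example_err_ptr(void)
{
	/* An errno is encoded in the pointer value itself, so one
	 * return slot carries both a valid pointer and an error. */
	void *p = ERR_PTR(-ENOMEM);		/* encode failure  */

	if (IS_ERR(p))
		return PTR_ERR(p);		/* decode: -ENOMEM */
	return 0;
}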
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 5963cf91a3a0..d0bb92cbefb9 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -208,6 +208,8 @@ static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
rq->timeout = q->sg_timeout;
if (!rq->timeout)
rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
+ if (rq->timeout < BLK_MIN_SG_TIMEOUT)
+ rq->timeout = BLK_MIN_SG_TIMEOUT;
return 0;
}
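
Both blk_fill_sghdr_rq() here and blk_fill_sgv4_hdr_rq() in bsg.c above now enforce the same floor on user-supplied SG timeouts: zero falls back to the default, and anything below BLK_MIN_SG_TIMEOUT is raised to it. The two-step logic collapses into one expression (an illustrative rewrite, not the patch's code):

rq->timeout = max_t(unsigned int,
		    rq->timeout ?: BLK_DEFAULT_SG_TIMEOUT,
		    BLK_MIN_SG_TIMEOUT);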