Diffstat (limited to 'block')
-rw-r--r--  block/bio.c           | 43
-rw-r--r--  block/blk-cgroup.c    | 20
-rw-r--r--  block/blk-core.c      | 20
-rw-r--r--  block/blk-flush.c     |  5
-rw-r--r--  block/blk-iocost.c    | 13
-rw-r--r--  block/blk-map.c       |  2
-rw-r--r--  block/blk-merge.c     | 18
-rw-r--r--  block/blk.h           |  1
-rw-r--r--  block/bsg-lib.c       |  2
-rw-r--r--  block/compat_ioctl.c  | 16
10 files changed, 95 insertions, 45 deletions
diff --git a/block/bio.c b/block/bio.c
index 9d54aa37ce6c..006bcc52a77e 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -538,6 +538,45 @@ void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
}
EXPORT_SYMBOL(zero_fill_bio_iter);
+void bio_truncate(struct bio *bio, unsigned new_size)
+{
+ struct bio_vec bv;
+ struct bvec_iter iter;
+ unsigned int done = 0;
+ bool truncated = false;
+
+ if (new_size >= bio->bi_iter.bi_size)
+ return;
+
+ if (bio_data_dir(bio) != READ)
+ goto exit;
+
+ bio_for_each_segment(bv, bio, iter) {
+ if (done + bv.bv_len > new_size) {
+ unsigned offset;
+
+ if (!truncated)
+ offset = new_size - done;
+ else
+ offset = 0;
+ zero_user(bv.bv_page, offset, bv.bv_len - offset);
+ truncated = true;
+ }
+ done += bv.bv_len;
+ }
+
+ exit:
+ /*
+ * Don't touch bvec table here and make it really immutable, since
+ * fs bio user has to retrieve all pages via bio_for_each_segment_all
+ * in its .end_bio() callback.
+ *
+ * It is enough to truncate bio by updating .bi_size since we can make
+ * correct bvec with the updated .bi_size for drivers.
+ */
+ bio->bi_iter.bi_size = new_size;
+}
+
/**
* bio_put - release a reference to a bio
* @bio: bio to release reference to
@@ -754,10 +793,12 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page,
if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
return false;
- if (bio->bi_vcnt > 0 && !bio_full(bio, len)) {
+ if (bio->bi_vcnt > 0) {
struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
if (page_is_mergeable(bv, page, len, off, same_page)) {
+ if (bio->bi_iter.bi_size > UINT_MAX - len)
+ return false;
bv->bv_len += len;
bio->bi_iter.bi_size += len;
return true;
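
The new bio_truncate() above zero-fills the truncated tail of READ bios and then only shrinks .bi_size, leaving the bvec table untouched. A hypothetical caller that clamps a read at the device capacity might look like the sketch below; the helper name and its capacity check are invented for illustration and are not part of this patch.

#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Hypothetical helper: clamp a READ bio that extends past the last sector
 * of the device.  bio_truncate() shrinks .bi_size and zero-fills the
 * now-unreachable tail of the read buffer, so the caller never sees stale
 * memory for the truncated range.
 */
static void clamp_bio_to_capacity(struct bio *bio, sector_t capacity)
{
	sector_t start = bio->bi_iter.bi_sector;

	/* assume the caller already rejected bios starting past the end */
	if (start + bio_sectors(bio) <= capacity)
		return;

	bio_truncate(bio, (capacity - start) << SECTOR_SHIFT);
}
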
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 708dea92dac8..a229b94d5390 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1062,26 +1062,6 @@ err_unlock:
}
/**
- * blkcg_drain_queue - drain blkcg part of request_queue
- * @q: request_queue to drain
- *
- * Called from blk_drain_queue(). Responsible for draining blkcg part.
- */
-void blkcg_drain_queue(struct request_queue *q)
-{
- lockdep_assert_held(&q->queue_lock);
-
- /*
- * @q could be exiting and already have destroyed all blkgs as
- * indicated by NULL root_blkg. If so, don't confuse policies.
- */
- if (!q->root_blkg)
- return;
-
- blk_throtl_drain(q);
-}
-
-/**
* blkcg_exit_queue - exit and release blkcg part of request_queue
* @q: request_queue being released
*
diff --git a/block/blk-core.c b/block/blk-core.c
index a1e228752083..089e890ab208 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -885,11 +885,14 @@ generic_make_request_checks(struct bio *bio)
}
/*
- * For a REQ_NOWAIT based request, return -EOPNOTSUPP
- * if queue is not a request based queue.
+ * Non-mq queues do not honor REQ_NOWAIT, so complete a bio
+ * with BLK_STS_AGAIN status in order to catch -EAGAIN and
+ * to give a chance to the caller to repeat request gracefully.
*/
- if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q))
- goto not_supported;
+ if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q)) {
+ status = BLK_STS_AGAIN;
+ goto end_io;
+ }
if (should_fail_bio(bio))
goto end_io;
@@ -1310,7 +1313,7 @@ EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
void blk_account_io_completion(struct request *req, unsigned int bytes)
{
- if (blk_do_io_stat(req)) {
+ if (req->part && blk_do_io_stat(req)) {
const int sgrp = op_stat_group(req_op(req));
struct hd_struct *part;
@@ -1328,7 +1331,8 @@ void blk_account_io_done(struct request *req, u64 now)
* normal IO on queueing nor completion. Accounting the
* containing request is enough.
*/
- if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) {
+ if (req->part && blk_do_io_stat(req) &&
+ !(req->rq_flags & RQF_FLUSH_SEQ)) {
const int sgrp = op_stat_group(req_op(req));
struct hd_struct *part;
@@ -1792,9 +1796,9 @@ int __init blk_dev_init(void)
{
BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
- FIELD_SIZEOF(struct request, cmd_flags));
+ sizeof_field(struct request, cmd_flags));
BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
- FIELD_SIZEOF(struct bio, bi_opf));
+ sizeof_field(struct bio, bi_opf));
/* used for unplugging and affects IO latency/throughput - HIGHPRI */
kblockd_workqueue = alloc_workqueue("kblockd",
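
With the REQ_NOWAIT change above, a bio submitted to a non-mq queue now completes with BLK_STS_AGAIN instead of being failed as unsupported, so the submitter can observe -EAGAIN and retry without REQ_NOWAIT (or surface it, as RWF_NOWAIT reads do). A minimal completion-handler sketch follows; the structure and function names are invented.

#include <linux/bio.h>
#include <linux/completion.h>

/* hypothetical bookkeeping for a synchronous REQ_NOWAIT read */
struct nowait_read {
	struct completion done;
	int error;
};

static void nowait_read_endio(struct bio *bio)
{
	struct nowait_read *nr = bio->bi_private;

	/* BLK_STS_AGAIN maps to -EAGAIN; the waiter decides how to retry */
	nr->error = blk_status_to_errno(bio->bi_status);
	complete(&nr->done);
	bio_put(bio);
}
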
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 1777346baf06..3f977c517960 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -69,6 +69,7 @@
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>
+#include <linux/lockdep.h>
#include "blk.h"
#include "blk-mq.h"
@@ -505,6 +506,9 @@ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
INIT_LIST_HEAD(&fq->flush_queue[1]);
INIT_LIST_HEAD(&fq->flush_data_in_flight);
+ lockdep_register_key(&fq->key);
+ lockdep_set_class(&fq->mq_flush_lock, &fq->key);
+
return fq;
fail_rq:
@@ -519,6 +523,7 @@ void blk_free_flush_queue(struct blk_flush_queue *fq)
if (!fq)
return;
+ lockdep_unregister_key(&fq->key);
kfree(fq->flush_rq);
kfree(fq);
}
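
The key registered above gives every flush queue its own lock class, so lockdep can tell the mq_flush_lock instances of different request queues apart instead of treating them as a single class. The general dynamic-key pattern, sketched here with made-up structure names, is:

#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_ctx {
	spinlock_t lock;
	struct lock_class_key key;	/* one lock class per instance */
};

static struct my_ctx *my_ctx_alloc(gfp_t gfp)
{
	struct my_ctx *ctx = kzalloc(sizeof(*ctx), gfp);

	if (!ctx)
		return NULL;
	spin_lock_init(&ctx->lock);
	lockdep_register_key(&ctx->key);
	lockdep_set_class(&ctx->lock, &ctx->key);
	return ctx;
}

static void my_ctx_free(struct my_ctx *ctx)
{
	lockdep_unregister_key(&ctx->key);	/* must precede the free */
	kfree(ctx);
}
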
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index e01267f99183..27ca68621137 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -1212,7 +1212,7 @@ static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
return HRTIMER_NORESTART;
}
-static void iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
+static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
{
struct ioc *ioc = iocg->ioc;
struct blkcg_gq *blkg = iocg_to_blkg(iocg);
@@ -1229,11 +1229,11 @@ static void iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
/* clear or maintain depending on the overage */
if (time_before_eq64(vtime, now->vnow)) {
blkcg_clear_delay(blkg);
- return;
+ return false;
}
if (!atomic_read(&blkg->use_delay) &&
time_before_eq64(vtime, now->vnow + vmargin))
- return;
+ return false;
/* use delay */
if (cost) {
@@ -1250,10 +1250,11 @@ static void iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->delay_timer));
if (hrtimer_is_queued(&iocg->delay_timer) &&
abs(oexpires - expires) <= margin_ns / 4)
- return;
+ return true;
hrtimer_start_range_ns(&iocg->delay_timer, ns_to_ktime(expires),
margin_ns / 4, HRTIMER_MODE_ABS);
+ return true;
}
static enum hrtimer_restart iocg_delay_timer_fn(struct hrtimer *timer)
@@ -1739,7 +1740,9 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
*/
if (bio_issue_as_root_blkg(bio) || fatal_signal_pending(current)) {
atomic64_add(abs_cost, &iocg->abs_vdebt);
- iocg_kick_delay(iocg, &now, cost);
+ if (iocg_kick_delay(iocg, &now, cost))
+ blkcg_schedule_throttle(rqos->q,
+ (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
return;
}
diff --git a/block/blk-map.c b/block/blk-map.c
index 3a62e471d81b..b0790268ed9d 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -151,7 +151,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
return 0;
unmap_rq:
- __blk_rq_unmap_user(bio);
+ blk_rq_unmap_user(bio);
fail:
rq->bio = NULL;
return ret;
diff --git a/block/blk-merge.c b/block/blk-merge.c
index d783bdc4559b..347782a24a35 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -157,16 +157,14 @@ static inline unsigned get_max_io_size(struct request_queue *q,
return sectors & (lbs - 1);
}
-static unsigned get_max_segment_size(const struct request_queue *q,
- unsigned offset)
+static inline unsigned get_max_segment_size(const struct request_queue *q,
+ struct page *start_page,
+ unsigned long offset)
{
unsigned long mask = queue_segment_boundary(q);
- /* default segment boundary mask means no boundary limit */
- if (mask == BLK_SEG_BOUNDARY_MASK)
- return queue_max_segment_size(q);
-
- return min_t(unsigned long, mask - (mask & offset) + 1,
+ offset = mask & (page_to_phys(start_page) + offset);
+ return min_t(unsigned long, mask - offset + 1,
queue_max_segment_size(q));
}
@@ -201,7 +199,8 @@ static bool bvec_split_segs(const struct request_queue *q,
unsigned seg_size = 0;
while (len && *nsegs < max_segs) {
- seg_size = get_max_segment_size(q, bv->bv_offset + total_len);
+ seg_size = get_max_segment_size(q, bv->bv_page,
+ bv->bv_offset + total_len);
seg_size = min(seg_size, len);
(*nsegs)++;
@@ -419,7 +418,8 @@ static unsigned blk_bvec_map_sg(struct request_queue *q,
while (nbytes > 0) {
unsigned offset = bvec->bv_offset + total;
- unsigned len = min(get_max_segment_size(q, offset), nbytes);
+ unsigned len = min(get_max_segment_size(q, bvec->bv_page,
+ offset), nbytes);
struct page *page = bvec->bv_page;
/*
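
get_max_segment_size() now folds the page's physical address into the boundary calculation, so a segment is limited by where it actually sits in physical memory rather than by bv_offset alone. A stand-alone illustration of the arithmetic, with invented names:

#include <linux/kernel.h>

/*
 * How many bytes can a DMA segment starting at physical address @phys
 * cover before it would cross the queue's segment boundary?  Mirrors the
 * min(mask - offset + 1, max_segment_size) computation above.
 */
static unsigned long seg_bytes_to_boundary(unsigned long phys,
					   unsigned long boundary_mask,
					   unsigned long max_seg_size)
{
	unsigned long offset = phys & boundary_mask;

	/* bytes remaining in the current boundary window, capped by max */
	return min(boundary_mask - offset + 1, max_seg_size);
}

/*
 * Example: with a 64K boundary (mask 0xffff) and phys == 0x1f000, only
 * 0x1000 bytes fit before the boundary, even if max_seg_size is larger.
 */
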
diff --git a/block/blk.h b/block/blk.h
index 6842f28c033e..0b8884353f6b 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -30,6 +30,7 @@ struct blk_flush_queue {
* at the same time
*/
struct request *orig_rq;
+ struct lock_class_key key;
spinlock_t mq_flush_lock;
};
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index 347dda16c2f4..6cbb7926534c 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -266,7 +266,7 @@ static blk_status_t bsg_queue_rq(struct blk_mq_hw_ctx *hctx,
struct request *req = bd->rq;
struct bsg_set *bset =
container_of(q->tag_set, struct bsg_set, tag_set);
- int sts = BLK_STS_IOERR;
+ blk_status_t sts = BLK_STS_IOERR;
int ret;
blk_mq_start_request(req);
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 6ca015f92766..3ed7a0f144a9 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -6,6 +6,7 @@
#include <linux/compat.h>
#include <linux/elevator.h>
#include <linux/hdreg.h>
+#include <linux/pr.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/types.h>
@@ -354,6 +355,13 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
* but we call blkdev_ioctl, which gets the lock for us
*/
case BLKRRPART:
+ case BLKREPORTZONE:
+ case BLKRESETZONE:
+ case BLKOPENZONE:
+ case BLKCLOSEZONE:
+ case BLKFINISHZONE:
+ case BLKGETZONESZ:
+ case BLKGETNRZONES:
return blkdev_ioctl(bdev, mode, cmd,
(unsigned long)compat_ptr(arg));
case BLKBSZSET_32:
@@ -401,6 +409,14 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
case BLKTRACETEARDOWN: /* compatible */
ret = blk_trace_ioctl(bdev, cmd, compat_ptr(arg));
return ret;
+ case IOC_PR_REGISTER:
+ case IOC_PR_RESERVE:
+ case IOC_PR_RELEASE:
+ case IOC_PR_PREEMPT:
+ case IOC_PR_PREEMPT_ABORT:
+ case IOC_PR_CLEAR:
+ return blkdev_ioctl(bdev, mode, cmd,
+ (unsigned long)compat_ptr(arg));
default:
if (disk->fops->compat_ioctl)
ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg);
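
The compat_ioctl additions above forward the zone-management and persistent-reservation commands to blkdev_ioctl() because their argument structures have the same layout for 32-bit and 64-bit userspace; only the user pointer needs compat_ptr() conversion. A hypothetical driver-side sketch of the same pattern (my_native_ioctl() is an invented stand-in for a real handler):

#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/pr.h>

/* invented stand-in for the driver's native ioctl handler */
static long my_native_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	return -ENOTTY;
}

static long my_compat_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	switch (cmd) {
	case IOC_PR_REGISTER:
	case IOC_PR_RESERVE:
	case IOC_PR_RELEASE:
		/* layout-compatible arguments: pass straight through */
		return my_native_ioctl(file, cmd,
				       (unsigned long)compat_ptr(arg));
	default:
		return -ENOIOCTLCMD;
	}
}
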