Diffstat (limited to 'block')
-rw-r--r--  block/bio.c                  11
-rw-r--r--  block/blk-lib.c             178
-rw-r--r--  block/blk-map.c              47
-rw-r--r--  block/blk-mq.c                5
-rw-r--r--  block/blk-throttle.c          5
-rw-r--r--  block/partition-generic.c    13
6 files changed, 86 insertions(+), 173 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index 807d25e466ec..0e4aa42bc30d 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -311,17 +311,6 @@ static void bio_chain_endio(struct bio *bio)
bio_endio(__bio_chain_endio(bio));
}
-/*
- * Increment chain count for the bio. Make sure the CHAIN flag update
- * is visible before the raised count.
- */
-static inline void bio_inc_remaining(struct bio *bio)
-{
- bio_set_flag(bio, BIO_CHAIN);
- smp_mb__before_atomic();
- atomic_inc(&bio->__bi_remaining);
-}
-
/**
* bio_chain - chain bio completions
* @bio: the target bio
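Note that bio_chain(), whose kernel-doc begins just above, still relies
on this helper to mark the parent BIO_CHAIN and raise __bi_remaining,
so deleting it from bio.c only makes sense if it was hoisted into a
shared header; include/linux/bio.h is the likely destination, which a
diffstat limited to 'block' cannot show. For reference, the helper as
removed above:

    static inline void bio_inc_remaining(struct bio *bio)
    {
            bio_set_flag(bio, BIO_CHAIN);
            smp_mb__before_atomic();
            atomic_inc(&bio->__bi_remaining);
    }
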
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 9ebf65379556..23d7f301a196 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -9,82 +9,46 @@
#include "blk.h"
-struct bio_batch {
- atomic_t done;
- int error;
- struct completion *wait;
-};
-
-static void bio_batch_end_io(struct bio *bio)
+static struct bio *next_bio(struct bio *bio, int rw, unsigned int nr_pages,
+ gfp_t gfp)
{
- struct bio_batch *bb = bio->bi_private;
+ struct bio *new = bio_alloc(gfp, nr_pages);
+
+ if (bio) {
+ bio_chain(bio, new);
+ submit_bio(rw, bio);
+ }
- if (bio->bi_error && bio->bi_error != -EOPNOTSUPP)
- bb->error = bio->bi_error;
- if (atomic_dec_and_test(&bb->done))
- complete(bb->wait);
- bio_put(bio);
+ return new;
}
-/**
- * blkdev_issue_discard - queue a discard
- * @bdev: blockdev to issue discard for
- * @sector: start sector
- * @nr_sects: number of sectors to discard
- * @gfp_mask: memory allocation flags (for bio_alloc)
- * @flags: BLKDEV_IFL_* flags to control behaviour
- *
- * Description:
- * Issue a discard request for the sectors in question.
- */
-int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
+int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask, int type, struct bio **biop)
{
- DECLARE_COMPLETION_ONSTACK(wait);
struct request_queue *q = bdev_get_queue(bdev);
- int type = REQ_WRITE | REQ_DISCARD;
+ struct bio *bio = *biop;
unsigned int granularity;
int alignment;
- struct bio_batch bb;
- struct bio *bio;
- int ret = 0;
- struct blk_plug plug;
if (!q)
return -ENXIO;
-
if (!blk_queue_discard(q))
return -EOPNOTSUPP;
+ if ((type & REQ_SECURE) && !blk_queue_secdiscard(q))
+ return -EOPNOTSUPP;
/* Zero-sector (unknown) and one-sector granularities are the same. */
granularity = max(q->limits.discard_granularity >> 9, 1U);
alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
- if (flags & BLKDEV_DISCARD_SECURE) {
- if (!blk_queue_secdiscard(q))
- return -EOPNOTSUPP;
- type |= REQ_SECURE;
- }
-
- atomic_set(&bb.done, 1);
- bb.error = 0;
- bb.wait = &wait;
-
- blk_start_plug(&plug);
while (nr_sects) {
unsigned int req_sects;
sector_t end_sect, tmp;
- bio = bio_alloc(gfp_mask, 1);
- if (!bio) {
- ret = -ENOMEM;
- break;
- }
-
/* Make sure bi_size doesn't overflow */
req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);
- /*
+ /**
* If splitting a request, and the next starting sector would be
* misaligned, stop the discard at the previous aligned sector.
*/
@@ -98,18 +62,14 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
req_sects = end_sect - sector;
}
+ bio = next_bio(bio, type, 1, gfp_mask);
bio->bi_iter.bi_sector = sector;
- bio->bi_end_io = bio_batch_end_io;
bio->bi_bdev = bdev;
- bio->bi_private = &bb;
bio->bi_iter.bi_size = req_sects << 9;
nr_sects -= req_sects;
sector = end_sect;
- atomic_inc(&bb.done);
- submit_bio(type, bio);
-
/*
* We can loop for a long time in here, if someone does
* full device discards (like mkfs). Be nice and allow
@@ -118,14 +78,44 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
*/
cond_resched();
}
- blk_finish_plug(&plug);
- /* Wait for bios in-flight */
- if (!atomic_dec_and_test(&bb.done))
- wait_for_completion_io(&wait);
+ *biop = bio;
+ return 0;
+}
+EXPORT_SYMBOL(__blkdev_issue_discard);
+
+/**
+ * blkdev_issue_discard - queue a discard
+ * @bdev: blockdev to issue discard for
+ * @sector: start sector
+ * @nr_sects: number of sectors to discard
+ * @gfp_mask: memory allocation flags (for bio_alloc)
+ * @flags: BLKDEV_IFL_* flags to control behaviour
+ *
+ * Description:
+ * Issue a discard request for the sectors in question.
+ */
+int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
+{
+ int type = REQ_WRITE | REQ_DISCARD;
+ struct bio *bio = NULL;
+ struct blk_plug plug;
+ int ret;
+
+ if (flags & BLKDEV_DISCARD_SECURE)
+ type |= REQ_SECURE;
+
+ blk_start_plug(&plug);
+ ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, type,
+ &bio);
+ if (!ret && bio) {
+ ret = submit_bio_wait(type, bio);
+ if (ret == -EOPNOTSUPP)
+ ret = 0;
+ }
+ blk_finish_plug(&plug);
- if (bb.error)
- return bb.error;
return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
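The reason __blkdev_issue_discard() is exported separately is that a
caller can now build up the discard chain itself and decide when (and
whether) to wait, instead of blocking inside every call the way
blkdev_issue_discard() does above. A hypothetical caller (function name
and ranges invented for illustration) batching two ranges under one
plug and a single wait:

    static int discard_two_ranges(struct block_device *bdev,
                                  sector_t s1, sector_t n1,
                                  sector_t s2, sector_t n2)
    {
            int type = REQ_WRITE | REQ_DISCARD;
            struct bio *bio = NULL;
            struct blk_plug plug;
            int ret;

            blk_start_plug(&plug);
            ret = __blkdev_issue_discard(bdev, s1, n1, GFP_KERNEL, type,
                                         &bio);
            if (!ret)
                    ret = __blkdev_issue_discard(bdev, s2, n2, GFP_KERNEL,
                                                 type, &bio);
            /* one submit_bio_wait() completes the whole chain */
            if (!ret && bio) {
                    ret = submit_bio_wait(type, bio);
                    if (ret == -EOPNOTSUPP)
                            ret = 0;
            }
            blk_finish_plug(&plug);
            return ret;
    }
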
@@ -145,11 +135,9 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask,
struct page *page)
{
- DECLARE_COMPLETION_ONSTACK(wait);
struct request_queue *q = bdev_get_queue(bdev);
unsigned int max_write_same_sectors;
- struct bio_batch bb;
- struct bio *bio;
+ struct bio *bio = NULL;
int ret = 0;
if (!q)
@@ -158,21 +146,10 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
/* Ensure that max_write_same_sectors doesn't overflow bi_size */
max_write_same_sectors = UINT_MAX >> 9;
- atomic_set(&bb.done, 1);
- bb.error = 0;
- bb.wait = &wait;
-
while (nr_sects) {
- bio = bio_alloc(gfp_mask, 1);
- if (!bio) {
- ret = -ENOMEM;
- break;
- }
-
+ bio = next_bio(bio, REQ_WRITE | REQ_WRITE_SAME, 1, gfp_mask);
bio->bi_iter.bi_sector = sector;
- bio->bi_end_io = bio_batch_end_io;
bio->bi_bdev = bdev;
- bio->bi_private = &bb;
bio->bi_vcnt = 1;
bio->bi_io_vec->bv_page = page;
bio->bi_io_vec->bv_offset = 0;
@@ -186,18 +163,11 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
bio->bi_iter.bi_size = nr_sects << 9;
nr_sects = 0;
}
-
- atomic_inc(&bb.done);
- submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
}
- /* Wait for bios in-flight */
- if (!atomic_dec_and_test(&bb.done))
- wait_for_completion_io(&wait);
-
- if (bb.error)
- return bb.error;
- return ret;
+ if (bio)
+ ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio);
+ return ret != -EOPNOTSUPP ? ret : 0;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
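blkdev_issue_write_same() gets the same chaining treatment: each bio
carries a single-page bvec, and the device replicates that one page
across bi_size bytes. One practical use, roughly what the zeroout path
does when the device supports WRITE SAME (a sketch, assuming such a
device):

    static int zero_range_by_write_same(struct block_device *bdev,
                                        sector_t sector, sector_t nr_sects)
    {
            /* replicate the shared zero page across the whole range */
            return blkdev_issue_write_same(bdev, sector, nr_sects,
                                           GFP_KERNEL, ZERO_PAGE(0));
    }
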
@@ -216,28 +186,15 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask)
{
int ret;
- struct bio *bio;
- struct bio_batch bb;
+ struct bio *bio = NULL;
unsigned int sz;
- DECLARE_COMPLETION_ONSTACK(wait);
-
- atomic_set(&bb.done, 1);
- bb.error = 0;
- bb.wait = &wait;
- ret = 0;
while (nr_sects != 0) {
- bio = bio_alloc(gfp_mask,
- min(nr_sects, (sector_t)BIO_MAX_PAGES));
- if (!bio) {
- ret = -ENOMEM;
- break;
- }
-
+ bio = next_bio(bio, WRITE,
+ min(nr_sects, (sector_t)BIO_MAX_PAGES),
+ gfp_mask);
bio->bi_iter.bi_sector = sector;
bio->bi_bdev = bdev;
- bio->bi_end_io = bio_batch_end_io;
- bio->bi_private = &bb;
while (nr_sects != 0) {
sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
@@ -247,18 +204,11 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
if (ret < (sz << 9))
break;
}
- ret = 0;
- atomic_inc(&bb.done);
- submit_bio(WRITE, bio);
}
- /* Wait for bios in-flight */
- if (!atomic_dec_and_test(&bb.done))
- wait_for_completion_io(&wait);
-
- if (bb.error)
- return bb.error;
- return ret;
+ if (bio)
+ return submit_bio_wait(WRITE, bio);
+ return 0;
}
/**
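All three helpers in this file now follow the same shape: next_bio()
chains the previous bio to a freshly allocated one and submits the
predecessor immediately, so only the newest bio remains in the caller's
hands, and one submit_bio_wait() on that final bio completes the whole
chain (bio_chain() propagates both the remaining-count and the first
error upward). A hypothetical distillation of that shape, with an
invented fill_one_bio() standing in for the per-caller setup:

    static int issue_chain(struct block_device *bdev, int rw,
                           sector_t sector, sector_t nr_sects, gfp_t gfp)
    {
            struct bio *bio = NULL;

            while (nr_sects) {
                    bio = next_bio(bio, rw, 1, gfp);
                    /* fill_one_bio() is invented: point the bio at the
                     * device/range and return the sectors it covers */
                    nr_sects -= fill_one_bio(bio, bdev, &sector, nr_sects);
                    cond_resched();
            }
            if (!bio)
                    return 0;
            return submit_bio_wait(rw, bio);
    }
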
diff --git a/block/blk-map.c b/block/blk-map.c
index a54f0543b956..b9f88b7751fb 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -9,24 +9,6 @@
#include "blk.h"
-static bool iovec_gap_to_prv(struct request_queue *q,
- struct iovec *prv, struct iovec *cur)
-{
- unsigned long prev_end;
-
- if (!queue_virt_boundary(q))
- return false;
-
- if (prv->iov_base == NULL && prv->iov_len == 0)
- /* prv is not set - don't check */
- return false;
-
- prev_end = (unsigned long)(prv->iov_base + prv->iov_len);
-
- return (((unsigned long)cur->iov_base & queue_virt_boundary(q)) ||
- prev_end & queue_virt_boundary(q));
-}
-
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
struct bio *bio)
{
@@ -125,31 +107,18 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
struct rq_map_data *map_data,
const struct iov_iter *iter, gfp_t gfp_mask)
{
- struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0};
- bool copy = (q->dma_pad_mask & iter->count) || map_data;
+ bool copy = false;
+ unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
struct bio *bio = NULL;
struct iov_iter i;
int ret;
- if (!iter || !iter->count)
- return -EINVAL;
-
- iov_for_each(iov, i, *iter) {
- unsigned long uaddr = (unsigned long) iov.iov_base;
-
- if (!iov.iov_len)
- return -EINVAL;
-
- /*
- * Keep going so we check length of all segments
- */
- if ((uaddr & queue_dma_alignment(q)) ||
- iovec_gap_to_prv(q, &prv, &iov))
- copy = true;
-
- prv.iov_base = iov.iov_base;
- prv.iov_len = iov.iov_len;
- }
+ if (map_data)
+ copy = true;
+ else if (iov_iter_alignment(iter) & align)
+ copy = true;
+ else if (queue_virt_boundary(q))
+ copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);
i = *iter;
do {
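The hand-rolled iovec walk is replaced by the iov_iter helpers:
iov_iter_alignment() ORs together every segment's base address and
length, and iov_iter_gap_alignment() roughly ORs the addresses at the
internal boundaries between segments (each segment's end and the next
one's start), so two mask tests cover what the old loop and
iovec_gap_to_prv() computed. Factored into a helper for illustration
(the patch open-codes this; the helper name is invented):

    static bool map_iter_needs_copy(struct request_queue *q,
                                    const struct iov_iter *iter,
                                    struct rq_map_data *map_data)
    {
            unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);

            if (map_data)                   /* bounce into caller pages */
                    return true;
            if (iov_iter_alignment(iter) & align)
                    return true;
            /* would a gap between segments cross the virt boundary? */
            return queue_virt_boundary(q) &&
                   (queue_virt_boundary(q) & iov_iter_gap_alignment(iter));
    }
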
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1699baf39b78..7df9c9263b21 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1122,8 +1122,7 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
{
init_request_from_bio(rq, bio);
- if (blk_do_io_stat(rq))
- blk_account_io_start(rq, 1);
+ blk_account_io_start(rq, 1);
}
static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
@@ -1496,7 +1495,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
int to_do;
void *p;
- while (left < order_to_size(this_order - 1) && this_order)
+ while (this_order && left < order_to_size(this_order - 1))
this_order--;
do {
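Two independent fixes here. First, blk_account_io_start() already
performs the blk_do_io_stat() check internally, so the open-coded guard
in blk_mq_bio_to_request() was redundant. Second, the loop condition is
reordered for short-circuit safety: order_to_size() in this file is
essentially a PAGE_SIZE << order helper, so evaluating it when
this_order has already reached zero would shift by (unsigned)-1 before
the old trailing "&& this_order" test could stop it. Testing the
counter first makes && bail out before the out-of-range shift:

    /* safe: order_to_size(this_order - 1) is never evaluated once
     * this_order has reached zero */
    while (this_order && left < order_to_size(this_order - 1))
            this_order--;
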
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 2149a1ddbacf..47a3e540631a 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -211,15 +211,14 @@ static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
*
* The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
* throtl_grp; otherwise, just "throtl".
- *
- * TODO: this should be made a function and name formatting should happen
- * after testing whether blktrace is enabled.
*/
#define throtl_log(sq, fmt, args...) do { \
struct throtl_grp *__tg = sq_to_tg((sq)); \
struct throtl_data *__td = sq_to_td((sq)); \
\
(void)__td; \
+ if (likely(!blk_trace_note_message_enabled(__td->queue))) \
+ break; \
if ((__tg)) { \
char __pbuf[128]; \
\
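This addresses the second half of the removed TODO: the relatively
expensive blkg name formatting now happens only after testing whether
anyone is listening, although throtl_log() remains a macro rather than
a function. The mechanism is that a do { } while (0) statement macro
turns "break" into an early exit from the macro body. A simplified
illustration (example_log() is invented; only
blk_trace_note_message_enabled() comes from the patch):

    #define example_log(q, fmt, args...) do {                          \
            if (likely(!blk_trace_note_message_enabled(q)))            \
                    break;  /* exits the do/while, skipping the rest */ \
            /* ...expensive formatting and blk_add_trace_msg()... */   \
    } while (0)
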
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 2c6ae2aed2c4..d7eb77e1e3a8 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -361,15 +361,20 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
goto out_del;
}
+ err = hd_ref_init(p);
+ if (err) {
+ if (flags & ADDPART_FLAG_WHOLEDISK)
+ goto out_remove_file;
+ goto out_del;
+ }
+
/* everything is up and running, commence */
rcu_assign_pointer(ptbl->part[partno], p);
/* suppress uevent if the disk suppresses it */
if (!dev_get_uevent_suppress(ddev))
kobject_uevent(&pdev->kobj, KOBJ_ADD);
-
- if (!hd_ref_init(p))
- return p;
+ return p;
out_free_info:
free_part_info(p);
@@ -378,6 +383,8 @@ out_free_stats:
out_free:
kfree(p);
return ERR_PTR(err);
+out_remove_file:
+ device_remove_file(pdev, &dev_attr_whole_disk);
out_del:
kobject_put(p->holder_dir);
device_del(pdev);
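
The hd_ref_init() call is hoisted above the point where the partition
becomes visible (the rcu_assign_pointer() publish and the KOBJ_ADD
uevent). Previously it ran last, and on failure the code fell through
into out_free_info, freeing a partition that had already been published
and announced; now a failure unwinds cleanly before anything is
exposed, including the whole_disk sysfs attribute when
ADDPART_FLAG_WHOLEDISK created one. The tail of the resulting unwind
ladder, as assembled from the hunks above:

    out_remove_file:
            device_remove_file(pdev, &dev_attr_whole_disk);
    out_del:
            kobject_put(p->holder_dir);
            device_del(pdev);
            /* presumably continues into put_device()/devt release,
             * outside this hunk's context */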