author	Christoph Hellwig <hch@lst.de>	2019-08-12 17:39:57 +0200
committer	Jens Axboe <axboe@kernel.dk>	2019-08-22 07:14:38 -0600
commit	384209cd5b93a926321fafe880ed05b1bca97260 (patch)
tree	0fedcc88c21228a10ec49eb7304bd7b959c20015 /block
parent	block: improve the gap check in __bio_add_pc_page (diff)
block: create a bio_try_merge_pc_page helper
Passthrough bio handling should be the same as normal bio handling, except
that we need to take hardware limitations into account. Thus use the common
try_merge implementation after checking the hardware limits. This changes
behavior in that we now also check segment and dma boundary settings for
same-page merges, which is a little more work but has no effect, as those
limits need to be larger than the page size.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
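The boundary test at the heart of the new helper is worth unpacking: OR-ing the segment-boundary mask onto a physical address forces all bits below the boundary to ones, so only the segment-selecting high bits can differ, and two addresses yield the same result exactly when they fall in the same hardware segment. A minimal userspace sketch of that arithmetic (the mask and address values are invented for illustration; this is not kernel code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical 64 KiB segment boundary, expressed as a mask. */
	uint64_t mask = 0xffff;

	/* First and last byte of a candidate merge; values invented. */
	uint64_t addr1 = 0x1fff0;
	uint64_t addr2 = 0x1fff0 + 0x20 - 1;

	/*
	 * OR-ing the mask keeps only the bits that select the segment;
	 * a mismatch means the range crosses a segment boundary.
	 */
	if ((addr1 | mask) != (addr2 | mask))
		printf("range crosses a segment boundary: reject merge\n");
	else
		printf("range stays in one segment: merge is allowed\n");
	return 0;
}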
Diffstat (limited to 'block')
-rw-r--r--	block/bio.c	34
1 file changed, 10 insertions(+), 24 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index 537d71a30e56..c1782df36dff 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -646,25 +646,20 @@ static inline bool page_is_mergeable(const struct bio_vec *bv,
 	return true;
 }
 
-/*
- * Check if the @page can be added to the current segment(@bv), and make
- * sure to call it only if page_is_mergeable(@bv, @page) is true
- */
-static bool can_add_page_to_seg(struct request_queue *q,
-		struct bio_vec *bv, struct page *page, unsigned len,
-		unsigned offset)
+static bool bio_try_merge_pc_page(struct request_queue *q, struct bio *bio,
+		struct page *page, unsigned len, unsigned offset,
+		bool *same_page)
 {
+	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
 	unsigned long mask = queue_segment_boundary(q);
 	phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
 	phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;
 
 	if ((addr1 | mask) != (addr2 | mask))
 		return false;
-
 	if (bv->bv_len + len > queue_max_segment_size(q))
 		return false;
-
-	return true;
+	return __bio_try_merge_page(bio, page, len, offset, same_page);
 }
 
 /**
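The new helper defers the actual merge to __bio_try_merge_page, whose *same_page output tells the caller that the merged range starts in the page the tail vector already references, i.e. the caller holds a duplicate page reference it may want to drop. A simplified userspace model of that contract, with invented toy types (the real logic in block/bio.c also handles multi-page bvecs and other corner cases this sketch ignores):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-ins for struct page and struct bio_vec, for illustration only. */
struct toy_page { uint64_t phys; int refcount; };
struct toy_vec { struct toy_page *page; unsigned int offset, len; };

/*
 * Merge succeeds only when the new range is contiguous with the tail
 * vector; *same_page reports whether it lands in the page the vector
 * already holds.
 */
static bool toy_try_merge_page(struct toy_vec *bv, struct toy_page *page,
			       unsigned int len, unsigned int offset,
			       bool *same_page)
{
	if (page->phys + offset != bv->page->phys + bv->offset + bv->len)
		return false;
	*same_page = (page == bv->page);
	bv->len += len;
	return true;
}

int main(void)
{
	struct toy_page page = { .phys = 0x10000, .refcount = 1 };
	struct toy_vec bv = { .page = &page, .offset = 0, .len = 512 };
	bool same_page = false;

	page.refcount++;	/* reference taken for the range being added */
	if (toy_try_merge_page(&bv, &page, 256, 512, &same_page) && same_page)
		page.refcount--;	/* mirrors put_page() in the caller */

	printf("len=%u same_page=%d refcount=%d\n",
	       bv.len, same_page, page.refcount);
	return 0;
}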
@@ -700,26 +695,18 @@ static int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
 		return 0;
 
 	if (bio->bi_vcnt > 0) {
-		bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
-
-		if (page == bvec->bv_page &&
-		    offset == bvec->bv_offset + bvec->bv_len) {
-			if (put_same_page)
+		if (bio_try_merge_pc_page(q, bio, page, len, offset,
+				&same_page)) {
+			if (put_same_page && same_page)
 				put_page(page);
-			bvec->bv_len += len;
-			goto done;
-		}
-
-		if (page_is_mergeable(bvec, page, len, offset, &same_page) &&
-		    can_add_page_to_seg(q, bvec, page, len, offset)) {
-			bvec->bv_len += len;
-			goto done;
+			return len;
 		}
 
 		/*
 		 * If the queue doesn't support SG gaps and adding this segment
 		 * would create a gap, disallow it.
 		 */
+		bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
 		if (bvec_gap_to_prev(q, bvec, offset))
 			return 0;
 	}
@@ -735,7 +722,6 @@ static int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
 	bvec->bv_len = len;
 	bvec->bv_offset = offset;
 	bio->bi_vcnt++;
- done:
 	bio->bi_iter.bi_size += len;
 	return len;
 }