path: root/block
author      David S. Miller <davem@davemloft.net>  2019-05-31 10:49:43 -0700
committer   David S. Miller <davem@davemloft.net>  2019-05-31 10:49:43 -0700
commit      b4b12b0d2f02613101a7a667ef7b7cc8d388e597 (patch)
tree        ae2d52198929797821961fd0b5923b5ea1a89fe1 /block
parent      netfilter: nf_conntrack_bridge: fix CONFIG_IPV6=y (diff)
parent      Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
The phylink conflict was between a bug fix by Russell King to make sure we have a consistent PHY interface mode, and a change in net-next to pull some code in phylink_resolve() into the helper functions phylink_mac_link_{up,down}().

On the dp83867 side it's mostly overlapping changes, with the 'net' side removing a condition that was supposed to trigger for RGMII but because of how it was coded never actually could trigger.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'block')
-rw-r--r--   block/blk-core.c       3
-rw-r--r--   block/blk-merge.c    134
-rw-r--r--   block/blk-mq.c        19
-rw-r--r--   block/blk-settings.c   11
4 files changed, 36 insertions(+), 131 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 419d600e6637..1bf83a0df0f6 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -413,7 +413,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
smp_rmb();
wait_event(q->mq_freeze_wq,
- (atomic_read(&q->mq_freeze_depth) == 0 &&
+ (!q->mq_freeze_depth &&
(pm || (blk_pm_request_resume(q),
!blk_queue_pm_only(q)))) ||
blk_queue_dying(q));
@@ -503,6 +503,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
spin_lock_init(&q->queue_lock);
init_waitqueue_head(&q->mq_freeze_wq);
+ mutex_init(&q->mq_freeze_lock);
/*
* Init percpu_ref in atomic mode so that it's faster to shutdown.
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 21e87a714a73..17713d7d98d5 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -12,23 +12,6 @@
#include "blk.h"
-/*
- * Check if the two bvecs from two bios can be merged to one segment. If yes,
- * no need to check gap between the two bios since the 1st bio and the 1st bvec
- * in the 2nd bio can be handled in one segment.
- */
-static inline bool bios_segs_mergeable(struct request_queue *q,
- struct bio *prev, struct bio_vec *prev_last_bv,
- struct bio_vec *next_first_bv)
-{
- if (!biovec_phys_mergeable(q, prev_last_bv, next_first_bv))
- return false;
- if (prev->bi_seg_back_size + next_first_bv->bv_len >
- queue_max_segment_size(q))
- return false;
- return true;
-}
-
static inline bool bio_will_gap(struct request_queue *q,
struct request *prev_rq, struct bio *prev, struct bio *next)
{
@@ -60,7 +43,7 @@ static inline bool bio_will_gap(struct request_queue *q,
*/
bio_get_last_bvec(prev, &pb);
bio_get_first_bvec(next, &nb);
- if (bios_segs_mergeable(q, prev, &pb, &nb))
+ if (biovec_phys_mergeable(q, &pb, &nb))
return false;
return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
}
@@ -179,8 +162,7 @@ static unsigned get_max_segment_size(struct request_queue *q,
* variables.
*/
static bool bvec_split_segs(struct request_queue *q, struct bio_vec *bv,
- unsigned *nsegs, unsigned *last_seg_size,
- unsigned *front_seg_size, unsigned *sectors, unsigned max_segs)
+ unsigned *nsegs, unsigned *sectors, unsigned max_segs)
{
unsigned len = bv->bv_len;
unsigned total_len = 0;
@@ -202,28 +184,12 @@ static bool bvec_split_segs(struct request_queue *q, struct bio_vec *bv,
break;
}
- if (!new_nsegs)
- return !!len;
-
- /* update front segment size */
- if (!*nsegs) {
- unsigned first_seg_size;
-
- if (new_nsegs == 1)
- first_seg_size = get_max_segment_size(q, bv->bv_offset);
- else
- first_seg_size = queue_max_segment_size(q);
-
- if (*front_seg_size < first_seg_size)
- *front_seg_size = first_seg_size;
+ if (new_nsegs) {
+ *nsegs += new_nsegs;
+ if (sectors)
+ *sectors += total_len >> 9;
}
- /* update other varibles */
- *last_seg_size = seg_size;
- *nsegs += new_nsegs;
- if (sectors)
- *sectors += total_len >> 9;
-
/* split in the middle of the bvec if len != 0 */
return !!len;
}
@@ -235,8 +201,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
{
struct bio_vec bv, bvprv, *bvprvp = NULL;
struct bvec_iter iter;
- unsigned seg_size = 0, nsegs = 0, sectors = 0;
- unsigned front_seg_size = bio->bi_seg_front_size;
+ unsigned nsegs = 0, sectors = 0;
bool do_split = true;
struct bio *new = NULL;
const unsigned max_sectors = get_max_io_size(q, bio);
@@ -260,8 +225,6 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
/* split in the middle of bvec */
bv.bv_len = (max_sectors - sectors) << 9;
bvec_split_segs(q, &bv, &nsegs,
- &seg_size,
- &front_seg_size,
&sectors, max_segs);
}
goto split;
@@ -275,12 +238,9 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
if (bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
nsegs++;
- seg_size = bv.bv_len;
sectors += bv.bv_len >> 9;
- if (nsegs == 1 && seg_size > front_seg_size)
- front_seg_size = seg_size;
- } else if (bvec_split_segs(q, &bv, &nsegs, &seg_size,
- &front_seg_size, &sectors, max_segs)) {
+ } else if (bvec_split_segs(q, &bv, &nsegs, &sectors,
+ max_segs)) {
goto split;
}
}
@@ -295,10 +255,6 @@ split:
bio = new;
}
- bio->bi_seg_front_size = front_seg_size;
- if (seg_size > bio->bi_seg_back_size)
- bio->bi_seg_back_size = seg_size;
-
return do_split ? new : NULL;
}
@@ -353,18 +309,13 @@ EXPORT_SYMBOL(blk_queue_split);
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
struct bio *bio)
{
- struct bio_vec uninitialized_var(bv), bvprv = { NULL };
- unsigned int seg_size, nr_phys_segs;
- unsigned front_seg_size;
- struct bio *fbio, *bbio;
+ unsigned int nr_phys_segs = 0;
struct bvec_iter iter;
- bool new_bio = false;
+ struct bio_vec bv;
if (!bio)
return 0;
- front_seg_size = bio->bi_seg_front_size;
-
switch (bio_op(bio)) {
case REQ_OP_DISCARD:
case REQ_OP_SECURE_ERASE:
@@ -374,42 +325,11 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
return 1;
}
- fbio = bio;
- seg_size = 0;
- nr_phys_segs = 0;
for_each_bio(bio) {
- bio_for_each_bvec(bv, bio, iter) {
- if (new_bio) {
- if (seg_size + bv.bv_len
- > queue_max_segment_size(q))
- goto new_segment;
- if (!biovec_phys_mergeable(q, &bvprv, &bv))
- goto new_segment;
-
- seg_size += bv.bv_len;
-
- if (nr_phys_segs == 1 && seg_size >
- front_seg_size)
- front_seg_size = seg_size;
-
- continue;
- }
-new_segment:
- bvec_split_segs(q, &bv, &nr_phys_segs, &seg_size,
- &front_seg_size, NULL, UINT_MAX);
- new_bio = false;
- }
- bbio = bio;
- if (likely(bio->bi_iter.bi_size)) {
- bvprv = bv;
- new_bio = true;
- }
+ bio_for_each_bvec(bv, bio, iter)
+ bvec_split_segs(q, &bv, &nr_phys_segs, NULL, UINT_MAX);
}
- fbio->bi_seg_front_size = front_seg_size;
- if (seg_size > bbio->bi_seg_back_size)
- bbio->bi_seg_back_size = seg_size;
-
return nr_phys_segs;
}
@@ -429,24 +349,6 @@ void blk_recount_segments(struct request_queue *q, struct bio *bio)
bio_set_flag(bio, BIO_SEG_VALID);
}
-static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
- struct bio *nxt)
-{
- struct bio_vec end_bv = { NULL }, nxt_bv;
-
- if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
- queue_max_segment_size(q))
- return 0;
-
- if (!bio_has_data(bio))
- return 1;
-
- bio_get_last_bvec(bio, &end_bv);
- bio_get_first_bvec(nxt, &nxt_bv);
-
- return biovec_phys_mergeable(q, &end_bv, &nxt_bv);
-}
-
static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
struct scatterlist *sglist)
{
@@ -706,8 +608,6 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
struct request *next)
{
int total_phys_segments;
- unsigned int seg_size =
- req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;
if (req_gap_back_merge(req, next->bio))
return 0;
@@ -720,14 +620,6 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
return 0;
total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
- if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
- if (req->nr_phys_segments == 1)
- req->bio->bi_seg_front_size = seg_size;
- if (next->nr_phys_segments == 1)
- next->biotail->bi_seg_back_size = seg_size;
- total_phys_segments--;
- }
-
if (total_phys_segments > queue_max_segments(q))
return 0;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 08a6248d8536..32b8ad3d341b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -144,13 +144,14 @@ void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
void blk_freeze_queue_start(struct request_queue *q)
{
- int freeze_depth;
-
- freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
- if (freeze_depth == 1) {
+ mutex_lock(&q->mq_freeze_lock);
+ if (++q->mq_freeze_depth == 1) {
percpu_ref_kill(&q->q_usage_counter);
+ mutex_unlock(&q->mq_freeze_lock);
if (queue_is_mq(q))
blk_mq_run_hw_queues(q, false);
+ } else {
+ mutex_unlock(&q->mq_freeze_lock);
}
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
@@ -199,14 +200,14 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
void blk_mq_unfreeze_queue(struct request_queue *q)
{
- int freeze_depth;
-
- freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
- WARN_ON_ONCE(freeze_depth < 0);
- if (!freeze_depth) {
+ mutex_lock(&q->mq_freeze_lock);
+ q->mq_freeze_depth--;
+ WARN_ON_ONCE(q->mq_freeze_depth < 0);
+ if (!q->mq_freeze_depth) {
percpu_ref_resurrect(&q->q_usage_counter);
wake_up_all(&q->mq_freeze_wq);
}
+ mutex_unlock(&q->mq_freeze_lock);
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
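
The freeze/unfreeze pair now serializes on mq_freeze_lock, but the external contract is unchanged: calls must stay balanced, only the first freeze kills the percpu ref and only the final unfreeze resurrects it and wakes waiters on mq_freeze_wq. A minimal usage sketch with the exported API (the reconfiguration helper and its body are hypothetical, for illustration only):

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

/*
 * Hypothetical helper: quiesce a queue while driver state is updated.
 * blk_mq_freeze_queue() bumps mq_freeze_depth and waits until all
 * outstanding requests have completed; new blk_queue_enter() callers
 * block on mq_freeze_wq until blk_mq_unfreeze_queue() drops the depth
 * back to zero and resurrects q_usage_counter.
 */
static void example_reconfigure_queue(struct request_queue *q)
{
	blk_mq_freeze_queue(q);
	/* ... change queue or driver state; no new requests can enter ... */
	blk_mq_unfreeze_queue(q);
}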
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 3facc41476be..2ae348c101a0 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -310,6 +310,9 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
__func__, max_size);
}
+ /* see blk_queue_virt_boundary() for the explanation */
+ WARN_ON_ONCE(q->limits.virt_boundary_mask);
+
q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);
@@ -742,6 +745,14 @@ EXPORT_SYMBOL(blk_queue_segment_boundary);
void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
{
q->limits.virt_boundary_mask = mask;
+
+ /*
+ * Devices that require a virtual boundary do not support scatter/gather
+ * I/O natively, but instead require a descriptor list entry for each
+ * page (which might not be identical to the Linux PAGE_SIZE). Because
+ * of that they are not limited by our notion of "segment size".
+ */
+ q->limits.max_segment_size = UINT_MAX;
}
EXPORT_SYMBOL(blk_queue_virt_boundary);
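
The comment above has a practical consequence for drivers: setting a virt boundary now implies an unlimited segment size, and calling blk_queue_max_segment_size() afterwards would trip the new WARN_ON_ONCE(). A minimal sketch of limits setup for hardware that consumes one descriptor per 4 KiB page (the setup function is hypothetical, shown only to illustrate the call order):

#include <linux/blkdev.h>
#include <linux/sizes.h>

/*
 * Hypothetical limits setup for hardware that walks a per-page
 * descriptor list (e.g. a PRP-style scheme). Only the boundary is set;
 * blk_queue_virt_boundary() itself forces max_segment_size to UINT_MAX,
 * so no separate blk_queue_max_segment_size() call is made here.
 */
static void example_setup_queue_limits(struct request_queue *q)
{
	blk_queue_virt_boundary(q, SZ_4K - 1);
}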