author     Christoph Hellwig <hch@lst.de>    2020-04-14 09:42:24 +0200
committer  Jens Axboe <axboe@kernel.dk>      2020-04-22 10:47:35 -0600
commit     cc97923a5bccc776851c242b61015faf288d5c22 (patch)
tree       d6510d4f326674e96bdf70c4f07472f492537db8 /block/blk-merge.c
parent     scsi: merge scsi_init_sgtable into scsi_init_io (diff)
block: move dma drain handling to scsi
Don't burden the common block code with the specifics of the libata DMA draining mechanism. Instead move most of the code to the scsi midlayer. That also means the nr_phys_segments adjustments in the blk-mq fast path can go away entirely, given that SCSI never looks at nr_phys_segments after mapping the request to a scatterlist.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
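For illustration, a hedged sketch of what the scsi-side drain append can look like once this logic leaves the block layer. This is not the commit's verbatim code: the helper name and the scsi_device fields dma_drain_buf/dma_drain_len are assumptions, but the sg_unmark_end()/sg_next() pattern mirrors the block-layer code removed below.

/*
 * Hedged sketch, not the commit's verbatim code.  After the request
 * has been mapped to a scatterlist, the midlayer appends the drain
 * buffer as one extra entry.  sdev->dma_drain_buf and
 * sdev->dma_drain_len are assumed names, used purely for illustration.
 */
static void scsi_append_drain_sg(struct scsi_cmnd *cmd,
				 struct scatterlist **last_sg)
{
	struct scsi_device *sdev = cmd->device;
	struct request *rq = cmd->request;

	/* A drain buffer that gets written must not leak stale data. */
	if (op_is_write(req_op(rq)))
		memset(sdev->dma_drain_buf, 0, sdev->dma_drain_len);

	sg_unmark_end(*last_sg);	/* reopen the terminated list */
	*last_sg = sg_next(*last_sg);
	sg_set_buf(*last_sg, sdev->dma_drain_buf, sdev->dma_drain_len);
	sg_mark_end(*last_sg);

	rq->extra_len += sdev->dma_drain_len;
}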
Diffstat (limited to 'block/blk-merge.c')
-rw-r--r--  block/blk-merge.c  14
1 file changed, 0 insertions(+), 14 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index ee618cdb141e..25f5a5e00ee6 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -539,20 +539,6 @@ int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
 		rq->extra_len += pad_len;
 	}
 
-	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
-		if (op_is_write(req_op(rq)))
-			memset(q->dma_drain_buffer, 0, q->dma_drain_size);
-
-		sg_unmark_end(*last_sg);
-		*last_sg = sg_next(*last_sg);
-		sg_set_page(*last_sg, virt_to_page(q->dma_drain_buffer),
-			    q->dma_drain_size,
-			    ((unsigned long)q->dma_drain_buffer) &
-			    (PAGE_SIZE - 1));
-		nsegs++;
-		rq->extra_len += q->dma_drain_size;
-	}
-
 	if (*last_sg)
 		sg_mark_end(*last_sg);
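A hedged caller-side sketch of the point made in the commit message: once the block layer stops appending drain segments, the count returned by blk_rq_map_sg() covers only the bio data (plus any pad byte accounting in extra_len), so a table sized from the queue's segment limit always fits. The array size here is purely illustrative.

/*
 * Hedged usage sketch.  With drain handling gone, blk_rq_map_sg()
 * never returns more entries than the queue's segment limit allows.
 */
struct scatterlist sgl[128];	/* illustrative size */
int nents;

sg_init_table(sgl, ARRAY_SIZE(sgl));
nents = blk_rq_map_sg(q, rq, sgl);
if (nents <= 0)
	return BLK_STS_IOERR;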