author	Sergey Senozhatsky <senozhatsky@chromium.org>	2020-05-14 18:01:50 +0200
committer	Mauro Carvalho Chehab <mchehab+huawei@kernel.org>	2020-06-23 13:39:01 +0200
commit	d4db5eb57cab049d378fbfb7ee842857009a8679
tree	639621e61189c898fb5a5b3bfcbb9763e4083572 /drivers/media/common/videobuf2
parent	media: videobuf2: add begin/end cpu_access callbacks to dma-contig
media: videobuf2: add begin/end cpu_access callbacks to dma-sg
Provide begin_cpu_access() and end_cpu_access() dma_buf_ops callbacks for cache synchronisation on exported buffers.

V4L2_FLAG_MEMORY_NON_CONSISTENT has no effect on dma-sg buffers. dma-sg allocates memory using the page allocator directly, so there is no memory consistency guarantee.

Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
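These ops are not called directly by users; a dma-buf importer brackets its CPU access with dma_buf_begin_cpu_access()/dma_buf_end_cpu_access(), which dispatch to the exporter callbacks added below. A minimal kernel-side sketch of that pattern (read_imported_buffer() is a hypothetical importer function, not part of this patch):

	#include <linux/dma-buf.h>
	#include <linux/dma-direction.h>

	/* Hypothetical importer: bracket CPU reads of a dma-buf exported by
	 * vb2 dma-sg so the exporter can sync CPU caches on entry and exit. */
	static int read_imported_buffer(struct dma_buf *dbuf)
	{
		int ret;

		ret = dma_buf_begin_cpu_access(dbuf, DMA_FROM_DEVICE);
		if (ret)
			return ret;

		/* ... CPU reads of the buffer contents go here ... */

		return dma_buf_end_cpu_access(dbuf, DMA_FROM_DEVICE);
	}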
Diffstat (limited to 'drivers/media/common/videobuf2')
-rw-r--r--	drivers/media/common/videobuf2/videobuf2-dma-sg.c	30
1 file changed, 30 insertions(+), 0 deletions(-)
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
index 92072a08af25..595137e358e7 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
@@ -120,6 +120,12 @@ static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
 	buf->num_pages = size >> PAGE_SHIFT;
 	buf->dma_sgt = &buf->sg_table;
 
+	/*
+	 * NOTE: dma-sg allocates memory using the page allocator directly, so
+	 * there is no memory consistency guarantee, hence dma-sg ignores DMA
+	 * attributes passed from the upper layer. That means that
+	 * V4L2_FLAG_MEMORY_NON_CONSISTENT has no effect on dma-sg buffers.
+	 */
 	buf->pages = kvmalloc_array(buf->num_pages, sizeof(struct page *),
 				    GFP_KERNEL | __GFP_ZERO);
 	if (!buf->pages)
@@ -469,6 +475,28 @@ static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
 	vb2_dma_sg_put(dbuf->priv);
 }
 
+static int
+vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
+				       enum dma_data_direction direction)
+{
+	struct vb2_dma_sg_buf *buf = dbuf->priv;
+	struct sg_table *sgt = buf->dma_sgt;
+
+	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+	return 0;
+}
+
+static int
+vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
+				     enum dma_data_direction direction)
+{
+	struct vb2_dma_sg_buf *buf = dbuf->priv;
+	struct sg_table *sgt = buf->dma_sgt;
+
+	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+	return 0;
+}
+
 static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
 {
 	struct vb2_dma_sg_buf *buf = dbuf->priv;
@@ -487,6 +515,8 @@ static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
 	.detach = vb2_dma_sg_dmabuf_ops_detach,
 	.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
 	.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
+	.begin_cpu_access = vb2_dma_sg_dmabuf_ops_begin_cpu_access,
+	.end_cpu_access = vb2_dma_sg_dmabuf_ops_end_cpu_access,
 	.vmap = vb2_dma_sg_dmabuf_ops_vmap,
 	.mmap = vb2_dma_sg_dmabuf_ops_mmap,
 	.release = vb2_dma_sg_dmabuf_ops_release,
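From userspace, the same callbacks are reached through the DMA_BUF_IOCTL_SYNC ioctl on a buffer fd exported with VIDIOC_EXPBUF. A sketch of that bracket (error handling elided; buf_fd and the mmap()ed mapping are assumed to already exist):

	#include <sys/ioctl.h>
	#include <linux/dma-buf.h>

	/* Start of CPU access: invokes the exporter's begin_cpu_access(). */
	struct dma_buf_sync sync = { .flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW };
	ioctl(buf_fd, DMA_BUF_IOCTL_SYNC, &sync);

	/* ... CPU reads/writes through the mmap()ed mapping ... */

	/* End of CPU access: invokes the exporter's end_cpu_access(). */
	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
	ioctl(buf_fd, DMA_BUF_IOCTL_SYNC, &sync);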