author		Laura Abbott <labbott@redhat.com>	2017-04-18 11:27:06 -0700
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2017-04-18 20:43:14 +0200
commit		62b3a094cb9e4a3c5a5be4a20b72e0ced3af0e31 (patch)
tree		1def6fdf42e4debb28d2bdc06664771df1cd54cb /drivers/staging/android
parent		staging: android: ion: Use CMA APIs directly (diff)
staging: android: ion: Stop butchering the DMA address
Now that we have proper caching, stop setting the DMA address manually.
It should be set after properly calling dma_map.

Signed-off-by: Laura Abbott <labbott@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
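For context, the "properly calling dma_map" part means letting the DMA API fill in the scatterlist's DMA addresses when a specific device maps the buffer: dma_map_sg() assigns sg_dma_address()/sg_dma_len() for each entry and performs whatever cache maintenance that device needs. A minimal sketch (not code from this patch; example_map_buffer, dev and table are placeholder names):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical helper: map a buffer's sg_table for a specific device.
 * dma_map_sg() sets sg_dma_address()/sg_dma_len() on each entry, which
 * is exactly what the removed code used to fake with sg_phys(). */
static int example_map_buffer(struct device *dev, struct sg_table *table)
{
	if (!dma_map_sg(dev, table->sgl, table->nents, DMA_BIDIRECTIONAL))
		return -ENOMEM;

	return 0;
}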
Diffstat (limited to 'drivers/staging/android')
-rw-r--r--	drivers/staging/android/ion/ion.c	17
1 file changed, 1 insertion(+), 16 deletions(-)
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 3d979ef543f6..65638f509f6c 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -81,8 +81,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 {
 	struct ion_buffer *buffer;
 	struct sg_table *table;
-	struct scatterlist *sg;
-	int i, ret;
+	int ret;
 
 	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
 	if (!buffer)
@@ -119,20 +118,6 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 	INIT_LIST_HEAD(&buffer->vmas);
 	INIT_LIST_HEAD(&buffer->attachments);
 	mutex_init(&buffer->lock);
-	/*
-	 * this will set up dma addresses for the sglist -- it is not
-	 * technically correct as per the dma api -- a specific
-	 * device isn't really taking ownership here. However, in practice on
-	 * our systems the only dma_address space is physical addresses.
-	 * Additionally, we can't afford the overhead of invalidating every
-	 * allocation via dma_map_sg. The implicit contract here is that
-	 * memory coming from the heaps is ready for dma, ie if it has a
-	 * cached mapping that mapping has been invalidated
-	 */
-	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
-		sg_dma_address(sg) = sg_phys(sg);
-		sg_dma_len(sg) = sg->length;
-	}
 	mutex_lock(&dev->buffer_lock);
 	ion_buffer_add(dev, buffer);
 	mutex_unlock(&dev->buffer_lock);
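The comment removed in the hunk above justified the shortcut by the cost of invalidating every allocation through dma_map_sg(). After this series the mapping only has to happen when a device actually attaches to and maps the dma-buf, which is where the DMA address gets set instead. A rough sketch of such a map callback, with illustrative names and a simplified priv layout rather than the driver's actual implementation:

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Illustrative only: map the attachment's sg_table against the importing
 * device; the DMA API assigns the DMA addresses at this point. */
static struct sg_table *example_map_dma_buf(struct dma_buf_attachment *attachment,
					    enum dma_data_direction direction)
{
	struct sg_table *table = attachment->priv; /* assumed: priv holds the buffer's table */

	if (!dma_map_sg(attachment->dev, table->sgl, table->nents, direction))
		return ERR_PTR(-ENOMEM);

	return table;
}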