Diffstat (limited to 'drivers/staging/android/ion/ion.c')
 drivers/staging/android/ion/ion.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 56604f41ec48..296d347660fc 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -250,7 +250,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
our systems the only dma_address space is physical addresses.
Additionally, we can't afford the overhead of invalidating every
allocation via dma_map_sg. The implicit contract here is that
- memory comming from the heaps is ready for dma, ie if it has a
+ memory coming from the heaps is ready for dma, ie if it has a
cached mapping that mapping has been invalidated */
for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
sg_dma_address(sg) = sg_phys(sg);
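
The first hunk only corrects a spelling error ("comming") in the comment; the code is untouched. For contrast, a minimal sketch of the DMA-API path that the comment says ion cannot afford might look like the following. Note "dev" is a hypothetical struct device pointer; ion has no suitable device at this point, which is part of what the comment describes:

	#include <linux/dma-mapping.h>

	/*
	 * Sketch only: dma_map_sg() would hand back addresses that are
	 * actually valid for "dev" and perform the cache maintenance,
	 * at the per-allocation cost the comment says is too high here.
	 * It returns the number of mapped entries, 0 on failure.
	 */
	if (!dma_map_sg(dev, buffer->sg_table->sgl,
			buffer->sg_table->nents, DMA_BIDIRECTIONAL))
		return ERR_PTR(-ENOMEM);
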
@@ -263,8 +263,7 @@ err:
heap->ops->unmap_dma(heap, buffer);
heap->ops->free(buffer);
err1:
- if (buffer->pages)
- vfree(buffer->pages);
+ vfree(buffer->pages);
err2:
kfree(buffer);
return ERR_PTR(ret);
@@ -276,8 +275,7 @@ void ion_buffer_destroy(struct ion_buffer *buffer)
buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
buffer->heap->ops->unmap_dma(buffer->heap, buffer);
buffer->heap->ops->free(buffer);
- if (buffer->pages)
- vfree(buffer->pages);
+ vfree(buffer->pages);
kfree(buffer);
}
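
These two hunks make the same cleanup in ion_buffer_create()'s error path and in ion_buffer_destroy(): vfree(), like kfree(), is defined to do nothing when passed NULL (see mm/vmalloc.c), so the guard duplicated a check the callee already makes. An illustrative sketch with a hypothetical helper name:

	#include <linux/slab.h>
	#include <linux/vmalloc.h>

	/* Illustrative only: both frees are safe without NULL checks. */
	static void example_release(struct page **pages, void *priv)
	{
		vfree(pages);	/* no-op when pages == NULL */
		kfree(priv);	/* kfree() behaves the same way */
	}
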
@@ -902,7 +900,7 @@ void ion_pages_sync_for_device(struct device *dev, struct page *page,
sg_set_page(&sg, page, size, 0);
/*
* This is not correct - sg_dma_address needs a dma_addr_t that is valid
- * for the the targeted device, but this works on the currently targeted
+ * for the targeted device, but this works on the currently targeted
* hardware.
*/
sg_dma_address(&sg) = page_to_phys(page);
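
The last hunk again only fixes the comment (a doubled "the"). The comment flags a real limitation: sg_dma_address() is assigned a physical address, which only happens to be a valid dma_addr_t on the hardware ion currently targets. A sketch of the portable alternative, assuming a usable struct device and eliding error unwinding (dev, page, size, and dir are the parameters of ion_pages_sync_for_device() shown in the hunk header):

	#include <linux/dma-mapping.h>

	/*
	 * Sketch: dma_map_page() returns a dma_addr_t that is valid for
	 * "dev" instead of assuming DMA addresses equal physical ones.
	 */
	dma_addr_t addr = dma_map_page(dev, page, 0, size, dir);

	if (dma_mapping_error(dev, addr))
		return;
	sg_dma_address(&sg) = addr;
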