Diffstat (limited to 'drivers/staging/android/ion/ion.c')
-rw-r--r--  drivers/staging/android/ion/ion.c | 49
1 file changed, 27 insertions(+), 22 deletions(-)
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 6e8d8392ca38..e237e9f3312d 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1,5 +1,5 @@
/*
-
+ *
* drivers/staging/android/ion/ion.c
*
* Copyright (C) 2011 Google, Inc.
@@ -213,10 +213,10 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
"heap->ops->map_dma should return ERR_PTR on error"))
table = ERR_PTR(-EINVAL);
if (IS_ERR(table)) {
- heap->ops->free(buffer);
- kfree(buffer);
- return ERR_CAST(table);
+ ret = -EINVAL;
+ goto err1;
}
+
buffer->sg_table = table;
if (ion_buffer_fault_user_mappings(buffer)) {
int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
@@ -226,7 +226,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
if (!buffer->pages) {
ret = -ENOMEM;
- goto err1;
+ goto err;
}
for_each_sg(table->sgl, sg, table->nents, i) {
@@ -235,23 +235,22 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
for (j = 0; j < sg->length / PAGE_SIZE; j++)
buffer->pages[k++] = page++;
}
-
- if (ret)
- goto err;
}
buffer->dev = dev;
buffer->size = len;
INIT_LIST_HEAD(&buffer->vmas);
mutex_init(&buffer->lock);
- /* this will set up dma addresses for the sglist -- it is not
- technically correct as per the dma api -- a specific
- device isn't really taking ownership here. However, in practice on
- our systems the only dma_address space is physical addresses.
- Additionally, we can't afford the overhead of invalidating every
- allocation via dma_map_sg. The implicit contract here is that
- memory coming from the heaps is ready for dma, ie if it has a
- cached mapping that mapping has been invalidated */
+ /*
+ * this will set up dma addresses for the sglist -- it is not
+ * technically correct as per the dma api -- a specific
+ * device isn't really taking ownership here. However, in practice on
+ * our systems the only dma_address space is physical addresses.
+ * Additionally, we can't afford the overhead of invalidating every
+ * allocation via dma_map_sg. The implicit contract here is that
+ * memory coming from the heaps is ready for dma, ie if it has a
+ * cached mapping that mapping has been invalidated
+ */
for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
sg_dma_address(sg) = sg_phys(sg);
mutex_lock(&dev->buffer_lock);
@@ -261,9 +260,8 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
err:
heap->ops->unmap_dma(heap, buffer);
- heap->ops->free(buffer);
err1:
- vfree(buffer->pages);
+ heap->ops->free(buffer);
err2:
kfree(buffer);
return ERR_PTR(ret);
@@ -753,8 +751,10 @@ struct ion_client *ion_client_create(struct ion_device *dev,
get_task_struct(current->group_leader);
task_lock(current->group_leader);
pid = task_pid_nr(current->group_leader);
- /* don't bother to store task struct for kernel threads,
- they can't be killed anyway */
+ /*
+ * don't bother to store task struct for kernel threads,
+ * they can't be killed anyway
+ */
if (current->group_leader->flags & PF_KTHREAD) {
put_task_struct(current->group_leader);
task = NULL;
@@ -1521,8 +1521,10 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
heap->dev = dev;
down_write(&dev->lock);
- /* use negative heap->id to reverse the priority -- when traversing
- the list later attempt higher id numbers first */
+ /*
+ * use negative heap->id to reverse the priority -- when traversing
+ * the list later attempt higher id numbers first
+ */
plist_node_init(&heap->node, -heap->id);
plist_add(&heap->node, &dev->heaps);
debug_file = debugfs_create_file(heap->name, 0664,
@@ -1555,6 +1557,7 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
up_write(&dev->lock);
}
+EXPORT_SYMBOL(ion_device_add_heap);
struct ion_device *ion_device_create(long (*custom_ioctl)
(struct ion_client *client,
@@ -1604,6 +1607,7 @@ debugfs_done:
idev->clients = RB_ROOT;
return idev;
}
+EXPORT_SYMBOL(ion_device_create);
void ion_device_destroy(struct ion_device *dev)
{
@@ -1612,6 +1616,7 @@ void ion_device_destroy(struct ion_device *dev)
/* XXX need to free the heaps and clients ? */
kfree(dev);
}
+EXPORT_SYMBOL(ion_device_destroy);
void __init ion_reserve(struct ion_platform_data *data)
{