author     Todd Kjos <tkjos@android.com>  2019-02-08 10:35:20 -0800
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2019-02-12 10:43:57 +0100
commit     bde4a19fc04f5f46298c86b1acb7a4af1d5f138d (patch)
tree       1062d8eaaf1dbfc760c295387390de1f4022e64c /drivers/android/binder_alloc.c
parent     binder: remove user_buffer_offset (diff)
binder: use userspace pointer as base of buffer space
Now that alloc->buffer points to the userspace vm_area, rename buffer->data to buffer->user_data and rename local pointers that hold user addresses. Also use the "__user" tag to annotate all user pointers so sparse can flag cases where user pointer values are copied to kernel pointers. Refactor code to use offsets instead of user pointers.

Signed-off-by: Todd Kjos <tkjos@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
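For readers unfamiliar with the "__user" annotation the commit message refers to: below is a small standalone sketch, not part of the patch, of roughly how the kernel defines __user for sparse (see include/linux/compiler_types.h; the exact definition has varied over time) and the kind of mixing it catches. The function and variable names are made up for illustration.

/* Roughly the kernel's definition: under sparse (__CHECKER__), __user puts
 * a pointer into a distinct address space and forbids direct dereference.
 * Under a normal compile it expands to nothing. */
#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
#else
# define __user
#endif

/* Hypothetical example of what this patch lets sparse catch. */
void *example(unsigned long user_ptr)
{
	void __user *uptr = (void __user *)user_ptr; /* cast from integer: ok */
	void *kptr;

	kptr = uptr; /* sparse: incorrect type in assignment (different address spaces) */
	return kptr;
}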
Diffstat (limited to 'drivers/android/binder_alloc.c')
-rw-r--r--  drivers/android/binder_alloc.c | 87
1 file changed, 45 insertions(+), 42 deletions(-)
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 0e7f0aa967c3..000dd4d145ba 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -69,9 +69,8 @@ static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{
if (list_is_last(&buffer->entry, &alloc->buffers))
- return (u8 *)alloc->buffer +
- alloc->buffer_size - (u8 *)buffer->data;
- return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
+ return alloc->buffer + alloc->buffer_size - buffer->user_data;
+ return binder_buffer_next(buffer)->user_data - buffer->user_data;
}
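The computation above relies on buffers being carved contiguously out of a single mapping: a buffer's size is simply the distance to the next buffer's start, or to the end of the mapping for the last buffer. A toy userspace illustration with made-up addresses:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical layout: three buffers packed into one 0x8000-byte area. */
	uintptr_t area_start = 0x100000, area_size = 0x8000;
	uintptr_t bufs[] = { 0x100000, 0x100100, 0x101000 };
	size_t n = sizeof(bufs) / sizeof(bufs[0]);

	for (size_t i = 0; i < n; i++) {
		/* Size = next buffer's start (or end of area) minus own start. */
		uintptr_t end = (i + 1 < n) ? bufs[i + 1] : area_start + area_size;
		printf("buffer %zu: size %#lx\n", i, (unsigned long)(end - bufs[i]));
	}
	return 0;
}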
static void binder_insert_free_buffer(struct binder_alloc *alloc,
@@ -121,9 +120,9 @@ static void binder_insert_allocated_buffer_locked(
buffer = rb_entry(parent, struct binder_buffer, rb_node);
BUG_ON(buffer->free);
- if (new_buffer->data < buffer->data)
+ if (new_buffer->user_data < buffer->user_data)
p = &parent->rb_left;
- else if (new_buffer->data > buffer->data)
+ else if (new_buffer->user_data > buffer->user_data)
p = &parent->rb_right;
else
BUG();
@@ -138,17 +137,17 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
{
struct rb_node *n = alloc->allocated_buffers.rb_node;
struct binder_buffer *buffer;
- void *uptr;
+ void __user *uptr;
- uptr = (void *)user_ptr;
+ uptr = (void __user *)user_ptr;
while (n) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
BUG_ON(buffer->free);
- if (uptr < buffer->data)
+ if (uptr < buffer->user_data)
n = n->rb_left;
- else if (uptr > buffer->data)
+ else if (uptr > buffer->user_data)
n = n->rb_right;
else {
/*
@@ -188,9 +187,9 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
}
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
- void *start, void *end)
+ void __user *start, void __user *end)
{
- void *page_addr;
+ void __user *page_addr;
unsigned long user_page_addr;
struct binder_lru_page *page;
struct vm_area_struct *vma = NULL;
@@ -357,8 +356,8 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
struct binder_buffer *buffer;
size_t buffer_size;
struct rb_node *best_fit = NULL;
- void *has_page_addr;
- void *end_page_addr;
+ void __user *has_page_addr;
+ void __user *end_page_addr;
size_t size, data_offsets_size;
int ret;
@@ -456,15 +455,15 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
"%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
alloc->pid, size, buffer, buffer_size);
- has_page_addr =
- (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
+ has_page_addr = (void __user *)
+ (((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK);
WARN_ON(n && buffer_size != size);
end_page_addr =
- (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
+ (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
if (end_page_addr > has_page_addr)
end_page_addr = has_page_addr;
- ret = binder_update_page_range(alloc, 1,
- (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr);
+ ret = binder_update_page_range(alloc, 1, (void __user *)
+ PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr);
if (ret)
return ERR_PTR(ret);
@@ -477,7 +476,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
__func__, alloc->pid);
goto err_alloc_buf_struct_failed;
}
- new_buffer->data = (u8 *)buffer->data + size;
+ new_buffer->user_data = (u8 __user *)buffer->user_data + size;
list_add(&new_buffer->entry, &buffer->entry);
new_buffer->free = 1;
binder_insert_free_buffer(alloc, new_buffer);
@@ -503,8 +502,8 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
return buffer;
err_alloc_buf_struct_failed:
- binder_update_page_range(alloc, 0,
- (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+ binder_update_page_range(alloc, 0, (void __user *)
+ PAGE_ALIGN((uintptr_t)buffer->user_data),
end_page_addr);
return ERR_PTR(-ENOMEM);
}
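Two different roundings are at work in the hunk above, and they are easy to confuse: PAGE_ALIGN() rounds an address up to the next page boundary, while masking with PAGE_MASK rounds it down. Clamping end_page_addr to has_page_addr keeps the allocator from mapping a tail page that still belongs to the remainder of the free buffer. A standalone sketch, with 4 KiB-page stand-ins for the kernel macros and hypothetical values:

#include <stdio.h>
#include <stdint.h>

/* Stand-ins for the kernel macros, assuming 4 KiB pages. */
#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	uintptr_t buf = 0x10234, size = 0x200; /* hypothetical values */

	printf("round up:   PAGE_ALIGN(buf + size)   = %#lx\n",
	       (unsigned long)PAGE_ALIGN(buf + size));
	printf("round down: (buf + size) & PAGE_MASK = %#lx\n",
	       (unsigned long)((buf + size) & PAGE_MASK));
	return 0;
}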
@@ -539,14 +538,15 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
return buffer;
}
-static void *buffer_start_page(struct binder_buffer *buffer)
+static void __user *buffer_start_page(struct binder_buffer *buffer)
{
- return (void *)((uintptr_t)buffer->data & PAGE_MASK);
+ return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK);
}
-static void *prev_buffer_end_page(struct binder_buffer *buffer)
+static void __user *prev_buffer_end_page(struct binder_buffer *buffer)
{
- return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
+ return (void __user *)
+ (((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK);
}
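The "- 1" in prev_buffer_end_page() is subtle: it names the page holding the byte just before the buffer, so a page-aligned buffer reports the previous page rather than its own first page. binder_delete_free_buffer() below uses this to decide whether the buffer's first page is shared with its neighbor. A toy demonstration (made-up addresses, 4 KiB pages):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

static void show(const char *label, uintptr_t addr)
{
	printf("%s: start page %#lx, prev-end page %#lx\n", label,
	       (unsigned long)(addr & PAGE_MASK),
	       (unsigned long)((addr - 1) & PAGE_MASK));
}

int main(void)
{
	show("page-aligned buffer", 0x12000); /* prev-end is the page before */
	show("mid-page buffer    ", 0x12100); /* prev-end == own start page: shared */
	return 0;
}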
static void binder_delete_free_buffer(struct binder_alloc *alloc,
@@ -561,7 +561,8 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
to_free = false;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: merge free, buffer %pK share page with %pK\n",
- alloc->pid, buffer->data, prev->data);
+ alloc->pid, buffer->user_data,
+ prev->user_data);
}
if (!list_is_last(&buffer->entry, &alloc->buffers)) {
@@ -571,23 +572,24 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: merge free, buffer %pK share page with %pK\n",
alloc->pid,
- buffer->data,
- next->data);
+ buffer->user_data,
+ next->user_data);
}
}
- if (PAGE_ALIGNED(buffer->data)) {
+ if (PAGE_ALIGNED(buffer->user_data)) {
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: merge free, buffer start %pK is page aligned\n",
- alloc->pid, buffer->data);
+ alloc->pid, buffer->user_data);
to_free = false;
}
if (to_free) {
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: merge free, buffer %pK do not share page with %pK or %pK\n",
- alloc->pid, buffer->data,
- prev->data, next ? next->data : NULL);
+ alloc->pid, buffer->user_data,
+ prev->user_data,
+ next ? next->user_data : NULL);
binder_update_page_range(alloc, 0, buffer_start_page(buffer),
buffer_start_page(buffer) + PAGE_SIZE);
}
@@ -613,8 +615,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
BUG_ON(buffer->free);
BUG_ON(size > buffer_size);
BUG_ON(buffer->transaction != NULL);
- BUG_ON(buffer->data < alloc->buffer);
- BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);
+ BUG_ON(buffer->user_data < alloc->buffer);
+ BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
if (buffer->async_transaction) {
alloc->free_async_space += size + sizeof(struct binder_buffer);
@@ -625,8 +627,9 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
}
binder_update_page_range(alloc, 0,
- (void *)PAGE_ALIGN((uintptr_t)buffer->data),
- (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK));
+ (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data),
+ (void __user *)(((uintptr_t)
+ buffer->user_data + buffer_size) & PAGE_MASK));
rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
buffer->free = 1;
@@ -692,7 +695,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
goto err_already_mapped;
}
- alloc->buffer = (void *)vma->vm_start;
+ alloc->buffer = (void __user *)vma->vm_start;
mutex_unlock(&binder_alloc_mmap_lock);
alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE,
@@ -712,7 +715,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
goto err_alloc_buf_struct_failed;
}
- buffer->data = alloc->buffer;
+ buffer->user_data = alloc->buffer;
list_add(&buffer->entry, &alloc->buffers);
buffer->free = 1;
binder_insert_free_buffer(alloc, buffer);
@@ -773,7 +776,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
int i;
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
- void *page_addr;
+ void __user *page_addr;
bool on_lru;
if (!alloc->pages[i].page_ptr)
@@ -804,7 +807,7 @@ static void print_binder_buffer(struct seq_file *m, const char *prefix,
struct binder_buffer *buffer)
{
seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
- prefix, buffer->debug_id, buffer->data,
+ prefix, buffer->debug_id, buffer->user_data,
buffer->data_size, buffer->offsets_size,
buffer->extra_buffers_size,
buffer->transaction ? "active" : "delivered");
@@ -1056,7 +1059,7 @@ static inline bool check_buffer(struct binder_alloc *alloc,
* @pgoffp: address to copy final page offset to
*
* Lookup the struct page corresponding to the address
- * at @buffer_offset into @buffer->data. If @pgoffp is not
+ * at @buffer_offset into @buffer->user_data. If @pgoffp is not
* NULL, the byte-offset into the page is written there.
*
* The caller is responsible to ensure that the offset points
@@ -1073,7 +1076,7 @@ static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
pgoff_t *pgoffp)
{
binder_size_t buffer_space_offset = buffer_offset +
- (buffer->data - alloc->buffer);
+ (buffer->user_data - alloc->buffer);
pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
size_t index = buffer_space_offset >> PAGE_SHIFT;
struct binder_lru_page *lru_page;
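binder_alloc_get_page() above splits an offset into the mapped buffer space into a page index (selecting the alloc->pages[] entry) and a byte offset within that page; note that buffer_space_offset is computed by subtracting two user addresses, so only offsets, never raw user pointers, are handled afterwards. A standalone sketch of the arithmetic, with a made-up offset and 4 KiB pages standing in for the kernel constants:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL
#define PAGE_SHIFT 12
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	uintptr_t buffer_space_offset = 0x2345; /* hypothetical */
	size_t index = buffer_space_offset >> PAGE_SHIFT; /* which page entry */
	size_t pgoff = buffer_space_offset & ~PAGE_MASK;  /* where in the page */

	printf("offset %#lx -> pages[%zu], in-page offset %#zx\n",
	       (unsigned long)buffer_space_offset, index, pgoff);
	return 0;
}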