Diffstat (limited to 'drivers/android/binder_alloc.c')
-rw-r--r--  drivers/android/binder_alloc.c  232
1 file changed, 159 insertions(+), 73 deletions(-)
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 2d8b9b91dee0..1c39cfce32fa 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -208,12 +208,12 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
}
}
- if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
- mm = alloc->vma_vm_mm;
+ if (need_mm && mmget_not_zero(alloc->mm))
+ mm = alloc->mm;
if (mm) {
- down_read(&mm->mmap_sem);
- vma = alloc->vma;
+ mmap_read_lock(mm);
+ vma = vma_lookup(mm, alloc->vma_addr);
}
if (!vma && need_mm) {
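
This hunk replaces the cached alloc->vma pointer with an on-demand lookup: the mm is reached through alloc->mm, the old mmap_sem calls become the mmap_read_lock() API, and the VMA is re-found from alloc->vma_addr each time. For reference, vma_lookup() behaves roughly like the sketch below (a simplified rendering of the include/linux/mm.h helper, not part of this patch): unlike find_vma(), it returns the VMA only if it actually contains the address.

static inline struct vm_area_struct *vma_lookup(struct mm_struct *mm,
                                                unsigned long addr)
{
        struct vm_area_struct *vma = find_vma(mm, addr);

        /* find_vma() returns the first VMA with vm_end > addr; reject it
         * if the address falls in the gap below that VMA. */
        if (vma && addr < vma->vm_start)
                return NULL;

        return vma;
}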
@@ -270,7 +270,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
trace_binder_alloc_page_end(alloc, index);
}
if (mm) {
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
}
return 0;
@@ -303,39 +303,64 @@ err_page_ptr_cleared:
}
err_no_vma:
if (mm) {
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
}
return vma ? -ENOMEM : -ESRCH;
}
+static inline struct vm_area_struct *binder_alloc_get_vma(
+ struct binder_alloc *alloc)
+{
+ struct vm_area_struct *vma = NULL;
+
+ if (alloc->vma_addr)
+ vma = vma_lookup(alloc->mm, alloc->vma_addr);
+
+ return vma;
+}
-static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
- struct vm_area_struct *vma)
+static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
{
- if (vma)
- alloc->vma_vm_mm = vma->vm_mm;
/*
- * If we see alloc->vma is not NULL, buffer data structures set up
- * completely. Look at smp_rmb side binder_alloc_get_vma.
- * We also want to guarantee new alloc->vma_vm_mm is always visible
- * if alloc->vma is set.
+ * Find the amount and size of buffers allocated by the current caller;
+ * The idea is that once we cross the threshold, whoever is responsible
+ * for the low async space is likely to try to send another async txn,
+ * and at some point we'll catch them in the act. This is more efficient
+ * than keeping a map per pid.
*/
- smp_wmb();
- alloc->vma = vma;
-}
+ struct rb_node *n;
+ struct binder_buffer *buffer;
+ size_t total_alloc_size = 0;
+ size_t num_buffers = 0;
-static inline struct vm_area_struct *binder_alloc_get_vma(
- struct binder_alloc *alloc)
-{
- struct vm_area_struct *vma = NULL;
+ for (n = rb_first(&alloc->allocated_buffers); n != NULL;
+ n = rb_next(n)) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ if (buffer->pid != pid)
+ continue;
+ if (!buffer->async_transaction)
+ continue;
+ total_alloc_size += binder_alloc_buffer_size(alloc, buffer)
+ + sizeof(struct binder_buffer);
+ num_buffers++;
+ }
- if (alloc->vma) {
- /* Look at description in binder_alloc_set_vma */
- smp_rmb();
- vma = alloc->vma;
+ /*
+ * Warn if this pid has more than 50 transactions, or more than 50% of
+ * async space (which is 25% of total buffer size). Oneway spam is only
+ * detected when the threshold is exceeded.
+ */
+ if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
+ binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
+ "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n",
+ alloc->pid, pid, num_buffers, total_alloc_size);
+ if (!alloc->oneway_spam_detected) {
+ alloc->oneway_spam_detected = true;
+ return true;
+ }
}
- return vma;
+ return false;
}
static struct binder_buffer *binder_alloc_new_buf_locked(
@@ -343,7 +368,8 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
size_t data_size,
size_t offsets_size,
size_t extra_buffers_size,
- int is_async)
+ int is_async,
+ int pid)
{
struct rb_node *n = alloc->free_buffers.rb_node;
struct binder_buffer *buffer;
@@ -354,12 +380,15 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
size_t size, data_offsets_size;
int ret;
+ mmap_read_lock(alloc->mm);
if (!binder_alloc_get_vma(alloc)) {
+ mmap_read_unlock(alloc->mm);
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
"%d: binder_alloc_buf, no vma\n",
alloc->pid);
return ERR_PTR(-ESRCH);
}
+ mmap_read_unlock(alloc->mm);
data_offsets_size = ALIGN(data_size, sizeof(void *)) +
ALIGN(offsets_size, sizeof(void *));
@@ -486,11 +515,23 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
buffer->offsets_size = offsets_size;
buffer->async_transaction = is_async;
buffer->extra_buffers_size = extra_buffers_size;
+ buffer->pid = pid;
+ buffer->oneway_spam_suspect = false;
if (is_async) {
alloc->free_async_space -= size + sizeof(struct binder_buffer);
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
"%d: binder_alloc_buf size %zd async free %zd\n",
alloc->pid, size, alloc->free_async_space);
+ if (alloc->free_async_space < alloc->buffer_size / 10) {
+ /*
+ * Start detecting spammers once we have less than 20%
+ * of async space left (which is less than 10% of total
+ * buffer size).
+ */
+ buffer->oneway_spam_suspect = debug_low_async_space_locked(alloc, pid);
+ } else {
+ alloc->oneway_spam_detected = false;
+ }
}
return buffer;
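
Putting the two thresholds together, here is the arithmetic for an illustrative 1 MiB mapping (the real size is whatever userspace mmaps; 1 MiB is only an assumption for the example):

/* Worked example, assuming alloc->buffer_size == 1 MiB (illustration only): */
size_t buffer_size  = 1 << 20;
size_t async_space  = buffer_size / 2;  /* free_async_space right after mmap: 512 KiB   */
size_t detect_below = buffer_size / 10; /* start checking senders below ~102 KiB free   */
size_t blame_above  = buffer_size / 4;  /* flag a pid holding more than 256 KiB of async
                                         * buffers, or more than 50 outstanding buffers */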
@@ -508,6 +549,7 @@ err_alloc_buf_struct_failed:
* @offsets_size: user specified buffer offset
* @extra_buffers_size: size of extra space for meta-data (eg, security context)
* @is_async: buffer for async transaction
+ * @pid: pid to attribute allocation to (used for debugging)
*
* Allocate a new buffer given the requested sizes. Returns
* the kernel version of the buffer pointer. The size allocated
@@ -520,13 +562,14 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
size_t extra_buffers_size,
- int is_async)
+ int is_async,
+ int pid)
{
struct binder_buffer *buffer;
mutex_lock(&alloc->mutex);
buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
- extra_buffers_size, is_async);
+ extra_buffers_size, is_async, pid);
mutex_unlock(&alloc->mutex);
return buffer;
}
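
With the extra parameter, a call site now attributes the allocation to the sending task. A minimal sketch of such a caller (the real one lives in binder.c and is not part of this diff; passing current->tgid is an assumption here):

struct binder_buffer *buf;

buf = binder_alloc_new_buf(alloc, data_size, offsets_size,
                           extra_buffers_size, is_oneway,
                           current->tgid);
if (IS_ERR(buf))
        return PTR_ERR(buf);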
@@ -547,6 +590,7 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
{
struct binder_buffer *prev, *next = NULL;
bool to_free = true;
+
BUG_ON(alloc->buffers.next == &buffer->entry);
prev = binder_buffer_prev(buffer);
BUG_ON(!prev->free);
@@ -612,7 +656,7 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
if (buffer->async_transaction) {
- alloc->free_async_space += size + sizeof(struct binder_buffer);
+ alloc->free_async_space += buffer_size + sizeof(struct binder_buffer);
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
"%d: binder_free_buf size %zd async free %zd\n",
@@ -646,16 +690,30 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
binder_insert_free_buffer(alloc, buffer);
}
+static void binder_alloc_clear_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffer);
/**
* binder_alloc_free_buf() - free a binder buffer
* @alloc: binder_alloc for this proc
* @buffer: kernel pointer to buffer
*
- * Free the buffer allocated via binder_alloc_new_buffer()
+ * Free the buffer allocated via binder_alloc_new_buf()
*/
void binder_alloc_free_buf(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{
+ /*
+ * We could eliminate the call to binder_alloc_clear_buf()
+ * from binder_alloc_deferred_release() by moving this to
+ * binder_alloc_free_buf_locked(). However, that could
+ * increase contention for the alloc mutex if clear_on_free
+ * is used frequently for large buffers. The mutex is not
+ * needed for correctness here.
+ */
+ if (buffer->clear_on_free) {
+ binder_alloc_clear_buf(alloc, buffer);
+ buffer->clear_on_free = false;
+ }
mutex_lock(&alloc->mutex);
binder_free_buf_locked(alloc, buffer);
mutex_unlock(&alloc->mutex);
@@ -714,8 +772,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
buffer->free = 1;
binder_insert_free_buffer(alloc, buffer);
alloc->free_async_space = alloc->buffer_size / 2;
- binder_alloc_set_vma(alloc, vma);
- mmgrab(alloc->vma_vm_mm);
+ alloc->vma_addr = vma->vm_start;
return 0;
@@ -744,7 +801,8 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
buffers = 0;
mutex_lock(&alloc->mutex);
- BUG_ON(alloc->vma);
+ BUG_ON(alloc->vma_addr &&
+ vma_lookup(alloc->mm, alloc->vma_addr));
while ((n = rb_first(&alloc->allocated_buffers))) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -752,6 +810,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
/* Transaction should already have been freed */
BUG_ON(buffer->transaction);
+ if (buffer->clear_on_free) {
+ binder_alloc_clear_buf(alloc, buffer);
+ buffer->clear_on_free = false;
+ }
binder_free_buf_locked(alloc, buffer);
buffers++;
}
@@ -790,8 +852,8 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
kfree(alloc->pages);
}
mutex_unlock(&alloc->mutex);
- if (alloc->vma_vm_mm)
- mmdrop(alloc->vma_vm_mm);
+ if (alloc->mm)
+ mmdrop(alloc->mm);
binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
"%s: %d buffers %d, pages %d\n",
@@ -847,17 +909,25 @@ void binder_alloc_print_pages(struct seq_file *m,
* Make sure the binder_alloc is fully initialized, otherwise we might
* read inconsistent state.
*/
- if (binder_alloc_get_vma(alloc) != NULL) {
- for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
- page = &alloc->pages[i];
- if (!page->page_ptr)
- free++;
- else if (list_empty(&page->lru))
- active++;
- else
- lru++;
- }
+
+ mmap_read_lock(alloc->mm);
+ if (binder_alloc_get_vma(alloc) == NULL) {
+ mmap_read_unlock(alloc->mm);
+ goto uninitialized;
+ }
+
+ mmap_read_unlock(alloc->mm);
+ for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+ page = &alloc->pages[i];
+ if (!page->page_ptr)
+ free++;
+ else if (list_empty(&page->lru))
+ active++;
+ else
+ lru++;
}
+
+uninitialized:
mutex_unlock(&alloc->mutex);
seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
@@ -892,7 +962,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
*/
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
- binder_alloc_set_vma(alloc, NULL);
+ alloc->vma_addr = 0;
}
/**
@@ -929,11 +999,11 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
index = page - alloc->pages;
page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
- mm = alloc->vma_vm_mm;
+ mm = alloc->mm;
if (!mmget_not_zero(mm))
goto err_mmget;
- if (!down_read_trylock(&mm->mmap_sem))
- goto err_down_read_mmap_sem_failed;
+ if (!mmap_read_trylock(mm))
+ goto err_mmap_read_lock_failed;
vma = binder_alloc_get_vma(alloc);
list_lru_isolate(lru, item);
@@ -946,8 +1016,8 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
trace_binder_unmap_user_end(alloc, index);
}
- up_read(&mm->mmap_sem);
- mmput(mm);
+ mmap_read_unlock(mm);
+ mmput_async(mm);
trace_binder_unmap_kernel_start(alloc, index);
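
Note the switch from mmput() to mmput_async() in this path: binder_alloc_free_page() runs from the shrinker in reclaim context, so if it happens to drop the last mm_users reference, the heavy teardown is pushed to a workqueue rather than done inline. mmput_async() is roughly the following (sketch of the kernel/fork.c helper, shown for context):

void mmput_async(struct mm_struct *mm)
{
        if (atomic_dec_and_test(&mm->mm_users)) {
                /* last user: defer __mmput() to a workqueue instead of
                 * running the full teardown from reclaim context */
                INIT_WORK(&mm->async_put_work, mmput_async_fn);
                schedule_work(&mm->async_put_work);
        }
}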
@@ -960,7 +1030,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
mutex_unlock(&alloc->mutex);
return LRU_REMOVED_RETRY;
-err_down_read_mmap_sem_failed:
+err_mmap_read_lock_failed:
mmput_async(mm);
err_mmget:
err_page_already_freed:
@@ -972,18 +1042,14 @@ err_get_alloc_mutex_failed:
static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
- unsigned long ret = list_lru_count(&binder_alloc_lru);
- return ret;
+ return list_lru_count(&binder_alloc_lru);
}
static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
- unsigned long ret;
-
- ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+ return list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
NULL, sc->nr_to_scan);
- return ret;
}
static struct shrinker binder_shrinker = {
@@ -1002,6 +1068,8 @@ static struct shrinker binder_shrinker = {
void binder_alloc_init(struct binder_alloc *alloc)
{
alloc->pid = current->group_leader->pid;
+ alloc->mm = current->mm;
+ mmgrab(alloc->mm);
mutex_init(&alloc->mutex);
INIT_LIST_HEAD(&alloc->buffers);
}
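
binder_alloc_init() now takes a long-lived mmgrab() reference on the owning mm, balanced by the mmdrop() in binder_alloc_deferred_release(); short-lived mmget_not_zero()/mmput() pairs are still taken whenever pages are actually touched. Condensed from the hunks above, the reference pattern looks like:

mmgrab(alloc->mm);                      /* binder_alloc_init(): pin the mm_struct itself    */

if (mmget_not_zero(alloc->mm)) {        /* page work: only while the address space is alive */
        mmap_read_lock(alloc->mm);
        /* ... vma_lookup(alloc->mm, alloc->vma_addr), map or free pages ... */
        mmap_read_unlock(alloc->mm);
        mmput(alloc->mm);
}

mmdrop(alloc->mm);                      /* binder_alloc_deferred_release(): drop the pin    */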
@@ -1011,7 +1079,7 @@ int binder_alloc_shrinker_init(void)
int ret = list_lru_init(&binder_alloc_lru);
if (ret == 0) {
- ret = register_shrinker(&binder_shrinker);
+ ret = register_shrinker(&binder_shrinker, "android-binder");
if (ret)
list_lru_destroy(&binder_alloc_lru);
}
@@ -1086,6 +1154,33 @@ static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
}
/**
+ * binder_alloc_clear_buf() - zero out buffer
+ * @alloc: binder_alloc for this proc
+ * @buffer: binder buffer to be cleared
+ *
+ * memset the given buffer to 0
+ */
+static void binder_alloc_clear_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
+{
+ size_t bytes = binder_alloc_buffer_size(alloc, buffer);
+ binder_size_t buffer_offset = 0;
+
+ while (bytes) {
+ unsigned long size;
+ struct page *page;
+ pgoff_t pgoff;
+
+ page = binder_alloc_get_page(alloc, buffer,
+ buffer_offset, &pgoff);
+ size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
+ memset_page(page, pgoff, 0, size);
+ bytes -= size;
+ buffer_offset += size;
+ }
+}
+
+/**
* binder_alloc_copy_user_to_buffer() - copy src user to tgt user
* @alloc: binder_alloc for this proc
* @buffer: binder buffer to be accessed
@@ -1117,9 +1212,9 @@ binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
page = binder_alloc_get_page(alloc, buffer,
buffer_offset, &pgoff);
size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
- kptr = kmap(page) + pgoff;
+ kptr = kmap_local_page(page) + pgoff;
ret = copy_from_user(kptr, from, size);
- kunmap(page);
+ kunmap_local(kptr);
if (ret)
return bytes - size + ret;
bytes -= size;
@@ -1144,23 +1239,14 @@ static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
unsigned long size;
struct page *page;
pgoff_t pgoff;
- void *tmpptr;
- void *base_ptr;
page = binder_alloc_get_page(alloc, buffer,
buffer_offset, &pgoff);
size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
- base_ptr = kmap_atomic(page);
- tmpptr = base_ptr + pgoff;
if (to_buffer)
- memcpy(tmpptr, ptr, size);
+ memcpy_to_page(page, pgoff, ptr, size);
else
- memcpy(ptr, tmpptr, size);
- /*
- * kunmap_atomic() takes care of flushing the cache
- * if this device has VIVT cache arch
- */
- kunmap_atomic(base_ptr);
+ memcpy_from_page(ptr, page, pgoff, size);
bytes -= size;
pgoff = 0;
ptr = ptr + size;
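
The final hunks (and memset_page() in binder_alloc_clear_buf() above) lean on the highmem helpers in place of open-coded kmap()/kmap_atomic() sequences; the cache flushing that the deleted VIVT comment mentioned now happens inside those helpers. Roughly, simplified from include/linux/highmem.h:

static inline void memcpy_to_page(struct page *page, size_t offset,
                                  const char *from, size_t len)
{
        char *to = kmap_local_page(page);

        memcpy(to + offset, from, len);
        flush_dcache_page(page);        /* covers VIVT cache maintenance */
        kunmap_local(to);
}

static inline void memcpy_from_page(char *to, struct page *page,
                                    size_t offset, size_t len)
{
        char *from = kmap_local_page(page);

        memcpy(to, from + offset, len);
        kunmap_local(from);
}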