author	Sherry Yang <sherryy@android.com>	2017-08-23 08:46:42 -0700
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2017-08-28 16:47:17 +0200
commit	f2517eb76f1f2f7f89761f9db2b202e89931738c (patch)
tree	d15ad6ea85358e44e7ff92b7188dfa21e36560d9 /drivers/android/binder_alloc_selftest.c
parent	android: binder: Move buffer out of area shared with user space (diff)
android: binder: Add global lru shrinker to binder
Hold on to the pages allocated and mapped for transaction buffers until the system is under memory pressure. When that happens, use the Linux shrinker to free pages. Without the shrinker, the patch "android: binder: Move buffer out of area shared with user space" would cause a significant slowdown for small transactions that fit into the first page, because the free list buffer header used to be inlined with the buffer data.

In addition to preventing that performance regression for small transactions, this patch improves the performance of transactions that take up more than one page.

Modify the alloc selftest to work with the shrinker change.

Test: Run memory intensive applications (Chrome and Camera) to trigger shrinker callbacks. Binder frees memory as expected.
Test: Run binderThroughputTest with the high memory pressure option enabled.

Signed-off-by: Sherry Yang <sherryy@android.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
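For context, the shrinker mechanism the message refers to follows the standard list_lru + register_shrinker pattern of kernels from this era. Below is a minimal sketch of that pattern, not code from this patch: the names example_lru, example_free_page, example_shrink_count, example_shrink_scan, and example_shrinker are hypothetical, and the actual page unmap/free step is elided. The real binder symbols (binder_alloc_lru, binder_alloc_free_page) appear in the selftest diff further down.

#include <linux/list_lru.h>
#include <linux/shrinker.h>

static struct list_lru example_lru;

/*
 * list_lru_walk() callback, invoked with the lru lock held.
 * Take the item off the lru; the unmap/__free_page() of the
 * page backing @item is elided in this sketch.
 */
static enum lru_status example_free_page(struct list_head *item,
					 struct list_lru_one *lru,
					 spinlock_t *lock, void *cb_arg)
{
	list_lru_isolate(lru, item);
	/* ... unmap and free the page backing @item ... */
	return LRU_REMOVED;
}

/* Tell the VM how many cached pages are reclaimable. */
static unsigned long example_shrink_count(struct shrinker *shrink,
					  struct shrink_control *sc)
{
	return list_lru_count(&example_lru);
}

/* Reclaim up to sc->nr_to_scan cached pages. */
static unsigned long example_shrink_scan(struct shrinker *shrink,
					 struct shrink_control *sc)
{
	return list_lru_walk(&example_lru, example_free_page,
			     NULL, sc->nr_to_scan);
}

static struct shrinker example_shrinker = {
	.count_objects = example_shrink_count,
	.scan_objects = example_shrink_scan,
	.seeks = DEFAULT_SEEKS,
};

static int __init example_init(void)
{
	int ret = list_lru_init(&example_lru);

	return ret ? ret : register_shrinker(&example_shrinker);
}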
Diffstat (limited to 'drivers/android/binder_alloc_selftest.c')
-rw-r--r--	drivers/android/binder_alloc_selftest.c | 68 ++++++++++++++++++++++++++++++++++++++++++++++++++++++--------------
1 file changed, 54 insertions(+), 14 deletions(-)
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
index 0bf72079a9da..8bd7bcef967d 100644
--- a/drivers/android/binder_alloc_selftest.c
+++ b/drivers/android/binder_alloc_selftest.c
@@ -109,9 +109,11 @@ static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
page_addr = buffer->data;
for (; page_addr < end; page_addr += PAGE_SIZE) {
page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
- if (!alloc->pages[page_index]) {
- pr_err("incorrect alloc state at page index %d\n",
- page_index);
+ if (!alloc->pages[page_index].page_ptr ||
+ !list_empty(&alloc->pages[page_index].lru)) {
+ pr_err("expect alloc but is %s at page index %d\n",
+ alloc->pages[page_index].page_ptr ?
+ "lru" : "free", page_index);
return false;
}
}
@@ -137,28 +139,63 @@ static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
static void binder_selftest_free_buf(struct binder_alloc *alloc,
struct binder_buffer *buffers[],
- size_t *sizes, int *seq)
+ size_t *sizes, int *seq, size_t end)
{
int i;
for (i = 0; i < BUFFER_NUM; i++)
binder_alloc_free_buf(alloc, buffers[seq[i]]);
+ for (i = 0; i < end / PAGE_SIZE; i++) {
+ /**
+ * Error message on a free page can be false positive
+ * if binder shrinker ran during binder_alloc_free_buf
+ * calls above.
+ */
+ if (list_empty(&alloc->pages[i].lru)) {
+ pr_err_size_seq(sizes, seq);
+ pr_err("expect lru but is %s at page index %d\n",
+ alloc->pages[i].page_ptr ? "alloc" : "free", i);
+ binder_selftest_failures++;
+ }
+ }
+}
+
+static void binder_selftest_free_page(struct binder_alloc *alloc)
+{
+ int i;
+ unsigned long count;
+
+ while ((count = list_lru_count(&binder_alloc_lru))) {
+ list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+ NULL, count);
+ }
+
for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
- if ((!alloc->pages[i]) == (i == 0)) {
- pr_err("incorrect free state at page index %d\n", i);
+ if (alloc->pages[i].page_ptr) {
+ pr_err("expect free but is %s at page index %d\n",
+ list_empty(&alloc->pages[i].lru) ?
+ "alloc" : "lru", i);
binder_selftest_failures++;
}
}
}
static void binder_selftest_alloc_free(struct binder_alloc *alloc,
- size_t *sizes, int *seq)
+ size_t *sizes, int *seq, size_t end)
{
struct binder_buffer *buffers[BUFFER_NUM];
binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
- binder_selftest_free_buf(alloc, buffers, sizes, seq);
+ binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
+
+ /* Allocate from lru. */
+ binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
+ if (list_lru_count(&binder_alloc_lru))
+ pr_err("lru list should be empty but is not\n");
+
+ binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
+ binder_selftest_free_page(alloc);
}
static bool is_dup(int *seq, int index, int val)
@@ -174,19 +211,20 @@ static bool is_dup(int *seq, int index, int val)
/* Generate BUFFER_NUM factorial free orders. */
static void binder_selftest_free_seq(struct binder_alloc *alloc,
- size_t *sizes, int *seq, int index)
+ size_t *sizes, int *seq,
+ int index, size_t end)
{
int i;
if (index == BUFFER_NUM) {
- binder_selftest_alloc_free(alloc, sizes, seq);
+ binder_selftest_alloc_free(alloc, sizes, seq, end);
return;
}
for (i = 0; i < BUFFER_NUM; i++) {
if (is_dup(seq, index, i))
continue;
seq[index] = i;
- binder_selftest_free_seq(alloc, sizes, seq, index + 1);
+ binder_selftest_free_seq(alloc, sizes, seq, index + 1, end);
}
}
@@ -211,8 +249,9 @@ static void binder_selftest_alloc_size(struct binder_alloc *alloc,
* we need one giant buffer before getting to the last page.
*/
back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
- binder_selftest_free_seq(alloc, front_sizes, seq, 0);
- binder_selftest_free_seq(alloc, back_sizes, seq, 0);
+ binder_selftest_free_seq(alloc, front_sizes, seq, 0,
+ end_offset[BUFFER_NUM - 1]);
+ binder_selftest_free_seq(alloc, back_sizes, seq, 0, alloc->buffer_size);
}
static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
@@ -246,7 +285,8 @@ static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
*
* Allocate BUFFER_NUM buffers to cover all page alignment cases,
* then free them in all orders possible. Check that pages are
- * allocated after buffer alloc and freed after freeing buffer.
+ * correctly allocated, put onto lru when buffers are freed, and
+ * are freed when binder_alloc_free_page is called.
*/
void binder_selftest_alloc(struct binder_alloc *alloc)
{
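
The three page states named in the selftest's error messages ("free", "lru", "alloc") can be read directly off the binder_lru_page struct this patch introduces in binder_alloc.h (a page pointer plus a list_head linking the page onto the shrinker lru). The helper below is illustrative only, not part of the patch, and assumes that struct layout; it mirrors the checks the selftest performs inline.

/* Hypothetical helper, for illustration only. */
static const char *binder_selftest_page_state(struct binder_lru_page *page)
{
	if (!page->page_ptr)
		return "free";	/* no page: never allocated, or reclaimed */
	if (!list_empty(&page->lru))
		return "lru";	/* page kept, parked on the shrinker lru */
	return "alloc";		/* page currently backs a live buffer */
}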