Diffstat (limited to 'drivers/android/binder.c')
-rw-r--r--  drivers/android/binder.c | 317
1 file changed, 224 insertions(+), 93 deletions(-)
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index fddf76ef5bd6..764b63a5aade 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -141,7 +141,7 @@ enum {
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
-module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
+module_param_named(debug_mask, binder_debug_mask, uint, 0644);
static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);
@@ -150,7 +150,7 @@ static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;
static int binder_set_stop_on_user_error(const char *val,
- struct kernel_param *kp)
+ const struct kernel_param *kp)
{
int ret;
@@ -160,7 +160,7 @@ static int binder_set_stop_on_user_error(const char *val,
return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
- param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
+ param_get_int, &binder_stop_on_user_error, 0644);
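
The S_IWUSR | S_IRUGO to 0644 conversions above (and the S_IRUGO to 0444 ones later in this diff) are purely mechanical; the octal literals encode the same permission bits. A minimal compile-time check makes the arithmetic explicit. This is userspace C11 using the <sys/stat.h> constants, whose values match the kernel's; S_IRUGO itself is kernel-only shorthand for S_IRUSR|S_IRGRP|S_IROTH:

#include <assert.h>
#include <sys/stat.h>

int main(void)
{
	/* owner rw (0600) plus group/other r (0044) == 0644 */
	static_assert((S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH) == 0644,
		      "S_IWUSR | S_IRUGO is 0644");
	/* read-only for everyone == 0444 */
	static_assert((S_IRUSR | S_IRGRP | S_IROTH) == 0444,
		      "S_IRUGO is 0444");
	return 0;
}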
#define binder_debug(mask, x...) \
do { \
@@ -249,7 +249,7 @@ static struct binder_transaction_log_entry *binder_transaction_log_add(
unsigned int cur = atomic_inc_return(&log->cur);
if (cur >= ARRAY_SIZE(log->entry))
- log->full = 1;
+ log->full = true;
e = &log->entry[cur % ARRAY_SIZE(log->entry)];
WRITE_ONCE(e->debug_id_done, 0);
/*
@@ -482,7 +482,8 @@ enum binder_deferred_state {
* @tsk task_struct for group_leader of process
* (invariant after initialized)
* @files files_struct for process
- * (invariant after initialized)
+ * (protected by @files_lock)
+ * @files_lock mutex to protect @files
* @deferred_work_node: element for binder_deferred_list
* (protected by binder_deferred_lock)
* @deferred_work: bitmap of deferred work to perform
@@ -492,8 +493,6 @@ enum binder_deferred_state {
* (protected by @inner_lock)
* @todo: list of work for this process
* (protected by @inner_lock)
- * @wait: wait queue head to wait for proc work
- * (invariant after initialized)
* @stats: per-process binder statistics
* (atomics, no lock needed)
* @delivered_death: list of delivered death notification
@@ -530,12 +529,12 @@ struct binder_proc {
int pid;
struct task_struct *tsk;
struct files_struct *files;
+ struct mutex files_lock;
struct hlist_node deferred_work_node;
int deferred_work;
bool is_dead;
struct list_head todo;
- wait_queue_head_t wait;
struct binder_stats stats;
struct list_head delivered_death;
int max_threads;
@@ -577,6 +576,8 @@ enum {
* (protected by @proc->inner_lock)
* @todo: list of work to do for this thread
* (protected by @proc->inner_lock)
+ * @process_todo: whether work in @todo should be processed
+ * (protected by @proc->inner_lock)
* @return_error: transaction errors reported by this thread
* (only accessed by this thread)
* @reply_error: transaction errors reported by target thread
@@ -602,6 +603,7 @@ struct binder_thread {
bool looper_need_return; /* can be written by other thread */
struct binder_transaction *transaction_stack;
struct list_head todo;
+ bool process_todo;
struct binder_error return_error;
struct binder_error reply_error;
wait_queue_head_t wait;
@@ -787,6 +789,16 @@ static bool binder_worklist_empty(struct binder_proc *proc,
return ret;
}
+/**
+ * binder_enqueue_work_ilocked() - Add an item to the work list
+ * @work: struct binder_work to add to list
+ * @target_list: list to add work to
+ *
+ * Adds the work to the specified list. Asserts that work
+ * is not already on a list.
+ *
+ * Requires the proc->inner_lock to be held.
+ */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
struct list_head *target_list)
@@ -797,22 +809,56 @@ binder_enqueue_work_ilocked(struct binder_work *work,
}
/**
- * binder_enqueue_work() - Add an item to the work list
- * @proc: binder_proc associated with list
+ * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
+ * @thread: thread to queue work to
* @work: struct binder_work to add to list
- * @target_list: list to add work to
*
- * Adds the work to the specified list. Asserts that work
- * is not already on a list.
+ * Adds the work to the todo list of the thread. Doesn't set the process_todo
+ * flag, which means that (if it wasn't already set) the thread will go to
+ * sleep without handling this work when it calls read.
+ *
+ * Requires the proc->inner_lock to be held.
*/
static void
-binder_enqueue_work(struct binder_proc *proc,
- struct binder_work *work,
- struct list_head *target_list)
+binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
+ struct binder_work *work)
{
- binder_inner_proc_lock(proc);
- binder_enqueue_work_ilocked(work, target_list);
- binder_inner_proc_unlock(proc);
+ binder_enqueue_work_ilocked(work, &thread->todo);
+}
+
+/**
+ * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
+ * @thread: thread to queue work to
+ * @work: struct binder_work to add to list
+ *
+ * Adds the work to the todo list of the thread, and enables processing
+ * of the todo queue.
+ *
+ * Requires the proc->inner_lock to be held.
+ */
+static void
+binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
+ struct binder_work *work)
+{
+ binder_enqueue_work_ilocked(work, &thread->todo);
+ thread->process_todo = true;
+}
+
+/**
+ * binder_enqueue_thread_work() - Add an item to the thread work list
+ * @thread: thread to queue work to
+ * @work: struct binder_work to add to list
+ *
+ * Adds the work to the todo list of the thread, and enables processing
+ * of the todo queue.
+ */
+static void
+binder_enqueue_thread_work(struct binder_thread *thread,
+ struct binder_work *work)
+{
+ binder_inner_proc_lock(thread->proc);
+ binder_enqueue_thread_work_ilocked(thread, work);
+ binder_inner_proc_unlock(thread->proc);
}
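
The new helpers split "put work on a thread's todo list" from "mark the thread as having work to process". A self-contained sketch of that gating, using simplified stand-in types rather than the real binder structures or locking, shows why deferred work alone no longer wakes a reader:

#include <stdbool.h>
#include <stdio.h>

struct sketch_work { struct sketch_work *next; const char *desc; };

struct sketch_thread {
	struct sketch_work *todo;   /* singly linked stand-in for the todo list */
	bool process_todo;          /* mirrors binder_thread->process_todo */
};

/* mirrors binder_has_work_ilocked(): a non-empty list alone no longer counts */
static bool sketch_has_work(struct sketch_thread *t)
{
	return t->process_todo;
}

/* mirrors binder_enqueue_deferred_thread_work_ilocked() */
static void enqueue_deferred(struct sketch_thread *t, struct sketch_work *w)
{
	w->next = t->todo;
	t->todo = w;
}

/* mirrors binder_enqueue_thread_work_ilocked() */
static void enqueue(struct sketch_thread *t, struct sketch_work *w)
{
	enqueue_deferred(t, w);
	t->process_todo = true;
}

int main(void)
{
	struct sketch_thread t = { 0 };
	struct sketch_work tcomplete = { .desc = "BR_TRANSACTION_COMPLETE" };
	struct sketch_work reply = { .desc = "BR_REPLY" };

	enqueue_deferred(&t, &tcomplete);   /* parked; the reader stays asleep */
	printf("has work after deferred enqueue: %d\n", sketch_has_work(&t)); /* 0 */

	enqueue(&t, &reply);                /* the reply enables processing */
	printf("has work after reply arrives:    %d\n", sketch_has_work(&t)); /* 1 */
	return 0;
}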
static void
@@ -877,20 +923,26 @@ static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
- struct files_struct *files = proc->files;
unsigned long rlim_cur;
unsigned long irqs;
+ int ret;
- if (files == NULL)
- return -ESRCH;
-
- if (!lock_task_sighand(proc->tsk, &irqs))
- return -EMFILE;
-
+ mutex_lock(&proc->files_lock);
+ if (proc->files == NULL) {
+ ret = -ESRCH;
+ goto err;
+ }
+ if (!lock_task_sighand(proc->tsk, &irqs)) {
+ ret = -EMFILE;
+ goto err;
+ }
rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
unlock_task_sighand(proc->tsk, &irqs);
- return __alloc_fd(files, 0, rlim_cur, flags);
+ ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
+err:
+ mutex_unlock(&proc->files_lock);
+ return ret;
}
/*
@@ -899,8 +951,10 @@ static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
static void task_fd_install(
struct binder_proc *proc, unsigned int fd, struct file *file)
{
+ mutex_lock(&proc->files_lock);
if (proc->files)
__fd_install(proc->files, fd, file);
+ mutex_unlock(&proc->files_lock);
}
/*
@@ -910,9 +964,11 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
int retval;
- if (proc->files == NULL)
- return -ESRCH;
-
+ mutex_lock(&proc->files_lock);
+ if (proc->files == NULL) {
+ retval = -ESRCH;
+ goto err;
+ }
retval = __close_fd(proc->files, fd);
/* can't restart close syscall because file table entry was cleared */
if (unlikely(retval == -ERESTARTSYS ||
@@ -920,14 +976,15 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
retval == -ERESTARTNOHAND ||
retval == -ERESTART_RESTARTBLOCK))
retval = -EINTR;
-
+err:
+ mutex_unlock(&proc->files_lock);
return retval;
}
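
All three fd helpers now follow the same shape: take @files_lock, fail with -ESRCH if @files has been cleared, and leave through a single unlock path. A userspace analog of that pattern, with a pthread mutex standing in for the kernel mutex and illustrative names rather than binder's:

#include <errno.h>
#include <pthread.h>
#include <stddef.h>

struct proc_sketch {
	pthread_mutex_t files_lock;
	void *files;            /* stands in for struct files_struct * */
};

static int use_files(struct proc_sketch *proc)
{
	int ret = 0;

	pthread_mutex_lock(&proc->files_lock);
	if (proc->files == NULL) {
		ret = -ESRCH;   /* same error binder returns */
		goto err;
	}
	/* ... operate on proc->files while the lock pins it ... */
err:
	pthread_mutex_unlock(&proc->files_lock);
	return ret;
}

int main(void)
{
	struct proc_sketch proc = { PTHREAD_MUTEX_INITIALIZER, NULL };

	return use_files(&proc) == -ESRCH ? 0 : 1;  /* NULL files -> -ESRCH */
}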
static bool binder_has_work_ilocked(struct binder_thread *thread,
bool do_proc_work)
{
- return !binder_worklist_empty_ilocked(&thread->todo) ||
+ return thread->process_todo ||
thread->looper_need_return ||
(do_proc_work &&
!binder_worklist_empty_ilocked(&thread->proc->todo));
@@ -1215,6 +1272,17 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
node->local_strong_refs++;
if (!node->has_strong_ref && target_list) {
binder_dequeue_work_ilocked(&node->work);
+ /*
+ * Note: this function is the only place where we queue
+ * directly to a thread->todo without using the
+ * corresponding binder_enqueue_thread_work() helper
+ * functions; in this case it's ok to not set the
+ * process_todo flag, since we know this node work will
+ * always be followed by other work that starts queue
+ * processing: in case of synchronous transactions, a
+ * BR_REPLY or BR_ERROR; in case of oneway
+ * transactions, a BR_TRANSACTION_COMPLETE.
+ */
binder_enqueue_work_ilocked(&node->work, target_list);
}
} else {
@@ -1226,6 +1294,9 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
node->debug_id);
return -EINVAL;
}
+ /*
+ * See comment above
+ */
binder_enqueue_work_ilocked(&node->work, target_list);
}
}
@@ -1915,13 +1986,19 @@ static void binder_send_failed_reply(struct binder_transaction *t,
binder_pop_transaction_ilocked(target_thread, t);
if (target_thread->reply_error.cmd == BR_OK) {
target_thread->reply_error.cmd = error_code;
- binder_enqueue_work_ilocked(
- &target_thread->reply_error.work,
- &target_thread->todo);
+ binder_enqueue_thread_work_ilocked(
+ target_thread,
+ &target_thread->reply_error.work);
wake_up_interruptible(&target_thread->wait);
} else {
- WARN(1, "Unexpected reply error: %u\n",
- target_thread->reply_error.cmd);
+ /*
+ * Cannot get here for normal operation, but
+ * we can if multiple synchronous transactions
+ * are sent without blocking for responses.
+ * Just ignore the 2nd error in this case.
+ */
+ pr_warn("Unexpected reply error: %u\n",
+ target_thread->reply_error.cmd);
}
binder_inner_proc_unlock(target_thread->proc);
binder_thread_dec_tmpref(target_thread);
@@ -1948,6 +2025,26 @@ static void binder_send_failed_reply(struct binder_transaction *t,
}
/**
+ * binder_cleanup_transaction() - cleans up undelivered transaction
+ * @t: transaction that needs to be cleaned up
+ * @reason: reason the transaction wasn't delivered
+ * @error_code: error to return to caller (if synchronous call)
+ */
+static void binder_cleanup_transaction(struct binder_transaction *t,
+ const char *reason,
+ uint32_t error_code)
+{
+ if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
+ binder_send_failed_reply(t, error_code);
+ } else {
+ binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+ "undelivered transaction %d, %s\n",
+ t->debug_id, reason);
+ binder_free_transaction(t);
+ }
+}
+
+/**
* binder_validate_object() - checks for a valid metadata object in a buffer.
* @buffer: binder_buffer that we're parsing.
* @offset: offset in the buffer at which to validate an object.
@@ -2102,7 +2199,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
int debug_id = buffer->debug_id;
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d buffer release %d, size %zd-%zd, failed at %p\n",
+ "%d buffer release %d, size %zd-%zd, failed at %pK\n",
proc->pid, buffer->debug_id,
buffer->data_size, buffer->offsets_size, failed_at);
@@ -2192,7 +2289,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
off_start,
offp - off_start);
if (!parent) {
- pr_err("transaction release %d bad parent offset",
+ pr_err("transaction release %d bad parent offset\n",
debug_id);
continue;
}
@@ -2536,20 +2633,18 @@ static bool binder_proc_transaction(struct binder_transaction *t,
struct binder_proc *proc,
struct binder_thread *thread)
{
- struct list_head *target_list = NULL;
struct binder_node *node = t->buffer->target_node;
bool oneway = !!(t->flags & TF_ONE_WAY);
- bool wakeup = true;
+ bool pending_async = false;
BUG_ON(!node);
binder_node_lock(node);
if (oneway) {
BUG_ON(thread);
if (node->has_async_transaction) {
- target_list = &node->async_todo;
- wakeup = false;
+ pending_async = true;
} else {
- node->has_async_transaction = 1;
+ node->has_async_transaction = true;
}
}
@@ -2561,19 +2656,17 @@ static bool binder_proc_transaction(struct binder_transaction *t,
return false;
}
- if (!thread && !target_list)
+ if (!thread && !pending_async)
thread = binder_select_thread_ilocked(proc);
if (thread)
- target_list = &thread->todo;
- else if (!target_list)
- target_list = &proc->todo;
+ binder_enqueue_thread_work_ilocked(thread, &t->work);
+ else if (!pending_async)
+ binder_enqueue_work_ilocked(&t->work, &proc->todo);
else
- BUG_ON(target_list != &node->async_todo);
-
- binder_enqueue_work_ilocked(&t->work, target_list);
+ binder_enqueue_work_ilocked(&t->work, &node->async_todo);
- if (wakeup)
+ if (!pending_async)
binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
binder_inner_proc_unlock(proc);
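
Rewritten this way, binder_proc_transaction() makes a three-way choice instead of juggling a target_list pointer. A compact sketch of that decision, with plain booleans in place of the real locking (note the two flags cannot both be set: no thread is given or selected while an async transaction is already pending):

#include <stdio.h>
#include <stdbool.h>

enum target { THREAD_TODO, PROC_TODO, NODE_ASYNC_TODO };

static enum target pick_queue(bool have_thread, bool pending_async)
{
	if (have_thread)        /* caller-supplied thread, or one selected ilocked */
		return THREAD_TODO;
	if (!pending_async)     /* no oneway transaction already in flight */
		return PROC_TODO;
	return NODE_ASYNC_TODO; /* park behind the in-flight oneway */
}

int main(void)
{
	/* a wakeup is issued only when the work is not parked as async */
	printf("%d %d %d\n",
	       pick_queue(true, false),   /* 0: THREAD_TODO */
	       pick_queue(false, false),  /* 1: PROC_TODO */
	       pick_queue(false, true));  /* 2: NODE_ASYNC_TODO */
	return 0;
}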
@@ -3068,10 +3161,10 @@ static void binder_transaction(struct binder_proc *proc,
}
}
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
- binder_enqueue_work(proc, tcomplete, &thread->todo);
t->work.type = BINDER_WORK_TRANSACTION;
if (reply) {
+ binder_enqueue_thread_work(thread, tcomplete);
binder_inner_proc_lock(target_proc);
if (target_thread->is_dead) {
binder_inner_proc_unlock(target_proc);
@@ -3079,13 +3172,21 @@ static void binder_transaction(struct binder_proc *proc,
}
BUG_ON(t->buffer->async_transaction != 0);
binder_pop_transaction_ilocked(target_thread, in_reply_to);
- binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
+ binder_enqueue_thread_work_ilocked(target_thread, &t->work);
binder_inner_proc_unlock(target_proc);
wake_up_interruptible_sync(&target_thread->wait);
binder_free_transaction(in_reply_to);
} else if (!(t->flags & TF_ONE_WAY)) {
BUG_ON(t->buffer->async_transaction != 0);
binder_inner_proc_lock(proc);
+ /*
+ * Defer the TRANSACTION_COMPLETE, so we don't return to
+ * userspace immediately; this allows the target process to
+ * immediately start processing this transaction, reducing
+ * latency. We will then return the TRANSACTION_COMPLETE when
+ * the target replies (or there is an error).
+ */
+ binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
@@ -3099,6 +3200,7 @@ static void binder_transaction(struct binder_proc *proc,
} else {
BUG_ON(target_node == NULL);
BUG_ON(t->buffer->async_transaction != 1);
+ binder_enqueue_thread_work(thread, tcomplete);
if (!binder_proc_transaction(t, target_proc, NULL))
goto err_dead_proc_or_thread;
}
@@ -3177,15 +3279,11 @@ err_invalid_target_handle:
BUG_ON(thread->return_error.cmd != BR_OK);
if (in_reply_to) {
thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
- binder_enqueue_work(thread->proc,
- &thread->return_error.work,
- &thread->todo);
+ binder_enqueue_thread_work(thread, &thread->return_error.work);
binder_send_failed_reply(in_reply_to, return_error);
} else {
thread->return_error.cmd = return_error;
- binder_enqueue_work(thread->proc,
- &thread->return_error.work,
- &thread->todo);
+ binder_enqueue_thread_work(thread, &thread->return_error.work);
}
}
@@ -3391,7 +3489,7 @@ static int binder_thread_write(struct binder_proc *proc,
w = binder_dequeue_work_head_ilocked(
&buf_node->async_todo);
if (!w) {
- buf_node->has_async_transaction = 0;
+ buf_node->has_async_transaction = false;
} else {
binder_enqueue_work_ilocked(
w, &proc->todo);
@@ -3489,10 +3587,9 @@ static int binder_thread_write(struct binder_proc *proc,
WARN_ON(thread->return_error.cmd !=
BR_OK);
thread->return_error.cmd = BR_ERROR;
- binder_enqueue_work(
- thread->proc,
- &thread->return_error.work,
- &thread->todo);
+ binder_enqueue_thread_work(
+ thread,
+ &thread->return_error.work);
binder_debug(
BINDER_DEBUG_FAILED_TRANSACTION,
"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
@@ -3572,9 +3669,9 @@ static int binder_thread_write(struct binder_proc *proc,
if (thread->looper &
(BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED))
- binder_enqueue_work_ilocked(
- &death->work,
- &thread->todo);
+ binder_enqueue_thread_work_ilocked(
+ thread,
+ &death->work);
else {
binder_enqueue_work_ilocked(
&death->work,
@@ -3614,7 +3711,7 @@ static int binder_thread_write(struct binder_proc *proc,
}
}
binder_debug(BINDER_DEBUG_DEAD_BINDER,
- "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
+ "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
proc->pid, thread->pid, (u64)cookie,
death);
if (death == NULL) {
@@ -3629,8 +3726,8 @@ static int binder_thread_write(struct binder_proc *proc,
if (thread->looper &
(BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED))
- binder_enqueue_work_ilocked(
- &death->work, &thread->todo);
+ binder_enqueue_thread_work_ilocked(
+ thread, &death->work);
else {
binder_enqueue_work_ilocked(
&death->work,
@@ -3804,6 +3901,8 @@ retry:
break;
}
w = binder_dequeue_work_head_ilocked(list);
+ if (binder_worklist_empty_ilocked(&thread->todo))
+ thread->process_todo = false;
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
@@ -4015,12 +4114,20 @@ retry:
if (put_user(cmd, (uint32_t __user *)ptr)) {
if (t_from)
binder_thread_dec_tmpref(t_from);
+
+ binder_cleanup_transaction(t, "put_user failed",
+ BR_FAILED_REPLY);
+
return -EFAULT;
}
ptr += sizeof(uint32_t);
if (copy_to_user(ptr, &tr, sizeof(tr))) {
if (t_from)
binder_thread_dec_tmpref(t_from);
+
+ binder_cleanup_transaction(t, "copy_to_user failed",
+ BR_FAILED_REPLY);
+
return -EFAULT;
}
ptr += sizeof(tr);
@@ -4090,15 +4197,9 @@ static void binder_release_work(struct binder_proc *proc,
struct binder_transaction *t;
t = container_of(w, struct binder_transaction, work);
- if (t->buffer->target_node &&
- !(t->flags & TF_ONE_WAY)) {
- binder_send_failed_reply(t, BR_DEAD_REPLY);
- } else {
- binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
- "undelivered transaction %d\n",
- t->debug_id);
- binder_free_transaction(t);
- }
+
+ binder_cleanup_transaction(t, "process died.",
+ BR_DEAD_REPLY);
} break;
case BINDER_WORK_RETURN_ERROR: {
struct binder_error *e = container_of(
@@ -4267,8 +4368,29 @@ static int binder_thread_release(struct binder_proc *proc,
if (t)
spin_lock(&t->lock);
}
+
+ /*
+ * If this thread used poll, make sure we remove the waitqueue
+ * from any epoll data structures holding it with POLLFREE.
+ * waitqueue_active() is safe to use here because we're holding
+ * the inner lock.
+ */
+ if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
+ waitqueue_active(&thread->wait)) {
+ wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
+ }
+
binder_inner_proc_unlock(thread->proc);
+ /*
+ * This is needed to avoid races between wake_up_poll() above and
+ * ep_remove_waitqueue() called for other reasons (e.g. the epoll file
+ * descriptor being closed); ep_remove_waitqueue() holds an RCU read
+ * lock, so we can be sure it's done after calling synchronize_rcu().
+ */
+ if (thread->looper & BINDER_LOOPER_STATE_POLL)
+ synchronize_rcu();
+
if (send_reply)
binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
binder_release_work(proc, &thread->todo);
@@ -4276,7 +4398,7 @@ static int binder_thread_release(struct binder_proc *proc,
return active_transactions;
}
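
The teardown order here matters: pollers are kicked with POLLFREE first, and only after synchronize_rcu() returns is it safe to free the thread and its waitqueue. A runnable sketch of that ordering, with printing stubs in place of the kernel primitives (which are assumed, not reimplemented) and the inner-lock handling elided:

#include <stdio.h>
#include <stdlib.h>

/* Stubs standing in for wake_up_poll(..., EPOLLHUP | POLLFREE)
 * and synchronize_rcu(). */
static void stub_wake_up_poll(void)    { puts("1. wake pollers with POLLFREE"); }
static void stub_synchronize_rcu(void) { puts("2. wait for RCU grace period"); }

struct thread_sketch { int used_poll; void *wait; };

static void release_thread(struct thread_sketch *t)
{
	if (t->used_poll) {
		stub_wake_up_poll();        /* detach any epoll waiters */
		stub_synchronize_rcu();     /* let RCU readers in eventpoll finish */
	}
	free(t->wait);                      /* now nothing can touch the waitqueue */
	puts("3. free the thread/waitqueue");
}

int main(void)
{
	struct thread_sketch t = { 1, malloc(16) };

	release_thread(&t);
	return 0;
}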
-static unsigned int binder_poll(struct file *filp,
+static __poll_t binder_poll(struct file *filp,
struct poll_table_struct *wait)
{
struct binder_proc *proc = filp->private_data;
@@ -4284,6 +4406,8 @@ static unsigned int binder_poll(struct file *filp,
bool wait_for_proc_work;
thread = binder_get_thread(proc);
+ if (!thread)
+ return POLLERR;
binder_inner_proc_lock(thread->proc);
thread->looper |= BINDER_LOOPER_STATE_POLL;
@@ -4294,7 +4418,7 @@ static unsigned int binder_poll(struct file *filp,
poll_wait(filp, &thread->wait, wait);
if (binder_has_work(thread, wait_for_proc_work))
- return POLLIN;
+ return EPOLLIN;
return 0;
}
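
From userspace the __poll_t conversion is invisible: EPOLLIN and the classic POLLIN bit share the same value in the UAPI. A hedged usage sketch, assuming /dev/binder exists and is accessible (real clients also ioctl/mmap the fd before polling, which is omitted here):

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct pollfd pfd;
	int ret;

	pfd.fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
	if (pfd.fd < 0) {
		perror("open /dev/binder");
		return 1;
	}
	pfd.events = POLLIN;    /* same bit EPOLLIN maps to */

	ret = poll(&pfd, 1, 100 /* ms */);
	if (ret < 0)
		perror("poll");
	else if (ret == 0)
		puts("no binder work within timeout");
	else    /* POLLERR here would match binder_poll()'s thread-alloc failure */
		printf("revents: 0x%x\n", (unsigned)pfd.revents);

	close(pfd.fd);
	return 0;
}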
@@ -4605,11 +4729,13 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
ret = binder_alloc_mmap_handler(&proc->alloc, vma);
if (ret)
return ret;
+ mutex_lock(&proc->files_lock);
proc->files = get_files_struct(current);
+ mutex_unlock(&proc->files_lock);
return 0;
err_bad_arg:
- pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
+ pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
return ret;
}
@@ -4619,7 +4745,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
struct binder_proc *proc;
struct binder_device *binder_dev;
- binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
+ binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
current->group_leader->pid, current->pid);
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
@@ -4629,6 +4755,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
spin_lock_init(&proc->outer_lock);
get_task_struct(current->group_leader);
proc->tsk = current->group_leader;
+ mutex_init(&proc->files_lock);
INIT_LIST_HEAD(&proc->todo);
proc->default_priority = task_nice(current);
binder_dev = container_of(filp->private_data, struct binder_device,
@@ -4657,7 +4784,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
* anyway print all contexts that a given PID has, so this
* is not a problem.
*/
- proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
+ proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
binder_debugfs_dir_entry_proc,
(void *)(unsigned long)proc->pid,
&binder_proc_fops);
@@ -4881,9 +5008,11 @@ static void binder_deferred_func(struct work_struct *work)
files = NULL;
if (defer & BINDER_DEFERRED_PUT_FILES) {
+ mutex_lock(&proc->files_lock);
files = proc->files;
if (files)
proc->files = NULL;
+ mutex_unlock(&proc->files_lock);
}
if (defer & BINDER_DEFERRED_FLUSH)
@@ -4922,7 +5051,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
spin_lock(&t->lock);
to_proc = t->to_proc;
seq_printf(m,
- "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
+ "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
prefix, t->debug_id, t,
t->from ? t->from->proc->pid : 0,
t->from ? t->from->pid : 0,
@@ -4946,7 +5075,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
}
if (buffer->target_node)
seq_printf(m, " node %d", buffer->target_node->debug_id);
- seq_printf(m, " size %zd:%zd data %p\n",
+ seq_printf(m, " size %zd:%zd data %pK\n",
buffer->data_size, buffer->offsets_size,
buffer->data);
}
@@ -5484,7 +5613,9 @@ static int __init binder_init(void)
struct binder_device *device;
struct hlist_node *tmp;
- binder_alloc_shrinker_init();
+ ret = binder_alloc_shrinker_init();
+ if (ret)
+ return ret;
atomic_set(&binder_transaction_log.cur, ~0U);
atomic_set(&binder_transaction_log_failed.cur, ~0U);
@@ -5496,27 +5627,27 @@ static int __init binder_init(void)
if (binder_debugfs_dir_entry_root) {
debugfs_create_file("state",
- S_IRUGO,
+ 0444,
binder_debugfs_dir_entry_root,
NULL,
&binder_state_fops);
debugfs_create_file("stats",
- S_IRUGO,
+ 0444,
binder_debugfs_dir_entry_root,
NULL,
&binder_stats_fops);
debugfs_create_file("transactions",
- S_IRUGO,
+ 0444,
binder_debugfs_dir_entry_root,
NULL,
&binder_transactions_fops);
debugfs_create_file("transaction_log",
- S_IRUGO,
+ 0444,
binder_debugfs_dir_entry_root,
&binder_transaction_log,
&binder_transaction_log_fops);
debugfs_create_file("failed_transaction_log",
- S_IRUGO,
+ 0444,
binder_debugfs_dir_entry_root,
&binder_transaction_log_failed,
&binder_transaction_log_fops);