Diffstat (limited to 'drivers/android/binder.c')
-rw-r--r-- | drivers/android/binder.c | 1700
1 file changed, 1051 insertions(+), 649 deletions(-)
diff --git a/drivers/android/binder.c b/drivers/android/binder.c index e47c8a4c83db..880224ec6abb 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -68,11 +68,9 @@ #include <linux/sizes.h> #include <uapi/linux/android/binder.h> -#include <uapi/linux/android/binderfs.h> -#include <asm/cacheflush.h> +#include <linux/cacheflush.h> -#include "binder_alloc.h" #include "binder_internal.h" #include "binder_trace.h" @@ -135,18 +133,45 @@ static int binder_set_stop_on_user_error(const char *val, module_param_call(stop_on_user_error, binder_set_stop_on_user_error, param_get_int, &binder_stop_on_user_error, 0644); -#define binder_debug(mask, x...) \ - do { \ - if (binder_debug_mask & mask) \ - pr_info_ratelimited(x); \ - } while (0) +static __printf(2, 3) void binder_debug(int mask, const char *format, ...) +{ + struct va_format vaf; + va_list args; + + if (binder_debug_mask & mask) { + va_start(args, format); + vaf.va = &args; + vaf.fmt = format; + pr_info_ratelimited("%pV", &vaf); + va_end(args); + } +} + +#define binder_txn_error(x...) \ + binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x) + +static __printf(1, 2) void binder_user_error(const char *format, ...) +{ + struct va_format vaf; + va_list args; + + if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) { + va_start(args, format); + vaf.va = &args; + vaf.fmt = format; + pr_info_ratelimited("%pV", &vaf); + va_end(args); + } + + if (binder_stop_on_user_error) + binder_stop_on_user_error = 2; +} -#define binder_user_error(x...) \ +#define binder_set_extended_error(ee, _id, _command, _param) \ do { \ - if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \ - pr_info_ratelimited(x); \ - if (binder_stop_on_user_error) \ - binder_stop_on_user_error = 2; \ + (ee)->id = _id; \ + (ee)->command = _command; \ + (ee)->param = _param; \ } while (0) #define to_flat_binder_object(hdr) \ @@ -160,24 +185,6 @@ module_param_call(stop_on_user_error, binder_set_stop_on_user_error, #define to_binder_fd_array_object(hdr) \ container_of(hdr, struct binder_fd_array_object, hdr) -enum binder_stat_types { - BINDER_STAT_PROC, - BINDER_STAT_THREAD, - BINDER_STAT_NODE, - BINDER_STAT_REF, - BINDER_STAT_DEATH, - BINDER_STAT_TRANSACTION, - BINDER_STAT_TRANSACTION_COMPLETE, - BINDER_STAT_COUNT -}; - -struct binder_stats { - atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1]; - atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1]; - atomic_t obj_created[BINDER_STAT_COUNT]; - atomic_t obj_deleted[BINDER_STAT_COUNT]; -}; - static struct binder_stats binder_stats; static inline void binder_stats_deleted(enum binder_stat_types type) @@ -190,8 +197,32 @@ static inline void binder_stats_created(enum binder_stat_types type) atomic_inc(&binder_stats.obj_created[type]); } -struct binder_transaction_log binder_transaction_log; -struct binder_transaction_log binder_transaction_log_failed; +struct binder_transaction_log_entry { + int debug_id; + int debug_id_done; + int call_type; + int from_proc; + int from_thread; + int target_handle; + int to_proc; + int to_thread; + int to_node; + int data_size; + int offsets_size; + int return_error_line; + uint32_t return_error; + uint32_t return_error_param; + char context_name[BINDERFS_MAX_NAME + 1]; +}; + +struct binder_transaction_log { + atomic_t cur; + bool full; + struct binder_transaction_log_entry entry[32]; +}; + +static struct binder_transaction_log binder_transaction_log; +static struct binder_transaction_log binder_transaction_log_failed; static struct binder_transaction_log_entry *binder_transaction_log_add( struct binder_transaction_log *log) @@ 
-213,278 +244,11 @@ static struct binder_transaction_log_entry *binder_transaction_log_add( return e; } -/** - * struct binder_work - work enqueued on a worklist - * @entry: node enqueued on list - * @type: type of work to be performed - * - * There are separate work lists for proc, thread, and node (async). - */ -struct binder_work { - struct list_head entry; - - enum { - BINDER_WORK_TRANSACTION = 1, - BINDER_WORK_TRANSACTION_COMPLETE, - BINDER_WORK_RETURN_ERROR, - BINDER_WORK_NODE, - BINDER_WORK_DEAD_BINDER, - BINDER_WORK_DEAD_BINDER_AND_CLEAR, - BINDER_WORK_CLEAR_DEATH_NOTIFICATION, - } type; -}; - -struct binder_error { - struct binder_work work; - uint32_t cmd; -}; - -/** - * struct binder_node - binder node bookkeeping - * @debug_id: unique ID for debugging - * (invariant after initialized) - * @lock: lock for node fields - * @work: worklist element for node work - * (protected by @proc->inner_lock) - * @rb_node: element for proc->nodes tree - * (protected by @proc->inner_lock) - * @dead_node: element for binder_dead_nodes list - * (protected by binder_dead_nodes_lock) - * @proc: binder_proc that owns this node - * (invariant after initialized) - * @refs: list of references on this node - * (protected by @lock) - * @internal_strong_refs: used to take strong references when - * initiating a transaction - * (protected by @proc->inner_lock if @proc - * and by @lock) - * @local_weak_refs: weak user refs from local process - * (protected by @proc->inner_lock if @proc - * and by @lock) - * @local_strong_refs: strong user refs from local process - * (protected by @proc->inner_lock if @proc - * and by @lock) - * @tmp_refs: temporary kernel refs - * (protected by @proc->inner_lock while @proc - * is valid, and by binder_dead_nodes_lock - * if @proc is NULL. During inc/dec and node release - * it is also protected by @lock to provide safety - * as the node dies and @proc becomes NULL) - * @ptr: userspace pointer for node - * (invariant, no lock needed) - * @cookie: userspace cookie for node - * (invariant, no lock needed) - * @has_strong_ref: userspace notified of strong ref - * (protected by @proc->inner_lock if @proc - * and by @lock) - * @pending_strong_ref: userspace has acked notification of strong ref - * (protected by @proc->inner_lock if @proc - * and by @lock) - * @has_weak_ref: userspace notified of weak ref - * (protected by @proc->inner_lock if @proc - * and by @lock) - * @pending_weak_ref: userspace has acked notification of weak ref - * (protected by @proc->inner_lock if @proc - * and by @lock) - * @has_async_transaction: async transaction to node in progress - * (protected by @lock) - * @accept_fds: file descriptor operations supported for node - * (invariant after initialized) - * @min_priority: minimum scheduling priority - * (invariant after initialized) - * @txn_security_ctx: require sender's security context - * (invariant after initialized) - * @async_todo: list of async work items - * (protected by @proc->inner_lock) - * - * Bookkeeping structure for binder nodes. 
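
The binder_debug() change at the top of this diff replaces a printing macro with a __printf(2, 3) function that forwards its arguments through a struct va_format and the kernel's %pV specifier, keeping the mask test and rate-limited print in one place. Below is a minimal userspace sketch of the same macro-to-function conversion; debug_log() and debug_mask are illustrative names, and vfprintf() stands in for pr_info_ratelimited("%pV", ...):

    #include <stdarg.h>
    #include <stdio.h>

    static unsigned int debug_mask = 0x1;

    /* Like the new binder_debug(): test the mask once, then forward
     * the variadic arguments instead of expanding them in a macro. */
    __attribute__((format(printf, 2, 3)))
    static void debug_log(unsigned int mask, const char *fmt, ...)
    {
            va_list args;

            if (!(debug_mask & mask))
                    return;
            va_start(args, fmt);
            vfprintf(stderr, fmt, args);
            va_end(args);
    }

    int main(void)
    {
            debug_log(0x1, "printed: mask bit %d is set\n", 0);
            debug_log(0x2, "suppressed: mask bit %d is clear\n", 1);
            return 0;
    }
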
- */ -struct binder_node { - int debug_id; - spinlock_t lock; - struct binder_work work; - union { - struct rb_node rb_node; - struct hlist_node dead_node; - }; - struct binder_proc *proc; - struct hlist_head refs; - int internal_strong_refs; - int local_weak_refs; - int local_strong_refs; - int tmp_refs; - binder_uintptr_t ptr; - binder_uintptr_t cookie; - struct { - /* - * bitfield elements protected by - * proc inner_lock - */ - u8 has_strong_ref:1; - u8 pending_strong_ref:1; - u8 has_weak_ref:1; - u8 pending_weak_ref:1; - }; - struct { - /* - * invariant after initialization - */ - u8 accept_fds:1; - u8 txn_security_ctx:1; - u8 min_priority; - }; - bool has_async_transaction; - struct list_head async_todo; -}; - -struct binder_ref_death { - /** - * @work: worklist element for death notifications - * (protected by inner_lock of the proc that - * this ref belongs to) - */ - struct binder_work work; - binder_uintptr_t cookie; -}; - -/** - * struct binder_ref_data - binder_ref counts and id - * @debug_id: unique ID for the ref - * @desc: unique userspace handle for ref - * @strong: strong ref count (debugging only if not locked) - * @weak: weak ref count (debugging only if not locked) - * - * Structure to hold ref count and ref id information. Since - * the actual ref can only be accessed with a lock, this structure - * is used to return information about the ref to callers of - * ref inc/dec functions. - */ -struct binder_ref_data { - int debug_id; - uint32_t desc; - int strong; - int weak; -}; - -/** - * struct binder_ref - struct to track references on nodes - * @data: binder_ref_data containing id, handle, and current refcounts - * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree - * @rb_node_node: node for lookup by @node in proc's rb_tree - * @node_entry: list entry for node->refs list in target node - * (protected by @node->lock) - * @proc: binder_proc containing ref - * @node: binder_node of target node. When cleaning up a - * ref for deletion in binder_cleanup_ref, a non-NULL - * @node indicates the node must be freed - * @death: pointer to death notification (ref_death) if requested - * (protected by @node->lock) - * - * Structure to track references from procA to target node (on procB). This - * structure is unsafe to access without holding @proc->outer_lock. 
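
The binder_ref bookkeeping documented above (moved out of this file by the diff) embeds two rbtree nodes in one struct so a single ref can be found either by userspace handle (@data.desc) or by target node, matching the "Lookups needed" comment. Here is a toy sketch of one object indexed two ways, using bsearch() over two sorted pointer arrays in place of rbtrees; all names are hypothetical:

    #include <stdio.h>
    #include <stdlib.h>

    struct ref {
            unsigned int desc;   /* userspace handle */
            unsigned long node;  /* target node, reduced to a toy key */
    };

    /* The same objects appear in both indexes, like rb_node_desc
     * and rb_node_node living side by side in struct binder_ref. */
    static struct ref refs[] = {
            { .desc = 1, .node = 0x30 },
            { .desc = 2, .node = 0x10 },
            { .desc = 3, .node = 0x20 },
    };
    static struct ref *by_desc[3], *by_node[3];

    static int cmp_desc(const void *a, const void *b)
    {
            const struct ref *x = *(struct ref * const *)a;
            const struct ref *y = *(struct ref * const *)b;

            return (x->desc > y->desc) - (x->desc < y->desc);
    }

    static int cmp_node(const void *a, const void *b)
    {
            const struct ref *x = *(struct ref * const *)a;
            const struct ref *y = *(struct ref * const *)b;

            return (x->node > y->node) - (x->node < y->node);
    }

    int main(void)
    {
            struct ref key = { .desc = 2 }, *kp = &key, **found;
            int i;

            for (i = 0; i < 3; i++)
                    by_desc[i] = by_node[i] = &refs[i];
            qsort(by_desc, 3, sizeof(*by_desc), cmp_desc);
            qsort(by_node, 3, sizeof(*by_node), cmp_node);

            found = bsearch(&kp, by_desc, 3, sizeof(*by_desc), cmp_desc);
            printf("desc 2 -> node 0x%lx\n", found ? (*found)->node : 0UL);
            return 0;
    }
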
- */ -struct binder_ref { - /* Lookups needed: */ - /* node + proc => ref (transaction) */ - /* desc + proc => ref (transaction, inc/dec ref) */ - /* node => refs + procs (proc exit) */ - struct binder_ref_data data; - struct rb_node rb_node_desc; - struct rb_node rb_node_node; - struct hlist_node node_entry; - struct binder_proc *proc; - struct binder_node *node; - struct binder_ref_death *death; -}; - enum binder_deferred_state { BINDER_DEFERRED_FLUSH = 0x01, BINDER_DEFERRED_RELEASE = 0x02, }; -/** - * struct binder_proc - binder process bookkeeping - * @proc_node: element for binder_procs list - * @threads: rbtree of binder_threads in this proc - * (protected by @inner_lock) - * @nodes: rbtree of binder nodes associated with - * this proc ordered by node->ptr - * (protected by @inner_lock) - * @refs_by_desc: rbtree of refs ordered by ref->desc - * (protected by @outer_lock) - * @refs_by_node: rbtree of refs ordered by ref->node - * (protected by @outer_lock) - * @waiting_threads: threads currently waiting for proc work - * (protected by @inner_lock) - * @pid PID of group_leader of process - * (invariant after initialized) - * @tsk task_struct for group_leader of process - * (invariant after initialized) - * @deferred_work_node: element for binder_deferred_list - * (protected by binder_deferred_lock) - * @deferred_work: bitmap of deferred work to perform - * (protected by binder_deferred_lock) - * @is_dead: process is dead and awaiting free - * when outstanding transactions are cleaned up - * (protected by @inner_lock) - * @todo: list of work for this process - * (protected by @inner_lock) - * @stats: per-process binder statistics - * (atomics, no lock needed) - * @delivered_death: list of delivered death notification - * (protected by @inner_lock) - * @max_threads: cap on number of binder threads - * (protected by @inner_lock) - * @requested_threads: number of binder threads requested but not - * yet started. In current implementation, can - * only be 0 or 1. 
- * (protected by @inner_lock) - * @requested_threads_started: number binder threads started - * (protected by @inner_lock) - * @tmp_ref: temporary reference to indicate proc is in use - * (protected by @inner_lock) - * @default_priority: default scheduler priority - * (invariant after initialized) - * @debugfs_entry: debugfs node - * @alloc: binder allocator bookkeeping - * @context: binder_context for this proc - * (invariant after initialized) - * @inner_lock: can nest under outer_lock and/or node lock - * @outer_lock: no nesting under innor or node lock - * Lock order: 1) outer, 2) node, 3) inner - * @binderfs_entry: process-specific binderfs log file - * - * Bookkeeping structure for binder processes - */ -struct binder_proc { - struct hlist_node proc_node; - struct rb_root threads; - struct rb_root nodes; - struct rb_root refs_by_desc; - struct rb_root refs_by_node; - struct list_head waiting_threads; - int pid; - struct task_struct *tsk; - struct hlist_node deferred_work_node; - int deferred_work; - bool is_dead; - - struct list_head todo; - struct binder_stats stats; - struct list_head delivered_death; - int max_threads; - int requested_threads; - int requested_threads_started; - int tmp_ref; - long default_priority; - struct dentry *debugfs_entry; - struct binder_alloc alloc; - struct binder_context *context; - spinlock_t inner_lock; - spinlock_t outer_lock; - struct dentry *binderfs_entry; -}; - enum { BINDER_LOOPER_STATE_REGISTERED = 0x01, BINDER_LOOPER_STATE_ENTERED = 0x02, @@ -495,125 +259,6 @@ enum { }; /** - * struct binder_thread - binder thread bookkeeping - * @proc: binder process for this thread - * (invariant after initialization) - * @rb_node: element for proc->threads rbtree - * (protected by @proc->inner_lock) - * @waiting_thread_node: element for @proc->waiting_threads list - * (protected by @proc->inner_lock) - * @pid: PID for this thread - * (invariant after initialization) - * @looper: bitmap of looping state - * (only accessed by this thread) - * @looper_needs_return: looping thread needs to exit driver - * (no lock needed) - * @transaction_stack: stack of in-progress transactions for this thread - * (protected by @proc->inner_lock) - * @todo: list of work to do for this thread - * (protected by @proc->inner_lock) - * @process_todo: whether work in @todo should be processed - * (protected by @proc->inner_lock) - * @return_error: transaction errors reported by this thread - * (only accessed by this thread) - * @reply_error: transaction errors reported by target thread - * (protected by @proc->inner_lock) - * @wait: wait queue for thread work - * @stats: per-thread statistics - * (atomics, no lock needed) - * @tmp_ref: temporary reference to indicate thread is in use - * (atomic since @proc->inner_lock cannot - * always be acquired) - * @is_dead: thread is dead and awaiting free - * when outstanding transactions are cleaned up - * (protected by @proc->inner_lock) - * - * Bookkeeping structure for binder threads. 
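
The binder_proc documentation above spells out the driver's lock hierarchy: outer_lock first, then a node's lock, then inner_lock. The sketch below illustrates honoring such an order on every path, which is what keeps the nesting deadlock-free; the kernel uses spinlocks and lockdep rather than pthreads, so this is only an analogy:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t outer_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t node_lock  = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t inner_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Every path acquires in the documented order
     * 1) outer, 2) node, 3) inner, so no lock cycle can form. */
    static void touch_all(void)
    {
            pthread_mutex_lock(&outer_lock);
            pthread_mutex_lock(&node_lock);
            pthread_mutex_lock(&inner_lock);
            puts("holding outer, node, inner -- in order");
            pthread_mutex_unlock(&inner_lock);
            pthread_mutex_unlock(&node_lock);
            pthread_mutex_unlock(&outer_lock);
    }

    int main(void)
    {
            touch_all();
            return 0;
    }
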
- */ -struct binder_thread { - struct binder_proc *proc; - struct rb_node rb_node; - struct list_head waiting_thread_node; - int pid; - int looper; /* only modified by this thread */ - bool looper_need_return; /* can be written by other thread */ - struct binder_transaction *transaction_stack; - struct list_head todo; - bool process_todo; - struct binder_error return_error; - struct binder_error reply_error; - wait_queue_head_t wait; - struct binder_stats stats; - atomic_t tmp_ref; - bool is_dead; -}; - -/** - * struct binder_txn_fd_fixup - transaction fd fixup list element - * @fixup_entry: list entry - * @file: struct file to be associated with new fd - * @offset: offset in buffer data to this fixup - * - * List element for fd fixups in a transaction. Since file - * descriptors need to be allocated in the context of the - * target process, we pass each fd to be processed in this - * struct. - */ -struct binder_txn_fd_fixup { - struct list_head fixup_entry; - struct file *file; - size_t offset; -}; - -struct binder_transaction { - int debug_id; - struct binder_work work; - struct binder_thread *from; - struct binder_transaction *from_parent; - struct binder_proc *to_proc; - struct binder_thread *to_thread; - struct binder_transaction *to_parent; - unsigned need_reply:1; - /* unsigned is_dead:1; */ /* not used at the moment */ - - struct binder_buffer *buffer; - unsigned int code; - unsigned int flags; - long priority; - long saved_priority; - kuid_t sender_euid; - struct list_head fd_fixups; - binder_uintptr_t security_ctx; - /** - * @lock: protects @from, @to_proc, and @to_thread - * - * @from, @to_proc, and @to_thread can be set to NULL - * during thread teardown - */ - spinlock_t lock; -}; - -/** - * struct binder_object - union of flat binder object types - * @hdr: generic object header - * @fbo: binder object (nodes and refs) - * @fdo: file descriptor object - * @bbo: binder buffer pointer - * @fdao: file descriptor array - * - * Used for type-independent object copies - */ -struct binder_object { - union { - struct binder_object_header hdr; - struct flat_binder_object fbo; - struct binder_fd_object fdo; - struct binder_buffer_object bbo; - struct binder_fd_array_object fdao; - }; -}; - -/** * binder_proc_lock() - Acquire outer lock for given binder_proc * @proc: struct binder_proc to acquire * @@ -885,27 +530,6 @@ static struct binder_work *binder_dequeue_work_head_ilocked( return w; } -/** - * binder_dequeue_work_head() - Dequeues the item at head of list - * @proc: binder_proc associated with list - * @list: list to dequeue head - * - * Removes the head of the list if there are items on the list - * - * Return: pointer dequeued binder_work, NULL if list was empty - */ -static struct binder_work *binder_dequeue_work_head( - struct binder_proc *proc, - struct list_head *list) -{ - struct binder_work *w; - - binder_inner_proc_lock(proc); - w = binder_dequeue_work_head_ilocked(list); - binder_inner_proc_unlock(proc); - return w; -} - static void binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer); static void binder_free_thread(struct binder_thread *thread); @@ -1761,6 +1385,18 @@ static int binder_inc_ref_for_node(struct binder_proc *proc, } ret = binder_inc_ref_olocked(ref, strong, target_list); *rdata = ref->data; + if (ret && ref == new_ref) { + /* + * Cleanup the failed reference here as the target + * could now be dead and have already released its + * references by now. 
Calling on the new reference + * with strong=0 and a tmp_refs will not decrement + * the node. The new_ref gets kfree'd below. + */ + binder_cleanup_ref_olocked(new_ref); + ref = NULL; + } + binder_proc_unlock(proc); if (new_ref && ref != new_ref) /* @@ -1908,21 +1544,45 @@ static void binder_free_txn_fixups(struct binder_transaction *t) list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) { fput(fixup->file); + if (fixup->target_fd >= 0) + put_unused_fd(fixup->target_fd); list_del(&fixup->fixup_entry); kfree(fixup); } } +static void binder_txn_latency_free(struct binder_transaction *t) +{ + int from_proc, from_thread, to_proc, to_thread; + + spin_lock(&t->lock); + from_proc = t->from ? t->from->proc->pid : 0; + from_thread = t->from ? t->from->pid : 0; + to_proc = t->to_proc ? t->to_proc->pid : 0; + to_thread = t->to_thread ? t->to_thread->pid : 0; + spin_unlock(&t->lock); + + trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread); +} + static void binder_free_transaction(struct binder_transaction *t) { struct binder_proc *target_proc = t->to_proc; if (target_proc) { binder_inner_proc_lock(target_proc); + target_proc->outstanding_txns--; + if (target_proc->outstanding_txns < 0) + pr_warn("%s: Unexpected outstanding_txns %d\n", + __func__, target_proc->outstanding_txns); + if (!target_proc->outstanding_txns && target_proc->is_frozen) + wake_up_interruptible_all(&target_proc->freeze_wait); if (t->buffer) t->buffer->transaction = NULL; binder_inner_proc_unlock(target_proc); } + if (trace_binder_txn_latency_free_enabled()) + binder_txn_latency_free(t); /* * If the transaction has no target_proc, then * t->buffer->transaction has already been cleared. @@ -1969,9 +1629,8 @@ static void binder_send_failed_reply(struct binder_transaction *t, binder_thread_dec_tmpref(target_thread); binder_free_transaction(t); return; - } else { - __release(&target_thread->proc->inner_lock); } + __release(&target_thread->proc->inner_lock); next = t->from_parent; binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, @@ -2014,15 +1673,21 @@ static void binder_cleanup_transaction(struct binder_transaction *t, /** * binder_get_object() - gets object and checks for valid metadata * @proc: binder_proc owning the buffer + * @u: sender's user pointer to base of buffer * @buffer: binder_buffer that we're parsing. * @offset: offset in the @buffer at which to validate an object. * @object: struct binder_object to read into * - * Return: If there's a valid metadata object at @offset in @buffer, the + * Copy the binder object at the given offset into @object. If @u is + * provided then the copy is from the sender's buffer. If not, then + * it is copied from the target's @buffer. + * + * Return: If there's a valid metadata object at @offset, the * size of that object. Otherwise, it returns zero. The object * is read into the struct binder_object pointed to by @object. 
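
binder_free_transaction() above now decrements the target's outstanding_txns, warns on underflow, and wakes freeze_wait once a frozen process has drained to zero. Below is a hedged condition-variable analogue of that drain-then-wake pattern; the names mirror the diff but the synchronization primitives are userspace stand-ins:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t inner_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t freeze_wait = PTHREAD_COND_INITIALIZER;
    static int outstanding_txns = 1;
    static int is_frozen = 1;

    /* Mirrors the binder_free_transaction() hunk: drop the count,
     * warn on underflow, wake the freezer once fully drained. */
    static void free_transaction(void)
    {
            pthread_mutex_lock(&inner_lock);
            outstanding_txns--;
            if (outstanding_txns < 0)
                    fprintf(stderr, "unexpected outstanding_txns %d\n",
                            outstanding_txns);
            if (!outstanding_txns && is_frozen)
                    pthread_cond_broadcast(&freeze_wait);
            pthread_mutex_unlock(&inner_lock);
    }

    int main(void)
    {
            free_transaction();     /* count hits 0 -> freezer may proceed */
            printf("outstanding_txns drained to %d\n", outstanding_txns);
            return 0;
    }
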
*/ static size_t binder_get_object(struct binder_proc *proc, + const void __user *u, struct binder_buffer *buffer, unsigned long offset, struct binder_object *object) @@ -2032,10 +1697,16 @@ static size_t binder_get_object(struct binder_proc *proc, size_t object_size = 0; read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset); - if (offset > buffer->data_size || read_size < sizeof(*hdr) || - binder_alloc_copy_from_buffer(&proc->alloc, object, buffer, - offset, read_size)) + if (offset > buffer->data_size || read_size < sizeof(*hdr)) return 0; + if (u) { + if (copy_from_user(object, u + offset, read_size)) + return 0; + } else { + if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer, + offset, read_size)) + return 0; + } /* Ok, now see if we read a complete object. */ hdr = &object->hdr; @@ -2108,7 +1779,7 @@ static struct binder_buffer_object *binder_validate_ptr( b, buffer_offset, sizeof(object_offset))) return NULL; - object_size = binder_get_object(proc, b, object_offset, object); + object_size = binder_get_object(proc, NULL, b, object_offset, object); if (!object_size || object->hdr.type != BINDER_TYPE_PTR) return NULL; if (object_offsetp) @@ -2173,7 +1844,8 @@ static bool binder_validate_fixup(struct binder_proc *proc, unsigned long buffer_offset; struct binder_object last_object; struct binder_buffer_object *last_bbo; - size_t object_size = binder_get_object(proc, b, last_obj_offset, + size_t object_size = binder_get_object(proc, NULL, b, + last_obj_offset, &last_object); if (object_size != sizeof(*last_bbo)) return false; @@ -2248,16 +1920,19 @@ static void binder_deferred_fd_close(int fd) if (!twcb) return; init_task_work(&twcb->twork, binder_do_fd_close); - __close_fd_get_file(fd, &twcb->file); + twcb->file = close_fd_get_file(fd); if (twcb->file) { + // pin it until binder_do_fd_close(); see comments there + get_file(twcb->file); filp_close(twcb->file, current->files); - task_work_add(current, &twcb->twork, true); + task_work_add(current, &twcb->twork, TWA_RESUME); } else { kfree(twcb); } } static void binder_transaction_buffer_release(struct binder_proc *proc, + struct binder_thread *thread, struct binder_buffer *buffer, binder_size_t failed_at, bool is_failure) @@ -2275,7 +1950,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, binder_dec_node(buffer->target_node, 1, 0); off_start_offset = ALIGN(buffer->data_size, sizeof(void *)); - off_end_offset = is_failure ? failed_at : + off_end_offset = is_failure && failed_at ? failed_at : off_start_offset + buffer->offsets_size; for (buffer_offset = off_start_offset; buffer_offset < off_end_offset; buffer_offset += sizeof(binder_size_t)) { @@ -2287,7 +1962,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset, buffer, buffer_offset, sizeof(object_offset))) - object_size = binder_get_object(proc, buffer, + object_size = binder_get_object(proc, NULL, buffer, object_offset, &object); if (object_size == 0) { pr_err("transaction release %d bad object at offset %lld, size %zd\n", @@ -2338,15 +2013,13 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, case BINDER_TYPE_FD: { /* * No need to close the file here since user-space - * closes it for for successfully delivered + * closes it for successfully delivered * transactions. 
For transactions that weren't * delivered, the new fd was never allocated so * there is no need to close and the fput on the * file is done when the transaction is torn * down. */ - WARN_ON(failed_at && - proc->tsk == current->group_leader); } break; case BINDER_TYPE_PTR: /* @@ -2363,9 +2036,8 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, binder_size_t fd_buf_size; binder_size_t num_valid; - if (proc->tsk != current->group_leader) { + if (is_failure) { /* - * Nothing to do if running in sender context * The fd fixups have not been applied so no * fds need to be closed. */ @@ -2419,8 +2091,16 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, &proc->alloc, &fd, buffer, offset, sizeof(fd)); WARN_ON(err); - if (!err) + if (!err) { binder_deferred_fd_close(fd); + /* + * Need to make sure the thread goes + * back to userspace to complete the + * deferred close + */ + if (thread) + thread->looper_need_return = true; + } } } break; default: @@ -2455,7 +2135,7 @@ static int binder_translate_binder(struct flat_binder_object *fp, ret = -EINVAL; goto done; } - if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) { + if (security_binder_transfer_binder(proc->cred, target_proc->cred)) { ret = -EPERM; goto done; } @@ -2501,7 +2181,7 @@ static int binder_translate_handle(struct flat_binder_object *fp, proc->pid, thread->pid, fp->handle); return -EINVAL; } - if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) { + if (security_binder_transfer_binder(proc->cred, target_proc->cred)) { ret = -EPERM; goto done; } @@ -2589,7 +2269,7 @@ static int binder_translate_fd(u32 fd, binder_size_t fd_offset, ret = -EBADF; goto err_fget; } - ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file); + ret = security_binder_transfer_file(proc->cred, target_proc->cred, file); if (ret < 0) { ret = -EPERM; goto err_security; @@ -2607,6 +2287,7 @@ static int binder_translate_fd(u32 fd, binder_size_t fd_offset, } fixup->file = file; fixup->offset = fd_offset; + fixup->target_fd = -1; trace_binder_transaction_fd_send(t, fd, fixup->offset); list_add_tail(&fixup->fixup_entry, &t->fd_fixups); @@ -2620,16 +2301,266 @@ err_fd_not_accepted: return ret; } -static int binder_translate_fd_array(struct binder_fd_array_object *fda, +/** + * struct binder_ptr_fixup - data to be fixed-up in target buffer + * @offset offset in target buffer to fixup + * @skip_size bytes to skip in copy (fixup will be written later) + * @fixup_data data to write at fixup offset + * @node list node + * + * This is used for the pointer fixup list (pf) which is created and consumed + * during binder_transaction() and is only accessed locally. No + * locking is necessary. + * + * The list is ordered by @offset. + */ +struct binder_ptr_fixup { + binder_size_t offset; + size_t skip_size; + binder_uintptr_t fixup_data; + struct list_head node; +}; + +/** + * struct binder_sg_copy - scatter-gather data to be copied + * @offset offset in target buffer + * @sender_uaddr user address in source buffer + * @length bytes to copy + * @node list node + * + * This is used for the sg copy list (sgc) which is created and consumed + * during binder_transaction() and is only accessed locally. No + * locking is necessary. + * + * The list is ordered by @offset. 
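
These two list types drive a copy-then-patch scheme: scatter-gather chunks and fixup offsets are recorded during translation, and the deferred copy later streams sender data into the target buffer while substituting (or skipping) the bytes at each fixup, so untranslated values never become visible there. A simplified single-chunk sketch of that interleaving over flat byte buffers, with hypothetical names:

    #include <stdio.h>
    #include <string.h>

    struct fixup { size_t offset; unsigned char value; };

    /* Stream src into dst, but at each recorded offset write the
     * fixup value instead of the source byte -- a one-chunk analogue
     * of binder_do_deferred_txn_copies(). */
    static void copy_with_fixups(unsigned char *dst, const unsigned char *src,
                                 size_t len, const struct fixup *pf, size_t nf)
    {
            size_t copied = 0, i = 0;

            while (copied < len) {
                    /* copy up to the next fixup (or to the end) */
                    size_t stop = (i < nf) ? pf[i].offset : len;

                    memcpy(dst + copied, src + copied, stop - copied);
                    copied = stop;
                    if (i < nf) {   /* apply the fixup over the source byte */
                            dst[copied] = pf[i].value;
                            copied++;
                            i++;
                    }
            }
    }

    int main(void)
    {
            unsigned char src[8] = "AAAAAAA", dst[8] = { 0 };
            struct fixup pf[] = { { .offset = 2, .value = 'X' },
                                  { .offset = 5, .value = 'Y' } };

            copy_with_fixups(dst, src, sizeof(src), pf, 2);
            printf("%s\n", dst);    /* AAXAAYA */
            return 0;
    }
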
+ */ +struct binder_sg_copy { + binder_size_t offset; + const void __user *sender_uaddr; + size_t length; + struct list_head node; +}; + +/** + * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data + * @alloc: binder_alloc associated with @buffer + * @buffer: binder buffer in target process + * @sgc_head: list_head of scatter-gather copy list + * @pf_head: list_head of pointer fixup list + * + * Processes all elements of @sgc_head, applying fixups from @pf_head + * and copying the scatter-gather data from the source process' user + * buffer to the target's buffer. It is expected that the list creation + * and processing all occurs during binder_transaction() so these lists + * are only accessed in local context. + * + * Return: 0=success, else -errno + */ +static int binder_do_deferred_txn_copies(struct binder_alloc *alloc, + struct binder_buffer *buffer, + struct list_head *sgc_head, + struct list_head *pf_head) +{ + int ret = 0; + struct binder_sg_copy *sgc, *tmpsgc; + struct binder_ptr_fixup *tmppf; + struct binder_ptr_fixup *pf = + list_first_entry_or_null(pf_head, struct binder_ptr_fixup, + node); + + list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) { + size_t bytes_copied = 0; + + while (bytes_copied < sgc->length) { + size_t copy_size; + size_t bytes_left = sgc->length - bytes_copied; + size_t offset = sgc->offset + bytes_copied; + + /* + * We copy up to the fixup (pointed to by pf) + */ + copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset) + : bytes_left; + if (!ret && copy_size) + ret = binder_alloc_copy_user_to_buffer( + alloc, buffer, + offset, + sgc->sender_uaddr + bytes_copied, + copy_size); + bytes_copied += copy_size; + if (copy_size != bytes_left) { + BUG_ON(!pf); + /* we stopped at a fixup offset */ + if (pf->skip_size) { + /* + * we are just skipping. This is for + * BINDER_TYPE_FDA where the translated + * fds will be fixed up when we get + * to target context. + */ + bytes_copied += pf->skip_size; + } else { + /* apply the fixup indicated by pf */ + if (!ret) + ret = binder_alloc_copy_to_buffer( + alloc, buffer, + pf->offset, + &pf->fixup_data, + sizeof(pf->fixup_data)); + bytes_copied += sizeof(pf->fixup_data); + } + list_del(&pf->node); + kfree(pf); + pf = list_first_entry_or_null(pf_head, + struct binder_ptr_fixup, node); + } + } + list_del(&sgc->node); + kfree(sgc); + } + list_for_each_entry_safe(pf, tmppf, pf_head, node) { + BUG_ON(pf->skip_size == 0); + list_del(&pf->node); + kfree(pf); + } + BUG_ON(!list_empty(sgc_head)); + + return ret > 0 ? -EINVAL : ret; +} + +/** + * binder_cleanup_deferred_txn_lists() - free specified lists + * @sgc_head: list_head of scatter-gather copy list + * @pf_head: list_head of pointer fixup list + * + * Called to clean up @sgc_head and @pf_head if there is an + * error. + */ +static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head, + struct list_head *pf_head) +{ + struct binder_sg_copy *sgc, *tmpsgc; + struct binder_ptr_fixup *pf, *tmppf; + + list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) { + list_del(&sgc->node); + kfree(sgc); + } + list_for_each_entry_safe(pf, tmppf, pf_head, node) { + list_del(&pf->node); + kfree(pf); + } +} + +/** + * binder_defer_copy() - queue a scatter-gather buffer for copy + * @sgc_head: list_head of scatter-gather copy list + * @offset: binder buffer offset in target process + * @sender_uaddr: user address in source process + * @length: bytes to copy + * + * Specify a scatter-gather block to be copied. 
The actual copy must + * be deferred until all the needed fixups are identified and queued. + * Then the copy and fixups are done together so un-translated values + * from the source are never visible in the target buffer. + * + * We are guaranteed that repeated calls to this function will have + * monotonically increasing @offset values so the list will naturally + * be ordered. + * + * Return: 0=success, else -errno + */ +static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset, + const void __user *sender_uaddr, size_t length) +{ + struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL); + + if (!bc) + return -ENOMEM; + + bc->offset = offset; + bc->sender_uaddr = sender_uaddr; + bc->length = length; + INIT_LIST_HEAD(&bc->node); + + /* + * We are guaranteed that the deferred copies are in-order + * so just add to the tail. + */ + list_add_tail(&bc->node, sgc_head); + + return 0; +} + +/** + * binder_add_fixup() - queue a fixup to be applied to sg copy + * @pf_head: list_head of binder ptr fixup list + * @offset: binder buffer offset in target process + * @fixup: bytes to be copied for fixup + * @skip_size: bytes to skip when copying (fixup will be applied later) + * + * Add the specified fixup to a list ordered by @offset. When copying + * the scatter-gather buffers, the fixup will be copied instead of + * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup + * will be applied later (in target process context), so we just skip + * the bytes specified by @skip_size. If @skip_size is 0, we copy the + * value in @fixup. + * + * This function is called *mostly* in @offset order, but there are + * exceptions. Since out-of-order inserts are relatively uncommon, + * we insert the new element by searching backward from the tail of + * the list. + * + * Return: 0=success, else -errno + */ +static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset, + binder_uintptr_t fixup, size_t skip_size) +{ + struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL); + struct binder_ptr_fixup *tmppf; + + if (!pf) + return -ENOMEM; + + pf->offset = offset; + pf->fixup_data = fixup; + pf->skip_size = skip_size; + INIT_LIST_HEAD(&pf->node); + + /* Fixups are *mostly* added in-order, but there are some + * exceptions. Look backwards through list for insertion point. 
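
Because fixups arrive almost sorted, the reverse walk described here finds the insertion point immediately in the common in-order case. A self-contained sketch of the same tail-first ordered insert on a tiny circular doubly-linked list (hand-rolled in place of the kernel's list_head):

    #include <stdio.h>
    #include <stdlib.h>

    struct fixup {
            size_t offset;
            struct fixup *prev, *next;
    };

    static struct fixup head = { 0, &head, &head };  /* circular sentinel */

    /* Walk backward from the tail; the first node with a smaller
     * offset marks where the new fixup belongs. */
    static void add_fixup(size_t offset)
    {
            struct fixup *pf = malloc(sizeof(*pf)), *pos;

            if (!pf)
                    return;
            pf->offset = offset;
            for (pos = head.prev; pos != &head; pos = pos->prev)
                    if (pos->offset < offset)
                            break;
            pf->prev = pos;         /* insert after pos (sentinel if none) */
            pf->next = pos->next;
            pos->next->prev = pf;
            pos->next = pf;
    }

    int main(void)
    {
            add_fixup(8);
            add_fixup(16);
            add_fixup(4);           /* the rare out-of-order case */
            for (struct fixup *f = head.next; f != &head; f = f->next)
                    printf("%zu ", f->offset);      /* 4 8 16 */
            putchar('\n');
            return 0;
    }
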
+ */ + list_for_each_entry_reverse(tmppf, pf_head, node) { + if (tmppf->offset < pf->offset) { + list_add(&pf->node, &tmppf->node); + return 0; + } + } + /* + * if we get here, then the new offset is the lowest so + * insert at the head + */ + list_add(&pf->node, pf_head); + return 0; +} + +static int binder_translate_fd_array(struct list_head *pf_head, + struct binder_fd_array_object *fda, + const void __user *sender_ubuffer, struct binder_buffer_object *parent, + struct binder_buffer_object *sender_uparent, struct binder_transaction *t, struct binder_thread *thread, struct binder_transaction *in_reply_to) { binder_size_t fdi, fd_buf_size; binder_size_t fda_offset; + const void __user *sender_ufda_base; struct binder_proc *proc = thread->proc; - struct binder_proc *target_proc = t->to_proc; + int ret; + + if (fda->num_fds == 0) + return 0; fd_buf_size = sizeof(u32) * fda->num_fds; if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { @@ -2653,29 +2584,36 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda, */ fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) + fda->parent_offset; - if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) { + sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer + + fda->parent_offset; + + if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) || + !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) { binder_user_error("%d:%d parent offset not aligned correctly.\n", proc->pid, thread->pid); return -EINVAL; } + ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32)); + if (ret) + return ret; + for (fdi = 0; fdi < fda->num_fds; fdi++) { u32 fd; - int ret; binder_size_t offset = fda_offset + fdi * sizeof(fd); + binder_size_t sender_uoffset = fdi * sizeof(fd); - ret = binder_alloc_copy_from_buffer(&target_proc->alloc, - &fd, t->buffer, - offset, sizeof(fd)); + ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd)); if (!ret) ret = binder_translate_fd(fd, offset, t, thread, in_reply_to); - if (ret < 0) - return ret; + if (ret) + return ret > 0 ? -EINVAL : ret; } return 0; } -static int binder_fixup_parent(struct binder_transaction *t, +static int binder_fixup_parent(struct list_head *pf_head, + struct binder_transaction *t, struct binder_thread *thread, struct binder_buffer_object *bp, binder_size_t off_start_offset, @@ -2721,14 +2659,57 @@ static int binder_fixup_parent(struct binder_transaction *t, } buffer_offset = bp->parent_offset + (uintptr_t)parent->buffer - (uintptr_t)b->user_data; - if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset, - &bp->buffer, sizeof(bp->buffer))) { - binder_user_error("%d:%d got transaction with invalid parent offset\n", - proc->pid, thread->pid); - return -EINVAL; - } + return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0); +} - return 0; +/** + * binder_can_update_transaction() - Can a txn be superseded by an updated one? 
+ * @t1: the pending async txn in the frozen process + * @t2: the new async txn to supersede the outdated pending one + * + * Return: true if t2 can supersede t1 + * false if t2 can not supersede t1 + */ +static bool binder_can_update_transaction(struct binder_transaction *t1, + struct binder_transaction *t2) +{ + if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) != + (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc) + return false; + if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code && + t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid && + t1->buffer->target_node->ptr == t2->buffer->target_node->ptr && + t1->buffer->target_node->cookie == t2->buffer->target_node->cookie) + return true; + return false; +} + +/** + * binder_find_outdated_transaction_ilocked() - Find the outdated transaction + * @t: new async transaction + * @target_list: list to find outdated transaction + * + * Return: the outdated transaction if found + * NULL if no outdated transacton can be found + * + * Requires the proc->inner_lock to be held. + */ +static struct binder_transaction * +binder_find_outdated_transaction_ilocked(struct binder_transaction *t, + struct list_head *target_list) +{ + struct binder_work *w; + + list_for_each_entry(w, target_list, entry) { + struct binder_transaction *t_queued; + + if (w->type != BINDER_WORK_TRANSACTION) + continue; + t_queued = container_of(w, struct binder_transaction, work); + if (binder_can_update_transaction(t_queued, t)) + return t_queued; + } + return NULL; } /** @@ -2745,53 +2726,88 @@ static int binder_fixup_parent(struct binder_transaction *t, * If the @thread parameter is not NULL, the transaction is always queued * to the waitlist of that specific thread. * - * Return: true if the transactions was successfully queued - * false if the target process or thread is dead + * Return: 0 if the transaction was successfully queued + * BR_DEAD_REPLY if the target process or thread is dead + * BR_FROZEN_REPLY if the target process or thread is frozen */ -static bool binder_proc_transaction(struct binder_transaction *t, +static int binder_proc_transaction(struct binder_transaction *t, struct binder_proc *proc, struct binder_thread *thread) { struct binder_node *node = t->buffer->target_node; bool oneway = !!(t->flags & TF_ONE_WAY); bool pending_async = false; + struct binder_transaction *t_outdated = NULL; BUG_ON(!node); binder_node_lock(node); if (oneway) { BUG_ON(thread); - if (node->has_async_transaction) { + if (node->has_async_transaction) pending_async = true; - } else { + else node->has_async_transaction = true; - } } binder_inner_proc_lock(proc); + if (proc->is_frozen) { + proc->sync_recv |= !oneway; + proc->async_recv |= oneway; + } - if (proc->is_dead || (thread && thread->is_dead)) { + if ((proc->is_frozen && !oneway) || proc->is_dead || + (thread && thread->is_dead)) { binder_inner_proc_unlock(proc); binder_node_unlock(node); - return false; + return proc->is_frozen ? 
BR_FROZEN_REPLY : BR_DEAD_REPLY; } if (!thread && !pending_async) thread = binder_select_thread_ilocked(proc); - if (thread) + if (thread) { binder_enqueue_thread_work_ilocked(thread, &t->work); - else if (!pending_async) + } else if (!pending_async) { binder_enqueue_work_ilocked(&t->work, &proc->todo); - else + } else { + if ((t->flags & TF_UPDATE_TXN) && proc->is_frozen) { + t_outdated = binder_find_outdated_transaction_ilocked(t, + &node->async_todo); + if (t_outdated) { + binder_debug(BINDER_DEBUG_TRANSACTION, + "txn %d supersedes %d\n", + t->debug_id, t_outdated->debug_id); + list_del_init(&t_outdated->work.entry); + proc->outstanding_txns--; + } + } binder_enqueue_work_ilocked(&t->work, &node->async_todo); + } if (!pending_async) binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */); + proc->outstanding_txns++; binder_inner_proc_unlock(proc); binder_node_unlock(node); - return true; + /* + * To reduce potential contention, free the outdated transaction and + * buffer after releasing the locks. + */ + if (t_outdated) { + struct binder_buffer *buffer = t_outdated->buffer; + + t_outdated->buffer = NULL; + buffer->transaction = NULL; + trace_binder_transaction_update_buffer_release(buffer); + binder_transaction_buffer_release(proc, NULL, buffer, 0, 0); + binder_alloc_free_buf(&proc->alloc, buffer); + kfree(t_outdated); + binder_stats_deleted(BINDER_STAT_TRANSACTION); + } + + return 0; } /** @@ -2836,6 +2852,24 @@ static struct binder_node *binder_get_node_refs_for_txn( return target_node; } +static void binder_set_txn_from_error(struct binder_transaction *t, int id, + uint32_t command, int32_t param) +{ + struct binder_thread *from = binder_get_txn_from_and_acq_inner(t); + + if (!from) { + /* annotation for sparse */ + __release(&from->proc->inner_lock); + return; + } + + /* don't override existing errors */ + if (from->ee.command == BR_OK) + binder_set_extended_error(&from->ee, id, command, param); + binder_inner_proc_unlock(from->proc); + binder_thread_dec_tmpref(from); +} + static void binder_transaction(struct binder_proc *proc, struct binder_thread *thread, struct binder_transaction_data *tr, int reply, @@ -2849,6 +2883,7 @@ static void binder_transaction(struct binder_proc *proc, binder_size_t off_start_offset, off_end_offset; binder_size_t off_min; binder_size_t sg_buf_offset, sg_buf_end_offset; + binder_size_t user_offset = 0; struct binder_proc *target_proc = NULL; struct binder_thread *target_thread = NULL; struct binder_node *target_node = NULL; @@ -2863,6 +2898,12 @@ static void binder_transaction(struct binder_proc *proc, int t_debug_id = atomic_inc_return(&binder_last_id); char *secctx = NULL; u32 secctx_sz = 0; + struct list_head sgc_head; + struct list_head pf_head; + const void __user *user_buffer = (const void __user *) + (uintptr_t)tr->data.ptr.buffer; + INIT_LIST_HEAD(&sgc_head); + INIT_LIST_HEAD(&pf_head); e = binder_transaction_log_add(&binder_transaction_log); e->debug_id = t_debug_id; @@ -2874,6 +2915,10 @@ static void binder_transaction(struct binder_proc *proc, e->offsets_size = tr->offsets_size; strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME); + binder_inner_proc_lock(proc); + binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0); + binder_inner_proc_unlock(proc); + if (reply) { binder_inner_proc_lock(proc); in_reply_to = thread->transaction_stack; @@ -2909,6 +2954,8 @@ static void binder_transaction(struct binder_proc *proc, if (target_thread == NULL) { /* annotation for sparse */ __release(&target_thread->proc->inner_lock); + 
binder_txn_error("%d:%d reply target not found\n", + thread->pid, proc->pid); return_error = BR_DEAD_REPLY; return_error_line = __LINE__; goto err_dead_binder; @@ -2949,8 +2996,8 @@ static void binder_transaction(struct binder_proc *proc, ref->node, &target_proc, &return_error); } else { - binder_user_error("%d:%d got transaction to invalid handle\n", - proc->pid, thread->pid); + binder_user_error("%d:%d got transaction to invalid handle, %u\n", + proc->pid, thread->pid, tr->target.handle); return_error = BR_FAILED_REPLY; } binder_proc_unlock(proc); @@ -2974,6 +3021,8 @@ static void binder_transaction(struct binder_proc *proc, } } if (!target_node) { + binder_txn_error("%d:%d cannot find target node\n", + thread->pid, proc->pid); /* * return_error is set above */ @@ -2982,8 +3031,18 @@ static void binder_transaction(struct binder_proc *proc, goto err_dead_binder; } e->to_node = target_node->debug_id; - if (security_binder_transaction(proc->tsk, - target_proc->tsk) < 0) { + if (WARN_ON(proc == target_proc)) { + binder_txn_error("%d:%d self transactions not allowed\n", + thread->pid, proc->pid); + return_error = BR_FAILED_REPLY; + return_error_param = -EINVAL; + return_error_line = __LINE__; + goto err_invalid_target_handle; + } + if (security_binder_transaction(proc->cred, + target_proc->cred) < 0) { + binder_txn_error("%d:%d transaction credentials failed\n", + thread->pid, proc->pid); return_error = BR_FAILED_REPLY; return_error_param = -EPERM; return_error_line = __LINE__; @@ -3055,6 +3114,8 @@ static void binder_transaction(struct binder_proc *proc, /* TODO: reuse incoming transaction for reply */ t = kzalloc(sizeof(*t), GFP_KERNEL); if (t == NULL) { + binder_txn_error("%d:%d cannot allocate transaction\n", + thread->pid, proc->pid); return_error = BR_FAILED_REPLY; return_error_param = -ENOMEM; return_error_line = __LINE__; @@ -3066,6 +3127,8 @@ static void binder_transaction(struct binder_proc *proc, tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); if (tcomplete == NULL) { + binder_txn_error("%d:%d cannot allocate work for transaction\n", + thread->pid, proc->pid); return_error = BR_FAILED_REPLY; return_error_param = -ENOMEM; return_error_line = __LINE__; @@ -3109,9 +3172,11 @@ static void binder_transaction(struct binder_proc *proc, u32 secid; size_t added_size; - security_task_getsecid(proc->tsk, &secid); + security_cred_getsecid(proc->cred, &secid); ret = security_secid_to_secctx(secid, &secctx, &secctx_sz); if (ret) { + binder_txn_error("%d:%d failed to get security context\n", + thread->pid, proc->pid); return_error = BR_FAILED_REPLY; return_error_param = ret; return_error_line = __LINE__; @@ -3120,9 +3185,10 @@ static void binder_transaction(struct binder_proc *proc, added_size = ALIGN(secctx_sz, sizeof(u64)); extra_buffers_size += added_size; if (extra_buffers_size < added_size) { - /* integer overflow of extra_buffers_size */ + binder_txn_error("%d:%d integer overflow of extra_buffers_size\n", + thread->pid, proc->pid); return_error = BR_FAILED_REPLY; - return_error_param = EINVAL; + return_error_param = -EINVAL; return_error_line = __LINE__; goto err_bad_extra_size; } @@ -3132,11 +3198,17 @@ static void binder_transaction(struct binder_proc *proc, t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size, tr->offsets_size, extra_buffers_size, - !reply && (t->flags & TF_ONE_WAY)); + !reply && (t->flags & TF_ONE_WAY), current->tgid); if (IS_ERR(t->buffer)) { - /* - * -ESRCH indicates VMA cleared. The target is dying. 
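
The extended-error plumbing added in this hunk is first-error-wins: binder_transaction() resets the thread's ee to BR_OK on entry, and binder_set_txn_from_error() declines to overwrite a command that is already recorded. A compact userspace sketch of that convention, with hypothetical names and only a few BR_* values:

    #include <stdio.h>

    enum { BR_OK = 0, BR_DEAD_REPLY = 1, BR_FAILED_REPLY = 2 };

    struct extended_error { int id, command, param; };

    /* Mirrors binder_set_txn_from_error(): keep the first error,
     * ignore anything reported after it. */
    static void set_extended_error(struct extended_error *ee,
                                   int id, int command, int param)
    {
            if (ee->command != BR_OK)
                    return;         /* don't override existing errors */
            ee->id = id;
            ee->command = command;
            ee->param = param;
    }

    int main(void)
    {
            struct extended_error ee = { .command = BR_OK };

            set_extended_error(&ee, 7, BR_DEAD_REPLY, -22);
            set_extended_error(&ee, 8, BR_FAILED_REPLY, -1);  /* ignored */
            printf("id=%d cmd=%d param=%d\n", ee.id, ee.command, ee.param);
            return 0;
    }
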
- */ + char *s; + + ret = PTR_ERR(t->buffer); + s = (ret == -ESRCH) ? ": vma cleared, target dead or dying" + : (ret == -ENOSPC) ? ": no space left" + : (ret == -ENOMEM) ? ": memory allocation failed" + : ""; + binder_txn_error("cannot allocate buffer%s", s); + return_error_param = PTR_ERR(t->buffer); return_error = return_error_param == -ESRCH ? BR_DEAD_REPLY : BR_FAILED_REPLY; @@ -3165,23 +3237,11 @@ static void binder_transaction(struct binder_proc *proc, t->buffer->debug_id = t->debug_id; t->buffer->transaction = t; t->buffer->target_node = target_node; + t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF); trace_binder_transaction_alloc_buf(t->buffer); if (binder_alloc_copy_user_to_buffer( &target_proc->alloc, - t->buffer, 0, - (const void __user *) - (uintptr_t)tr->data.ptr.buffer, - tr->data_size)) { - binder_user_error("%d:%d got transaction with invalid data ptr\n", - proc->pid, thread->pid); - return_error = BR_FAILED_REPLY; - return_error_param = -EFAULT; - return_error_line = __LINE__; - goto err_copy_data_failed; - } - if (binder_alloc_copy_user_to_buffer( - &target_proc->alloc, t->buffer, ALIGN(tr->data_size, sizeof(void *)), (const void __user *) @@ -3224,19 +3284,41 @@ static void binder_transaction(struct binder_proc *proc, size_t object_size; struct binder_object object; binder_size_t object_offset; + binder_size_t copy_size; if (binder_alloc_copy_from_buffer(&target_proc->alloc, &object_offset, t->buffer, buffer_offset, sizeof(object_offset))) { + binder_txn_error("%d:%d copy offset from buffer failed\n", + thread->pid, proc->pid); return_error = BR_FAILED_REPLY; return_error_param = -EINVAL; return_error_line = __LINE__; goto err_bad_offset; } - object_size = binder_get_object(target_proc, t->buffer, - object_offset, &object); + + /* + * Copy the source user buffer up to the next object + * that will be processed. 
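
Under the new deferred scheme the sender's buffer is no longer copied wholesale before the object loop; each iteration copies only the fragment between the last copied position and the next object, writes the translated object, and a final copy picks up the tail. A toy sketch of that fragment-by-fragment advance (hypothetical names and a fixed two-byte "object"):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char user_buffer[16] = "aaOObbOOccdd";  /* OO = objects */
            char target[16] = { 0 };
            size_t object_offsets[] = { 2, 6 };           /* sorted */
            size_t object_size = 2, user_offset = 0;

            for (size_t i = 0; i < 2; i++) {
                    size_t object_offset = object_offsets[i];

                    /* copy the raw fragment before the object ... */
                    memcpy(target + user_offset, user_buffer + user_offset,
                           object_offset - user_offset);
                    /* ... write the translated object in its place ... */
                    memcpy(target + object_offset, "TT", object_size);
                    /* ... and resume copying after it. */
                    user_offset = object_offset + object_size;
            }
            /* done processing objects, copy the rest of the buffer */
            memcpy(target + user_offset, user_buffer + user_offset,
                   sizeof(user_buffer) - user_offset);
            printf("%s\n", target); /* aaTTbbTTccdd */
            return 0;
    }
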
+ */ + copy_size = object_offset - user_offset; + if (copy_size && (user_offset > object_offset || + binder_alloc_copy_user_to_buffer( + &target_proc->alloc, + t->buffer, user_offset, + user_buffer + user_offset, + copy_size))) { + binder_user_error("%d:%d got transaction with invalid data ptr\n", + proc->pid, thread->pid); + return_error = BR_FAILED_REPLY; + return_error_param = -EFAULT; + return_error_line = __LINE__; + goto err_copy_data_failed; + } + object_size = binder_get_object(target_proc, user_buffer, + t->buffer, object_offset, &object); if (object_size == 0 || object_offset < off_min) { binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n", proc->pid, thread->pid, @@ -3248,6 +3330,11 @@ static void binder_transaction(struct binder_proc *proc, return_error_line = __LINE__; goto err_bad_offset; } + /* + * Set offset to the next buffer fragment to be + * copied + */ + user_offset = object_offset + object_size; hdr = &object.hdr; off_min = object_offset + object_size; @@ -3264,6 +3351,8 @@ static void binder_transaction(struct binder_proc *proc, t->buffer, object_offset, fp, sizeof(*fp))) { + binder_txn_error("%d:%d translate binder failed\n", + thread->pid, proc->pid); return_error = BR_FAILED_REPLY; return_error_param = ret; return_error_line = __LINE__; @@ -3281,6 +3370,8 @@ static void binder_transaction(struct binder_proc *proc, t->buffer, object_offset, fp, sizeof(*fp))) { + binder_txn_error("%d:%d translate handle failed\n", + thread->pid, proc->pid); return_error = BR_FAILED_REPLY; return_error_param = ret; return_error_line = __LINE__; @@ -3301,6 +3392,8 @@ static void binder_transaction(struct binder_proc *proc, t->buffer, object_offset, fp, sizeof(*fp))) { + binder_txn_error("%d:%d translate fd failed\n", + thread->pid, proc->pid); return_error = BR_FAILED_REPLY; return_error_param = ret; return_error_line = __LINE__; @@ -3310,6 +3403,8 @@ static void binder_transaction(struct binder_proc *proc, case BINDER_TYPE_FDA: { struct binder_object ptr_object; binder_size_t parent_offset; + struct binder_object user_object; + size_t user_parent_size; struct binder_fd_array_object *fda = to_binder_fd_array_object(hdr); size_t num_valid = (buffer_offset - off_start_offset) / @@ -3341,11 +3436,37 @@ static void binder_transaction(struct binder_proc *proc, return_error_line = __LINE__; goto err_bad_parent; } - ret = binder_translate_fd_array(fda, parent, t, thread, - in_reply_to); - if (ret < 0) { + /* + * We need to read the user version of the parent + * object to get the original user offset + */ + user_parent_size = + binder_get_object(proc, user_buffer, t->buffer, + parent_offset, &user_object); + if (user_parent_size != sizeof(user_object.bbo)) { + binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n", + proc->pid, thread->pid, + user_parent_size, + sizeof(user_object.bbo)); return_error = BR_FAILED_REPLY; - return_error_param = ret; + return_error_param = -EINVAL; + return_error_line = __LINE__; + goto err_bad_parent; + } + ret = binder_translate_fd_array(&pf_head, fda, + user_buffer, parent, + &user_object.bbo, t, + thread, in_reply_to); + if (!ret) + ret = binder_alloc_copy_to_buffer(&target_proc->alloc, + t->buffer, + object_offset, + fda, sizeof(*fda)); + if (ret) { + binder_txn_error("%d:%d translate fd array failed\n", + thread->pid, proc->pid); + return_error = BR_FAILED_REPLY; + return_error_param = ret > 0 ? 
-EINVAL : ret; return_error_line = __LINE__; goto err_translate_failed; } @@ -3367,19 +3488,16 @@ static void binder_transaction(struct binder_proc *proc, return_error_line = __LINE__; goto err_bad_offset; } - if (binder_alloc_copy_user_to_buffer( - &target_proc->alloc, - t->buffer, - sg_buf_offset, - (const void __user *) - (uintptr_t)bp->buffer, - bp->length)) { - binder_user_error("%d:%d got transaction with invalid offsets ptr\n", - proc->pid, thread->pid); - return_error_param = -EFAULT; + ret = binder_defer_copy(&sgc_head, sg_buf_offset, + (const void __user *)(uintptr_t)bp->buffer, + bp->length); + if (ret) { + binder_txn_error("%d:%d deferred copy failed\n", + thread->pid, proc->pid); return_error = BR_FAILED_REPLY; + return_error_param = ret; return_error_line = __LINE__; - goto err_copy_data_failed; + goto err_translate_failed; } /* Fixup buffer pointer to target proc address space */ bp->buffer = (uintptr_t) @@ -3388,7 +3506,8 @@ static void binder_transaction(struct binder_proc *proc, num_valid = (buffer_offset - off_start_offset) / sizeof(binder_size_t); - ret = binder_fixup_parent(t, thread, bp, + ret = binder_fixup_parent(&pf_head, t, + thread, bp, off_start_offset, num_valid, last_fixup_obj_off, @@ -3398,6 +3517,8 @@ static void binder_transaction(struct binder_proc *proc, t->buffer, object_offset, bp, sizeof(*bp))) { + binder_txn_error("%d:%d failed to fixup parent\n", + thread->pid, proc->pid); return_error = BR_FAILED_REPLY; return_error_param = ret; return_error_line = __LINE__; @@ -3415,19 +3536,48 @@ static void binder_transaction(struct binder_proc *proc, goto err_bad_object_type; } } - tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; + /* Done processing objects, copy the rest of the buffer */ + if (binder_alloc_copy_user_to_buffer( + &target_proc->alloc, + t->buffer, user_offset, + user_buffer + user_offset, + tr->data_size - user_offset)) { + binder_user_error("%d:%d got transaction with invalid data ptr\n", + proc->pid, thread->pid); + return_error = BR_FAILED_REPLY; + return_error_param = -EFAULT; + return_error_line = __LINE__; + goto err_copy_data_failed; + } + + ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer, + &sgc_head, &pf_head); + if (ret) { + binder_user_error("%d:%d got transaction with invalid offsets ptr\n", + proc->pid, thread->pid); + return_error = BR_FAILED_REPLY; + return_error_param = ret; + return_error_line = __LINE__; + goto err_copy_data_failed; + } + if (t->buffer->oneway_spam_suspect) + tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT; + else + tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; t->work.type = BINDER_WORK_TRANSACTION; if (reply) { binder_enqueue_thread_work(thread, tcomplete); binder_inner_proc_lock(target_proc); if (target_thread->is_dead) { + return_error = BR_DEAD_REPLY; binder_inner_proc_unlock(target_proc); goto err_dead_proc_or_thread; } BUG_ON(t->buffer->async_transaction != 0); binder_pop_transaction_ilocked(target_thread, in_reply_to); binder_enqueue_thread_work_ilocked(target_thread, &t->work); + target_proc->outstanding_txns++; binder_inner_proc_unlock(target_proc); wake_up_interruptible_sync(&target_thread->wait); binder_free_transaction(in_reply_to); @@ -3446,7 +3596,9 @@ static void binder_transaction(struct binder_proc *proc, t->from_parent = thread->transaction_stack; thread->transaction_stack = t; binder_inner_proc_unlock(proc); - if (!binder_proc_transaction(t, target_proc, target_thread)) { + return_error = binder_proc_transaction(t, + target_proc, target_thread); + if 
(return_error) { binder_inner_proc_lock(proc); binder_pop_transaction_ilocked(thread, t); binder_inner_proc_unlock(proc); @@ -3456,7 +3608,8 @@ static void binder_transaction(struct binder_proc *proc, BUG_ON(target_node == NULL); BUG_ON(t->buffer->async_transaction != 1); binder_enqueue_thread_work(thread, tcomplete); - if (!binder_proc_transaction(t, target_proc, NULL)) + return_error = binder_proc_transaction(t, target_proc, NULL); + if (return_error) goto err_dead_proc_or_thread; } if (target_thread) @@ -3473,7 +3626,8 @@ static void binder_transaction(struct binder_proc *proc, return; err_dead_proc_or_thread: - return_error = BR_DEAD_REPLY; + binder_txn_error("%d:%d dead process or thread\n", + thread->pid, proc->pid); return_error_line = __LINE__; binder_dequeue_work(proc, tcomplete); err_translate_failed: @@ -3481,9 +3635,10 @@ err_bad_object_type: err_bad_offset: err_bad_parent: err_copy_data_failed: + binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head); binder_free_txn_fixups(t); trace_binder_transaction_failed_buffer_release(t->buffer); - binder_transaction_buffer_release(target_proc, t->buffer, + binder_transaction_buffer_release(target_proc, NULL, t->buffer, buffer_offset, true); if (target_node) binder_dec_node_tmpref(target_node); @@ -3498,6 +3653,8 @@ err_get_secctx_failed: kfree(tcomplete); binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); err_alloc_tcomplete_failed: + if (trace_binder_txn_latency_free_enabled()) + binder_txn_latency_free(t); kfree(t); binder_stats_deleted(BINDER_STAT_TRANSACTION); err_alloc_t_failed: @@ -3506,21 +3663,26 @@ err_bad_call_stack: err_empty_call_stack: err_dead_binder: err_invalid_target_handle: - if (target_thread) - binder_thread_dec_tmpref(target_thread); - if (target_proc) - binder_proc_dec_tmpref(target_proc); if (target_node) { binder_dec_node(target_node, 1, 0); binder_dec_node_tmpref(target_node); } binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, - "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n", - proc->pid, thread->pid, return_error, return_error_param, + "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n", + proc->pid, thread->pid, reply ? "reply" : + (tr->flags & TF_ONE_WAY ? "async" : "call"), + target_proc ? target_proc->pid : 0, + target_thread ? target_thread->pid : 0, + t_debug_id, return_error, return_error_param, (u64)tr->data_size, (u64)tr->offsets_size, return_error_line); + if (target_thread) + binder_thread_dec_tmpref(target_thread); + if (target_proc) + binder_proc_dec_tmpref(target_proc); + { struct binder_transaction_log_entry *fe; @@ -3540,10 +3702,16 @@ err_invalid_target_handle: BUG_ON(thread->return_error.cmd != BR_OK); if (in_reply_to) { + binder_set_txn_from_error(in_reply_to, t_debug_id, + return_error, return_error_param); thread->return_error.cmd = BR_TRANSACTION_COMPLETE; binder_enqueue_thread_work(thread, &thread->return_error.work); binder_send_failed_reply(in_reply_to, return_error); } else { + binder_inner_proc_lock(proc); + binder_set_extended_error(&thread->ee, t_debug_id, + return_error, return_error_param); + binder_inner_proc_unlock(proc); thread->return_error.cmd = return_error; binder_enqueue_thread_work(thread, &thread->return_error.work); } @@ -3553,6 +3721,7 @@ err_invalid_target_handle: * binder_free_buf() - free the specified buffer * @proc: binder proc that owns buffer * @buffer: buffer to be freed + * @is_failure: failed to send transaction * * If buffer for an async transaction, enqueue the next async * transaction from the node. 
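
binder_proc_transaction() now returns 0 or a specific BR_* code instead of a bool, so callers can propagate BR_FROZEN_REPLY distinctly from BR_DEAD_REPLY, as the updated kerneldoc above describes. A minimal sketch of that status-code refactor; struct proc and proc_transaction() are illustrative stand-ins:

    #include <stdio.h>

    enum { BR_OK = 0, BR_DEAD_REPLY, BR_FROZEN_REPLY };

    struct proc { int is_dead, is_frozen; };

    /* Was "return false" for every failure; now the reason survives. */
    static int proc_transaction(const struct proc *p, int oneway)
    {
            if ((p->is_frozen && !oneway) || p->is_dead)
                    return p->is_frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
            return BR_OK;
    }

    int main(void)
    {
            struct proc frozen = { .is_frozen = 1 };
            int err = proc_transaction(&frozen, 0 /* sync */);

            if (err)
                    printf("transaction failed: %s\n",
                           err == BR_FROZEN_REPLY ? "frozen" : "dead");
            return 0;
    }
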
@@ -3560,7 +3729,9 @@ err_invalid_target_handle:
  * Cleanup buffer and free it.
  */
 static void
-binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
+binder_free_buf(struct binder_proc *proc,
+		struct binder_thread *thread,
+		struct binder_buffer *buffer, bool is_failure)
 {
 	binder_inner_proc_lock(proc);
 	if (buffer->transaction) {
@@ -3588,7 +3759,7 @@ binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
 		binder_node_inner_unlock(buf_node);
 	}
 	trace_binder_transaction_buffer_release(buffer);
-	binder_transaction_buffer_release(proc, buffer, 0, false);
+	binder_transaction_buffer_release(proc, thread, buffer, 0, is_failure);
 	binder_alloc_free_buf(&proc->alloc, buffer);
 }

@@ -3633,12 +3804,20 @@ static int binder_thread_write(struct binder_proc *proc,
 			ret = -1;
 			if (increment && !target) {
 				struct binder_node *ctx_mgr_node;
+
 				mutex_lock(&context->context_mgr_node_lock);
 				ctx_mgr_node = context->binder_context_mgr_node;
-				if (ctx_mgr_node)
+				if (ctx_mgr_node) {
+					if (ctx_mgr_node->proc == proc) {
+						binder_user_error("%d:%d context manager tried to acquire desc 0\n",
+								  proc->pid, thread->pid);
+						mutex_unlock(&context->context_mgr_node_lock);
+						return -EINVAL;
+					}
 					ret = binder_inc_ref_for_node(
 							proc, ctx_mgr_node,
 							strong, NULL, &rdata);
+				}
 				mutex_unlock(&context->context_mgr_node_lock);
 			}
 			if (ret)
@@ -3782,7 +3961,7 @@ static int binder_thread_write(struct binder_proc *proc,
 				     proc->pid, thread->pid, (u64)data_ptr,
 				     buffer->debug_id,
 				     buffer->transaction ? "active" : "finished");
-			binder_free_buf(proc, buffer);
+			binder_free_buf(proc, thread, buffer, false);
 			break;
 		}

@@ -4022,7 +4201,7 @@
 		}
 		break;
 		default:
-			pr_err("%d:%d unknown command %d\n",
+			pr_err("%d:%d unknown command %u\n",
 			       proc->pid, thread->pid, cmd);
 			return -EINVAL;
 		}
@@ -4080,10 +4259,9 @@ static int binder_wait_for_work(struct binder_thread *thread,
 	struct binder_proc *proc = thread->proc;
 	int ret = 0;

-	freezer_do_not_count();
 	binder_inner_proc_lock(proc);
 	for (;;) {
-		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE);
 		if (binder_has_work_ilocked(thread, do_proc_work))
 			break;
 		if (do_proc_work)
@@ -4094,13 +4272,12 @@
 		binder_inner_proc_lock(proc);
 		list_del_init(&thread->waiting_thread_node);
 		if (signal_pending(current)) {
-			ret = -ERESTARTSYS;
+			ret = -EINTR;
 			break;
 		}
 	}
 	finish_wait(&thread->wait, &wait);
 	binder_inner_proc_unlock(proc);
-	freezer_count();

 	return ret;
 }
@@ -4113,10 +4290,9 @@
  * Now that we are in the context of the transaction target
  * process, we can allocate and install fds. Process the
  * list of fds to translate and fixup the buffer with the
- * new fds.
+ * new fds first and only then install the files.
  *
- * If we fail to allocate an fd, then free the resources by
- * fput'ing files that have not been processed and ksys_close'ing
+ * If we fail to allocate an fd, skip the install and release
  * any fds that have already been allocated.
  */
 static int binder_apply_fd_fixups(struct binder_proc *proc,
@@ -4133,41 +4309,31 @@
 				     "failed fd fixup txn %d fd %d\n",
 				     t->debug_id, fd);
 			ret = -ENOMEM;
-			break;
+			goto err;
 		}
 		binder_debug(BINDER_DEBUG_TRANSACTION,
 			     "fd fixup txn %d fd %d\n",
 			     t->debug_id, fd);
 		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
-		fd_install(fd, fixup->file);
-		fixup->file = NULL;
+		fixup->target_fd = fd;
 		if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
 						fixup->offset, &fd,
 						sizeof(u32))) {
 			ret = -EINVAL;
-			break;
+			goto err;
 		}
 	}
 	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
-		if (fixup->file) {
-			fput(fixup->file);
-		} else if (ret) {
-			u32 fd;
-			int err;
-
-			err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
-							    t->buffer,
-							    fixup->offset,
-							    sizeof(fd));
-			WARN_ON(err);
-			if (!err)
-				binder_deferred_fd_close(fd);
-		}
+		fd_install(fixup->target_fd, fixup->file);
 		list_del(&fixup->fixup_entry);
 		kfree(fixup);
 	}

 	return ret;
+
+err:
+	binder_free_txn_fixups(t);
+	return ret;
 }

 static int binder_thread_read(struct binder_proc *proc,
@@ -4273,9 +4439,14 @@ retry:

 			binder_stat_br(proc, thread, cmd);
 		} break;
-		case BINDER_WORK_TRANSACTION_COMPLETE: {
+		case BINDER_WORK_TRANSACTION_COMPLETE:
+		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
+			if (proc->oneway_spam_detection_enabled &&
+				   w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
+				cmd = BR_ONEWAY_SPAM_SUSPECT;
+			else
+				cmd = BR_TRANSACTION_COMPLETE;
 			binder_inner_proc_unlock(proc);
-			cmd = BR_TRANSACTION_COMPLETE;
 			kfree(w);
 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
 			if (put_user(cmd, (uint32_t __user *)ptr))
@@ -4470,7 +4641,7 @@ retry:
 			buffer->transaction = NULL;
 			binder_cleanup_transaction(t, "fd fixups failed",
 						   BR_FAILED_REPLY);
-			binder_free_buf(proc, buffer);
+			binder_free_buf(proc, thread, buffer, true);
 			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
 				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
 				     proc->pid, thread->pid,
@@ -4523,7 +4694,7 @@ retry:
 		trace_binder_transaction_received(t);
 		binder_stat_br(proc, thread, cmd);
 		binder_debug(BINDER_DEBUG_TRANSACTION,
-			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
+			     "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n",
 			     proc->pid, thread->pid,
 			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
 				(cmd == BR_TRANSACTION_SEC_CTX) ?
@@ -4576,13 +4747,17 @@ static void binder_release_work(struct binder_proc *proc,
 				struct list_head *list)
 {
 	struct binder_work *w;
+	enum binder_work_type wtype;

 	while (1) {
-		w = binder_dequeue_work_head(proc, list);
+		binder_inner_proc_lock(proc);
+		w = binder_dequeue_work_head_ilocked(list);
+		wtype = w ? w->type : 0;
+		binder_inner_proc_unlock(proc);
 		if (!w)
 			return;

-		switch (w->type) {
+		switch (wtype) {
 		case BINDER_WORK_TRANSACTION: {
 			struct binder_transaction *t;

@@ -4616,9 +4791,11 @@ static void binder_release_work(struct binder_proc *proc,
 			kfree(death);
 			binder_stats_deleted(BINDER_STAT_DEATH);
 		} break;
+		case BINDER_WORK_NODE:
+			break;
 		default:
 			pr_err("unexpected work type, %d, not freed\n",
-			       w->type);
+			       wtype);
 			break;
 		}
 	}
@@ -4659,6 +4836,7 @@ static struct binder_thread *binder_get_thread_ilocked(
 	thread->return_error.cmd = BR_OK;
 	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
 	thread->reply_error.cmd = BR_OK;
+	thread->ee.command = BR_OK;
 	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
 	return thread;
 }
@@ -4686,10 +4864,21 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc)

 static void binder_free_proc(struct binder_proc *proc)
 {
+	struct binder_device *device;
+
 	BUG_ON(!list_empty(&proc->todo));
 	BUG_ON(!list_empty(&proc->delivered_death));
+	if (proc->outstanding_txns)
+		pr_warn("%s: Unexpected outstanding_txns %d\n",
+			__func__, proc->outstanding_txns);
+	device = container_of(proc->context, struct binder_device, context);
+	if (refcount_dec_and_test(&device->ref)) {
+		kfree(proc->context->name);
+		kfree(device);
+	}
 	binder_alloc_deferred_release(&proc->alloc);
 	put_task_struct(proc->tsk);
+	put_cred(proc->cred);
 	binder_stats_deleted(BINDER_STAT_PROC);
 	kfree(proc);
 }
@@ -4744,6 +4933,7 @@ static int binder_thread_release(struct binder_proc *proc,
 			(t->to_thread == thread) ? "in" : "out");

 		if (t->to_thread == thread) {
+			thread->proc->outstanding_txns--;
 			t->to_proc = NULL;
 			t->to_thread = NULL;
 			if (t->buffer) {
@@ -4766,23 +4956,20 @@
 	__release(&t->lock);

 	/*
-	 * If this thread used poll, make sure we remove the waitqueue
-	 * from any epoll data structures holding it with POLLFREE.
-	 * waitqueue_active() is safe to use here because we're holding
-	 * the inner lock.
+	 * If this thread used poll, make sure we remove the waitqueue from any
+	 * poll data structures holding it.
 	 */
-	if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
-	    waitqueue_active(&thread->wait)) {
-		wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
-	}
+	if (thread->looper & BINDER_LOOPER_STATE_POLL)
+		wake_up_pollfree(&thread->wait);

 	binder_inner_proc_unlock(thread->proc);

 	/*
-	 * This is needed to avoid races between wake_up_poll() above and
-	 * and ep_remove_waitqueue() called for other reasons (eg the epoll file
-	 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
-	 * lock, so we can be sure it's done after calling synchronize_rcu().
+	 * This is needed to avoid races between wake_up_pollfree() above and
+	 * someone else removing the last entry from the queue for other reasons
+	 * (e.g. ep_remove_wait_queue() being called due to an epoll file
+	 * descriptor being closed). Such other users hold an RCU read lock, so
+	 * we can be sure they're done after we call synchronize_rcu().
 	 */
 	if (thread->looper & BINDER_LOOPER_STATE_POLL)
 		synchronize_rcu();
@@ -4900,7 +5087,7 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp,
 		ret = -EBUSY;
 		goto out;
 	}
-	ret = security_binder_set_context_mgr(proc->tsk);
+	ret = security_binder_set_context_mgr(proc->cred);
 	if (ret < 0)
 		goto out;
 	if (uid_valid(context->binder_context_mgr_uid)) {
@@ -4994,6 +5181,116 @@ static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
 	return 0;
 }

+static bool binder_txns_pending_ilocked(struct binder_proc *proc)
+{
+	struct rb_node *n;
+	struct binder_thread *thread;
+
+	if (proc->outstanding_txns > 0)
+		return true;
+
+	for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
+		thread = rb_entry(n, struct binder_thread, rb_node);
+		if (thread->transaction_stack)
+			return true;
+	}
+	return false;
+}
+
+static int binder_ioctl_freeze(struct binder_freeze_info *info,
+			       struct binder_proc *target_proc)
+{
+	int ret = 0;
+
+	if (!info->enable) {
+		binder_inner_proc_lock(target_proc);
+		target_proc->sync_recv = false;
+		target_proc->async_recv = false;
+		target_proc->is_frozen = false;
+		binder_inner_proc_unlock(target_proc);
+		return 0;
+	}
+
+	/*
+	 * Freezing the target. Prevent new transactions by
+	 * setting frozen state. If timeout specified, wait
+	 * for transactions to drain.
+	 */
+	binder_inner_proc_lock(target_proc);
+	target_proc->sync_recv = false;
+	target_proc->async_recv = false;
+	target_proc->is_frozen = true;
+	binder_inner_proc_unlock(target_proc);
+
+	if (info->timeout_ms > 0)
+		ret = wait_event_interruptible_timeout(
+			target_proc->freeze_wait,
+			(!target_proc->outstanding_txns),
+			msecs_to_jiffies(info->timeout_ms));
+
+	/* Check pending transactions that wait for reply */
+	if (ret >= 0) {
+		binder_inner_proc_lock(target_proc);
+		if (binder_txns_pending_ilocked(target_proc))
+			ret = -EAGAIN;
+		binder_inner_proc_unlock(target_proc);
+	}
+
+	if (ret < 0) {
+		binder_inner_proc_lock(target_proc);
+		target_proc->is_frozen = false;
+		binder_inner_proc_unlock(target_proc);
+	}
+
+	return ret;
+}
+
+static int binder_ioctl_get_freezer_info(
+				struct binder_frozen_status_info *info)
+{
+	struct binder_proc *target_proc;
+	bool found = false;
+	__u32 txns_pending;
+
+	info->sync_recv = 0;
+	info->async_recv = 0;
+
+	mutex_lock(&binder_procs_lock);
+	hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
+		if (target_proc->pid == info->pid) {
+			found = true;
+			binder_inner_proc_lock(target_proc);
+			txns_pending = binder_txns_pending_ilocked(target_proc);
+			info->sync_recv |= target_proc->sync_recv |
+					(txns_pending << 1);
+			info->async_recv |= target_proc->async_recv;
+			binder_inner_proc_unlock(target_proc);
+		}
+	}
+	mutex_unlock(&binder_procs_lock);
+
+	if (!found)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int binder_ioctl_get_extended_error(struct binder_thread *thread,
+					   void __user *ubuf)
+{
+	struct binder_extended_error ee;
+
+	binder_inner_proc_lock(thread->proc);
+	ee = thread->ee;
+	binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
+	binder_inner_proc_unlock(thread->proc);
+
+	if (copy_to_user(ubuf, &ee, sizeof(ee)))
+		return -EFAULT;
+
+	return 0;
+}
+
 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	int ret;
@@ -5112,6 +5409,101 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		}
 		break;
 	}
+	case BINDER_FREEZE: {
+		struct binder_freeze_info info;
+		struct binder_proc **target_procs = NULL, *target_proc;
+		int target_procs_count = 0, i = 0;
+
+		ret = 0;
+
+		if (copy_from_user(&info, ubuf, sizeof(info))) {
+			ret = -EFAULT;
+			goto err;
+		}
+
+		mutex_lock(&binder_procs_lock);
+		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
+			if (target_proc->pid == info.pid)
+				target_procs_count++;
+		}
+
+		if (target_procs_count == 0) {
+			mutex_unlock(&binder_procs_lock);
+			ret = -EINVAL;
+			goto err;
+		}
+
+		target_procs = kcalloc(target_procs_count,
+				       sizeof(struct binder_proc *),
+				       GFP_KERNEL);
+
+		if (!target_procs) {
+			mutex_unlock(&binder_procs_lock);
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
+			if (target_proc->pid != info.pid)
+				continue;
+
+			binder_inner_proc_lock(target_proc);
+			target_proc->tmp_ref++;
+			binder_inner_proc_unlock(target_proc);
+
+			target_procs[i++] = target_proc;
+		}
+		mutex_unlock(&binder_procs_lock);
+
+		for (i = 0; i < target_procs_count; i++) {
+			if (ret >= 0)
+				ret = binder_ioctl_freeze(&info,
+							  target_procs[i]);
+
+			binder_proc_dec_tmpref(target_procs[i]);
+		}
+
+		kfree(target_procs);
+
+		if (ret < 0)
+			goto err;
+		break;
+	}
+	case BINDER_GET_FROZEN_INFO: {
+		struct binder_frozen_status_info info;
+
+		if (copy_from_user(&info, ubuf, sizeof(info))) {
+			ret = -EFAULT;
+			goto err;
+		}
+
+		ret = binder_ioctl_get_freezer_info(&info);
+		if (ret < 0)
+			goto err;
+
+		if (copy_to_user(ubuf, &info, sizeof(info))) {
+			ret = -EFAULT;
+			goto err;
+		}
+		break;
+	}
+	case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
+		uint32_t enable;
+
+		if (copy_from_user(&enable, ubuf, sizeof(enable))) {
+			ret = -EFAULT;
+			goto err;
+		}
+		binder_inner_proc_lock(proc);
+		proc->oneway_spam_detection_enabled = (bool)enable;
+		binder_inner_proc_unlock(proc);
+		break;
+	}
+	case BINDER_GET_EXTENDED_ERROR:
+		ret = binder_ioctl_get_extended_error(thread, ubuf);
+		if (ret < 0)
+			goto err;
+		break;
 	default:
 		ret = -EINVAL;
 		goto err;
@@ -5121,7 +5513,7 @@ err:
 	if (thread)
 		thread->looper_need_return = false;
 	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
-	if (ret && ret != -ERESTARTSYS)
+	if (ret && ret != -EINTR)
 		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
 err_unlocked:
 	trace_binder_ioctl_done(ret);
@@ -5164,9 +5556,7 @@ static const struct vm_operations_struct binder_vm_ops = {

 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 {
-	int ret;
 	struct binder_proc *proc = filp->private_data;
-	const char *failure_string;

 	if (proc->tsk != current->group_leader)
 		return -EINVAL;
@@ -5178,9 +5568,9 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 		     (unsigned long)pgprot_val(vma->vm_page_prot));

 	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
-		ret = -EPERM;
-		failure_string = "bad vm_flags";
-		goto err_bad_arg;
+		pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
+		       proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
+		return -EPERM;
 	}
 	vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
 	vma->vm_flags &= ~VM_MAYWRITE;
@@ -5188,15 +5578,7 @@
 	vma->vm_ops = &binder_vm_ops;
 	vma->vm_private_data = proc;

-	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
-	if (ret)
-		return ret;
-	return 0;
-
-err_bad_arg:
-	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
-	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
-	return ret;
+	return binder_alloc_mmap_handler(&proc->alloc, vma);
 }

 static int binder_open(struct inode *nodp, struct file *filp)
@@ -5217,7 +5599,9 @@ static int binder_open(struct inode *nodp, struct file *filp)
 	spin_lock_init(&proc->outer_lock);
 	get_task_struct(current->group_leader);
 	proc->tsk = current->group_leader;
+	proc->cred = get_cred(filp->f_cred);
 	INIT_LIST_HEAD(&proc->todo);
+	init_waitqueue_head(&proc->freeze_wait);
 	proc->default_priority = task_nice(current);
 	/* binderfs stashes devices in i_private */
 	if (is_binderfs_device(nodp)) {
@@ -5406,7 +5790,6 @@ static int binder_node_release(struct binder_node *node, int refs)

 static void binder_deferred_release(struct binder_proc *proc)
 {
 	struct binder_context *context = proc->context;
-	struct binder_device *device;
 	struct rb_node *n;
 	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;

@@ -5423,12 +5806,6 @@ static void binder_deferred_release(struct binder_proc *proc)
 		context->binder_context_mgr_node = NULL;
 	}
 	mutex_unlock(&context->context_mgr_node_lock);
-	device = container_of(proc->context, struct binder_device, context);
-	if (refcount_dec_and_test(&device->ref)) {
-		kfree(context->name);
-		kfree(device);
-	}
-	proc->context = NULL;
 	binder_inner_proc_lock(proc);
 	/*
 	 * Make sure proc stays alive after we
@@ -5437,6 +5814,9 @@
 	proc->tmp_ref++;

 	proc->is_dead = true;
+	proc->is_frozen = false;
+	proc->sync_recv = false;
+	proc->async_recv = false;
 	threads = 0;
 	active_transactions = 0;
 	while ((n = rb_first(&proc->threads))) {
@@ -5787,7 +6167,9 @@ static const char * const binder_return_strings[] = {
 	"BR_FINISHED",
 	"BR_DEAD_BINDER",
 	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
-	"BR_FAILED_REPLY"
+	"BR_FAILED_REPLY",
+	"BR_FROZEN_REPLY",
+	"BR_ONEWAY_SPAM_SUSPECT",
 };

 static const char * const binder_command_strings[] = {
@@ -5928,8 +6310,7 @@ static void print_binder_proc_stats(struct seq_file *m,
 	print_binder_stats(m, "  ", &proc->stats);
 }

-
-int binder_state_show(struct seq_file *m, void *unused)
+static int state_show(struct seq_file *m, void *unused)
 {
 	struct binder_proc *proc;
 	struct binder_node *node;
@@ -5968,7 +6349,7 @@ int binder_state_show(struct seq_file *m, void *unused)
 	return 0;
 }

-int binder_stats_show(struct seq_file *m, void *unused)
+static int stats_show(struct seq_file *m, void *unused)
 {
 	struct binder_proc *proc;

@@ -5984,7 +6365,7 @@ int binder_stats_show(struct seq_file *m, void *unused)
 	return 0;
 }

-int binder_transactions_show(struct seq_file *m, void *unused)
+static int transactions_show(struct seq_file *m, void *unused)
 {
 	struct binder_proc *proc;

@@ -6040,7 +6421,7 @@ static void print_binder_transaction_log_entry(struct seq_file *m,
 			"\n" : " (incomplete)\n");
 }

-int binder_transaction_log_show(struct seq_file *m, void *unused)
+static int transaction_log_show(struct seq_file *m, void *unused)
 {
 	struct binder_transaction_log *log = m->private;
 	unsigned int log_cur = atomic_read(&log->cur);
@@ -6072,6 +6453,45 @@ const struct file_operations binder_fops = {
 	.release = binder_release,
 };

+DEFINE_SHOW_ATTRIBUTE(state);
+DEFINE_SHOW_ATTRIBUTE(stats);
+DEFINE_SHOW_ATTRIBUTE(transactions);
+DEFINE_SHOW_ATTRIBUTE(transaction_log);
+
+const struct binder_debugfs_entry binder_debugfs_entries[] = {
+	{
+		.name = "state",
+		.mode = 0444,
+		.fops = &state_fops,
+		.data = NULL,
+	},
+	{
+		.name = "stats",
+		.mode = 0444,
+		.fops = &stats_fops,
+		.data = NULL,
+	},
+	{
+		.name = "transactions",
+		.mode = 0444,
+		.fops = &transactions_fops,
+		.data = NULL,
+	},
+	{
+		.name = "transaction_log",
+		.mode = 0444,
+		.fops = &transaction_log_fops,
+		.data = &binder_transaction_log,
+	},
+	{
+		.name = "failed_transaction_log",
+		.mode = 0444,
+		.fops = &transaction_log_fops,
+		.data = &binder_transaction_log_failed,
+	},
+	{} /* terminator */
+};
+
 static int __init init_binder_device(const char *name)
 {
 	int ret;
@@ -6117,36 +6537,18 @@ static int __init binder_init(void)
 	atomic_set(&binder_transaction_log_failed.cur, ~0U);

 	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
-	if (binder_debugfs_dir_entry_root)
+	if (binder_debugfs_dir_entry_root) {
+		const struct binder_debugfs_entry *db_entry;
+
+		binder_for_each_debugfs_entry(db_entry)
+			debugfs_create_file(db_entry->name,
+					    db_entry->mode,
+					    binder_debugfs_dir_entry_root,
+					    db_entry->data,
+					    db_entry->fops);
+
 		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
 						binder_debugfs_dir_entry_root);
-
-	if (binder_debugfs_dir_entry_root) {
-		debugfs_create_file("state",
-				    0444,
-				    binder_debugfs_dir_entry_root,
-				    NULL,
-				    &binder_state_fops);
-		debugfs_create_file("stats",
-				    0444,
-				    binder_debugfs_dir_entry_root,
-				    NULL,
-				    &binder_stats_fops);
-		debugfs_create_file("transactions",
-				    0444,
-				    binder_debugfs_dir_entry_root,
-				    NULL,
-				    &binder_transactions_fops);
-		debugfs_create_file("transaction_log",
-				    0444,
-				    binder_debugfs_dir_entry_root,
-				    &binder_transaction_log,
-				    &binder_transaction_log_fops);
-		debugfs_create_file("failed_transaction_log",
-				    0444,
-				    binder_debugfs_dir_entry_root,
-				    &binder_transaction_log_failed,
-				    &binder_transaction_log_fops);
 	}

 	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&