Diffstat (limited to 'drivers/android')
-rw-r--r--	drivers/android/binder.c          | 239
-rw-r--r--	drivers/android/binder_alloc.c    |  15
-rw-r--r--	drivers/android/binder_alloc.h    |   8
-rw-r--r--	drivers/android/binder_internal.h |  24
4 files changed, 264 insertions, 22 deletions
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index c119736ca56a..61d34e1dc59c 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1506,6 +1506,12 @@ static void binder_free_transaction(struct binder_transaction *t)
 
 	if (target_proc) {
 		binder_inner_proc_lock(target_proc);
+		target_proc->outstanding_txns--;
+		if (target_proc->outstanding_txns < 0)
+			pr_warn("%s: Unexpected outstanding_txns %d\n",
+				__func__, target_proc->outstanding_txns);
+		if (!target_proc->outstanding_txns && target_proc->is_frozen)
+			wake_up_interruptible_all(&target_proc->freeze_wait);
 		if (t->buffer)
 			t->buffer->transaction = NULL;
 		binder_inner_proc_unlock(target_proc);
@@ -2331,10 +2337,11 @@ static int binder_fixup_parent(struct binder_transaction *t,
  * If the @thread parameter is not NULL, the transaction is always queued
  * to the waitlist of that specific thread.
  *
- * Return:	true if the transactions was successfully queued
- *		false if the target process or thread is dead
+ * Return:	0 if the transaction was successfully queued
+ *		BR_DEAD_REPLY if the target process or thread is dead
+ *		BR_FROZEN_REPLY if the target process or thread is frozen
  */
-static bool binder_proc_transaction(struct binder_transaction *t,
+static int binder_proc_transaction(struct binder_transaction *t,
 				    struct binder_proc *proc,
 				    struct binder_thread *thread)
 {
@@ -2353,11 +2360,16 @@ static bool binder_proc_transaction(struct binder_transaction *t,
 	}
 
 	binder_inner_proc_lock(proc);
+	if (proc->is_frozen) {
+		proc->sync_recv |= !oneway;
+		proc->async_recv |= oneway;
+	}
 
-	if (proc->is_dead || (thread && thread->is_dead)) {
+	if ((proc->is_frozen && !oneway) || proc->is_dead ||
+			(thread && thread->is_dead)) {
 		binder_inner_proc_unlock(proc);
 		binder_node_unlock(node);
-		return false;
+		return proc->is_frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
 	}
 
 	if (!thread && !pending_async)
@@ -2373,10 +2385,11 @@ static bool binder_proc_transaction(struct binder_transaction *t,
 	if (!pending_async)
 		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
 
+	proc->outstanding_txns++;
 	binder_inner_proc_unlock(proc);
 	binder_node_unlock(node);
 
-	return true;
+	return 0;
 }
 
 /**
@@ -2700,7 +2713,16 @@ static void binder_transaction(struct binder_proc *proc,
 		u32 secid;
 		size_t added_size;
 
-		security_task_getsecid(proc->tsk, &secid);
+		/*
+		 * Arguably this should be the task's subjective LSM secid but
+		 * we can't reliably access the subjective creds of a task
+		 * other than our own so we must use the objective creds, which
+		 * are safe to access.  The downside is that if a task is
+		 * temporarily overriding it's creds it will not be reflected
+		 * here; however, it isn't clear that binder would handle that
+		 * case well anyway.
+		 */
+		security_task_getsecid_obj(proc->tsk, &secid);
 		ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
 		if (ret) {
 			return_error = BR_FAILED_REPLY;
@@ -3007,19 +3029,25 @@ static void binder_transaction(struct binder_proc *proc,
 			goto err_bad_object_type;
 		}
 	}
-	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
+	if (t->buffer->oneway_spam_suspect)
+		tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
+	else
+		tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
 	t->work.type = BINDER_WORK_TRANSACTION;
 
 	if (reply) {
 		binder_enqueue_thread_work(thread, tcomplete);
 		binder_inner_proc_lock(target_proc);
-		if (target_thread->is_dead) {
+		if (target_thread->is_dead || target_proc->is_frozen) {
+			return_error = target_thread->is_dead ?
+				BR_DEAD_REPLY : BR_FROZEN_REPLY;
 			binder_inner_proc_unlock(target_proc);
 			goto err_dead_proc_or_thread;
 		}
 		BUG_ON(t->buffer->async_transaction != 0);
 		binder_pop_transaction_ilocked(target_thread, in_reply_to);
 		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
+		target_proc->outstanding_txns++;
 		binder_inner_proc_unlock(target_proc);
 		wake_up_interruptible_sync(&target_thread->wait);
 		binder_free_transaction(in_reply_to);
@@ -3038,7 +3066,9 @@ static void binder_transaction(struct binder_proc *proc,
 		t->from_parent = thread->transaction_stack;
 		thread->transaction_stack = t;
 		binder_inner_proc_unlock(proc);
-		if (!binder_proc_transaction(t, target_proc, target_thread)) {
+		return_error = binder_proc_transaction(t,
+				target_proc, target_thread);
+		if (return_error) {
 			binder_inner_proc_lock(proc);
 			binder_pop_transaction_ilocked(thread, t);
 			binder_inner_proc_unlock(proc);
@@ -3048,7 +3078,8 @@ static void binder_transaction(struct binder_proc *proc,
 		BUG_ON(target_node == NULL);
 		BUG_ON(t->buffer->async_transaction != 1);
 		binder_enqueue_thread_work(thread, tcomplete);
-		if (!binder_proc_transaction(t, target_proc, NULL))
+		return_error = binder_proc_transaction(t, target_proc, NULL);
+		if (return_error)
 			goto err_dead_proc_or_thread;
 	}
 	if (target_thread)
@@ -3065,7 +3096,6 @@ static void binder_transaction(struct binder_proc *proc,
 	return;
 
 err_dead_proc_or_thread:
-	return_error = BR_DEAD_REPLY;
 	return_error_line = __LINE__;
 	binder_dequeue_work(proc, tcomplete);
 err_translate_failed:
@@ -3696,7 +3726,7 @@ static int binder_wait_for_work(struct binder_thread *thread,
 		binder_inner_proc_lock(proc);
 		list_del_init(&thread->waiting_thread_node);
 		if (signal_pending(current)) {
-			ret = -ERESTARTSYS;
+			ret = -EINTR;
 			break;
 		}
 	}
@@ -3875,9 +3905,14 @@ retry:
 			binder_stat_br(proc, thread, cmd);
 		} break;
-		case BINDER_WORK_TRANSACTION_COMPLETE: {
+		case BINDER_WORK_TRANSACTION_COMPLETE:
+		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
+			if (proc->oneway_spam_detection_enabled &&
+			    w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
+				cmd = BR_ONEWAY_SPAM_SUSPECT;
+			else
+				cmd = BR_TRANSACTION_COMPLETE;
 			binder_inner_proc_unlock(proc);
-			cmd = BR_TRANSACTION_COMPLETE;
 			kfree(w);
 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
 			if (put_user(cmd, (uint32_t __user *)ptr))
 				return -EFAULT;
@@ -4298,6 +4333,9 @@ static void binder_free_proc(struct binder_proc *proc)
 
 	BUG_ON(!list_empty(&proc->todo));
 	BUG_ON(!list_empty(&proc->delivered_death));
+	if (proc->outstanding_txns)
+		pr_warn("%s: Unexpected outstanding_txns %d\n",
+			__func__, proc->outstanding_txns);
 	device = container_of(proc->context, struct binder_device, context);
 	if (refcount_dec_and_test(&device->ref)) {
 		kfree(proc->context->name);
@@ -4359,6 +4397,7 @@ static int binder_thread_release(struct binder_proc *proc,
 			     (t->to_thread == thread) ? "in" : "out");
 
 		if (t->to_thread == thread) {
+			thread->proc->outstanding_txns--;
 			t->to_proc = NULL;
 			t->to_thread = NULL;
 			if (t->buffer) {
@@ -4609,6 +4648,76 @@ static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
 	return 0;
 }
 
+static int binder_ioctl_freeze(struct binder_freeze_info *info,
+			       struct binder_proc *target_proc)
+{
+	int ret = 0;
+
+	if (!info->enable) {
+		binder_inner_proc_lock(target_proc);
+		target_proc->sync_recv = false;
+		target_proc->async_recv = false;
+		target_proc->is_frozen = false;
+		binder_inner_proc_unlock(target_proc);
+		return 0;
+	}
+
+	/*
+	 * Freezing the target. Prevent new transactions by
+	 * setting frozen state. If timeout specified, wait
+	 * for transactions to drain.
+	 */
+	binder_inner_proc_lock(target_proc);
+	target_proc->sync_recv = false;
+	target_proc->async_recv = false;
+	target_proc->is_frozen = true;
+	binder_inner_proc_unlock(target_proc);
+
+	if (info->timeout_ms > 0)
+		ret = wait_event_interruptible_timeout(
+			target_proc->freeze_wait,
+			(!target_proc->outstanding_txns),
+			msecs_to_jiffies(info->timeout_ms));
+
+	if (!ret && target_proc->outstanding_txns)
+		ret = -EAGAIN;
+
+	if (ret < 0) {
+		binder_inner_proc_lock(target_proc);
+		target_proc->is_frozen = false;
+		binder_inner_proc_unlock(target_proc);
+	}
+
+	return ret;
+}
+
+static int binder_ioctl_get_freezer_info(
+				struct binder_frozen_status_info *info)
+{
+	struct binder_proc *target_proc;
+	bool found = false;
+
+	info->sync_recv = 0;
+	info->async_recv = 0;
+
+	mutex_lock(&binder_procs_lock);
+	hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
+		if (target_proc->pid == info->pid) {
+			found = true;
+			binder_inner_proc_lock(target_proc);
+			info->sync_recv |= target_proc->sync_recv;
+			info->async_recv |= target_proc->async_recv;
+			binder_inner_proc_unlock(target_proc);
+		}
+	}
+	mutex_unlock(&binder_procs_lock);
+
+	if (!found)
+		return -EINVAL;
+
+	return 0;
+}
+
 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	int ret;
@@ -4727,6 +4836,96 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		}
 		break;
 	}
+	case BINDER_FREEZE: {
+		struct binder_freeze_info info;
+		struct binder_proc **target_procs = NULL, *target_proc;
+		int target_procs_count = 0, i = 0;
+
+		ret = 0;
+
+		if (copy_from_user(&info, ubuf, sizeof(info))) {
+			ret = -EFAULT;
+			goto err;
+		}
+
+		mutex_lock(&binder_procs_lock);
+		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
+			if (target_proc->pid == info.pid)
+				target_procs_count++;
+		}
+
+		if (target_procs_count == 0) {
+			mutex_unlock(&binder_procs_lock);
+			ret = -EINVAL;
+			goto err;
+		}
+
+		target_procs = kcalloc(target_procs_count,
+				       sizeof(struct binder_proc *),
+				       GFP_KERNEL);
+
+		if (!target_procs) {
+			mutex_unlock(&binder_procs_lock);
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
+			if (target_proc->pid != info.pid)
+				continue;
+
+			binder_inner_proc_lock(target_proc);
+			target_proc->tmp_ref++;
+			binder_inner_proc_unlock(target_proc);
+
+			target_procs[i++] = target_proc;
+		}
+		mutex_unlock(&binder_procs_lock);
+
+		for (i = 0; i < target_procs_count; i++) {
+			if (ret >= 0)
+				ret = binder_ioctl_freeze(&info,
+						target_procs[i]);
+
+			binder_proc_dec_tmpref(target_procs[i]);
+		}
+
+		kfree(target_procs);
+
+		if (ret < 0)
+			goto err;
+		break;
+	}
+	case BINDER_GET_FROZEN_INFO: {
+		struct binder_frozen_status_info info;
+
+		if (copy_from_user(&info, ubuf, sizeof(info))) {
+			ret = -EFAULT;
+			goto err;
+		}
+
+		ret = binder_ioctl_get_freezer_info(&info);
+		if (ret < 0)
+			goto err;
+
+		if (copy_to_user(ubuf, &info, sizeof(info))) {
+			ret = -EFAULT;
+			goto err;
+		}
+		break;
+	}
+	case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
+		uint32_t enable;
+
+		if (copy_from_user(&enable, ubuf, sizeof(enable))) {
+			ret = -EINVAL;
+			goto err;
+		}
+		binder_inner_proc_lock(proc);
+		proc->oneway_spam_detection_enabled = (bool)enable;
+		binder_inner_proc_unlock(proc);
+		break;
+	}
 	default:
 		ret = -EINVAL;
 		goto err;
@@ -4736,7 +4935,7 @@ err:
 	if (thread)
 		thread->looper_need_return = false;
 	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
-	if (ret && ret != -ERESTARTSYS)
+	if (ret && ret != -EINTR)
 		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
 	trace_binder_ioctl_done(ret);
@@ -4823,6 +5022,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
 	get_task_struct(current->group_leader);
 	proc->tsk = current->group_leader;
 	INIT_LIST_HEAD(&proc->todo);
+	init_waitqueue_head(&proc->freeze_wait);
 	proc->default_priority = task_nice(current);
 	/* binderfs stashes devices in i_private */
 	if (is_binderfs_device(nodp)) {
@@ -5035,6 +5235,9 @@ static void binder_deferred_release(struct binder_proc *proc)
 	proc->tmp_ref++;
 
 	proc->is_dead = true;
+	proc->is_frozen = false;
+	proc->sync_recv = false;
+	proc->async_recv = false;
 	threads = 0;
 	active_transactions = 0;
 	while ((n = rb_first(&proc->threads))) {
@@ -5385,7 +5588,9 @@ static const char * const binder_return_strings[] = {
 	"BR_FINISHED",
 	"BR_DEAD_BINDER",
 	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
-	"BR_FAILED_REPLY"
+	"BR_FAILED_REPLY",
+	"BR_FROZEN_REPLY",
+	"BR_ONEWAY_SPAM_SUSPECT",
 };
 
 static const char * const binder_command_strings[] = {
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 7caf74ad2405..340515f54498 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -338,7 +338,7 @@ static inline struct vm_area_struct *binder_alloc_get_vma(
 	return vma;
 }
 
-static void debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
+static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
 {
 	/*
 	 * Find the amount and size of buffers allocated by the current caller;
@@ -366,13 +366,19 @@ static void debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
 
 	/*
 	 * Warn if this pid has more than 50 transactions, or more than 50% of
-	 * async space (which is 25% of total buffer size).
+	 * async space (which is 25% of total buffer size). Oneway spam is only
+	 * detected when the threshold is exceeded.
 	 */
 	if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
 		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
			     "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n",
			      alloc->pid, pid, num_buffers, total_alloc_size);
+		if (!alloc->oneway_spam_detected) {
+			alloc->oneway_spam_detected = true;
+			return true;
+		}
 	}
+	return false;
 }
 
 static struct binder_buffer *binder_alloc_new_buf_locked(
@@ -525,6 +531,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 	buffer->async_transaction = is_async;
 	buffer->extra_buffers_size = extra_buffers_size;
 	buffer->pid = pid;
+	buffer->oneway_spam_suspect = false;
 	if (is_async) {
 		alloc->free_async_space -= size + sizeof(struct binder_buffer);
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
@@ -536,7 +543,9 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 			 * of async space left (which is less than 10% of total
 			 * buffer size).
 			 */
-			debug_low_async_space_locked(alloc, pid);
+			buffer->oneway_spam_suspect = debug_low_async_space_locked(alloc, pid);
+		} else {
+			alloc->oneway_spam_detected = false;
 		}
 	}
 	return buffer;
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 6e8e001381af..7dea57a84c79 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -26,6 +26,8 @@ struct binder_transaction;
  * @clear_on_free:      %true if buffer must be zeroed after use
  * @allow_user_free:    %true if user is allowed to free buffer
  * @async_transaction:  %true if buffer is in use for an async txn
+ * @oneway_spam_suspect: %true if total async allocate size just exceed
+ * spamming detect threshold
  * @debug_id:           unique ID for debugging
  * @transaction:        pointer to associated struct binder_transaction
  * @target_node:        struct binder_node associated with this buffer
@@ -45,7 +47,8 @@ struct binder_buffer {
 	unsigned clear_on_free:1;
 	unsigned allow_user_free:1;
 	unsigned async_transaction:1;
-	unsigned debug_id:28;
+	unsigned oneway_spam_suspect:1;
+	unsigned debug_id:27;
 
 	struct binder_transaction *transaction;
 
@@ -87,6 +90,8 @@ struct binder_lru_page {
  * @buffer_size:        size of address space specified via mmap
  * @pid:                pid for associated binder_proc (invariant after init)
  * @pages_high:         high watermark of offset in @pages
+ * @oneway_spam_detected: %true if oneway spam detection fired, clear that
+ * flag once the async buffer has returned to a healthy state
  *
  * Bookkeeping structure for per-proc address space management for binder
  * buffers. It is normally initialized during binder_init() and binder_mmap()
@@ -107,6 +112,7 @@ struct binder_alloc {
 	uint32_t buffer_free;
 	int pid;
 	size_t pages_high;
+	bool oneway_spam_detected;
 };
 
 #ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index 6cd79011e35d..810c0b84d3f8 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -155,7 +155,7 @@ enum binder_stat_types {
 };
 
 struct binder_stats {
-	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
+	atomic_t br[_IOC_NR(BR_ONEWAY_SPAM_SUSPECT) + 1];
 	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
 	atomic_t obj_created[BINDER_STAT_COUNT];
 	atomic_t obj_deleted[BINDER_STAT_COUNT];
@@ -174,6 +174,7 @@ struct binder_work {
 enum binder_work_type {
 	BINDER_WORK_TRANSACTION = 1,
 	BINDER_WORK_TRANSACTION_COMPLETE,
+	BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT,
 	BINDER_WORK_RETURN_ERROR,
 	BINDER_WORK_NODE,
 	BINDER_WORK_DEAD_BINDER,
@@ -367,9 +368,22 @@ struct binder_ref {
 *                        (protected by binder_deferred_lock)
  * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
+ * @outstanding_txns:     number of transactions to be transmitted before
+ *                        processes in freeze_wait are woken up
+ *                        (protected by @inner_lock)
  * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
+ * @is_frozen:            process is frozen and unable to service
+ *                        binder transactions
+ *                        (protected by @inner_lock)
+ * @sync_recv:            process received sync transactions since last frozen
+ *                        (protected by @inner_lock)
+ * @async_recv:           process received async transactions since last frozen
+ *                        (protected by @inner_lock)
+ * @freeze_wait:          waitqueue of processes waiting for all outstanding
+ *                        transactions to be processed
+ *                        (protected by @inner_lock)
  * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
  * @stats:                per-process binder statistics
@@ -396,6 +410,8 @@ struct binder_ref {
  * @outer_lock:           no nesting under innor or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
  * @binderfs_entry:       process-specific binderfs log file
+ * @oneway_spam_detection_enabled: process enabled oneway spam detection
+ *                        or not
  *
  * Bookkeeping structure for binder processes
  */
@@ -410,7 +426,12 @@ struct binder_proc {
 	struct task_struct *tsk;
 	struct hlist_node deferred_work_node;
 	int deferred_work;
+	int outstanding_txns;
 	bool is_dead;
+	bool is_frozen;
+	bool sync_recv;
+	bool async_recv;
+	wait_queue_head_t freeze_wait;
 
 	struct list_head todo;
 	struct binder_stats stats;
@@ -426,6 +447,7 @@ struct binder_proc {
 	spinlock_t inner_lock;
 	spinlock_t outer_lock;
 	struct dentry *binderfs_entry;
+	bool oneway_spam_detection_enabled;
 };
 
 /**
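
For context, the diff above only shows the driver side of the new freeze interface. Below is a minimal userspace sketch of how the new BINDER_FREEZE and BINDER_GET_FROZEN_INFO ioctls might be driven. It assumes the matching UAPI header change (struct binder_freeze_info, struct binder_frozen_status_info, and the ioctl numbers exported via <linux/android/binder.h>); the device path, target pid, and the freeze_and_check() helper are purely illustrative, not part of this patch.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

/* Illustrative helper: freeze the binder interface of @pid, then ask
 * whether it received any transactions while frozen. */
static int freeze_and_check(int binder_fd, unsigned int pid)
{
	struct binder_freeze_info freeze = {
		.pid = pid,
		.enable = 1,		/* 0 would thaw the process */
		.timeout_ms = 100,	/* wait up to 100 ms for outstanding
					 * transactions to drain */
	};
	struct binder_frozen_status_info status = { .pid = pid };

	/* errno == EAGAIN means outstanding transactions did not drain
	 * within the timeout and the freeze was not applied. */
	if (ioctl(binder_fd, BINDER_FREEZE, &freeze) < 0) {
		perror("BINDER_FREEZE");
		return -1;
	}

	/* sync_recv/async_recv are ORed across every binder_proc
	 * instance with this pid, i.e. across all contexts it has open. */
	if (ioctl(binder_fd, BINDER_GET_FROZEN_INFO, &status) < 0) {
		perror("BINDER_GET_FROZEN_INFO");
		return -1;
	}

	printf("pid %u: sync_recv=%u async_recv=%u\n",
	       pid, status.sync_recv, status.async_recv);
	return 0;
}

int main(void)
{
	/* Hypothetical device path; binderfs mounts may differ. */
	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	freeze_and_check(fd, 1234);	/* 1234: example target pid */
	close(fd);
	return 0;
}

Note that BINDER_FREEZE applies to every binder context the target pid has open, and that a sender whose synchronous transaction hits a frozen target now gets BR_FROZEN_REPLY instead of BR_DEAD_REPLY, so callers can distinguish a frozen peer from a dead one.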
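
Similarly, the oneway spam detection added above is opt-in per sending process. A short sketch under the same assumptions (BINDER_ENABLE_ONEWAY_SPAM_DETECTION and BR_ONEWAY_SPAM_SUSPECT coming from the matching UAPI change; enable_oneway_spam_detection() is an illustrative name):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

/* Illustrative helper: opt the calling process in to oneway spam
 * detection on @binder_fd. */
static int enable_oneway_spam_detection(int binder_fd)
{
	uint32_t enable = 1;

	return ioctl(binder_fd, BINDER_ENABLE_ONEWAY_SPAM_DETECTION, &enable);
}

Once enabled, a sender whose oneway transaction pushes the target's async buffer past the threshold checked in debug_low_async_space_locked() receives BR_ONEWAY_SPAM_SUSPECT in place of BR_TRANSACTION_COMPLETE, so a BINDER_WRITE_READ parse loop should treat the new code as a normal completion that additionally flags the suspect behavior; processes that never enable the feature keep seeing BR_TRANSACTION_COMPLETE.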