From: Todd Kjos
To: gregkh@linuxfoundation.org, arve@android.com, devel@driverdev.osuosl.org,
	linux-kernel@vger.kernel.org, maco@google.com, tkjos@google.com
Subject: [PATCH 36/37] binder: fix death race conditions
Date: Thu, 29 Jun 2017 12:02:10 -0700
Message-Id: <20170629190211.16927-37-tkjos@google.com>
X-Mailer: git-send-email 2.13.2.725.g09c95d1e9-goog
In-Reply-To: <20170629190211.16927-1-tkjos@google.com>
References: <20170629190211.16927-1-tkjos@google.com>

From: Martijn Coenen

A race existed where one thread could register a death notification
for a node, while another thread was cleaning up that node and sending
out death notifications for its references, causing simultaneous access
to ref->death because different locks were held.

Signed-off-by: Martijn Coenen
---
 drivers/android/binder.c | 64 ++++++++++++++++++++++++------------------------
 1 file changed, 32 insertions(+), 32 deletions(-)

diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 36ef88d10631..1e50b034d49a 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -442,6 +442,7 @@ struct binder_ref_data {
  *               ref for deletion in binder_cleanup_ref, a non-NULL
  *               @node indicates the node must be freed
  * @death:       pointer to death notification (ref_death) if requested
+ *               (protected by @node->lock)
  *
  * Structure to track references from procA to target node (on procB). This
  * structure is unsafe to access without holding @proc->outer_lock.
@@ -3337,10 +3338,12 @@ static int binder_thread_write(struct binder_proc *proc,
 				     ref->data.desc, ref->data.strong,
 				     ref->data.weak, ref->node->debug_id);
 
+			binder_node_lock(ref->node);
 			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
 				if (ref->death) {
 					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
 						proc->pid, thread->pid);
+					binder_node_unlock(ref->node);
 					binder_proc_unlock(proc);
 					kfree(death);
 					break;
@@ -3349,7 +3352,6 @@ static int binder_thread_write(struct binder_proc *proc,
 				INIT_LIST_HEAD(&death->work.entry);
 				death->cookie = cookie;
 				ref->death = death;
-				binder_node_lock(ref->node);
 				if (ref->node->proc == NULL) {
 					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
 					if (thread->looper &
@@ -3368,9 +3370,7 @@ static int binder_thread_write(struct binder_proc *proc,
 							&proc->wait);
 					}
 				}
-				binder_node_unlock(ref->node);
 			} else {
-				binder_node_lock(ref->node);
 				if (ref->death == NULL) {
 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
 						proc->pid, thread->pid);
@@ -3410,8 +3410,8 @@ static int binder_thread_write(struct binder_proc *proc,
 						death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
 				}
 				binder_inner_proc_unlock(proc);
-				binder_node_unlock(ref->node);
 			}
+			binder_node_unlock(ref->node);
 			binder_proc_unlock(proc);
 		} break;
 		case BC_DEAD_BINDER_DONE: {
@@ -3748,44 +3748,39 @@ static int binder_thread_read(struct binder_proc *proc,
 		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
 			struct binder_ref_death *death;
 			uint32_t cmd;
+			binder_uintptr_t cookie;
 
 			death = container_of(w, struct binder_ref_death, work);
 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
 				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
 			else
 				cmd = BR_DEAD_BINDER;
-			/*
-			 * TODO: there is a race condition between
-			 * death notification requests and delivery
-			 * of the notifications. This will be handled
-			 * in a later patch.
-			 */
-			binder_inner_proc_unlock(proc);
-			if (put_user(cmd, (uint32_t __user *)ptr))
-				return -EFAULT;
-			ptr += sizeof(uint32_t);
-			if (put_user(death->cookie,
-				     (binder_uintptr_t __user *)ptr))
-				return -EFAULT;
-			ptr += sizeof(binder_uintptr_t);
-			binder_stat_br(proc, thread, cmd);
+			cookie = death->cookie;
+
 			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
 				     "%d:%d %s %016llx\n",
 				     proc->pid, thread->pid,
 				     cmd == BR_DEAD_BINDER ?
 				     "BR_DEAD_BINDER" :
 				     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
-				     (u64)death->cookie);
-
+				     (u64)cookie);
 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
+				binder_inner_proc_unlock(proc);
 				kfree(death);
 				binder_stats_deleted(BINDER_STAT_DEATH);
 			} else {
-				binder_inner_proc_lock(proc);
 				binder_enqueue_work_ilocked(
 						w, &proc->delivered_death);
 				binder_inner_proc_unlock(proc);
 			}
+			if (put_user(cmd, (uint32_t __user *)ptr))
+				return -EFAULT;
+			ptr += sizeof(uint32_t);
+			if (put_user(cookie,
+				     (binder_uintptr_t __user *)ptr))
+				return -EFAULT;
+			ptr += sizeof(binder_uintptr_t);
+			binder_stat_br(proc, thread, cmd);
 			if (cmd == BR_DEAD_BINDER)
 				goto done; /* DEAD_BINDER notifications can cause transactions */
 		} break;
@@ -4535,20 +4530,25 @@ static int binder_node_release(struct binder_node *node, int refs)
 
 	hlist_for_each_entry(ref, &node->refs, node_entry) {
 		refs++;
-
-		if (!ref->death)
+		/*
+		 * Need the node lock to synchronize
+		 * with new notification requests and the
+		 * inner lock to synchronize with queued
+		 * death notifications.
+		 */
+		binder_inner_proc_lock(ref->proc);
+		if (!ref->death) {
+			binder_inner_proc_unlock(ref->proc);
 			continue;
+		}
 
 		death++;
 
-		binder_inner_proc_lock(ref->proc);
-		if (list_empty(&ref->death->work.entry)) {
-			ref->death->work.type = BINDER_WORK_DEAD_BINDER;
-			binder_enqueue_work_ilocked(&ref->death->work,
-						    &ref->proc->todo);
-			wake_up_interruptible(&ref->proc->wait);
-		} else
-			BUG();
+		BUG_ON(!list_empty(&ref->death->work.entry));
+		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
+		binder_enqueue_work_ilocked(&ref->death->work,
+					    &ref->proc->todo);
+		wake_up_interruptible(&ref->proc->wait);
 		binder_inner_proc_unlock(ref->proc);
 	}
-- 
2.13.2.725.g09c95d1e9-goog
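
The locking rule this patch relies on is easier to see outside the diff. Below is a minimal, self-contained user-space sketch of that rule under simplified assumptions; it is not the kernel code, and every name in it (fake_node, fake_ref, fake_death, request_death_notification, node_release) is invented for illustration. A single mutex plays the role of binder_node_lock(): both the path that registers a death notification and the path that tears the node down touch ref->death only while holding it, which is exactly the simultaneous access the commit message describes eliminating.

/*
 * Standalone user-space sketch (not kernel code) of the rule the patch
 * enforces: ref->death may only be read or written while holding the
 * node's lock. All names here are invented for illustration.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_death {
	unsigned long cookie;
};

struct fake_node {
	pthread_mutex_t lock;	/* stands in for binder_node_lock() */
	int proc_alive;		/* non-zero while the owning process exists */
};

struct fake_ref {
	struct fake_node *node;
	struct fake_death *death;	/* protected by node->lock, as in the patch */
};

/* BC_REQUEST_DEATH_NOTIFICATION path: install ref->death under the node lock. */
static int request_death_notification(struct fake_ref *ref, unsigned long cookie)
{
	struct fake_death *death = malloc(sizeof(*death));

	if (!death)
		return -1;
	death->cookie = cookie;

	pthread_mutex_lock(&ref->node->lock);
	if (ref->death) {		/* notification already registered */
		pthread_mutex_unlock(&ref->node->lock);
		free(death);
		return -1;
	}
	ref->death = death;
	if (!ref->node->proc_alive)	/* node already dead: deliver immediately */
		printf("deliver BR_DEAD_BINDER, cookie %#lx\n", death->cookie);
	pthread_mutex_unlock(&ref->node->lock);
	return 0;
}

/* Node-teardown path: examine ref->death under the same lock. */
static void node_release(struct fake_ref *ref)
{
	pthread_mutex_lock(&ref->node->lock);
	ref->node->proc_alive = 0;
	if (ref->death)
		printf("deliver BR_DEAD_BINDER, cookie %#lx\n", ref->death->cookie);
	pthread_mutex_unlock(&ref->node->lock);
}

int main(void)
{
	struct fake_node node = { .lock = PTHREAD_MUTEX_INITIALIZER, .proc_alive = 1 };
	struct fake_ref ref = { .node = &node, .death = NULL };

	/* In a real demonstration these two calls would run on separate threads. */
	request_death_notification(&ref, 0xdeadbeef);
	node_release(&ref);

	free(ref.death);
	return 0;
}

In the driver itself the cleanup loop in binder_node_release() additionally takes binder_inner_proc_lock(ref->proc), since the death-notification work it enqueues is protected by the inner proc lock, as the new comment in the last hunk spells out.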