From: Todd Kjos <tkjos@android.com>
To: gregkh@linuxfoundation.org, arve@android.com,
	devel@driverdev.osuosl.org, linux-kernel@vger.kernel.org,
	maco@google.com, tkjos@google.com
Subject: [PATCH 34/37] binder: protect binder_ref with outer lock
Date: Thu, 29 Jun 2017 12:02:08 -0700
Message-ID: <20170629190211.16927-35-tkjos@google.com>
In-Reply-To: <20170629190211.16927-1-tkjos@google.com>

Use proc->outer_lock to protect the binder_ref structure.
The outer lock allows functions operating on the binder_ref
to do nested acquires of node and inner locks as necessary
to attach refs to nodes atomically.

Binder refs must never be accessed without holding the
outer lock.
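
As an illustration only (not an excerpt from the driver), a caller can
nest the locks with the helpers introduced earlier in this series
roughly like this:

	binder_proc_lock(proc);			/* proc->outer_lock */
	ref = binder_get_ref_olocked(proc, desc, true);
	if (ref) {
		binder_node_lock(ref->node);	/* node->lock, nested */
		binder_inner_proc_lock(proc);	/* proc->inner_lock, nested */
		/* ref, node and proc state can be updated consistently here */
		binder_inner_proc_unlock(proc);
		binder_node_unlock(ref->node);
	}
	binder_proc_unlock(proc);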

Signed-off-by: Todd Kjos <tkjos@google.com>
---
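Note for reviewers: a condensed, illustrative sketch of the calling
convention the new *_olocked suffix implies (all identifiers are from
this patch; this is not new driver code), modeled on
binder_update_ref_for_handle() below:

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		binder_proc_unlock(proc);
		return -EINVAL;
	}
	delete_ref = binder_dec_ref_olocked(ref, strong);
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);	/* kfree() only after outer_lock is dropped */

binder_deferred_release() follows the same rule: it drops outer_lock
around each binder_free_ref() call and re-takes it before continuing
to walk refs_by_desc.
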
 drivers/android/binder.c | 133 +++++++++++++++++++++++++++++------------------
 1 file changed, 83 insertions(+), 50 deletions(-)
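
Because outer_lock is a spinlock, BC_REQUEST_DEATH_NOTIFICATION now
performs its GFP_KERNEL allocation (which may sleep) before locking and
frees the unused allocation on the error paths after unlocking. In
outline (illustrative only, condensed from the hunk below):

	death = kzalloc(sizeof(*death), GFP_KERNEL);	/* may sleep: allocate first */
	if (death == NULL) {
		/* report BR_ERROR to the calling thread */
		break;
	}

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, target, false);
	if (ref == NULL) {
		binder_proc_unlock(proc);
		kfree(death);		/* speculative allocation is discarded */
		break;
	}
	/* ... set up the death notification on ref under the lock ... */
	binder_proc_unlock(proc);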

diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 704540ea3e12..f07f0d488aa4 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -475,7 +475,9 @@ enum binder_deferred_state {
  *                        this proc ordered by node->ptr
  *                        (protected by @inner_lock)
  * @refs_by_desc:         rbtree of refs ordered by ref->desc
+ *                        (protected by @outer_lock)
  * @refs_by_node:         rbtree of refs ordered by ref->node
+ *                        (protected by @outer_lock)
  * @pid                   PID of group_leader of process
  *                        (invariant after initialized)
  * @tsk                   task_struct for group_leader of process
@@ -1269,8 +1271,8 @@ static void binder_put_node(struct binder_node *node)
 	binder_dec_node_tmpref(node);
 }
 
-static struct binder_ref *binder_get_ref(struct binder_proc *proc,
-					 u32 desc, bool need_strong_ref)
+static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
+						 u32 desc, bool need_strong_ref)
 {
 	struct rb_node *n = proc->refs_by_desc.rb_node;
 	struct binder_ref *ref;
@@ -1293,7 +1295,7 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
 }
 
 /**
- * binder_get_ref_for_node() - get the ref associated with given node
+ * binder_get_ref_for_node_olocked() - get the ref associated with given node
  * @proc:	binder_proc that owns the ref
  * @node:	binder_node of target
  * @new_ref:	newly allocated binder_ref to be initialized or %NULL
@@ -1310,9 +1312,10 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
  *		new_ref. new_ref must be kfree'd by the caller in
  *		this case.
  */
-static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
-						  struct binder_node *node,
-						  struct binder_ref *new_ref)
+static struct binder_ref *binder_get_ref_for_node_olocked(
+					struct binder_proc *proc,
+					struct binder_node *node,
+					struct binder_ref *new_ref)
 {
 	struct binder_context *context = proc->context;
 	struct rb_node **p = &proc->refs_by_node.rb_node;
@@ -1375,7 +1378,7 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
 	return new_ref;
 }
 
-static void binder_cleanup_ref(struct binder_ref *ref)
+static void binder_cleanup_ref_olocked(struct binder_ref *ref)
 {
 	bool delete_node = false;
 
@@ -1418,17 +1421,17 @@ static void binder_cleanup_ref(struct binder_ref *ref)
 }
 
 /**
- * binder_inc_ref() - increment the ref for given handle
+ * binder_inc_ref_olocked() - increment the ref for given handle
  * @ref:         ref to be incremented
  * @strong:      if true, strong increment, else weak
  * @target_list: list to queue node work on
  *
- * Increment the ref.
+ * Increment the ref. @ref->proc->outer_lock must be held on entry
  *
  * Return: 0, if successful, else errno
  */
-static int binder_inc_ref(struct binder_ref *ref, int strong,
-			  struct list_head *target_list)
+static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
+				  struct list_head *target_list)
 {
 	int ret;
 
@@ -1457,12 +1460,9 @@ static int binder_inc_ref(struct binder_ref *ref, int strong,
  *
  * Decrement the ref.
  *
- * TODO: kfree is avoided here since an upcoming patch
- * will put this under a lock.
- *
  * Return: true if ref is cleaned up and ready to be freed
  */
-static bool binder_dec_ref(struct binder_ref *ref, int strong)
+static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
 {
 	if (strong) {
 		if (ref->data.strong == 0) {
@@ -1486,13 +1486,7 @@ static bool binder_dec_ref(struct binder_ref *ref, int strong)
 		ref->data.weak--;
 	}
 	if (ref->data.strong == 0 && ref->data.weak == 0) {
-		binder_cleanup_ref(ref);
-		/*
-		 * TODO: we could kfree(ref) here, but an upcoming
-		 * patch will call this with a lock held, so we
-		 * return an indication that the ref should be
-		 * freed.
-		 */
+		binder_cleanup_ref_olocked(ref);
 		return true;
 	}
 	return false;
@@ -1517,7 +1511,8 @@ static struct binder_node *binder_get_node_from_ref(
 	struct binder_node *node;
 	struct binder_ref *ref;
 
-	ref = binder_get_ref(proc, desc, need_strong_ref);
+	binder_proc_lock(proc);
+	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
 	if (!ref)
 		goto err_no_ref;
 	node = ref->node;
@@ -1528,10 +1523,12 @@ static struct binder_node *binder_get_node_from_ref(
 	binder_inc_node_tmpref(node);
 	if (rdata)
 		*rdata = ref->data;
+	binder_proc_unlock(proc);
 
 	return node;
 
 err_no_ref:
+	binder_proc_unlock(proc);
 	return NULL;
 }
 
@@ -1571,24 +1568,27 @@ static int binder_update_ref_for_handle(struct binder_proc *proc,
 	struct binder_ref *ref;
 	bool delete_ref = false;
 
-	ref = binder_get_ref(proc, desc, strong);
+	binder_proc_lock(proc);
+	ref = binder_get_ref_olocked(proc, desc, strong);
 	if (!ref) {
 		ret = -EINVAL;
 		goto err_no_ref;
 	}
 	if (increment)
-		ret = binder_inc_ref(ref, strong, NULL);
+		ret = binder_inc_ref_olocked(ref, strong, NULL);
 	else
-		delete_ref = binder_dec_ref(ref, strong);
+		delete_ref = binder_dec_ref_olocked(ref, strong);
 
 	if (rdata)
 		*rdata = ref->data;
+	binder_proc_unlock(proc);
 
 	if (delete_ref)
 		binder_free_ref(ref);
 	return ret;
 
 err_no_ref:
+	binder_proc_unlock(proc);
 	return ret;
 }
 
@@ -1633,15 +1633,19 @@ static int binder_inc_ref_for_node(struct binder_proc *proc,
 	struct binder_ref *new_ref = NULL;
 	int ret = 0;
 
-	ref = binder_get_ref_for_node(proc, node, NULL);
+	binder_proc_lock(proc);
+	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
 	if (!ref) {
+		binder_proc_unlock(proc);
 		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
 		if (!new_ref)
 			return -ENOMEM;
-		ref = binder_get_ref_for_node(proc, node, new_ref);
+		binder_proc_lock(proc);
+		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
 	}
-	ret = binder_inc_ref(ref, strong, target_list);
+	ret = binder_inc_ref_olocked(ref, strong, target_list);
 	*rdata = ref->data;
+	binder_proc_unlock(proc);
 	if (new_ref && ref != new_ref)
 		/*
 		 * Another thread created the ref first so
@@ -2497,11 +2501,14 @@ static void binder_transaction(struct binder_proc *proc,
 			 * stays alive until the transaction is
 			 * done.
 			 */
-			ref = binder_get_ref(proc, tr->target.handle, true);
+			binder_proc_lock(proc);
+			ref = binder_get_ref_olocked(proc, tr->target.handle,
+						     true);
 			if (ref) {
 				binder_inc_node(ref->node, 1, 0, NULL);
 				target_node = ref->node;
 			}
+			binder_proc_unlock(proc);
 			if (target_node == NULL) {
 				binder_user_error("%d:%d got transaction to invalid handle\n",
 					proc->pid, thread->pid);
@@ -3277,7 +3284,7 @@ static int binder_thread_write(struct binder_proc *proc,
 			uint32_t target;
 			binder_uintptr_t cookie;
 			struct binder_ref *ref;
-			struct binder_ref_death *death;
+			struct binder_ref_death *death = NULL;
 
 			if (get_user(target, (uint32_t __user *)ptr))
 				return -EFAULT;
@@ -3285,7 +3292,29 @@ static int binder_thread_write(struct binder_proc *proc,
 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
 				return -EFAULT;
 			ptr += sizeof(binder_uintptr_t);
-			ref = binder_get_ref(proc, target, false);
+			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
+				/*
+				 * Allocate memory for death notification
+				 * before taking lock
+				 */
+				death = kzalloc(sizeof(*death), GFP_KERNEL);
+				if (death == NULL) {
+					WARN_ON(thread->return_error.cmd !=
+						BR_OK);
+					thread->return_error.cmd = BR_ERROR;
+					binder_enqueue_work(
+						thread->proc,
+						&thread->return_error.work,
+						&thread->todo);
+					binder_debug(
+						BINDER_DEBUG_FAILED_TRANSACTION,
+						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
+						proc->pid, thread->pid);
+					break;
+				}
+			}
+			binder_proc_lock(proc);
+			ref = binder_get_ref_olocked(proc, target, false);
 			if (ref == NULL) {
 				binder_user_error("%d:%d %s invalid ref %d\n",
 					proc->pid, thread->pid,
@@ -3293,6 +3322,8 @@ static int binder_thread_write(struct binder_proc *proc,
 					"BC_REQUEST_DEATH_NOTIFICATION" :
 					"BC_CLEAR_DEATH_NOTIFICATION",
 					target);
+				binder_proc_unlock(proc);
+				kfree(death);
 				break;
 			}
 
@@ -3310,20 +3341,8 @@ static int binder_thread_write(struct binder_proc *proc,
 				if (ref->death) {
 					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
 						proc->pid, thread->pid);
-					break;
-				}
-				death = kzalloc(sizeof(*death), GFP_KERNEL);
-				if (death == NULL) {
-					WARN_ON(thread->return_error.cmd !=
-						BR_OK);
-					thread->return_error.cmd = BR_ERROR;
-					binder_enqueue_work(
-						thread->proc,
-						&thread->return_error.work,
-						&thread->todo);
-					binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
-						     "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
-						     proc->pid, thread->pid);
+					binder_proc_unlock(proc);
+					kfree(death);
 					break;
 				}
 				binder_stats_created(BINDER_STAT_DEATH);
@@ -3356,6 +3375,7 @@ static int binder_thread_write(struct binder_proc *proc,
 					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
 						proc->pid, thread->pid);
 					binder_node_unlock(ref->node);
+					binder_proc_unlock(proc);
 					break;
 				}
 				death = ref->death;
@@ -3365,6 +3385,7 @@ static int binder_thread_write(struct binder_proc *proc,
 						(u64)death->cookie,
 						(u64)cookie);
 					binder_node_unlock(ref->node);
+					binder_proc_unlock(proc);
 					break;
 				}
 				ref->death = NULL;
@@ -3391,6 +3412,7 @@ static int binder_thread_write(struct binder_proc *proc,
 				binder_inner_proc_unlock(proc);
 				binder_node_unlock(ref->node);
 			}
+			binder_proc_unlock(proc);
 		} break;
 		case BC_DEAD_BINDER_DONE: {
 			struct binder_work *w;
@@ -4601,14 +4623,18 @@ static void binder_deferred_release(struct binder_proc *proc)
 	binder_inner_proc_unlock(proc);
 
 	outgoing_refs = 0;
+	binder_proc_lock(proc);
 	while ((n = rb_first(&proc->refs_by_desc))) {
 		struct binder_ref *ref;
 
 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
 		outgoing_refs++;
-		binder_cleanup_ref(ref);
+		binder_cleanup_ref_olocked(ref);
+		binder_proc_unlock(proc);
 		binder_free_ref(ref);
+		binder_proc_lock(proc);
 	}
+	binder_proc_unlock(proc);
 
 	binder_release_work(proc, &proc->todo);
 	binder_release_work(proc, &proc->delivered_death);
@@ -4816,8 +4842,10 @@ static void print_binder_node_nilocked(struct seq_file *m,
 	}
 }
 
-static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
+static void print_binder_ref_olocked(struct seq_file *m,
+				     struct binder_ref *ref)
 {
+	lockdep_assert_held(&ref->proc->outer_lock);
 	binder_node_lock(ref->node);
 	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
 		   ref->data.debug_id, ref->data.desc,
@@ -4869,11 +4897,14 @@ static void print_binder_proc(struct seq_file *m,
 		binder_put_node(last_node);
 
 	if (print_all) {
+		binder_proc_lock(proc);
 		for (n = rb_first(&proc->refs_by_desc);
 		     n != NULL;
 		     n = rb_next(n))
-			print_binder_ref(m, rb_entry(n, struct binder_ref,
-						     rb_node_desc));
+			print_binder_ref_olocked(m, rb_entry(n,
+							    struct binder_ref,
+							    rb_node_desc));
+		binder_proc_unlock(proc);
 	}
 	binder_alloc_print_allocated(m, &proc->alloc);
 	binder_inner_proc_lock(proc);
@@ -5013,6 +5044,7 @@ static void print_binder_proc_stats(struct seq_file *m,
 	count = 0;
 	strong = 0;
 	weak = 0;
+	binder_proc_lock(proc);
 	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
 		struct binder_ref *ref = rb_entry(n, struct binder_ref,
 						  rb_node_desc);
@@ -5020,6 +5052,7 @@ static void print_binder_proc_stats(struct seq_file *m,
 		strong += ref->data.strong;
 		weak += ref->data.weak;
 	}
+	binder_proc_unlock(proc);
 	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
 
 	count = binder_alloc_get_allocated_count(&proc->alloc);
-- 
2.13.2.725.g09c95d1e9-goog

Thread overview: 58+ messages
2017-06-29 19:01 [PATCH 00/37] fine-grained locking in binder driver Todd Kjos
2017-06-29 19:01 ` [PATCH 01/37] Revert "android: binder: Sanity check at binder ioctl" Todd Kjos
2017-07-03  9:17   ` Greg KH
     [not found]     ` <CAHRSSExh9JX5xiSRig55DSei31C_BPSasOKB+BTC6jjjuZ+ZpA@mail.gmail.com>
2017-07-05 18:47       ` Greg KH
2017-06-29 19:01 ` [PATCH 02/37] binder: use group leader instead of open thread Todd Kjos
2017-07-03  9:17   ` Greg KH
     [not found]     ` <CAHRSSEyH3t0igLJqcC4e-HR68RH0bg4T310jnRHZzrMChoOeOg@mail.gmail.com>
2017-07-05 18:47       ` Greg KH
2017-07-07 18:23         ` Todd Kjos
2017-07-07 18:29           ` Greg KH
2017-07-24 21:00   ` John Stultz
2017-07-24 21:07     ` John Stultz
2017-07-25  9:13       ` Martijn Coenen
2017-07-27  9:08         ` Amit Pundir
2017-07-27 13:23           ` Greg Kroah-Hartman
2017-07-27 13:40             ` Martijn Coenen
2017-07-27 13:42             ` Amit Pundir
2017-07-28 11:58               ` Martijn Coenen
2017-07-24 21:23     ` Greg Kroah-Hartman
2017-07-24 21:53       ` John Stultz
2017-06-29 19:01 ` [PATCH 03/37] binder: Use wake up hint for synchronous transactions Todd Kjos
2017-07-03  9:18   ` Greg KH
2017-06-29 19:01 ` [PATCH 04/37] binder: separate binder allocator structure from binder proc Todd Kjos
2017-06-29 19:01 ` [PATCH 05/37] binder: remove unneeded cleanup code Todd Kjos
2017-06-29 19:01 ` [PATCH 06/37] binder: separate out binder_alloc functions Todd Kjos
2017-06-29 19:01 ` [PATCH 07/37] binder: move binder_alloc to separate file Todd Kjos
2017-06-29 19:01 ` [PATCH 08/37] binder: remove binder_debug_no_lock mechanism Todd Kjos
2017-06-29 19:01 ` [PATCH 09/37] binder: add protection for non-perf cases Todd Kjos
2017-06-29 19:01 ` [PATCH 10/37] binder: change binder_stats to atomics Todd Kjos
2017-06-29 19:01 ` [PATCH 11/37] binder: make binder_last_id an atomic Todd Kjos
2017-06-29 19:01 ` [PATCH 12/37] binder: add log information for binder transaction failures Todd Kjos
2017-06-29 19:01 ` [PATCH 13/37] binder: refactor queue management in binder_thread_read Todd Kjos
2017-06-29 19:01 ` [PATCH 14/37] binder: avoid race conditions when enqueuing txn Todd Kjos
2017-06-29 19:01 ` [PATCH 15/37] binder: don't modify thread->looper from other threads Todd Kjos
2017-06-29 19:01 ` [PATCH 16/37] binder: remove dead code in binder_get_ref_for_node Todd Kjos
2017-06-29 19:01 ` [PATCH 17/37] binder: protect against two threads freeing buffer Todd Kjos
2017-06-29 19:01 ` [PATCH 18/37] binder: add more debug info when allocation fails Todd Kjos
2017-06-29 19:01 ` [PATCH 19/37] binder: use atomic for transaction_log index Todd Kjos
2017-06-29 19:01 ` [PATCH 20/37] binder: refactor binder_pop_transaction Todd Kjos
2017-06-29 19:01 ` [PATCH 21/37] binder: guarantee txn complete / errors delivered in-order Todd Kjos
2017-06-29 19:01 ` [PATCH 22/37] binder: make sure target_node has strong ref Todd Kjos
2017-06-29 19:01 ` [PATCH 23/37] binder: make sure accesses to proc/thread are safe Todd Kjos
2017-06-29 19:01 ` [PATCH 24/37] binder: refactor binder ref inc/dec for thread safety Todd Kjos
2017-06-29 19:01 ` [PATCH 25/37] binder: use node->tmp_refs to ensure node safety Todd Kjos
2017-06-29 19:02 ` [PATCH 26/37] binder: introduce locking helper functions Todd Kjos
2017-06-29 19:02 ` [PATCH 27/37] binder: use inner lock to sync work dq and node counts Todd Kjos
2017-06-29 19:02 ` [PATCH 28/37] binder: add spinlocks to protect todo lists Todd Kjos
2017-06-29 19:02 ` [PATCH 29/37] binder: add spinlock to protect binder_node Todd Kjos
2017-06-29 19:02 ` [PATCH 30/37] binder: protect proc->nodes with inner lock Todd Kjos
2017-06-29 19:02 ` [PATCH 31/37] binder: protect proc->threads with inner_lock Todd Kjos
2017-06-29 19:02 ` [PATCH 32/37] binder: protect transaction_stack with inner lock Todd Kjos
2017-06-29 19:02 ` [PATCH 33/37] binder: use inner lock to protect thread accounting Todd Kjos
2017-06-29 19:02 ` Todd Kjos [this message]
2017-06-29 19:02 ` [PATCH 35/37] binder: protect against stale pointers in print_binder_transaction Todd Kjos
2017-06-29 19:02 ` [PATCH 36/37] binder: fix death race conditions Todd Kjos
2017-06-30  6:05   ` Greg KH
2017-06-29 19:02 ` [PATCH 37/37] binder: remove global binder lock Todd Kjos
2017-06-30  6:04 ` [PATCH 00/37] fine-grained locking in binder driver Greg KH
2017-07-17 12:49   ` Greg KH
