From: Todd Kjos <tkjos@android.com>
To: gregkh@linuxfoundation.org, arve@android.com,
	devel@driverdev.osuosl.org, linux-kernel@vger.kernel.org,
	maco@google.com, tkjos@google.com
Subject: [PATCH 27/37] binder: use inner lock to sync work dq and node counts
Date: Thu, 29 Jun 2017 12:02:01 -0700
Message-ID: <20170629190211.16927-28-tkjos@google.com>
In-Reply-To: <20170629190211.16927-1-tkjos@google.com>

For correct behavior we need to hold the inner lock when
dequeuing and processing node work in binder_thread_read().
We now take the inner lock before entering the switch statement
and release it only after processing anything that might be
affected by other threads.
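
As a simplified sketch (distilled from the binder_thread_read()
hunk below; error paths and most work types omitted), the read
loop now has this shape:

	binder_inner_proc_lock(proc);
	w = list_first_entry(&thread->todo, struct binder_work, entry);
	list_del_init(&w->entry);	/* dequeue under the inner lock */

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		/* w is now owned by this thread; drop the lock
		 * before copying the transaction to userspace
		 */
		binder_inner_proc_unlock(proc);
		t = container_of(w, struct binder_transaction, work);
		break;
	/*
	 * each remaining case likewise unlocks once it is done
	 * touching state that other threads can modify
	 */
	}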

We also need to hold the inner lock to protect the node
weak/strong ref tracking fields as long as node->proc
is non-NULL (if it is NULL, we are guaranteed that
no node work is queued).
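
binder_inc_node_tmpref() in the diff below shows the resulting
conditional-locking pattern for tmp_refs, the one field that must
also survive node death (where binder_dead_nodes_lock takes over):

	if (node->proc)
		binder_inner_proc_lock(node->proc);	/* live node */
	else
		spin_lock(&binder_dead_nodes_lock);	/* dead node */
	binder_inc_node_tmpref_ilocked(node);		/* tmp_refs++ */
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);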

This means that other functions that manipulate these fields
must also hold the inner lock. These functions have been
refactored to take the inner lock.
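
The refactoring follows the usual _ilocked naming convention:
binder_inc_node_ilocked() and binder_dec_node_ilocked() require
the caller to already hold the inner lock, while the unsuffixed
wrappers take and release it. Note that binder_dec_node_ilocked()
only reports that the node became unreferenced; the wrapper calls
binder_free_node() after dropping the lock, as in binder_dec_node():

	bool free_node;

	if (node->proc)
		binder_inner_proc_lock(node->proc);
	free_node = binder_dec_node_ilocked(node, strong, internal);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);

	if (free_node)
		binder_free_node(node);	/* free outside the locked region */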

Signed-off-by: Todd Kjos <tkjos@google.com>
---
 drivers/android/binder.c | 249 +++++++++++++++++++++++++++++++++++++----------
 1 file changed, 198 insertions(+), 51 deletions(-)

diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 91fece5c067f..6c741416fa00 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -311,17 +311,36 @@ struct binder_error {
  * @refs:                 list of references on this node
  * @internal_strong_refs: used to take strong references when
  *                        initiating a transaction
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
  * @local_weak_refs:      weak user refs from local process
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
  * @local_strong_refs:    strong user refs from local process
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
  * @tmp_refs:             temporary kernel refs
+ *                        (protected by @proc->inner_lock while @proc
+ *                        is valid, and by binder_dead_nodes_lock
+ *                        if @proc is NULL. During inc/dec and node release
+ *                        it is also protected by @lock to provide safety
+ *                        as the node dies and @proc becomes NULL)
  * @ptr:                  userspace pointer for node
  *                        (invariant, no lock needed)
  * @cookie:               userspace cookie for node
  *                        (invariant, no lock needed)
  * @has_strong_ref:       userspace notified of strong ref
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
  * @pending_strong_ref:   userspace has acked notification of strong ref
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
  * @has_weak_ref:         userspace notified of weak ref
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
  * @pending_weak_ref:     userspace has acked notification of weak ref
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
  * @has_async_transaction: async transaction to node in progress
  * @accept_fds:           file descriptor operations supported for node
  *                        (invariant after initialized)
@@ -347,13 +366,24 @@ struct binder_node {
 	int tmp_refs;
 	binder_uintptr_t ptr;
 	binder_uintptr_t cookie;
-	unsigned has_strong_ref:1;
-	unsigned pending_strong_ref:1;
-	unsigned has_weak_ref:1;
-	unsigned pending_weak_ref:1;
-	unsigned has_async_transaction:1;
-	unsigned accept_fds:1;
-	unsigned min_priority:8;
+	struct {
+		/*
+		 * bitfield elements protected by
+		 * proc inner_lock
+		 */
+		u8 has_strong_ref:1;
+		u8 pending_strong_ref:1;
+		u8 has_weak_ref:1;
+		u8 pending_weak_ref:1;
+	};
+	struct {
+		/*
+		 * invariant after initialization
+		 */
+		u8 accept_fds:1;
+		u8 min_priority;
+	};
+	bool has_async_transaction;
 	struct list_head async_todo;
 };
 
@@ -813,9 +843,18 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
 	return node;
 }
 
-static int binder_inc_node(struct binder_node *node, int strong, int internal,
-			   struct list_head *target_list)
+static void binder_free_node(struct binder_node *node)
 {
+	kfree(node);
+	binder_stats_deleted(BINDER_STAT_NODE);
+}
+
+static int binder_inc_node_ilocked(struct binder_node *node, int strong,
+				   int internal,
+				   struct list_head *target_list)
+{
+	if (node->proc)
+		BUG_ON(!spin_is_locked(&node->proc->inner_lock));
 	if (strong) {
 		if (internal) {
 			if (target_list == NULL &&
@@ -849,23 +888,43 @@ static int binder_inc_node(struct binder_node *node, int strong, int internal,
 	return 0;
 }
 
-static int binder_dec_node(struct binder_node *node, int strong, int internal)
+static int binder_inc_node(struct binder_node *node, int strong, int internal,
+			   struct list_head *target_list)
+{
+	int ret;
+
+	if (node->proc)
+		binder_inner_proc_lock(node->proc);
+	ret = binder_inc_node_ilocked(node, strong, internal, target_list);
+	if (node->proc)
+		binder_inner_proc_unlock(node->proc);
+
+	return ret;
+}
+
+static bool binder_dec_node_ilocked(struct binder_node *node,
+				    int strong, int internal)
 {
+	struct binder_proc *proc = node->proc;
+
+	if (proc)
+		BUG_ON(!spin_is_locked(&proc->inner_lock));
 	if (strong) {
 		if (internal)
 			node->internal_strong_refs--;
 		else
 			node->local_strong_refs--;
 		if (node->local_strong_refs || node->internal_strong_refs)
-			return 0;
+			return false;
 	} else {
 		if (!internal)
 			node->local_weak_refs--;
 		if (node->local_weak_refs || node->tmp_refs ||
 				!hlist_empty(&node->refs))
-			return 0;
+			return false;
 	}
-	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
+
+	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
 		if (list_empty(&node->work.entry)) {
 			list_add_tail(&node->work.entry, &node->proc->todo);
 			wake_up_interruptible(&node->proc->wait);
@@ -874,25 +933,55 @@ static int binder_dec_node(struct binder_node *node, int strong, int internal)
 		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
 		    !node->local_weak_refs && !node->tmp_refs) {
 			list_del_init(&node->work.entry);
-			if (node->proc) {
+			if (proc) {
 				rb_erase(&node->rb_node, &node->proc->nodes);
 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
 					     "refless node %d deleted\n",
 					     node->debug_id);
 			} else {
 				spin_lock(&binder_dead_nodes_lock);
+				/*
+				 * tmp_refs could have changed so
+				 * check it again
+				 */
+				if (node->tmp_refs) {
+					spin_unlock(&binder_dead_nodes_lock);
+					return false;
+				}
 				hlist_del(&node->dead_node);
 				spin_unlock(&binder_dead_nodes_lock);
 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
 					     "dead node %d deleted\n",
 					     node->debug_id);
 			}
-			kfree(node);
-			binder_stats_deleted(BINDER_STAT_NODE);
+			return true;
 		}
 	}
+	return false;
+}
 
-	return 0;
+static void binder_dec_node(struct binder_node *node, int strong, int internal)
+{
+	bool free_node;
+
+	if (node->proc)
+		binder_inner_proc_lock(node->proc);
+	free_node = binder_dec_node_ilocked(node, strong, internal);
+	if (node->proc)
+		binder_inner_proc_unlock(node->proc);
+
+	if (free_node)
+		binder_free_node(node);
+}
+
+static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
+{
+	/*
+	 * No call to binder_inc_node() is needed since we
+	 * don't need to inform userspace of any changes to
+	 * tmp_refs
+	 */
+	node->tmp_refs++;
 }
 
 /**
@@ -900,16 +989,25 @@ static int binder_dec_node(struct binder_node *node, int strong, int internal)
  * @node:	node to reference
  *
  * Take reference on node to prevent the node from being freed
- * while referenced only by a local variable
+ * while referenced only by a local variable. The inner lock is
+ * needed to serialize with the node work on the queue (which
+ * isn't needed after the node is dead). If the node is dead
+ * (node->proc is NULL), use binder_dead_nodes_lock to protect
+ * node->tmp_refs against dead-node-only cases where the node
+ * lock cannot be acquired (eg traversing the dead node list to
+ * print nodes)
  */
 static void binder_inc_node_tmpref(struct binder_node *node)
 {
-	/*
-	 * No call to binder_inc_node() is needed since we
-	 * don't need to inform userspace of any changes to
-	 * tmp_refs
-	 */
-	node->tmp_refs++;
+	if (node->proc)
+		binder_inner_proc_lock(node->proc);
+	else
+		spin_lock(&binder_dead_nodes_lock);
+	binder_inc_node_tmpref_ilocked(node);
+	if (node->proc)
+		binder_inner_proc_unlock(node->proc);
+	else
+		spin_unlock(&binder_dead_nodes_lock);
 }
 
 /**
@@ -920,15 +1018,27 @@ static void binder_inc_node_tmpref(struct binder_node *node)
  */
 static void binder_dec_node_tmpref(struct binder_node *node)
 {
+	bool free_node;
+
+	if (node->proc)
+		binder_inner_proc_lock(node->proc);
+	else
+		spin_lock(&binder_dead_nodes_lock);
 	node->tmp_refs--;
 	BUG_ON(node->tmp_refs < 0);
+	if (!node->proc)
+		spin_unlock(&binder_dead_nodes_lock);
 	/*
 	 * Call binder_dec_node() to check if all refcounts are 0
 	 * and cleanup is needed. Calling with strong=0 and internal=1
 	 * causes no actual reference to be released in binder_dec_node().
 	 * If that changes, a change is needed here too.
 	 */
-	binder_dec_node(node, 0, 1);
+	free_node = binder_dec_node_ilocked(node, 0, 1);
+	if (node->proc)
+		binder_inner_proc_unlock(node->proc);
+	if (free_node)
+		binder_free_node(node);
 }
 
 static void binder_put_node(struct binder_node *node)
@@ -1041,6 +1151,9 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
 
 static void binder_cleanup_ref(struct binder_ref *ref)
 {
+	bool delete_node = false;
+	struct binder_proc *node_proc = ref->node->proc;
+
 	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
 		     "%d delete ref %d desc %d for node %d\n",
 		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
@@ -1049,11 +1162,26 @@ static void binder_cleanup_ref(struct binder_ref *ref)
 	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
 	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
 
+	if (node_proc)
+		binder_inner_proc_lock(node_proc);
 	if (ref->data.strong)
-		binder_dec_node(ref->node, 1, 1);
+		binder_dec_node_ilocked(ref->node, 1, 1);
 
 	hlist_del(&ref->node_entry);
-	binder_dec_node(ref->node, 0, 1);
+	delete_node = binder_dec_node_ilocked(ref->node, 0, 1);
+	if (node_proc)
+		binder_inner_proc_unlock(node_proc);
+	/*
+	 * Clear ref->node unless we want the caller to free the node
+	 */
+	if (!delete_node) {
+		/*
+		 * The caller uses ref->node to determine
+		 * whether the node needs to be freed. Clear
+		 * it since the node is still alive.
+		 */
+		ref->node = NULL;
+	}
 
 	if (ref->death) {
 		binder_debug(BINDER_DEBUG_DEAD_BINDER,
@@ -1122,13 +1250,8 @@ static bool binder_dec_ref(struct binder_ref *ref, int strong)
 			return false;
 		}
 		ref->data.strong--;
-		if (ref->data.strong == 0) {
-			int ret;
-
-			ret = binder_dec_node(ref->node, strong, 1);
-			if (ret)
-				return false;
-		}
+		if (ref->data.strong == 0)
+			binder_dec_node(ref->node, strong, 1);
 	} else {
 		if (ref->data.weak == 0) {
 			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
@@ -1193,10 +1316,13 @@ static struct binder_node *binder_get_node_from_ref(
  * binder_free_ref() - free the binder_ref
  * @ref:	ref to free
  *
- * Free the binder_ref and the binder_ref_death indicated by ref->death.
+ * Free the binder_ref. Free the binder_node indicated by ref->node
+ * (if non-NULL) and the binder_ref_death indicated by ref->death.
  */
 static void binder_free_ref(struct binder_ref *ref)
 {
+	if (ref->node)
+		binder_free_node(ref->node);
 	kfree(ref->death);
 	kfree(ref);
 }
@@ -2687,11 +2813,13 @@ static int binder_thread_write(struct binder_proc *proc,
 				binder_put_node(node);
 				break;
 			}
+			binder_inner_proc_lock(proc);
 			if (cmd == BC_ACQUIRE_DONE) {
 				if (node->pending_strong_ref == 0) {
 					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
 						proc->pid, thread->pid,
 						node->debug_id);
+					binder_inner_proc_unlock(proc);
 					binder_put_node(node);
 					break;
 				}
@@ -2701,11 +2829,13 @@ static int binder_thread_write(struct binder_proc *proc,
 					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
 						proc->pid, thread->pid,
 						node->debug_id);
+					binder_inner_proc_unlock(proc);
 					binder_put_node(node);
 					break;
 				}
 				node->pending_weak_ref = 0;
 			}
+			binder_inner_proc_unlock(proc);
 			binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
 			binder_debug(BINDER_DEBUG_USER_REFS,
 				     "%d:%d %s node %d ls %d lw %d tr %d\n",
@@ -3091,6 +3221,7 @@ static int binder_thread_read(struct binder_proc *proc,
 		struct binder_transaction *t = NULL;
 		struct binder_thread *t_from;
 
+		binder_inner_proc_lock(proc);
 		if (!list_empty(&thread->todo)) {
 			w = list_first_entry(&thread->todo, struct binder_work,
 					     entry);
@@ -3104,11 +3235,15 @@ static int binder_thread_read(struct binder_proc *proc,
 			break;
 		}
 
-		if (end - ptr < sizeof(tr) + 4)
+		if (end - ptr < sizeof(tr) + 4) {
+			binder_inner_proc_unlock(proc);
 			break;
+		}
+		list_del_init(&w->entry);
 
 		switch (w->type) {
 		case BINDER_WORK_TRANSACTION: {
+			binder_inner_proc_unlock(proc);
 			t = container_of(w, struct binder_transaction, work);
 		} break;
 		case BINDER_WORK_RETURN_ERROR: {
@@ -3116,15 +3251,16 @@ static int binder_thread_read(struct binder_proc *proc,
 					w, struct binder_error, work);
 
 			WARN_ON(e->cmd == BR_OK);
+			binder_inner_proc_unlock(proc);
 			if (put_user(e->cmd, (uint32_t __user *)ptr))
 				return -EFAULT;
 			e->cmd = BR_OK;
 			ptr += sizeof(uint32_t);
 
 			binder_stat_br(proc, thread, cmd);
-			list_del(&w->entry);
 		} break;
 		case BINDER_WORK_TRANSACTION_COMPLETE: {
+			binder_inner_proc_unlock(proc);
 			cmd = BR_TRANSACTION_COMPLETE;
 			if (put_user(cmd, (uint32_t __user *)ptr))
 				return -EFAULT;
@@ -3134,8 +3270,6 @@ static int binder_thread_read(struct binder_proc *proc,
 			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
 				     "%d:%d BR_TRANSACTION_COMPLETE\n",
 				     proc->pid, thread->pid);
-
-			list_del(&w->entry);
 			kfree(w);
 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
 		} break;
@@ -3172,8 +3306,6 @@ static int binder_thread_read(struct binder_proc *proc,
 				node->has_strong_ref = 0;
 			if (!weak && has_weak_ref)
 				node->has_weak_ref = 0;
-			list_del(&w->entry);
-
 			if (!weak && !strong) {
 				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
 					     "%d:%d node %d u%016llx c%016llx deleted\n",
@@ -3182,9 +3314,11 @@ static int binder_thread_read(struct binder_proc *proc,
 					     (u64)node_ptr,
 					     (u64)node_cookie);
 				rb_erase(&node->rb_node, &proc->nodes);
-				kfree(node);
-				binder_stats_deleted(BINDER_STAT_NODE);
-			}
+				binder_inner_proc_unlock(proc);
+				binder_free_node(node);
+			} else
+				binder_inner_proc_unlock(proc);
+
 			if (weak && !has_weak_ref)
 				ret = binder_put_node_cmd(
 						proc, thread, &ptr, node_ptr,
@@ -3226,6 +3360,13 @@ static int binder_thread_read(struct binder_proc *proc,
 				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
 			else
 				cmd = BR_DEAD_BINDER;
+			/*
+			 * TODO: there is a race condition between
+			 * death notification requests and delivery
+			 * of the notifications. This will be handled
+			 * in a later patch.
+			 */
+			binder_inner_proc_unlock(proc);
 			if (put_user(cmd, (uint32_t __user *)ptr))
 				return -EFAULT;
 			ptr += sizeof(uint32_t);
@@ -3243,11 +3384,14 @@ static int binder_thread_read(struct binder_proc *proc,
 				      (u64)death->cookie);
 
 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
-				list_del(&w->entry);
 				kfree(death);
 				binder_stats_deleted(BINDER_STAT_DEATH);
-			} else
-				list_move(&w->entry, &proc->delivered_death);
+			} else {
+				binder_inner_proc_lock(proc);
+				list_add_tail(&w->entry,
+					      &proc->delivered_death);
+				binder_inner_proc_unlock(proc);
+			}
 			if (cmd == BR_DEAD_BINDER)
 				goto done; /* DEAD_BINDER notifications can cause transactions */
 		} break;
@@ -3325,7 +3469,6 @@ static int binder_thread_read(struct binder_proc *proc,
 
 		if (t_from)
 			binder_thread_dec_tmpref(t_from);
-		list_del(&t->work.entry);
 		t->buffer->allow_user_free = 1;
 		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
 			t->to_parent = thread->transaction_stack;
@@ -3924,16 +4067,19 @@ static int binder_node_release(struct binder_node *node, int refs)
 {
 	struct binder_ref *ref;
 	int death = 0;
+	struct binder_proc *proc = node->proc;
 
-	list_del_init(&node->work.entry);
 	binder_release_work(&node->async_todo);
+
+	binder_inner_proc_lock(proc);
+	list_del_init(&node->work.entry);
 	/*
 	 * The caller must have taken a temporary ref on the node,
 	 */
 	BUG_ON(!node->tmp_refs);
 	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
-		kfree(node);
-		binder_stats_deleted(BINDER_STAT_NODE);
+		binder_inner_proc_unlock(proc);
+		binder_free_node(node);
 
 		return refs;
 	}
@@ -3941,6 +4087,7 @@ static int binder_node_release(struct binder_node *node, int refs)
 	node->proc = NULL;
 	node->local_strong_refs = 0;
 	node->local_weak_refs = 0;
+	binder_inner_proc_unlock(proc);
 
 	spin_lock(&binder_dead_nodes_lock);
 	hlist_add_head(&node->dead_node, &binder_dead_nodes);
-- 
2.13.2.725.g09c95d1e9-goog


Thread overview: 58+ messages
2017-06-29 19:01 [PATCH 00/37] fine-grained locking in binder driver Todd Kjos
2017-06-29 19:01 ` [PATCH 01/37] Revert "android: binder: Sanity check at binder ioctl" Todd Kjos
2017-07-03  9:17   ` Greg KH
     [not found]     ` <CAHRSSExh9JX5xiSRig55DSei31C_BPSasOKB+BTC6jjjuZ+ZpA@mail.gmail.com>
2017-07-05 18:47       ` Greg KH
2017-06-29 19:01 ` [PATCH 02/37] binder: use group leader instead of open thread Todd Kjos
2017-07-03  9:17   ` Greg KH
     [not found]     ` <CAHRSSEyH3t0igLJqcC4e-HR68RH0bg4T310jnRHZzrMChoOeOg@mail.gmail.com>
2017-07-05 18:47       ` Greg KH
2017-07-07 18:23         ` Todd Kjos
2017-07-07 18:29           ` Greg KH
2017-07-24 21:00   ` John Stultz
2017-07-24 21:07     ` John Stultz
2017-07-25  9:13       ` Martijn Coenen
2017-07-27  9:08         ` Amit Pundir
2017-07-27 13:23           ` Greg Kroah-Hartman
2017-07-27 13:40             ` Martijn Coenen
2017-07-27 13:42             ` Amit Pundir
2017-07-28 11:58               ` Martijn Coenen
2017-07-24 21:23     ` Greg Kroah-Hartman
2017-07-24 21:53       ` John Stultz
2017-06-29 19:01 ` [PATCH 03/37] binder: Use wake up hint for synchronous transactions Todd Kjos
2017-07-03  9:18   ` Greg KH
2017-06-29 19:01 ` [PATCH 04/37] binder: separate binder allocator structure from binder proc Todd Kjos
2017-06-29 19:01 ` [PATCH 05/37] binder: remove unneeded cleanup code Todd Kjos
2017-06-29 19:01 ` [PATCH 06/37] binder: separate out binder_alloc functions Todd Kjos
2017-06-29 19:01 ` [PATCH 07/37] binder: move binder_alloc to separate file Todd Kjos
2017-06-29 19:01 ` [PATCH 08/37] binder: remove binder_debug_no_lock mechanism Todd Kjos
2017-06-29 19:01 ` [PATCH 09/37] binder: add protection for non-perf cases Todd Kjos
2017-06-29 19:01 ` [PATCH 10/37] binder: change binder_stats to atomics Todd Kjos
2017-06-29 19:01 ` [PATCH 11/37] binder: make binder_last_id an atomic Todd Kjos
2017-06-29 19:01 ` [PATCH 12/37] binder: add log information for binder transaction failures Todd Kjos
2017-06-29 19:01 ` [PATCH 13/37] binder: refactor queue management in binder_thread_read Todd Kjos
2017-06-29 19:01 ` [PATCH 14/37] binder: avoid race conditions when enqueuing txn Todd Kjos
2017-06-29 19:01 ` [PATCH 15/37] binder: don't modify thread->looper from other threads Todd Kjos
2017-06-29 19:01 ` [PATCH 16/37] binder: remove dead code in binder_get_ref_for_node Todd Kjos
2017-06-29 19:01 ` [PATCH 17/37] binder: protect against two threads freeing buffer Todd Kjos
2017-06-29 19:01 ` [PATCH 18/37] binder: add more debug info when allocation fails Todd Kjos
2017-06-29 19:01 ` [PATCH 19/37] binder: use atomic for transaction_log index Todd Kjos
2017-06-29 19:01 ` [PATCH 20/37] binder: refactor binder_pop_transaction Todd Kjos
2017-06-29 19:01 ` [PATCH 21/37] binder: guarantee txn complete / errors delivered in-order Todd Kjos
2017-06-29 19:01 ` [PATCH 22/37] binder: make sure target_node has strong ref Todd Kjos
2017-06-29 19:01 ` [PATCH 23/37] binder: make sure accesses to proc/thread are safe Todd Kjos
2017-06-29 19:01 ` [PATCH 24/37] binder: refactor binder ref inc/dec for thread safety Todd Kjos
2017-06-29 19:01 ` [PATCH 25/37] binder: use node->tmp_refs to ensure node safety Todd Kjos
2017-06-29 19:02 ` [PATCH 26/37] binder: introduce locking helper functions Todd Kjos
2017-06-29 19:02 ` Todd Kjos [this message]
2017-06-29 19:02 ` [PATCH 28/37] binder: add spinlocks to protect todo lists Todd Kjos
2017-06-29 19:02 ` [PATCH 29/37] binder: add spinlock to protect binder_node Todd Kjos
2017-06-29 19:02 ` [PATCH 30/37] binder: protect proc->nodes with inner lock Todd Kjos
2017-06-29 19:02 ` [PATCH 31/37] binder: protect proc->threads with inner_lock Todd Kjos
2017-06-29 19:02 ` [PATCH 32/37] binder: protect transaction_stack with inner lock Todd Kjos
2017-06-29 19:02 ` [PATCH 33/37] binder: use inner lock to protect thread accounting Todd Kjos
2017-06-29 19:02 ` [PATCH 34/37] binder: protect binder_ref with outer lock Todd Kjos
2017-06-29 19:02 ` [PATCH 35/37] binder: protect against stale pointers in print_binder_transaction Todd Kjos
2017-06-29 19:02 ` [PATCH 36/37] binder: fix death race conditions Todd Kjos
2017-06-30  6:05   ` Greg KH
2017-06-29 19:02 ` [PATCH 37/37] binder: remove global binder lock Todd Kjos
2017-06-30  6:04 ` [PATCH 00/37] fine-grained locking in binder driver Greg KH
2017-07-17 12:49   ` Greg KH
