All of lore.kernel.org
 help / color / mirror / Atom feed
From: Peter Zijlstra <peterz@infradead.org>
To: Byungchul Park <byungchul.park@lge.com>
Cc: mingo@kernel.org, tglx@linutronix.de, walken@google.com,
	boqun.feng@gmail.com, kirill@shutemov.name,
	linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	akpm@linux-foundation.org, willy@infradead.org,
	npiggin@gmail.com, kernel-team@lge.com
Subject: Re: [PATCH v8 06/14] lockdep: Detect and handle hist_lock ring buffer overwrite
Date: Wed, 9 Aug 2017 16:16:05 +0200	[thread overview]
Message-ID: <20170809141605.7r3cldc4na3skcnp@hirez.programming.kicks-ass.net> (raw)
In-Reply-To: <1502089981-21272-7-git-send-email-byungchul.park@lge.com>

On Mon, Aug 07, 2017 at 04:12:53PM +0900, Byungchul Park wrote:
> @@ -4773,14 +4784,28 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
>   */
>  void crossrelease_hist_start(enum context_t c)
>  {
> -	if (current->xhlocks)
> -		current->xhlock_idx_hist[c] = current->xhlock_idx;
> +	struct task_struct *cur = current;
> +
> +	if (cur->xhlocks) {
> +		cur->xhlock_idx_hist[c] = cur->xhlock_idx;
> +		cur->hist_id_save[c] = cur->hist_id;
> +	}
>  }
>  
>  void crossrelease_hist_end(enum context_t c)
>  {
> -	if (current->xhlocks)
> -		current->xhlock_idx = current->xhlock_idx_hist[c];
> +	struct task_struct *cur = current;
> +
> +	if (cur->xhlocks) {
> +		unsigned int idx = cur->xhlock_idx_hist[c];
> +		struct hist_lock *h = &xhlock(idx);
> +
> +		cur->xhlock_idx = idx;
> +
> +		/* Check if the ring was overwritten. */
> +		if (h->hist_id != cur->hist_id_save[c])
> +			invalidate_xhlock(h);
> +	}
>  }
>  
>  static int cross_lock(struct lockdep_map *lock)
> @@ -4826,6 +4851,7 @@ static inline int depend_after(struct held_lock *hlock)
>   * Check if the xhlock is valid, which would be false if,
>   *
> >   *    1. Has not used after initialization yet.
> + *    2. Got invalidated.
>   *
>   * Remind hist_lock is implemented as a ring buffer.
>   */
> @@ -4857,6 +4883,7 @@ static void add_xhlock(struct held_lock *hlock)
>  
>  	/* Initialize hist_lock's members */
>  	xhlock->hlock = *hlock;
> +	xhlock->hist_id = current->hist_id++;
>  
>  	xhlock->trace.nr_entries = 0;
>  	xhlock->trace.max_entries = MAX_XHLOCK_TRACE_ENTRIES;


Hehe, _another_ scheme...

Yes I think this works.. but I had just sort of understood the last one.

How about I do this on top? That I think is a combination of what I
proposed last and your single invalidate thing. Combined they solve the
problem with the least amount of extra storage (a single int).


---
Subject: lockdep: Simplify xhlock ring buffer invalidation
From: Peter Zijlstra <peterz@infradead.org>
Date: Wed Aug 9 15:31:27 CEST 2017


Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 include/linux/lockdep.h  |   20 -----------
 include/linux/sched.h    |    4 --
 kernel/locking/lockdep.c |   82 ++++++++++++++++++++++++++++++-----------------
 3 files changed, 54 insertions(+), 52 deletions(-)

--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -284,26 +284,6 @@ struct held_lock {
  */
 struct hist_lock {
 	/*
-	 * Id for each entry in the ring buffer. This is used to
-	 * decide whether the ring buffer was overwritten or not.
-	 *
-	 * For example,
-	 *
-	 *           |<----------- hist_lock ring buffer size ------->|
-	 *           pppppppppppppppppppppiiiiiiiiiiiiiiiiiiiiiiiiiiiii
-	 * wrapped > iiiiiiiiiiiiiiiiiiiiiiiiiii.......................
-	 *
-	 *           where 'p' represents an acquisition in process
-	 *           context, 'i' represents an acquisition in irq
-	 *           context.
-	 *
-	 * In this example, the ring buffer was overwritten by
-	 * acquisitions in irq context, that should be detected on
-	 * rollback or commit.
-	 */
-	unsigned int hist_id;
-
-	/*
 	 * Separate stack_trace data. This will be used at commit step.
 	 */
 	struct stack_trace	trace;
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -855,9 +855,7 @@ struct task_struct {
 	unsigned int xhlock_idx;
 	/* For restoring at history boundaries */
 	unsigned int xhlock_idx_hist[XHLOCK_NR];
-	unsigned int hist_id;
-	/* For overwrite check at each context exit */
-	unsigned int hist_id_save[XHLOCK_NR];
+	unsigned int xhlock_idx_max;
 #endif
 
 #ifdef CONFIG_UBSAN
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -4818,26 +4818,65 @@ void crossrelease_hist_start(enum contex
 {
 	struct task_struct *cur = current;
 
-	if (cur->xhlocks) {
+	if (cur->xhlocks)
 		cur->xhlock_idx_hist[c] = cur->xhlock_idx;
-		cur->hist_id_save[c] = cur->hist_id;
-	}
 }
 
 void crossrelease_hist_end(enum context_t c)
 {
 	struct task_struct *cur = current;
+	unsigned int idx;
 
-	if (cur->xhlocks) {
-		unsigned int idx = cur->xhlock_idx_hist[c];
-		struct hist_lock *h = &xhlock(idx);
-
-		cur->xhlock_idx = idx;
-
-		/* Check if the ring was overwritten. */
-		if (h->hist_id != cur->hist_id_save[c])
-			invalidate_xhlock(h);
-	}
+	if (!cur->xhlocks)
+		return;
+
+	idx = cur->xhlock_idx_hist[c];
+	cur->xhlock_idx = idx;
+
+	/*
+	 * A bit of magic here.. this deals with rewinding the (cyclic) history
+	 * array further than its size. IOW. loses the complete history.
+	 *
+	 * We detect this by tracking the previous oldest entry we've (over)
+	 * written in @xhlock_idx_max, this means the next entry is the oldest
+	 * entry still in the buffer, ie. its tail.
+	 *
+	 * So when we restore an @xhlock_idx that is at least MAX_XHLOCKS_NR
+	 * older than @xhlock_idx_max we know we've just wiped the entire
+	 * history.
+	 */
+	if ((cur->xhlock_idx_max - idx) < MAX_XHLOCKS_NR)
+		return;
+
+	/*
+	 * Now that we know the buffer is effectively empty, reset our state
+	 * such that it appears empty (without in fact clearing the entire
+	 * buffer).
+	 *
+	 * Pick @idx as the 'new' beginning, (re)set all save-points to not
+	 * rewind past it and reset the max. Then invalidate this idx such that
+	 * commit_xhlocks() will never rewind past it. Since xhlock_idx_inc()
+	 * will return the _next_ entry, we'll not overwrite this invalid entry
+	 * until the entire buffer is full again.
+	 */
+	for (c = 0; c < XHLOCK_NR; c++)
+		cur->xhlock_idx_hist[c] = idx;
+	cur->xhlock_idx_max = idx;
+	invalidate_xhlock(&xhlock(idx));
+}
+
+static inline unsigned int xhlock_idx_inc(void)
+{
+	struct task_struct *cur = current;
+	unsigned int idx = ++cur->xhlock_idx;
+
+	/*
+	 * As per the requirement in crossrelease_hist_end(), track the tail.
+	 */
+	if ((int)(cur->xhlock_idx_max - idx) < 0)
+		cur->xhlock_idx_max = idx;
+
+	return idx;
 }
 
 static int cross_lock(struct lockdep_map *lock)
@@ -4902,7 +4941,7 @@ static inline int xhlock_valid(struct hi
  */
 static void add_xhlock(struct held_lock *hlock)
 {
-	unsigned int idx = ++current->xhlock_idx;
+	unsigned int idx = xhlock_idx_inc();
 	struct hist_lock *xhlock = &xhlock(idx);
 
 #ifdef CONFIG_DEBUG_LOCKDEP
@@ -4915,7 +4954,6 @@ static void add_xhlock(struct held_lock
 
 	/* Initialize hist_lock's members */
 	xhlock->hlock = *hlock;
-	xhlock->hist_id = current->hist_id++;
 
 	xhlock->trace.nr_entries = 0;
 	xhlock->trace.max_entries = MAX_XHLOCK_TRACE_ENTRIES;
@@ -5071,7 +5109,6 @@ static int commit_xhlock(struct cross_lo
 static void commit_xhlocks(struct cross_lock *xlock)
 {
 	unsigned int cur = current->xhlock_idx;
-	unsigned int prev_hist_id = xhlock(cur).hist_id;
 	unsigned int i;
 
 	if (!graph_lock())
@@ -5091,17 +5128,6 @@ static void commit_xhlocks(struct cross_
 				break;
 
 			/*
-			 * Filter out the cases that the ring buffer was
-			 * overwritten and the previous entry has a bigger
-			 * hist_id than the following one, which is impossible
-			 * otherwise.
-			 */
-			if (unlikely(before(xhlock->hist_id, prev_hist_id)))
-				break;
-
-			prev_hist_id = xhlock->hist_id;
-
-			/*
 			 * commit_xhlock() returns 0 with graph_lock already
 			 * released if fail.
 			 */
@@ -5186,11 +5212,9 @@ void lockdep_init_task(struct task_struc
 	int i;
 
 	task->xhlock_idx = UINT_MAX;
-	task->hist_id = 0;
 
 	for (i = 0; i < XHLOCK_NR; i++) {
 		task->xhlock_idx_hist[i] = UINT_MAX;
-		task->hist_id_save[i] = 0;
 	}
 
 	task->xhlocks = kzalloc(sizeof(struct hist_lock) * MAX_XHLOCKS_NR,

WARNING: multiple messages have this Message-ID (diff)
From: Peter Zijlstra <peterz@infradead.org>
To: Byungchul Park <byungchul.park@lge.com>
Cc: mingo@kernel.org, tglx@linutronix.de, walken@google.com,
	boqun.feng@gmail.com, kirill@shutemov.name,
	linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	akpm@linux-foundation.org, willy@infradead.org,
	npiggin@gmail.com, kernel-team@lge.com
Subject: Re: [PATCH v8 06/14] lockdep: Detect and handle hist_lock ring buffer overwrite
Date: Wed, 9 Aug 2017 16:16:05 +0200	[thread overview]
Message-ID: <20170809141605.7r3cldc4na3skcnp@hirez.programming.kicks-ass.net> (raw)
In-Reply-To: <1502089981-21272-7-git-send-email-byungchul.park@lge.com>

On Mon, Aug 07, 2017 at 04:12:53PM +0900, Byungchul Park wrote:
> @@ -4773,14 +4784,28 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
>   */
>  void crossrelease_hist_start(enum context_t c)
>  {
> -	if (current->xhlocks)
> -		current->xhlock_idx_hist[c] = current->xhlock_idx;
> +	struct task_struct *cur = current;
> +
> +	if (cur->xhlocks) {
> +		cur->xhlock_idx_hist[c] = cur->xhlock_idx;
> +		cur->hist_id_save[c] = cur->hist_id;
> +	}
>  }
>  
>  void crossrelease_hist_end(enum context_t c)
>  {
> -	if (current->xhlocks)
> -		current->xhlock_idx = current->xhlock_idx_hist[c];
> +	struct task_struct *cur = current;
> +
> +	if (cur->xhlocks) {
> +		unsigned int idx = cur->xhlock_idx_hist[c];
> +		struct hist_lock *h = &xhlock(idx);
> +
> +		cur->xhlock_idx = idx;
> +
> +		/* Check if the ring was overwritten. */
> +		if (h->hist_id != cur->hist_id_save[c])
> +			invalidate_xhlock(h);
> +	}
>  }
>  
>  static int cross_lock(struct lockdep_map *lock)
> @@ -4826,6 +4851,7 @@ static inline int depend_after(struct held_lock *hlock)
>   * Check if the xhlock is valid, which would be false if,
>   *
> >   *    1. Has not used after initialization yet.
> + *    2. Got invalidated.
>   *
>   * Remind hist_lock is implemented as a ring buffer.
>   */
> @@ -4857,6 +4883,7 @@ static void add_xhlock(struct held_lock *hlock)
>  
>  	/* Initialize hist_lock's members */
>  	xhlock->hlock = *hlock;
> +	xhlock->hist_id = current->hist_id++;
>  
>  	xhlock->trace.nr_entries = 0;
>  	xhlock->trace.max_entries = MAX_XHLOCK_TRACE_ENTRIES;


Hehe, _another_ scheme...

Yes I think this works.. but I had just sort of understood the last one.

How about I do this on top? That I think is a combination of what I
proposed last and your single invalidate thing. Combined they solve the
problem with the least amount of extra storage (a single int).


---
Subject: lockdep: Simplify xhlock ring buffer invalidation
From: Peter Zijlstra <peterz@infradead.org>
Date: Wed Aug 9 15:31:27 CEST 2017


Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 include/linux/lockdep.h  |   20 -----------
 include/linux/sched.h    |    4 --
 kernel/locking/lockdep.c |   82 ++++++++++++++++++++++++++++++-----------------
 3 files changed, 54 insertions(+), 52 deletions(-)

--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -284,26 +284,6 @@ struct held_lock {
  */
 struct hist_lock {
 	/*
-	 * Id for each entry in the ring buffer. This is used to
-	 * decide whether the ring buffer was overwritten or not.
-	 *
-	 * For example,
-	 *
-	 *           |<----------- hist_lock ring buffer size ------->|
-	 *           pppppppppppppppppppppiiiiiiiiiiiiiiiiiiiiiiiiiiiii
-	 * wrapped > iiiiiiiiiiiiiiiiiiiiiiiiiii.......................
-	 *
-	 *           where 'p' represents an acquisition in process
-	 *           context, 'i' represents an acquisition in irq
-	 *           context.
-	 *
-	 * In this example, the ring buffer was overwritten by
-	 * acquisitions in irq context, that should be detected on
-	 * rollback or commit.
-	 */
-	unsigned int hist_id;
-
-	/*
 	 * Separate stack_trace data. This will be used at commit step.
 	 */
 	struct stack_trace	trace;
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -855,9 +855,7 @@ struct task_struct {
 	unsigned int xhlock_idx;
 	/* For restoring at history boundaries */
 	unsigned int xhlock_idx_hist[XHLOCK_NR];
-	unsigned int hist_id;
-	/* For overwrite check at each context exit */
-	unsigned int hist_id_save[XHLOCK_NR];
+	unsigned int xhlock_idx_max;
 #endif
 
 #ifdef CONFIG_UBSAN
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -4818,26 +4818,65 @@ void crossrelease_hist_start(enum contex
 {
 	struct task_struct *cur = current;
 
-	if (cur->xhlocks) {
+	if (cur->xhlocks)
 		cur->xhlock_idx_hist[c] = cur->xhlock_idx;
-		cur->hist_id_save[c] = cur->hist_id;
-	}
 }
 
 void crossrelease_hist_end(enum context_t c)
 {
 	struct task_struct *cur = current;
+	unsigned int idx;
 
-	if (cur->xhlocks) {
-		unsigned int idx = cur->xhlock_idx_hist[c];
-		struct hist_lock *h = &xhlock(idx);
-
-		cur->xhlock_idx = idx;
-
-		/* Check if the ring was overwritten. */
-		if (h->hist_id != cur->hist_id_save[c])
-			invalidate_xhlock(h);
-	}
+	if (!cur->xhlocks)
+		return;
+
+	idx = cur->xhlock_idx_hist[c];
+	cur->xhlock_idx = idx;
+
+	/*
+	 * A bit of magic here.. this deals with rewinding the (cyclic) history
+	 * array further than its size. IOW. loses the complete history.
+	 *
+	 * We detect this by tracking the previous oldest entry we've (over)
+	 * written in @xhlock_idx_max, this means the next entry is the oldest
+	 * entry still in the buffer, ie. its tail.
+	 *
+	 * So when we restore an @xhlock_idx that is at least MAX_XHLOCKS_NR
+	 * older than @xhlock_idx_max we know we've just wiped the entire
+	 * history.
+	 */
+	if ((cur->xhlock_idx_max - idx) < MAX_XHLOCKS_NR)
+		return;
+
+	/*
+	 * Now that we know the buffer is effectively empty, reset our state
+	 * such that it appears empty (without in fact clearing the entire
+	 * buffer).
+	 *
+	 * Pick @idx as the 'new' beginning, (re)set all save-points to not
+	 * rewind past it and reset the max. Then invalidate this idx such that
+	 * commit_xhlocks() will never rewind past it. Since xhlock_idx_inc()
+	 * will return the _next_ entry, we'll not overwrite this invalid entry
+	 * until the entire buffer is full again.
+	 */
+	for (c = 0; c < XHLOCK_NR; c++)
+		cur->xhlock_idx_hist[c] = idx;
+	cur->xhlock_idx_max = idx;
+	invalidate_xhlock(&xhlock(idx));
+}
+
+static inline unsigned int xhlock_idx_inc(void)
+{
+	struct task_struct *cur = current;
+	unsigned int idx = ++cur->xhlock_idx;
+
+	/*
+	 * As per the requirement in crossrelease_hist_end(), track the tail.
+	 */
+	if ((int)(cur->xhlock_idx_max - idx) < 0)
+		cur->xhlock_idx_max = idx;
+
+	return idx;
 }
 
 static int cross_lock(struct lockdep_map *lock)
@@ -4902,7 +4941,7 @@ static inline int xhlock_valid(struct hi
  */
 static void add_xhlock(struct held_lock *hlock)
 {
-	unsigned int idx = ++current->xhlock_idx;
+	unsigned int idx = xhlock_idx_inc();
 	struct hist_lock *xhlock = &xhlock(idx);
 
 #ifdef CONFIG_DEBUG_LOCKDEP
@@ -4915,7 +4954,6 @@ static void add_xhlock(struct held_lock
 
 	/* Initialize hist_lock's members */
 	xhlock->hlock = *hlock;
-	xhlock->hist_id = current->hist_id++;
 
 	xhlock->trace.nr_entries = 0;
 	xhlock->trace.max_entries = MAX_XHLOCK_TRACE_ENTRIES;
@@ -5071,7 +5109,6 @@ static int commit_xhlock(struct cross_lo
 static void commit_xhlocks(struct cross_lock *xlock)
 {
 	unsigned int cur = current->xhlock_idx;
-	unsigned int prev_hist_id = xhlock(cur).hist_id;
 	unsigned int i;
 
 	if (!graph_lock())
@@ -5091,17 +5128,6 @@ static void commit_xhlocks(struct cross_
 				break;
 
 			/*
-			 * Filter out the cases that the ring buffer was
-			 * overwritten and the previous entry has a bigger
-			 * hist_id than the following one, which is impossible
-			 * otherwise.
-			 */
-			if (unlikely(before(xhlock->hist_id, prev_hist_id)))
-				break;
-
-			prev_hist_id = xhlock->hist_id;
-
-			/*
 			 * commit_xhlock() returns 0 with graph_lock already
 			 * released if fail.
 			 */
@@ -5186,11 +5212,9 @@ void lockdep_init_task(struct task_struc
 	int i;
 
 	task->xhlock_idx = UINT_MAX;
-	task->hist_id = 0;
 
 	for (i = 0; i < XHLOCK_NR; i++) {
 		task->xhlock_idx_hist[i] = UINT_MAX;
-		task->hist_id_save[i] = 0;
 	}
 
 	task->xhlocks = kzalloc(sizeof(struct hist_lock) * MAX_XHLOCKS_NR,

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">email@kvack.org</a>

  reply	other threads:[~2017-08-09 14:16 UTC|newest]

Thread overview: 152+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2017-08-07  7:12 [PATCH v8 00/14] lockdep: Implement crossrelease feature Byungchul Park
2017-08-07  7:12 ` Byungchul Park
2017-08-07  7:12 ` [PATCH v8 01/14] lockdep: Refactor lookup_chain_cache() Byungchul Park
2017-08-07  7:12   ` Byungchul Park
2017-08-10 12:18   ` [tip:locking/core] locking/lockdep: " tip-bot for Byungchul Park
2017-08-07  7:12 ` [PATCH v8 02/14] lockdep: Add a function building a chain between two classes Byungchul Park
2017-08-07  7:12   ` Byungchul Park
2017-08-10 12:18   ` [tip:locking/core] locking/lockdep: " tip-bot for Byungchul Park
2017-08-07  7:12 ` [PATCH v8 03/14] lockdep: Change the meaning of check_prev_add()'s return value Byungchul Park
2017-08-07  7:12   ` Byungchul Park
2017-08-10 12:19   ` [tip:locking/core] locking/lockdep: " tip-bot for Byungchul Park
2017-08-07  7:12 ` [PATCH v8 04/14] lockdep: Make check_prev_add() able to handle external stack_trace Byungchul Park
2017-08-07  7:12   ` Byungchul Park
2017-08-10 12:19   ` [tip:locking/core] locking/lockdep: " tip-bot for Byungchul Park
2017-08-07  7:12 ` [PATCH v8 05/14] lockdep: Implement crossrelease feature Byungchul Park
2017-08-07  7:12   ` Byungchul Park
2017-08-09 14:05   ` Peter Zijlstra
2017-08-09 14:05     ` Peter Zijlstra
2017-08-10  1:30     ` Byungchul Park
2017-08-10  1:30       ` Byungchul Park
2017-08-10  9:21       ` Peter Zijlstra
2017-08-10  9:21         ` Peter Zijlstra
2017-08-10 12:19   ` [tip:locking/core] locking/lockdep: Implement the 'crossrelease' feature tip-bot for Byungchul Park
2017-08-07  7:12 ` [PATCH v8 06/14] lockdep: Detect and handle hist_lock ring buffer overwrite Byungchul Park
2017-08-07  7:12   ` Byungchul Park
2017-08-09 14:16   ` Peter Zijlstra [this message]
2017-08-09 14:16     ` Peter Zijlstra
2017-08-10  1:32     ` Byungchul Park
2017-08-10  1:32       ` Byungchul Park
2017-08-10  9:22       ` Peter Zijlstra
2017-08-10  9:22         ` Peter Zijlstra
2017-08-10 10:32     ` Byungchul Park
2017-08-10 10:32       ` Byungchul Park
2017-08-10 11:59   ` Boqun Feng
2017-08-10 12:11     ` Byungchul Park
2017-08-10 12:11       ` Byungchul Park
2017-08-10 12:51       ` Boqun Feng
2017-08-10 13:17         ` Boqun Feng
2017-08-10 13:17           ` Boqun Feng
2017-08-11  0:44           ` Byungchul Park
2017-08-11  0:44             ` Byungchul Park
2017-08-11  3:43           ` Byungchul Park
2017-08-11  3:43             ` Byungchul Park
2017-08-11  8:03             ` Boqun Feng
2017-08-11  8:52               ` Byungchul Park
2017-08-11  8:52                 ` Byungchul Park
2017-08-11  9:44                 ` Byungchul Park
2017-08-11  9:44                   ` Byungchul Park
2017-08-11 13:06                   ` Byungchul Park
2017-08-11 13:06                     ` Byungchul Park
2017-08-14  7:05                     ` Boqun Feng
2017-08-14  7:22                       ` Byungchul Park
2017-08-14  7:22                         ` Byungchul Park
2017-08-14  7:29                       ` Byungchul Park
2017-08-14  7:29                         ` Byungchul Park
2017-08-11  0:40         ` Byungchul Park
2017-08-11  0:40           ` Byungchul Park
2017-08-11  1:03           ` Boqun Feng
2017-08-10 12:20   ` [tip:locking/core] locking/lockdep: " tip-bot for Byungchul Park
2017-08-07  7:12 ` [PATCH v8 07/14] lockdep: Handle non(or multi)-acquisition of a crosslock Byungchul Park
2017-08-07  7:12   ` Byungchul Park
2017-08-10 12:20   ` [tip:locking/core] locking/lockdep: " tip-bot for Byungchul Park
2017-08-07  7:12 ` [PATCH v8 08/14] lockdep: Make print_circular_bug() aware of crossrelease Byungchul Park
2017-08-07  7:12   ` Byungchul Park
2017-08-10 12:21   ` [tip:locking/core] locking/lockdep: " tip-bot for Byungchul Park
2017-08-07  7:12 ` [PATCH v8 09/14] lockdep: Apply crossrelease to completions Byungchul Park
2017-08-07  7:12   ` Byungchul Park
2017-08-07 10:20   ` kbuild test robot
2017-08-07 11:45   ` kbuild test robot
2017-08-09  9:51   ` Peter Zijlstra
2017-08-09  9:51     ` Peter Zijlstra
2017-08-09 10:24     ` Peter Zijlstra
2017-08-09 10:24       ` Peter Zijlstra
2017-08-10  1:24       ` Byungchul Park
2017-08-10  1:24         ` Byungchul Park
2017-08-10 12:21   ` [tip:locking/core] locking/lockdep: " tip-bot for Byungchul Park
2017-08-14  8:50   ` [PATCH v8 09/14] lockdep: " Arnd Bergmann
2017-08-14  8:50     ` Arnd Bergmann
2017-08-18 23:43     ` Boqun Feng
2017-08-18 23:43       ` Boqun Feng
2017-08-19 12:51       ` Arnd Bergmann
2017-08-19 12:51         ` Arnd Bergmann
2017-08-19 13:34         ` Arnd Bergmann
2017-08-19 13:34           ` Arnd Bergmann
2017-08-23 14:43           ` Boqun Feng
2017-08-20  3:18         ` Boqun Feng
2017-08-07  7:12 ` [PATCH v8 10/14] pagemap.h: Remove trailing white space Byungchul Park
2017-08-07  7:12   ` Byungchul Park
2017-08-07  7:12 ` [PATCH v8 11/14] lockdep: Apply crossrelease to PG_locked locks Byungchul Park
2017-08-07  7:12   ` Byungchul Park
2017-08-07 10:36   ` kbuild test robot
2017-08-10  1:35   ` Byungchul Park
2017-08-10  1:35     ` Byungchul Park
2017-08-10  9:25     ` Peter Zijlstra
2017-08-10  9:25       ` Peter Zijlstra
2017-09-05  1:03   ` Byungchul Park
2017-09-05  1:03     ` Byungchul Park
2017-08-07  7:12 ` [PATCH v8 12/14] lockdep: Apply lock_acquire(release) on __Set(__Clear)PageLocked Byungchul Park
2017-08-07  7:12   ` Byungchul Park
2017-08-07  7:13 ` [PATCH v8 13/14] lockdep: Move data of CONFIG_LOCKDEP_PAGELOCK from page to page_ext Byungchul Park
2017-08-07  7:13   ` Byungchul Park
2017-08-07 10:43   ` kbuild test robot
2017-08-07  7:13 ` [PATCH v8 14/14] lockdep: Crossrelease feature documentation Byungchul Park
2017-08-07  7:13   ` Byungchul Park
2017-08-07 15:58   ` kbuild test robot
2017-08-10 12:22   ` [tip:locking/core] locking/lockdep: Add 'crossrelease' " tip-bot for Byungchul Park
2017-08-09 15:50 ` [PATCH v8 00/14] lockdep: Implement crossrelease feature Peter Zijlstra
2017-08-09 15:50   ` Peter Zijlstra
2017-08-10  0:55   ` Byungchul Park
2017-08-10  0:55     ` Byungchul Park
2017-08-10  3:47     ` Byungchul Park
2017-08-10  3:47       ` Byungchul Park
2017-08-10 10:52     ` Byungchul Park
2017-08-10 10:52       ` Byungchul Park
2017-08-10  9:37   ` Byungchul Park
2017-08-10  9:37     ` Byungchul Park
2017-08-10 10:52     ` Peter Zijlstra
2017-08-10 10:52       ` Peter Zijlstra
2017-08-10 11:10 ` Ingo Molnar
2017-08-10 11:10   ` Ingo Molnar
2017-08-10 11:45   ` Byungchul Park
2017-08-10 11:45     ` Byungchul Park
2017-08-14 10:57     ` Ingo Molnar
2017-08-14 10:57       ` Ingo Molnar
2017-08-14 11:10       ` Byungchul Park
2017-08-14 11:10         ` Byungchul Park
2017-08-15  8:20 ` Ingo Molnar
2017-08-15  8:20   ` Ingo Molnar
2017-08-16  0:16   ` Byungchul Park
2017-08-16  0:16     ` Byungchul Park
2017-08-16  4:05     ` Boqun Feng
2017-08-16  4:05       ` Boqun Feng
2017-08-16  4:37       ` Byungchul Park
2017-08-16  4:37         ` Byungchul Park
2017-08-16  5:40         ` Boqun Feng
2017-08-16  6:37           ` Byungchul Park
2017-08-16  6:37             ` Byungchul Park
2017-08-16  5:05       ` Byungchul Park
2017-08-16  5:05         ` Byungchul Park
2017-08-16  5:58         ` Boqun Feng
2017-08-16  7:14           ` Byungchul Park
2017-08-16  7:14             ` Byungchul Park
2017-08-16  8:06             ` Byungchul Park
2017-08-16  8:06               ` Byungchul Park
2017-08-16  9:38               ` Byungchul Park
2017-08-16  9:38                 ` Byungchul Park
2017-08-17  7:48       ` Ingo Molnar
2017-08-17  7:48         ` Ingo Molnar
2017-08-17  8:04         ` Boqun Feng
2017-08-17  8:12           ` Ingo Molnar
2017-08-17  8:12             ` Ingo Molnar
2017-08-17  8:33             ` Boqun Feng

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20170809141605.7r3cldc4na3skcnp@hirez.programming.kicks-ass.net \
    --to=peterz@infradead.org \
    --cc=akpm@linux-foundation.org \
    --cc=boqun.feng@gmail.com \
    --cc=byungchul.park@lge.com \
    --cc=kernel-team@lge.com \
    --cc=kirill@shutemov.name \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=mingo@kernel.org \
    --cc=npiggin@gmail.com \
    --cc=tglx@linutronix.de \
    --cc=walken@google.com \
    --cc=willy@infradead.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.