From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>,
	Ingo Molnar <mingo@kernel.org>,
	Juri Lelli <juri.lelli@redhat.com>,
	Steven Rostedt <rostedt@goodmis.org>,
	Daniel Bristot de Oliveira <bristot@redhat.com>,
	Will Deacon <will@kernel.org>, Waiman Long <longman@redhat.com>,
	Boqun Feng <boqun.feng@gmail.com>,
	Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
	Davidlohr Bueso <dave@stgolabs.net>,
	Mike Galbraith <efault@gmx.de>
Subject: [patch V4 51/68] locking/rtmutex: Extend the rtmutex core to support ww_mutex
Date: Wed, 11 Aug 2021 14:23:33 +0200 (CEST)
Message-ID: <20210811121417.184072097@linutronix.de>
In-Reply-To: <20210811120348.855823694@linutronix.de>

From: Peter Zijlstra <peterz@infradead.org>

Add a ww acquire context pointer to the waiter and the relevant lock
functions, and add the ww_mutex related invocations at the proper spots
in the locking code, similar to the regular mutex based variant.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
V4: Simplify __waiter_less() (PeterZ)
---
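For reference, the wait-die ordering rule which __waiter_less() applies
to two waiters of equal priority can be sketched in plain C. This is a
minimal illustrative model, not kernel code: struct example_ctx and the
function names are invented here, and only the ordering rule itself
(ww waiters order before plain waiters; among ww waiters the older
acquire stamp wins) mirrors the patch:

#include <stdbool.h>

/* Illustrative stand-in for the stamp carried by ww_acquire_ctx */
struct example_ctx {
	unsigned long stamp;
};

/* Wrap-safe comparison: true if @a started acquiring before @b */
static bool stamp_before(const struct example_ctx *a,
			 const struct example_ctx *b)
{
	return (long)(a->stamp - b->stamp) < 0;
}

/*
 * Tie-break for two equal-priority waiters: a waiter with a ww
 * context orders before one without, and among ww waiters the older
 * (smaller) stamp wins, matching the aw->ww_ctx checks in
 * __waiter_less().
 */
static bool example_tiebreak(const struct example_ctx *a,
			     const struct example_ctx *b)
{
	if (a && !b)
		return true;
	if (a && b)
		return stamp_before(a, b);
	return false;
}
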
 kernel/locking/rtmutex.c        |  119 ++++++++++++++++++++++++++++++++++++----
 kernel/locking/rtmutex_api.c    |    4 -
 kernel/locking/rtmutex_common.h |    2 
 kernel/locking/rwsem.c          |    2 
 4 files changed, 114 insertions(+), 13 deletions(-)

--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -17,9 +17,44 @@
 #include <linux/sched/signal.h>
 #include <linux/sched/rt.h>
 #include <linux/sched/wake_q.h>
+#include <linux/ww_mutex.h>
 
 #include "rtmutex_common.h"
 
+#ifndef WW_RT
+# define build_ww_mutex()	(false)
+# define ww_container_of(rtm)	NULL
+
+static inline int __ww_mutex_add_waiter(struct rt_mutex_waiter *waiter,
+					struct rt_mutex *lock,
+					struct ww_acquire_ctx *ww_ctx)
+{
+	return 0;
+}
+
+static inline void __ww_mutex_check_waiters(struct rt_mutex *lock,
+					    struct ww_acquire_ctx *ww_ctx)
+{
+}
+
+static inline void ww_mutex_lock_acquired(struct ww_mutex *lock,
+					  struct ww_acquire_ctx *ww_ctx)
+{
+}
+
+static inline int __ww_mutex_check_kill(struct rt_mutex *lock,
+					struct rt_mutex_waiter *waiter,
+					struct ww_acquire_ctx *ww_ctx)
+{
+	return 0;
+}
+
+#else
+# define build_ww_mutex()	(true)
+# define ww_container_of(rtm)	container_of(rtm, struct ww_mutex, base)
+# include "ww_mutex.h"
+#endif
+
 /*
  * lock->owner state tracking:
  *
@@ -308,7 +343,28 @@ static __always_inline int rt_mutex_wait
 
 static __always_inline bool __waiter_less(struct rb_node *a, const struct rb_node *b)
 {
-	return rt_mutex_waiter_less(__node_2_waiter(a), __node_2_waiter(b));
+	struct rt_mutex_waiter *aw = __node_2_waiter(a);
+	struct rt_mutex_waiter *bw = __node_2_waiter(b);
+
+	if (rt_mutex_waiter_less(aw, bw))
+		return 1;
+
+	if (!build_ww_mutex())
+		return 0;
+
+	if (rt_mutex_waiter_less(bw, aw))
+		return 0;
+
+	/* NOTE: relies on waiter->ww_ctx being set before insertion */
+	if (aw->ww_ctx) {
+		if (!bw->ww_ctx)
+			return 1;
+
+		return (signed long)(aw->ww_ctx->stamp -
+				     bw->ww_ctx->stamp) < 0;
+	}
+
+	return 0;
 }
 
 static __always_inline void
@@ -961,6 +1017,7 @@ try_to_take_rt_mutex(struct rt_mutex_bas
 static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
 					   struct rt_mutex_waiter *waiter,
 					   struct task_struct *task,
+					   struct ww_acquire_ctx *ww_ctx,
 					   enum rtmutex_chainwalk chwalk)
 {
 	struct task_struct *owner = rt_mutex_owner(lock);
@@ -996,6 +1053,16 @@ static int __sched task_blocks_on_rt_mut
 
 	raw_spin_unlock(&task->pi_lock);
 
+	if (build_ww_mutex() && ww_ctx) {
+		struct rt_mutex *rtm;
+
+		/* Check whether the waiter should back out immediately */
+		rtm = container_of(lock, struct rt_mutex, rtmutex);
+		res = __ww_mutex_add_waiter(waiter, rtm, ww_ctx);
+		if (res)
+			return res;
+	}
+
 	if (!owner)
 		return 0;
 
@@ -1281,6 +1348,7 @@ static void __sched remove_waiter(struct
 /**
  * rt_mutex_slowlock_block() - Perform the wait-wake-try-to-take loop
  * @lock:		 the rt_mutex to take
+ * @ww_ctx:		 WW mutex context pointer
  * @state:		 the state the task should block in (TASK_INTERRUPTIBLE
  *			 or TASK_UNINTERRUPTIBLE)
  * @timeout:		 the pre-initialized and started timer, or NULL for none
@@ -1289,10 +1357,12 @@ static void __sched remove_waiter(struct
  * Must be called with lock->wait_lock held and interrupts disabled
  */
 static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
+					   struct ww_acquire_ctx *ww_ctx,
 					   unsigned int state,
 					   struct hrtimer_sleeper *timeout,
 					   struct rt_mutex_waiter *waiter)
 {
+	struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
 	int ret = 0;
 
 	for (;;) {
@@ -1309,6 +1379,12 @@ static int __sched rt_mutex_slowlock_blo
 			break;
 		}
 
+		if (build_ww_mutex() && ww_ctx) {
+			ret = __ww_mutex_check_kill(rtm, waiter, ww_ctx);
+			if (ret)
+				break;
+		}
+
 		raw_spin_unlock_irq(&lock->wait_lock);
 
 		schedule();
@@ -1331,6 +1407,9 @@ static void __sched rt_mutex_handle_dead
 	if (res != -EDEADLOCK || detect_deadlock)
 		return;
 
+	if (build_ww_mutex() && w->ww_ctx)
+		return;
+
 	/*
 	 * Yell loudly and stop the task right here.
 	 */
@@ -1344,31 +1423,46 @@ static void __sched rt_mutex_handle_dead
 /**
  * __rt_mutex_slowlock - Locking slowpath invoked with lock::wait_lock held
  * @lock:	The rtmutex to block lock
+ * @ww_ctx:	WW mutex context pointer
  * @state:	The task state for sleeping
  * @chwalk:	Indicator whether full or partial chainwalk is requested
  * @waiter:	Initializer waiter for blocking
  */
 static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
+				       struct ww_acquire_ctx *ww_ctx,
 				       unsigned int state,
 				       enum rtmutex_chainwalk chwalk,
 				       struct rt_mutex_waiter *waiter)
 {
+	struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
+	struct ww_mutex *ww = ww_container_of(rtm);
 	int ret;
 
 	lockdep_assert_held(&lock->wait_lock);
 
 	/* Try to acquire the lock again: */
-	if (try_to_take_rt_mutex(lock, current, NULL))
+	if (try_to_take_rt_mutex(lock, current, NULL)) {
+		if (build_ww_mutex() && ww_ctx) {
+			__ww_mutex_check_waiters(rtm, ww_ctx);
+			ww_mutex_lock_acquired(ww, ww_ctx);
+		}
 		return 0;
+	}
 
 	set_current_state(state);
 
-	ret = task_blocks_on_rt_mutex(lock, waiter, current, chwalk);
-
+	ret = task_blocks_on_rt_mutex(lock, waiter, current, ww_ctx, chwalk);
 	if (likely(!ret))
-		ret = rt_mutex_slowlock_block(lock, state, NULL, waiter);
+		ret = rt_mutex_slowlock_block(lock, ww_ctx, state, NULL, waiter);
 
-	if (unlikely(ret)) {
+	if (likely(!ret)) {
+		/* acquired the lock */
+		if (build_ww_mutex() && ww_ctx) {
+			if (!ww_ctx->is_wait_die)
+				__ww_mutex_check_waiters(rtm, ww_ctx);
+			ww_mutex_lock_acquired(ww, ww_ctx);
+		}
+	} else {
 		__set_current_state(TASK_RUNNING);
 		remove_waiter(lock, waiter);
 		rt_mutex_handle_deadlock(ret, chwalk, waiter);
@@ -1383,14 +1477,17 @@ static int __sched __rt_mutex_slowlock(s
 }
 
 static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock,
+					     struct ww_acquire_ctx *ww_ctx,
 					     unsigned int state)
 {
 	struct rt_mutex_waiter waiter;
 	int ret;
 
 	rt_mutex_init_waiter(&waiter);
+	waiter.ww_ctx = ww_ctx;
 
-	ret = __rt_mutex_slowlock(lock, state, RT_MUTEX_MIN_CHAINWALK, &waiter);
+	ret = __rt_mutex_slowlock(lock, ww_ctx, state, RT_MUTEX_MIN_CHAINWALK,
+				  &waiter);
 
 	debug_rt_mutex_free_waiter(&waiter);
 	return ret;
@@ -1399,9 +1496,11 @@ static inline int __rt_mutex_slowlock_lo
 /*
  * rt_mutex_slowlock - Locking slowpath invoked when fast path fails
  * @lock:	The rtmutex to block lock
+ * @ww_ctx:	WW mutex context pointer
  * @state:	The task state for sleeping
  */
 static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
+				     struct ww_acquire_ctx *ww_ctx,
 				     unsigned int state)
 {
 	unsigned long flags;
@@ -1416,7 +1515,7 @@ static int __sched rt_mutex_slowlock(str
 	 * irqsave/restore variants.
 	 */
 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
-	ret = __rt_mutex_slowlock_locked(lock, state);
+	ret = __rt_mutex_slowlock_locked(lock, ww_ctx, state);
 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
 	return ret;
@@ -1428,7 +1527,7 @@ static __always_inline int __rt_mutex_lo
 	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
 		return 0;
 
-	return rt_mutex_slowlock(lock, state);
+	return rt_mutex_slowlock(lock, NULL, state);
 }
 #endif /* RT_MUTEX_BUILD_MUTEX */
 
@@ -1455,7 +1554,7 @@ static void __sched rtlock_slowlock_lock
 	/* Save current state and set state to TASK_RTLOCK_WAIT */
 	current_save_and_set_rtlock_wait_state();
 
-	task_blocks_on_rt_mutex(lock, &waiter, current, RT_MUTEX_MIN_CHAINWALK);
+	task_blocks_on_rt_mutex(lock, &waiter, current, NULL, RT_MUTEX_MIN_CHAINWALK);
 
 	for (;;) {
 		/* Try to acquire the lock again. */
--- a/kernel/locking/rtmutex_api.c
+++ b/kernel/locking/rtmutex_api.c
@@ -267,7 +267,7 @@ int __sched __rt_mutex_start_proxy_lock(
 		return 1;
 
 	/* We enforce deadlock detection for futexes */
-	ret = task_blocks_on_rt_mutex(lock, waiter, task,
+	ret = task_blocks_on_rt_mutex(lock, waiter, task, NULL,
 				      RT_MUTEX_FULL_CHAINWALK);
 
 	if (ret && !rt_mutex_owner(lock)) {
@@ -343,7 +343,7 @@ int __sched rt_mutex_wait_proxy_lock(str
 	raw_spin_lock_irq(&lock->wait_lock);
 	/* sleep on the mutex */
 	set_current_state(TASK_INTERRUPTIBLE);
-	ret = rt_mutex_slowlock_block(lock, TASK_INTERRUPTIBLE, to, waiter);
+	ret = rt_mutex_slowlock_block(lock, NULL, TASK_INTERRUPTIBLE, to, waiter);
 	/*
 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
 	 * have to fix that up.
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -28,6 +28,7 @@
  * @wake_state:		Wakeup state to use (TASK_NORMAL or TASK_RTLOCK_WAIT)
  * @prio:		Priority of the waiter
  * @deadline:		Deadline of the waiter if applicable
+ * @ww_ctx:		WW context pointer
  */
 struct rt_mutex_waiter {
 	struct rb_node		tree_entry;
@@ -37,6 +38,7 @@ struct rt_mutex_waiter {
 	unsigned int		wake_state;
 	int			prio;
 	u64			deadline;
+	struct ww_acquire_ctx	*ww_ctx;
 };
 
 /**
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -1360,7 +1360,7 @@ static inline void __downgrade_write(str
 	__rt_mutex_lock(rtm, state)
 
 #define rwbase_rtmutex_slowlock_locked(rtm, state)	\
-	__rt_mutex_slowlock_locked(rtm, state)
+	__rt_mutex_slowlock_locked(rtm, NULL, state)
 
 #define rwbase_rtmutex_unlock(rtm)			\
 	__rt_mutex_unlock(rtm)

