From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Subject: [PATCH 21/38] drm/i915: Avoid requiring struct_mutex during suspend
Date: Fri,  3 Jun 2016 17:55:36 +0100
Message-ID: <1464972953-2726-22-git-send-email-chris@chris-wilson.co.uk>
In-Reply-To: <1464972953-2726-1-git-send-email-chris@chris-wilson.co.uk>

The struct_mutex can have some tricky interactions with other mutexes
(mainly due to using nasty constructs like stop_machine() from within
its confines). This makes it "illegal" (lockdep should generate WARNs)
to acquire it from certain paths, such as suspend, where the locking
order may be inverted. Instead, we can extend the RCU request management
to track activity on an engine and thereby wait upon all GPU activity
without taking the struct_mutex.
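
In outline, the new scheme looks roughly like this (a condensed,
illustrative sketch of what the diff below implements, built on the
i915_gem_active helpers introduced earlier in the series; the sketch_*
wrapper names are invented here for illustration only):

    /* Illustrative sketch only -- not the exact driver code. */

    /* On submission (still under struct_mutex): remember the engine's
     * most recent request in an RCU-guarded slot.
     */
    static void sketch_add_request(struct intel_engine_cs *engine,
                                   struct drm_i915_gem_request *rq)
    {
            i915_gem_active_set(&engine->last_request, rq);
            list_add_tail(&rq->link, &engine->request_list);
    }

    /* Lockless query: the engine is busy until its last request retires. */
    static bool sketch_engine_is_active(struct intel_engine_cs *engine)
    {
            return __i915_gem_active_is_busy(&engine->last_request);
    }

    /* Suspend path: wait upon the last request without struct_mutex. */
    static int sketch_wait_for_idle(struct intel_engine_cs *engine)
    {
            return i915_gem_active_wait_unlocked(&engine->last_request,
                                                 engine->i915->mm.interruptible,
                                                 NULL, NULL);
    }

With that in place, i915_gem_suspend() only needs struct_mutex for the
short tail end (marking the contexts lost and dropping the GT wakeref),
not for the wait itself.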

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c         | 56 +++++++++++++--------------------
 drivers/gpu/drm/i915/i915_gem_evict.c   |  2 +-
 drivers/gpu/drm/i915/i915_gem_request.c |  8 +++--
 drivers/gpu/drm/i915/i915_gem_request.h | 11 +++++++
 drivers/gpu/drm/i915/i915_gpu_error.c   |  2 +-
 drivers/gpu/drm/i915/i915_irq.c         |  3 +-
 drivers/gpu/drm/i915/intel_lrc.c        |  2 +-
 drivers/gpu/drm/i915/intel_pm.c         |  2 +-
 drivers/gpu/drm/i915/intel_ringbuffer.c | 30 +++++++++---------
 drivers/gpu/drm/i915/intel_ringbuffer.h | 26 ++++++++-------
 10 files changed, 73 insertions(+), 69 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 76e5a241c7be..c1e91589e7bc 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2179,13 +2179,18 @@ static void i915_gem_reset_engine_status(struct intel_engine_cs *engine)
 
 static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
 {
+	struct drm_i915_gem_request *request;
 	struct intel_ring *ring;
 
+	request = i915_gem_active_peek(&engine->last_request,
+				       &engine->i915->dev->struct_mutex);
+
 	/* Mark all pending requests as complete so that any concurrent
 	 * (lockless) lookup doesn't try and wait upon the request as we
 	 * reset it.
 	 */
-	intel_engine_init_seqno(engine, engine->last_submitted_seqno);
+	if (request)
+		intel_engine_init_seqno(engine, request->fence.seqno);
 
 	/*
 	 * Clear the execlists queue up before freeing the requests, as those
@@ -2207,15 +2212,9 @@ static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
 	 * implicit references on things like e.g. ppgtt address spaces through
 	 * the request.
 	 */
-	if (!list_empty(&engine->request_list)) {
-		struct drm_i915_gem_request *request;
-
-		request = list_last_entry(&engine->request_list,
-					  struct drm_i915_gem_request,
-					  link);
-
+	if (request)
 		i915_gem_request_retire_upto(request);
-	}
+	GEM_BUG_ON(intel_engine_is_active(engine));
 
 	/* Having flushed all requests from all queues, we know that all
 	 * ringbuffers must now be empty. However, since we do not reclaim
@@ -2614,8 +2613,6 @@ int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv)
 	struct intel_engine_cs *engine;
 	int ret;
 
-	lockdep_assert_held(&dev_priv->dev->struct_mutex);
-
 	for_each_engine(engine, dev_priv) {
 		if (engine->last_context == NULL)
 			continue;
@@ -3787,47 +3784,36 @@ struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
 	return NULL;
 }
 
-static void
-i915_gem_stop_engines(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *engine;
-
-	for_each_engine(engine, dev_priv)
-		dev_priv->gt.stop_engine(engine);
-}
-
 int
 i915_gem_suspend(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret = 0;
+	int ret;
 
-	mutex_lock(&dev->struct_mutex);
 	ret = i915_gem_wait_for_idle(dev_priv);
 	if (ret)
-		goto err;
-
-	i915_gem_retire_requests(dev_priv);
-
-	i915_gem_stop_engines(dev);
-	i915_gem_context_lost(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
+		return ret;
 
 	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
 	cancel_delayed_work_sync(&dev_priv->gt.retire_work);
 	flush_delayed_work(&dev_priv->gt.idle_work);
 
+	mutex_lock(&dev_priv->dev->struct_mutex);
+
 	/* Assert that we sucessfully flushed all the work and
 	 * reset the GPU back to its idle, low power state.
 	 */
-	WARN_ON(dev_priv->gt.awake);
+	if (dev_priv->gt.awake) {
+		if (INTEL_INFO(dev_priv)->gen >= 6)
+			gen6_rps_idle(dev_priv);
+		intel_runtime_pm_put(dev_priv);
+		dev_priv->gt.awake = false;
+	}
 
-	return 0;
+	i915_gem_context_lost(dev_priv);
+	mutex_unlock(&dev_priv->dev->struct_mutex);
 
-err:
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
+	return 0;
 }
 
 void i915_gem_init_swizzling(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 680365f4c4cd..3ead9359dfa2 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -69,7 +69,7 @@ gpu_is_idle(struct drm_i915_private *dev_priv)
 	struct intel_engine_cs *engine;
 
 	for_each_engine(engine, dev_priv) {
-		if (!list_empty(&engine->request_list))
+		if (intel_engine_is_active(engine))
 			return false;
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index a0cdd3f10566..016edc6f2d0b 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -445,6 +445,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 
 	trace_i915_gem_request_add(request);
 
+	trace_i915_gem_request_add(request);
 	request->head = request_start;
 
 	/* Whilst this request exists, batch_obj will be on the
@@ -462,7 +463,8 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	 */
 	request->emitted_jiffies = jiffies;
 	request->previous_seqno = engine->last_submitted_seqno;
-	smp_store_mb(engine->last_submitted_seqno, request->fence.seqno);
+	engine->last_submitted_seqno = request->fence.seqno;
+	i915_gem_active_set(&engine->last_request, request);
 	list_add_tail(&request->link, &engine->request_list);
 
 	/* Record the position of the start of the request so that
@@ -690,7 +692,7 @@ complete:
 	}
 
 	if (!IS_ERR_OR_NULL(rps) &&
-	    req->fence.seqno == req->engine->last_submitted_seqno) {
+	    req == __i915_gem_active_peek(&req->engine->last_request)) {
 		/* The GPU is now idle and this client has stalled.
 		 * Since no other client has submitted a request in the
 		 * meantime, assume that this client is the only one
@@ -757,7 +759,7 @@ void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
 
 	for_each_engine(engine, dev_priv) {
 		i915_gem_retire_requests_ring(engine);
-		if (list_empty(&engine->request_list))
+		if (!intel_engine_is_active(engine))
 			dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 006f212b7fd6..8d1225999fae 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -27,6 +27,17 @@
 
 #include <linux/fence.h>
 
+struct intel_wait {
+	struct rb_node node;
+	struct task_struct *task;
+	u32 seqno;
+};
+
+struct intel_signal_node {
+	struct rb_node node;
+	struct intel_wait wait;
+};
+
 /**
  * Request queue structure.
  *
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index cfae2fe1e14f..c2cf5bd57db5 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1003,7 +1003,7 @@ static void i915_record_ring_state(struct drm_i915_private *dev_priv,
 	ering->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
 	ering->acthd = intel_engine_get_active_head(engine);
 	ering->seqno = intel_engine_get_seqno(engine);
-	ering->last_seqno = engine->last_submitted_seqno;
+	ering->last_seqno = __active_get_seqno(&engine->last_request);
 	ering->start = I915_READ_START(engine);
 	ering->head = I915_READ_HEAD(engine);
 	ering->tail = I915_READ_TAIL(engine);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 1ffc997b19af..3987b7984fd8 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2805,8 +2805,7 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
 static bool
 ring_idle(struct intel_engine_cs *engine, u32 seqno)
 {
-	return i915_seqno_passed(seqno,
-				 READ_ONCE(engine->last_submitted_seqno));
+	return !intel_engine_is_active(engine);
 }
 
 static bool
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 6cdc421fdc37..4bf63af2a282 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1787,7 +1787,6 @@ logical_ring_setup(struct drm_device *dev, enum intel_engine_id id)
 
 	engine->fw_domains = fw_domains;
 
-	INIT_LIST_HEAD(&engine->request_list);
 	INIT_LIST_HEAD(&engine->buffers);
 	INIT_LIST_HEAD(&engine->execlist_queue);
 	spin_lock_init(&engine->execlist_lock);
@@ -1799,6 +1798,7 @@ logical_ring_setup(struct drm_device *dev, enum intel_engine_id id)
 	logical_ring_default_vfuncs(engine);
 	logical_ring_default_irqs(engine, info->irq_shift);
 
+	intel_engine_init_requests(engine);
 	intel_engine_init_hangcheck(engine);
 	i915_gem_batch_pool_init(engine, &engine->batch_pool);
 
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index c141d3e15eed..45bf830a9b10 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -6335,7 +6335,7 @@ bool i915_gpu_busy(void)
 	dev_priv = i915_mch_dev;
 
 	for_each_engine(engine, dev_priv)
-		ret |= !list_empty(&engine->request_list);
+		ret |= intel_engine_is_active(engine);
 
 out_unlock:
 	spin_unlock_irq(&mchdev_lock);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index f86039455c5a..f172ac6a06dc 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2058,13 +2058,13 @@ static int intel_init_engine(struct drm_device *dev,
 
 	engine->i915 = dev_priv;
 	engine->fence_context = fence_context_alloc(1);
-	INIT_LIST_HEAD(&engine->request_list);
 	INIT_LIST_HEAD(&engine->execlist_queue);
 	INIT_LIST_HEAD(&engine->buffers);
 	i915_gem_batch_pool_init(engine, &engine->batch_pool);
 	memset(engine->semaphore.sync_seqno, 0,
 	       sizeof(engine->semaphore.sync_seqno));
 
+	intel_engine_init_requests(engine);
 	intel_engine_init_breadcrumbs(engine);
 
 	/* We may need to do things with the shrinker which
@@ -2152,22 +2152,24 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
 	engine->i915 = NULL;
 }
 
-int intel_engine_idle(struct intel_engine_cs *engine)
+static void
+intel_engine_retire(struct i915_gem_active *active,
+		    struct drm_i915_gem_request *rq)
 {
-	struct drm_i915_gem_request *req;
-
-	/* Wait upon the last request to be completed */
-	if (list_empty(&engine->request_list))
-		return 0;
+}
 
-	req = list_entry(engine->request_list.prev,
-			 struct drm_i915_gem_request,
-			 link);
+void intel_engine_init_requests(struct intel_engine_cs *engine)
+{
+	init_request_active(&engine->last_request, intel_engine_retire);
+	INIT_LIST_HEAD(&engine->request_list);
+}
 
-	/* Make sure we do not trigger any retires */
-	return __i915_wait_request(req,
-				   req->i915->mm.interruptible,
-				   NULL, NULL);
+int intel_engine_idle(struct intel_engine_cs *engine)
+{
+	/* Wait upon the last request to be completed */
+	return i915_gem_active_wait_unlocked(&engine->last_request,
+					     engine->i915->mm.interruptible,
+					     NULL, NULL);
 }
 
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 0976e155edc0..d19fb8c24919 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -3,6 +3,7 @@
 
 #include <linux/hashtable.h>
 #include "i915_gem_batch_pool.h"
+#include "i915_gem_request.h"
 
 #define I915_CMD_HASH_ORDER 9
 
@@ -310,6 +311,14 @@ struct intel_engine_cs {
 	 * inspecting request list.
 	 */
 	u32 last_submitted_seqno;
+
+	/* An RCU guarded pointer to the last request. No reference is
+	 * held to the request, users must carefully acquire a reference to
+	 * the request using i915_gem_active_get_request_rcu(), or hold the
+	 * struct_mutex.
+	 */
+	struct i915_gem_active last_request;
+
 	unsigned user_interrupts;
 
 	struct i915_gem_context *last_context;
@@ -455,6 +464,7 @@ void intel_ring_update_space(struct intel_ring *ring);
 
 int __must_check intel_engine_idle(struct intel_engine_cs *engine);
 void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno);
+void intel_engine_init_requests(struct intel_engine_cs *engine);
 
 int intel_init_pipe_control(struct intel_engine_cs *engine, int size);
 void intel_fini_pipe_control(struct intel_engine_cs *engine);
@@ -493,17 +503,6 @@ static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
 }
 
 /* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
-struct intel_wait {
-	struct rb_node node;
-	struct task_struct *task;
-	u32 seqno;
-};
-
-struct intel_signal_node {
-	struct rb_node node;
-	struct intel_wait wait;
-};
-
 void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
 static inline void intel_wait_init(struct intel_wait *wait, u32 seqno)
 {
@@ -540,4 +539,9 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
 unsigned intel_kick_waiters(struct drm_i915_private *i915);
 unsigned intel_kick_signalers(struct drm_i915_private *i915);
 
+static inline bool intel_engine_is_active(struct intel_engine_cs *engine)
+{
+	return __i915_gem_active_is_busy(&engine->last_request);
+}
+
 #endif /* _INTEL_RINGBUFFER_H_ */
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Thread overview: 58+ messages
2016-06-03 16:55 Tracking VMA Chris Wilson
2016-06-03 16:55 ` [PATCH 01/38] drm/i915: Combine loops within i915_gem_evict_something Chris Wilson
2016-06-03 16:55 ` [PATCH 02/38] drm/i915: Remove surplus drm_device parameter to i915_gem_evict_something() Chris Wilson
2016-06-03 16:55 ` [PATCH 03/38] drm/i915: Double check the active status on the batch pool Chris Wilson
2016-06-03 16:55 ` [PATCH 04/38] drm/i915: Remove request retirement before each batch Chris Wilson
2016-06-06 13:40   ` Mika Kuoppala
2016-06-03 16:55 ` [PATCH 05/38] drm/i915: Remove i915_gem_execbuffer_retire_commands() Chris Wilson
2016-06-06 14:26   ` Mika Kuoppala
2016-06-03 16:55 ` [PATCH 06/38] drm/i915: Pad GTT views of exec objects up to user specified size Chris Wilson
2016-06-08  9:41   ` Daniel Vetter
2016-06-08 10:08     ` Chris Wilson
2016-06-03 16:55 ` [PATCH 07/38] drm/i915: Split insertion/binding of an object into the VM Chris Wilson
2016-06-03 16:55 ` [PATCH 08/38] drm/i915: Record allocated vma size Chris Wilson
2016-06-03 16:55 ` [PATCH 09/38] drm/i915: Start passing around i915_vma from execbuffer Chris Wilson
2016-06-03 16:55 ` [PATCH 10/38] drm/i915: Remove highly confusing i915_gem_obj_ggtt_pin() Chris Wilson
2016-06-08  9:43   ` Daniel Vetter
2016-06-08 12:58     ` Chris Wilson
2016-06-03 16:55 ` [PATCH 11/38] drm/i915: Make fb_tracking.lock a spinlock Chris Wilson
2016-06-03 16:55 ` [PATCH 12/38] drm/i915: Use atomics to manipulate obj->frontbuffer_bits Chris Wilson
2016-06-03 16:55 ` [PATCH 13/38] drm/i915: Move obj->active:5 to obj->flags Chris Wilson
2016-06-08  9:53   ` Daniel Vetter
2016-06-03 16:55 ` [PATCH 14/38] drm/i915: Move i915_gem_object_wait_rendering() Chris Wilson
2016-06-03 16:55 ` [PATCH 15/38] drm/i915: Mark all current requests as complete before resetting them Chris Wilson
2016-06-03 16:55 ` [PATCH 16/38] drm/i915: Enable lockless lookup of request tracking via RCU Chris Wilson
2016-06-03 16:55 ` [PATCH 17/38] drm/i915: Introduce i915_gem_active_wait_unlocked() Chris Wilson
2016-06-03 16:55 ` [PATCH 18/38] drm/i915: Convert non-blocking waits for requests over to using RCU Chris Wilson
2016-06-03 16:55 ` [PATCH 19/38] drm/i915: Convert non-blocking userptr " Chris Wilson
2016-06-03 16:55 ` [PATCH 20/38] drm/i915/userptr: Remove superfluous interruptible=false on waiting Chris Wilson
2016-06-03 16:55 ` Chris Wilson [this message]
2016-06-03 16:55 ` [PATCH 22/38] drm/gem/shrinker: Wait before acquiring struct_mutex under oom Chris Wilson
2016-06-08  9:57   ` Daniel Vetter
2016-06-08 10:04     ` Chris Wilson
2016-06-03 16:55 ` [PATCH 23/38] suspend Chris Wilson
2016-06-03 16:55 ` [PATCH 24/38] drm/i915: Do a nonblocking wait first in pread/pwrite Chris Wilson
2016-06-03 16:55 ` [PATCH 25/38] drm/i915: Remove (struct_mutex) locking for wait-ioctl Chris Wilson
2016-06-03 16:55 ` [PATCH 26/38] drm/i915: Remove (struct_mutex) locking for busy-ioctl Chris Wilson
2016-06-03 16:55 ` [PATCH 27/38] drm/i915: Reduce locking inside swfinish ioctl Chris Wilson
2016-06-08  9:59   ` Daniel Vetter
2016-06-08 10:03     ` Chris Wilson
2016-06-03 16:55 ` [PATCH 28/38] drm/i915: Remove pinned check from madvise ioctl Chris Wilson
2016-06-08 10:01   ` Daniel Vetter
2016-06-03 16:55 ` [PATCH 29/38] drm/i915: Remove locking for get_tiling Chris Wilson
2016-06-08 10:02   ` Daniel Vetter
2016-06-08 10:11     ` Chris Wilson
2016-06-13 14:19       ` Daniel Vetter
2016-06-03 16:55 ` [PATCH 30/38] drm/i915: Assert that the request hasn't been retired Chris Wilson
2016-06-03 16:55 ` [PATCH 31/38] drm/i915: Reduce amount of duplicate buffer information captured on error Chris Wilson
2016-06-03 16:55 ` [PATCH 32/38] drm/i915: Stop the machine whilst capturing the GPU crash dump Chris Wilson
2016-06-08 10:06   ` Daniel Vetter
2016-06-08 11:37     ` Chris Wilson
2016-06-03 16:55 ` [PATCH 33/38] drm/i915: Scan GGTT active list for context object Chris Wilson
2016-06-03 16:55 ` [PATCH 34/38] drm/i915: Move setting of request->batch into its single callsite Chris Wilson
2016-06-03 16:55 ` [PATCH 35/38] drm/i915: Mark unmappable GGTT entries as PIN_HIGH Chris Wilson
2016-06-03 16:55 ` [PATCH 36/38] drm/i915: Track pinned vma inside guc Chris Wilson
2016-06-03 16:55 ` [PATCH 37/38] drm/i915: Track pinned VMA Chris Wilson
2016-06-08 10:08   ` Daniel Vetter
2016-06-03 16:55 ` [PATCH 38/38] drm/i915/overlay: Use VMA as the primary tracker for images Chris Wilson
2016-06-06 10:42 ` ✗ Ro.CI.BAT: failure for series starting with [01/38] drm/i915: Combine loops within i915_gem_evict_something Patchwork
