From: Daniel Vetter <daniel.vetter@ffwll.ch>
To: DRI Development <dri-devel@lists.freedesktop.org>
Cc: "Intel Graphics Development" <intel-gfx@lists.freedesktop.org>,
	"Daniel Vetter" <daniel.vetter@ffwll.ch>,
	"Daniel Vetter" <daniel.vetter@intel.com>,
	"Rob Clark" <robdclark@gmail.com>, "Sean Paul" <sean@poorly.run>,
	"Sumit Semwal" <sumit.semwal@linaro.org>,
	"Christian König" <christian.koenig@amd.com>,
	linux-arm-msm@vger.kernel.org, freedreno@lists.freedesktop.org,
	linux-media@vger.kernel.org, linaro-mm-sig@lists.linaro.org
Subject: [PATCH v5 12/20] drm/msm: Use scheduler dependency handling
Date: Thu,  5 Aug 2021 12:46:57 +0200	[thread overview]
Message-ID: <20210805104705.862416-13-daniel.vetter@ffwll.ch> (raw)
In-Reply-To: <20210805104705.862416-1-daniel.vetter@ffwll.ch>

drm_sched_job_init() is already called at the right place, so this boils
down to deleting code.
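
For context, a minimal sketch of the pattern this converts to. The helper
below is hypothetical and only illustrates the two scheduler calls; the
real call sites are submit_fence_sync() and msm_ioctl_gem_submit() in the
diff:

	/*
	 * Illustrative only: with scheduler-managed dependencies the driver
	 * hands fences straight to the drm_sched_job instead of stashing
	 * them in a driver-private xarray and returning them one by one
	 * from a ->dependency callback.
	 */
	static int example_add_deps(struct msm_gem_submit *submit,
				    struct dma_fence *in_fence,
				    struct drm_gem_object *obj, bool write)
	{
		int ret;

		/* explicit dependency, e.g. the submit ioctl's in-fence */
		ret = drm_sched_job_add_dependency(&submit->base, in_fence);
		if (ret)
			return ret;

		/* implicit (dma_resv) dependencies of a GEM buffer */
		return drm_sched_job_add_implicit_dependencies(&submit->base,
							       obj, write);
	}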

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: Rob Clark <robdclark@gmail.com>
Cc: Sean Paul <sean@poorly.run>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: "Christian König" <christian.koenig@amd.com>
Cc: linux-arm-msm@vger.kernel.org
Cc: freedreno@lists.freedesktop.org
Cc: linux-media@vger.kernel.org
Cc: linaro-mm-sig@lists.linaro.org
---
 drivers/gpu/drm/msm/msm_gem.h        |  5 -----
 drivers/gpu/drm/msm/msm_gem_submit.c | 19 +++++--------------
 drivers/gpu/drm/msm/msm_ringbuffer.c | 12 ------------
 3 files changed, 5 insertions(+), 31 deletions(-)

diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index f9e3ffb2309a..8bf0ac707fd7 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -312,11 +312,6 @@ struct msm_gem_submit {
 	struct ww_acquire_ctx ticket;
 	uint32_t seqno;		/* Sequence number of the submit on the ring */
 
-	/* Array of struct dma_fence * to block on before submitting this job.
-	 */
-	struct xarray deps;
-	unsigned long last_dep;
-
 	/* Hw fence, which is created when the scheduler executes the job, and
 	 * is signaled when the hw finishes (via seqno write from cmdstream)
 	 */
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 96cea0ba4cfd..fb5a2eab27a2 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -52,8 +52,6 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
 		return ERR_PTR(ret);
 	}
 
-	xa_init_flags(&submit->deps, XA_FLAGS_ALLOC);
-
 	kref_init(&submit->ref);
 	submit->dev = dev;
 	submit->aspace = queue->ctx->aspace;
@@ -72,8 +70,6 @@ void __msm_gem_submit_destroy(struct kref *kref)
 {
 	struct msm_gem_submit *submit =
 			container_of(kref, struct msm_gem_submit, ref);
-	unsigned long index;
-	struct dma_fence *fence;
 	unsigned i;
 
 	if (submit->fence_id) {
@@ -82,12 +78,6 @@ void __msm_gem_submit_destroy(struct kref *kref)
 		mutex_unlock(&submit->queue->lock);
 	}
 
-	xa_for_each (&submit->deps, index, fence) {
-		dma_fence_put(fence);
-	}
-
-	xa_destroy(&submit->deps);
-
 	dma_fence_put(submit->user_fence);
 	dma_fence_put(submit->hw_fence);
 
@@ -343,8 +333,9 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
 		if (no_implicit)
 			continue;
 
-		ret = drm_gem_fence_array_add_implicit(&submit->deps, obj,
-			write);
+		ret = drm_sched_job_add_implicit_dependencies(&submit->base,
+							      obj,
+							      write);
 		if (ret)
 			break;
 	}
@@ -588,7 +579,7 @@ static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit,
 		if (ret)
 			break;
 
-		ret = drm_gem_fence_array_add(&submit->deps, fence);
+		ret = drm_sched_job_add_dependency(&submit->base, fence);
 		if (ret)
 			break;
 
@@ -798,7 +789,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 			goto out_unlock;
 		}
 
-		ret = drm_gem_fence_array_add(&submit->deps, in_fence);
+		ret = drm_sched_job_add_dependency(&submit->base, in_fence);
 		if (ret)
 			goto out_unlock;
 	}
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index bd54c1412649..652b1dedd7c1 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -11,17 +11,6 @@ static uint num_hw_submissions = 8;
 MODULE_PARM_DESC(num_hw_submissions, "The max # of jobs to write into ringbuffer (default 8)");
 module_param(num_hw_submissions, uint, 0600);
 
-static struct dma_fence *msm_job_dependency(struct drm_sched_job *job,
-		struct drm_sched_entity *s_entity)
-{
-	struct msm_gem_submit *submit = to_msm_submit(job);
-
-	if (!xa_empty(&submit->deps))
-		return xa_erase(&submit->deps, submit->last_dep++);
-
-	return NULL;
-}
-
 static struct dma_fence *msm_job_run(struct drm_sched_job *job)
 {
 	struct msm_gem_submit *submit = to_msm_submit(job);
@@ -52,7 +41,6 @@ static void msm_job_free(struct drm_sched_job *job)
 }
 
 const struct drm_sched_backend_ops msm_sched_ops = {
-	.dependency = msm_job_dependency,
 	.run_job = msm_job_run,
 	.free_job = msm_job_free
 };
-- 
2.32.0

