From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Subject: [PATCH 12/28] drm/i915: Use a ctor for TYPESAFE_BY_RCU i915_request
Date: Thu,  7 Nov 2019 08:12:36 +0000
Message-ID: <20191107081252.10542-12-chris@chris-wilson.co.uk>
In-Reply-To: <20191107081252.10542-1-chris@chris-wilson.co.uk>

As we start peeking into requests for longer and longer, e.g.
incorporating the use of spinlocks when only protected by an
rcu_read_lock(), we need to be careful in how we reset a request when
recycling it, and must preserve any barriers that may still be in use
as the request is reset for reuse.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
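Note for reviewers (not part of the commit message): the hunks below
follow the usual SLAB_TYPESAFE_BY_RCU constructor pattern. A minimal,
standalone sketch of that pattern is included here for reference; the
names (struct foo, foo_cachep, foo_ctor, foo_alloc) are hypothetical
and only illustrate the split between one-time construction and
per-reuse reset, they are not part of this patch.

	#include <linux/slab.h>
	#include <linux/spinlock.h>
	#include <linux/list.h>

	struct foo {
		spinlock_t lock;	/* may still be taken by an RCU reader */
		struct list_head link;	/* likewise: readers may still walk it */
		int seqno;		/* per-use state, reset on every alloc */
	};

	static struct kmem_cache *foo_cachep;

	static void foo_ctor(void *arg)
	{
		struct foo *f = arg;

		/* Runs once per object, when the backing slab is populated. */
		spin_lock_init(&f->lock);
		INIT_LIST_HEAD(&f->link);
	}

	static int __init foo_cache_init(void)
	{
		foo_cachep = kmem_cache_create("foo", sizeof(struct foo),
					       __alignof__(struct foo),
					       SLAB_TYPESAFE_BY_RCU, foo_ctor);
		return foo_cachep ? 0 : -ENOMEM;
	}

	static struct foo *foo_alloc(gfp_t gfp)
	{
		struct foo *f = kmem_cache_alloc(foo_cachep, gfp);

		/*
		 * Only the per-use fields are cleared; the ctor-initialised
		 * lock and list head are left untouched so that a concurrent
		 * rcu_read_lock() reader still sees a valid object.
		 */
		if (f)
			f->seqno = 0;
		return f;
	}

Under SLAB_TYPESAFE_BY_RCU an object can be freed and reallocated while
an rcu_read_lock() reader still holds a pointer to it, so anything such
a reader may touch has to remain valid across the recycle. That is why
the patch moves spin_lock_init(), i915_sched_node_init(),
i915_sw_fence_init() and the execute_cb list head into
__i915_request_ctor(), and only re-primes the per-use state via the new
*_reinit() helpers in __i915_request_create().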
 drivers/gpu/drm/i915/i915_request.c   | 35 ++++++++++++++++++---------
 drivers/gpu/drm/i915/i915_scheduler.c |  6 +++++
 drivers/gpu/drm/i915/i915_scheduler.h |  1 +
 drivers/gpu/drm/i915/i915_sw_fence.c  |  8 ++++++
 drivers/gpu/drm/i915/i915_sw_fence.h  |  2 ++
 5 files changed, 41 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 00011f9533b6..f7cc13d9d0ae 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -214,7 +214,7 @@ static void remove_from_engine(struct i915_request *rq)
 		spin_lock(&engine->active.lock);
 		locked = engine;
 	}
-	list_del(&rq->sched.link);
+	list_del_init(&rq->sched.link);
 	spin_unlock_irq(&locked->active.lock);
 }
 
@@ -586,6 +586,18 @@ request_alloc_slow(struct intel_timeline *tl, gfp_t gfp)
 	return kmem_cache_alloc(global.slab_requests, gfp);
 }
 
+static void __i915_request_ctor(void *arg)
+{
+	struct i915_request *rq = arg;
+
+	spin_lock_init(&rq->lock);
+	i915_sched_node_init(&rq->sched);
+	i915_sw_fence_init(&rq->submit, submit_notify);
+	i915_sw_fence_init(&rq->semaphore, semaphore_notify);
+
+	INIT_LIST_HEAD(&rq->execute_cb);
+}
+
 struct i915_request *
 __i915_request_create(struct intel_context *ce, gfp_t gfp)
 {
@@ -655,15 +667,14 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 
 	rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
 
-	spin_lock_init(&rq->lock);
 	dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
 		       tl->fence_context, seqno);
 
 	/* We bump the ref for the fence chain */
-	i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
-	i915_sw_fence_init(&i915_request_get(rq)->semaphore, semaphore_notify);
+	i915_sw_fence_reinit(&i915_request_get(rq)->submit);
+	i915_sw_fence_reinit(&i915_request_get(rq)->semaphore);
 
-	i915_sched_node_init(&rq->sched);
+	i915_sched_node_reinit(&rq->sched);
 
 	/* No zalloc, must clear what we need by hand */
 	rq->file_priv = NULL;
@@ -671,8 +682,6 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 	rq->capture_list = NULL;
 	rq->flags = 0;
 
-	INIT_LIST_HEAD(&rq->execute_cb);
-
 	/*
 	 * Reserve space in the ring buffer for all the commands required to
 	 * eventually emit this request. This is to guarantee that the
@@ -1533,10 +1542,14 @@ static struct i915_global_request global = { {
 
 int __init i915_global_request_init(void)
 {
-	global.slab_requests = KMEM_CACHE(i915_request,
-					  SLAB_HWCACHE_ALIGN |
-					  SLAB_RECLAIM_ACCOUNT |
-					  SLAB_TYPESAFE_BY_RCU);
+	global.slab_requests =
+		kmem_cache_create("i915_request",
+				  sizeof(struct i915_request),
+				  __alignof__(struct i915_request),
+				  SLAB_HWCACHE_ALIGN |
+				  SLAB_RECLAIM_ACCOUNT |
+				  SLAB_TYPESAFE_BY_RCU,
+				  __i915_request_ctor);
 	if (!global.slab_requests)
 		return -ENOMEM;
 
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 010d67f48ad9..724e96fe96e9 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -387,6 +387,10 @@ void i915_sched_node_init(struct i915_sched_node *node)
 	INIT_LIST_HEAD(&node->signalers_list);
 	INIT_LIST_HEAD(&node->waiters_list);
 	INIT_LIST_HEAD(&node->link);
+}
+
+void i915_sched_node_reinit(struct i915_sched_node *node)
+{
 	node->attr.priority = I915_PRIORITY_INVALID;
 	node->semaphores = 0;
 	node->flags = 0;
@@ -481,6 +485,7 @@ void i915_sched_node_fini(struct i915_sched_node *node)
 		if (dep->flags & I915_DEPENDENCY_ALLOC)
 			i915_dependency_free(dep);
 	}
+	INIT_LIST_HEAD(&node->signalers_list);
 
 	/* Remove ourselves from everyone who depends upon us */
 	list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
@@ -491,6 +496,7 @@ void i915_sched_node_fini(struct i915_sched_node *node)
 		if (dep->flags & I915_DEPENDENCY_ALLOC)
 			i915_dependency_free(dep);
 	}
+	INIT_LIST_HEAD(&node->waiters_list);
 
 	spin_unlock_irq(&schedule_lock);
 }
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 07d243acf553..d1dc4efef77b 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -26,6 +26,7 @@
 					 sched.link)
 
 void i915_sched_node_init(struct i915_sched_node *node);
+void i915_sched_node_reinit(struct i915_sched_node *node);
 
 bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
 				      struct i915_sched_node *signal,
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 6a88db291252..eacc6c5ce0fd 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -227,6 +227,14 @@ void __i915_sw_fence_init(struct i915_sw_fence *fence,
 	fence->flags = (unsigned long)fn;
 }
 
+void i915_sw_fence_reinit(struct i915_sw_fence *fence)
+{
+	debug_fence_init(fence);
+
+	atomic_set(&fence->pending, 1);
+	fence->error = 0;
+}
+
 void i915_sw_fence_commit(struct i915_sw_fence *fence)
 {
 	debug_fence_activate(fence);
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index ab7d58bd0b9d..1e90d9a51bd2 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -54,6 +54,8 @@ do {								\
 	__i915_sw_fence_init((fence), (fn), NULL, NULL)
 #endif
 
+void i915_sw_fence_reinit(struct i915_sw_fence *fence);
+
 #ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS
 void i915_sw_fence_fini(struct i915_sw_fence *fence);
 #else
-- 
2.24.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
