From: Matthew Auld <matthew.auld@intel.com>
To: intel-gfx@lists.freedesktop.org
Cc: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
Subject: [Intel-gfx] [RFC 52/60] drm/i915/guc: put all guc objects in lmem when available
Date: Fri, 10 Jul 2020 12:57:49 +0100
Message-ID: <20200710115757.290984-53-matthew.auld@intel.com>
In-Reply-To: <20200710115757.290984-1-matthew.auld@intel.com>

From: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>

The firmware binary has to be loaded from lmem, and the recommendation is
to put all other GuC objects there as well. Note that we don't fall back
to system memory if an lmem allocation fails: all of these objects are
allocated during driver load, and if lmem is already failing at that point
something is seriously wrong with the system, so there is no point in
trying to handle it.

Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Vinay Belgaumkar <vinay.belgaumkar@intel.com>
Cc: Radoslaw Szwichtenberg <radoslaw.szwichtenberg@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com> #v1
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
---
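Note (not part of the patch): a minimal illustrative sketch of the
allocation-path selection described above, using the helpers that appear
in the diff below. The uc_object_create() wrapper is hypothetical and for
illustration only; the point is that an lmem failure is propagated to the
caller rather than falling back to system memory.

	/*
	 * Illustration only: choose the backing store for a uC object.
	 * On lmem platforms an allocation error is simply propagated;
	 * there is intentionally no fallback to shmem, since these
	 * allocations happen during driver load.
	 */
	static struct drm_i915_gem_object *
	uc_object_create(struct drm_i915_private *i915, u32 size)
	{
		if (HAS_LMEM(i915))
			return i915_gem_object_create_lmem(i915, size,
							   I915_BO_ALLOC_CONTIGUOUS);

		return i915_gem_object_create_shmem(i915, size);
	}
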
 drivers/gpu/drm/i915/gem/i915_gem_lmem.c  | 41 +++++++++++++++++++++++
 drivers/gpu/drm/i915/gem/i915_gem_lmem.h  |  8 +++++
 drivers/gpu/drm/i915/gt/uc/intel_guc.c    |  9 ++++-
 drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c | 11 ++++--
 drivers/gpu/drm/i915/gt/uc/intel_huc.c    | 14 ++++++--
 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c  | 35 ++++++++++++++++---
 6 files changed, 107 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index c719f6b5e785..423662cec5da 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -215,6 +215,21 @@ i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
 	return io_mapping_map_atomic_wc(&obj->mm.region->iomap, offset);
 }
 
+void __iomem *
+i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
+			    unsigned long n,
+			    unsigned long size)
+{
+	resource_size_t offset;
+
+	GEM_BUG_ON(!i915_gem_object_is_contiguous(obj));
+
+	offset = i915_gem_object_get_dma_address(obj, n);
+	offset -= obj->mm.region->region.start;
+
+	return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
+}
+
 bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
 {
 	struct intel_memory_region *region = obj->mm.region;
@@ -229,6 +244,32 @@ bool i915_gem_object_is_devmem(struct drm_i915_gem_object *obj)
 	return region && region->is_devmem;
 }
 
+struct drm_i915_gem_object *
+i915_gem_object_create_lmem_from_data(struct drm_i915_private *i915,
+				      const void *data, size_t size)
+{
+	struct drm_i915_gem_object *obj;
+	void *map;
+
+	obj = i915_gem_object_create_lmem(i915,
+					  round_up(size, PAGE_SIZE),
+					  I915_BO_ALLOC_CONTIGUOUS);
+	if (IS_ERR(obj))
+		return obj;
+
+	map = i915_gem_object_pin_map(obj, I915_MAP_WC);
+	if (IS_ERR(map)) {
+		i915_gem_object_put(obj);
+		return map;
+	}
+
+	memcpy(map, data, size);
+
+	i915_gem_object_unpin_map(obj);
+
+	return obj;
+}
+
 struct drm_i915_gem_object *
 i915_gem_object_create_lmem(struct drm_i915_private *i915,
 			    resource_size_t size,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
index a1b6a10050bf..e11e0545e39c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -14,6 +14,10 @@ struct intel_memory_region;
 
 extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops;
 
+void __iomem *
+i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
+			    unsigned long n,
+			    unsigned long size);
 void __iomem *i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
 					       unsigned long n);
 void __iomem *
@@ -23,6 +27,10 @@ i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
 bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
 bool i915_gem_object_is_devmem(struct drm_i915_gem_object *obj);
 
+struct drm_i915_gem_object *
+i915_gem_object_create_lmem_from_data(struct drm_i915_private *i915,
+				      const void *data, size_t size);
+
 struct drm_i915_gem_object *
 i915_gem_object_create_lmem(struct drm_i915_private *i915,
 			    resource_size_t size,
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 861657897c0f..9a43cfbfc26d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -3,6 +3,7 @@
  * Copyright © 2014-2019 Intel Corporation
  */
 
+#include "gem/i915_gem_lmem.h"
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_irq.h"
 #include "gt/intel_gt_pm_irq.h"
@@ -668,7 +669,13 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
 	u64 flags;
 	int ret;
 
-	obj = i915_gem_object_create_shmem(gt->i915, size);
+	if (HAS_LMEM(gt->i915))
+		obj = i915_gem_object_create_lmem(gt->i915, size,
+						  I915_BO_ALLOC_CPU_CLEAR |
+						  I915_BO_ALLOC_CONTIGUOUS);
+	else
+		obj = i915_gem_object_create_shmem(gt->i915, size);
+
 	if (IS_ERR(obj))
 		return ERR_CAST(obj);
 
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index d4a87f4c9421..93a1bdb3d81b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -41,7 +41,7 @@ static void guc_prepare_xfer(struct intel_uncore *uncore)
 }
 
 /* Copy RSA signature from the fw image to HW for verification */
-static void guc_xfer_rsa(struct intel_uc_fw *guc_fw,
+static int guc_xfer_rsa(struct intel_uc_fw *guc_fw,
 			 struct intel_uncore *uncore)
 {
 	u32 rsa[UOS_RSA_SCRATCH_COUNT];
@@ -49,10 +49,13 @@ static void guc_xfer_rsa(struct intel_uc_fw *guc_fw,
 	int i;
 
 	copied = intel_uc_fw_copy_rsa(guc_fw, rsa, sizeof(rsa));
-	GEM_BUG_ON(copied < sizeof(rsa));
+	if (copied < sizeof(rsa))
+		return -ENOMEM;
 
 	for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
 		intel_uncore_write(uncore, UOS_RSA_SCRATCH(i), rsa[i]);
+
+	return 0;
 }
 
 /*
@@ -129,7 +132,9 @@ int intel_guc_fw_upload(struct intel_guc *guc)
 	 * by the DMA engine in one operation, whereas the RSA signature is
 	 * loaded via MMIO.
 	 */
-	guc_xfer_rsa(&guc->fw, uncore);
+	ret = guc_xfer_rsa(&guc->fw, uncore);
+	if (ret)
+		goto out;
 
 	/*
 	 * Current uCode expects the code to be loaded at 8k; locations below
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index 65eeb44b397d..359a0f947672 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -85,17 +85,25 @@ static int intel_huc_rsa_data_create(struct intel_huc *huc)
 	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
 	if (IS_ERR(vaddr)) {
 		i915_vma_unpin_and_release(&vma, 0);
-		return PTR_ERR(vaddr);
+		err = PTR_ERR(vaddr);
+		goto unpin_out;
 	}
 
 	copied = intel_uc_fw_copy_rsa(&huc->fw, vaddr, vma->size);
-	GEM_BUG_ON(copied < huc->fw.rsa_size);
-
 	i915_gem_object_unpin_map(vma->obj);
 
+	if (copied < huc->fw.rsa_size) {
+		err = -ENOMEM;
+		goto unpin_out;
+	}
+
 	huc->rsa_data = vma;
 
 	return 0;
+
+unpin_out:
+	i915_vma_unpin_and_release(&vma, 0);
+	return err;
 }
 
 static void intel_huc_rsa_data_destroy(struct intel_huc *huc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 59b27aba15c6..2a49498fbdcb 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -7,6 +7,7 @@
 #include <linux/firmware.h>
 #include <drm/drm_print.h>
 
+#include "gem/i915_gem_lmem.h"
 #include "intel_uc_fw.h"
 #include "intel_uc_fw_abi.h"
 #include "i915_drv.h"
@@ -371,7 +372,11 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
 		}
 	}
 
-	obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
+	if (HAS_LMEM(i915))
+		obj = i915_gem_object_create_lmem_from_data(i915, fw->data, fw->size);
+	else
+		obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
+
 	if (IS_ERR(obj)) {
 		err = PTR_ERR(obj);
 		goto fail;
@@ -420,14 +425,19 @@ static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw)
 		.pages = obj->mm.pages,
 		.vm = &ggtt->vm,
 	};
+	u32 pte_flags = 0;
 
 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
 	GEM_BUG_ON(dummy.node.size > ggtt->uc_fw.size);
 
 	/* uc_fw->obj cache domains were not controlled across suspend */
-	drm_clflush_sg(dummy.pages);
+	if (i915_gem_object_has_struct_page(obj))
+		drm_clflush_sg(dummy.pages);
+
+	if (i915_gem_object_is_lmem(obj))
+		pte_flags |= PTE_LM;
 
-	ggtt->vm.insert_entries(&ggtt->vm, &dummy, I915_CACHE_NONE, 0);
+	ggtt->vm.insert_entries(&ggtt->vm, &dummy, I915_CACHE_NONE, pte_flags);
 }
 
 static void uc_fw_unbind_ggtt(struct intel_uc_fw *uc_fw)
@@ -592,7 +602,24 @@ size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
 
 	GEM_BUG_ON(!intel_uc_fw_is_available(uc_fw));
 
-	return sg_pcopy_to_buffer(pages->sgl, pages->nents, dst, size, offset);
+	if (i915_gem_object_is_lmem(uc_fw->obj)) {
+		unsigned long page_idx = offset >> PAGE_SHIFT;
+		unsigned int page_off = offset_in_page(offset);
+		void __iomem *vaddr;
+
+		vaddr = i915_gem_object_lmem_io_map(uc_fw->obj,
+						    page_idx,
+						    page_off + size);
+		if (!vaddr)
+			return 0;
+
+		memcpy(dst, vaddr + page_off, size);
+		io_mapping_unmap(vaddr);
+		return size;
+	} else {
+		return sg_pcopy_to_buffer(pages->sgl, pages->nents,
+					  dst, size, offset);
+	}
 }
 
 /**
-- 
2.26.2

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

