From: Matthew Brost <matthew.brost@intel.com>
To: <intel-xe@lists.freedesktop.org>
Cc: Matthew Brost <matthew.brost@intel.com>
Subject: [PATCH v4 20/30] drm/xe: Update PT layer with better error handling
Date: Thu,  7 Mar 2024 21:07:56 -0800
Message-ID: <20240308050806.577176-21-matthew.brost@intel.com>
In-Reply-To: <20240308050806.577176-1-matthew.brost@intel.com>

Update the PT layer so that if a memory allocation for a PTE fails, the
error can be propagated back to the user without the VM having to be
killed.

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
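For context, a minimal standalone sketch of the prepare / commit /
abort-in-reverse flow this patch moves the PT layer to. This is plain
userspace C, not xe driver code; every name below (fake_pt_op,
prepare_op, commit_op, abort_op) is invented purely for illustration:

/*
 * Stage every allocation first, commit only once all allocations have
 * succeeded, and on failure unwind the staged state in reverse order so
 * -ENOMEM reaches the caller instead of the VM being killed.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define NUM_OPS 4

struct fake_pt_op {
	int *entries;		/* staged entries, NULL until prepared */
	int num_entries;
};

static int prepare_op(struct fake_pt_op *op, int n)
{
	op->entries = calloc(n, sizeof(*op->entries));
	if (!op->entries)
		return -ENOMEM;	/* nothing shared has been touched yet */
	op->num_entries = n;
	return 0;
}

static void abort_op(struct fake_pt_op *op)
{
	free(op->entries);	/* undo exactly what prepare_op staged */
	op->entries = NULL;
	op->num_entries = 0;
}

static void commit_op(const struct fake_pt_op *op)
{
	/* point of no return: staged entries become globally visible */
	printf("committed %d entries\n", op->num_entries);
}

int main(void)
{
	struct fake_pt_op ops[NUM_OPS] = { 0 };
	int i, err = 0;

	for (i = 0; i < NUM_OPS; i++) {
		err = prepare_op(&ops[i], 8);
		if (err)
			goto unwind;
	}

	for (i = 0; i < NUM_OPS; i++)
		commit_op(&ops[i]);
	for (i = 0; i < NUM_OPS; i++)
		free(ops[i].entries);	/* staged bookkeeping no longer needed */
	return 0;

unwind:
	while (--i >= 0)		/* roll back in reverse order */
		abort_op(&ops[i]);
	fprintf(stderr, "prepare failed: %d\n", err);
	return 1;
}

Unwinding in reverse order mirrors xe_pt_update_ops_abort() below, which
walks the prepared ops from the last one back to the first so later
updates are undone before the state they were built on.
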
 drivers/gpu/drm/xe/xe_pt.c       | 211 ++++++++++++++++++++++++-------
 drivers/gpu/drm/xe/xe_pt_types.h |   2 +
 2 files changed, 165 insertions(+), 48 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index 5a8523d0a049..6691efd13911 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -812,19 +812,27 @@ xe_vm_populate_pgtable(struct xe_migrate_pt_update *pt_update, struct xe_tile *t
 	}
 }
 
-static void xe_pt_abort_bind(struct xe_vma *vma,
-			     struct xe_vm_pgtable_update *entries,
-			     u32 num_entries)
+static void xe_pt_cancel_bind(struct xe_vma *vma,
+			      struct xe_vm_pgtable_update *entries,
+			      u32 num_entries)
 {
 	u32 i, j;
 
 	for (i = 0; i < num_entries; i++) {
-		if (!entries[i].pt_entries)
+		struct xe_pt *pt = entries[i].pt;
+
+		if (!pt)
 			continue;
 
-		for (j = 0; j < entries[i].qwords; j++)
-			xe_pt_destroy(entries[i].pt_entries[j].pt, xe_vma_vm(vma)->flags, NULL);
+		if (pt->level) {
+			for (j = 0; j < entries[i].qwords; j++)
+				xe_pt_destroy(entries[i].pt_entries[j].pt,
+					      xe_vma_vm(vma)->flags, NULL);
+		}
+
 		kfree(entries[i].pt_entries);
+		entries[i].pt_entries = NULL;
+		entries[i].qwords = 0;
 	}
 }
 
@@ -840,10 +848,61 @@ static void xe_pt_commit_locks_assert(struct xe_vma *vma)
 	xe_vm_assert_held(vm);
 }
 
-static void xe_pt_commit_bind(struct xe_vma *vma,
-			      struct xe_vm_pgtable_update *entries,
-			      u32 num_entries, bool rebind,
-			      struct llist_head *deferred)
+static void xe_pt_commit(struct xe_vma *vma,
+			 struct xe_vm_pgtable_update *entries,
+			 u32 num_entries, struct llist_head *deferred)
+{
+	u32 i, j;
+
+	xe_pt_commit_locks_assert(vma);
+
+	for (i = 0; i < num_entries; i++) {
+		struct xe_pt *pt = entries[i].pt;
+
+		if (!pt->level)
+			continue;
+
+		for (j = 0; j < entries[i].qwords; j++) {
+			struct xe_pt *oldpte = entries[i].pt_entries[j].pt;
+
+			xe_pt_destroy(oldpte, xe_vma_vm(vma)->flags, deferred);
+		}
+	}
+}
+
+static void xe_pt_abort_bind(struct xe_vma *vma,
+			     struct xe_vm_pgtable_update *entries,
+			     u32 num_entries, bool rebind)
+{
+	int i, j;
+
+	xe_pt_commit_locks_assert(vma);
+
+	for (i = num_entries - 1; i >= 0; --i) {
+		struct xe_pt *pt = entries[i].pt;
+		struct xe_pt_dir *pt_dir;
+
+		if (!rebind)
+			pt->num_live -= entries[i].qwords;
+
+		if (!pt->level)
+			continue;
+
+		pt_dir = as_xe_pt_dir(pt);
+		for (j = 0; j < entries[i].qwords; j++) {
+			u32 j_ = j + entries[i].ofs;
+			struct xe_pt *newpte = xe_pt_entry(pt_dir, j_);
+			struct xe_pt *oldpte = entries[i].pt_entries[j].pt;
+
+			pt_dir->children[j_] = oldpte ? &oldpte->base : 0;
+			xe_pt_destroy(newpte, xe_vma_vm(vma)->flags, NULL);
+		}
+	}
+}
+
+static void xe_pt_commit_prepare_bind(struct xe_vma *vma,
+				      struct xe_vm_pgtable_update *entries,
+				      u32 num_entries, bool rebind)
 {
 	u32 i, j;
 
@@ -863,12 +922,13 @@ static void xe_pt_commit_bind(struct xe_vma *vma,
 		for (j = 0; j < entries[i].qwords; j++) {
 			u32 j_ = j + entries[i].ofs;
 			struct xe_pt *newpte = entries[i].pt_entries[j].pt;
+			struct xe_pt *oldpte = NULL;
 
 			if (xe_pt_entry(pt_dir, j_))
-				xe_pt_destroy(xe_pt_entry(pt_dir, j_),
-					      xe_vma_vm(vma)->flags, deferred);
+				oldpte = xe_pt_entry(pt_dir, j_);
 
 			pt_dir->children[j_] = &newpte->base;
+			entries[i].pt_entries[j].pt = oldpte;
 		}
 	}
 }
@@ -892,8 +952,6 @@ xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma,
 	err = xe_pt_stage_bind(tile, vma, entries, num_entries);
 	if (!err)
 		xe_tile_assert(tile, *num_entries);
-	else /* abort! */
-		xe_pt_abort_bind(vma, entries, *num_entries);
 
 	return err;
 }
@@ -1383,7 +1441,7 @@ xe_pt_stage_unbind_post_descend(struct xe_ptw *parent, pgoff_t offset,
 				     &end_offset))
 		return 0;
 
-	(void)xe_pt_new_shared(&xe_walk->wupd, xe_child, offset, false);
+	(void)xe_pt_new_shared(&xe_walk->wupd, xe_child, offset, true);
 	xe_walk->wupd.updates[level].update->qwords = end_offset - offset;
 
 	return 0;
@@ -1451,32 +1509,57 @@ xe_migrate_clear_pgtable_callback(struct xe_migrate_pt_update *pt_update,
 		memset64(ptr, empty, num_qwords);
 }
 
+static void xe_pt_abort_unbind(struct xe_vma *vma,
+			       struct xe_vm_pgtable_update *entries,
+			       u32 num_entries)
+{
+	int j, i;
+
+	xe_pt_commit_locks_assert(vma);
+
+	for (j = num_entries - 1; j >= 0; --j) {
+		struct xe_vm_pgtable_update *entry = &entries[j];
+		struct xe_pt *pt = entry->pt;
+		struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt);
+
+		pt->num_live += entry->qwords;
+
+		if (!pt->level)
+			continue;
+
+		for (i = entry->ofs; i < entry->ofs + entry->qwords; i++)
+			pt_dir->children[i] =
+				entries[j].pt_entries[i - entry->ofs].pt ?
+				&entries[j].pt_entries[i - entry->ofs].pt->base : 0;
+	}
+}
+
 static void
-xe_pt_commit_unbind(struct xe_vma *vma,
-		    struct xe_vm_pgtable_update *entries, u32 num_entries,
-		    struct llist_head *deferred)
+xe_pt_commit_prepare_unbind(struct xe_vma *vma,
+			    struct xe_vm_pgtable_update *entries,
+			    u32 num_entries)
 {
-	u32 j;
+	int j, i;
 
 	xe_pt_commit_locks_assert(vma);
 
 	for (j = 0; j < num_entries; ++j) {
 		struct xe_vm_pgtable_update *entry = &entries[j];
 		struct xe_pt *pt = entry->pt;
+		struct xe_pt_dir *pt_dir;
 
 		pt->num_live -= entry->qwords;
-		if (pt->level) {
-			struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt);
-			u32 i;
-
-			for (i = entry->ofs; i < entry->ofs + entry->qwords;
-			     i++) {
-				if (xe_pt_entry(pt_dir, i))
-					xe_pt_destroy(xe_pt_entry(pt_dir, i),
-						      xe_vma_vm(vma)->flags, deferred);
+		if (!pt->level)
+			continue;
 
-				pt_dir->children[i] = NULL;
-			}
+		pt_dir = as_xe_pt_dir(pt);
+		for (i = entry->ofs; i < entry->ofs + entry->qwords; i++) {
+			if (xe_pt_entry(pt_dir, i))
+				entries[j].pt_entries[i - entry->ofs].pt =
+					xe_pt_entry(pt_dir, i);
+			else
+				entries[j].pt_entries[i - entry->ofs].pt = NULL;
+			pt_dir->children[i] = NULL;
 		}
 	}
 }
@@ -1513,7 +1596,6 @@ static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
 {
 	u32 current_op = pt_update_ops->current_op;
 	struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
-	struct llist_head *deferred = &pt_update_ops->deferred;
 	int err;
 
 	xe_bo_assert_held(xe_vma_bo(vma));
@@ -1522,6 +1604,7 @@ static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
 	       "Preparing bind, with range [%llx...%llx)\n",
 	       xe_vma_start(vma), xe_vma_end(vma) - 1);
 
+	pt_op->vma = NULL;
 	pt_op->bind = true;
 	pt_op->rebind = BIT(tile->id) & vma->tile_present;
 
@@ -1555,9 +1638,11 @@ static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
 			(!pt_op->rebind && vm->scratch_pt[tile->id] &&
 			 xe_vm_in_preempt_fence_mode(vm));
 
-		/* FIXME: Don't commit right away */
-		xe_pt_commit_bind(vma, pt_op->entries, pt_op->num_entries,
-				  pt_op->rebind, deferred);
+		pt_op->vma = vma;
+		xe_pt_commit_prepare_bind(vma, pt_op->entries,
+					  pt_op->num_entries, pt_op->rebind);
+	} else {
+		xe_pt_cancel_bind(vma, pt_op->entries, pt_op->num_entries);
 	}
 
 	return err;
@@ -1569,7 +1654,6 @@ static int unbind_op_prepare(struct xe_tile *tile,
 {
 	u32 current_op = pt_update_ops->current_op;
 	struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
-	struct llist_head *deferred = &pt_update_ops->deferred;
 
 	xe_bo_assert_held(xe_vma_bo(vma));
 
@@ -1577,6 +1661,7 @@ static int unbind_op_prepare(struct xe_tile *tile,
 	       "Preparing unbind, with range [%llx...%llx)\n",
 	       xe_vma_start(vma), xe_vma_end(vma) - 1);
 
+	pt_op->vma = vma;
 	pt_op->bind = false;
 	pt_op->rebind = false;
 
@@ -1589,9 +1674,7 @@ static int unbind_op_prepare(struct xe_tile *tile,
 	pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma);
 	pt_update_ops->needs_invalidation = true;
 
-	/* FIXME: Don't commit right away */
-	xe_pt_commit_unbind(vma, pt_op->entries, pt_op->num_entries,
-			    deferred);
+	xe_pt_commit_prepare_unbind(vma, pt_op->entries, pt_op->num_entries);
 
 	return 0;
 }
@@ -1806,7 +1889,7 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
 	struct invalidation_fence *ifence = NULL;
 	struct xe_range_fence *rfence;
 	struct xe_vma_op *op;
-	int err = 0;
+	int err = 0, i;
 	struct xe_migrate_pt_update update = {
 		.ops = pt_update_ops->needs_userptr_lock ?
 			&userptr_migrate_ops :
@@ -1820,8 +1903,10 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
 
 	if (pt_update_ops->needs_invalidation) {
 		ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
-		if (!ifence)
-			return ERR_PTR(-ENOMEM);
+		if (!ifence) {
+			err = -ENOMEM;
+			goto kill_vm_tile1;
+		}
 	}
 
 	rfence = kzalloc(sizeof(*rfence), GFP_KERNEL);
@@ -1830,10 +1915,19 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
 		goto free_ifence;
 	}
 
+	/* FIXME: Point of no return - VM killed if failure after this */
+	for (i = 0; i < pt_update_ops->num_ops; ++i) {
+		struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];
+
+		xe_pt_commit(pt_op->vma, pt_op->entries,
+			     pt_op->num_entries, &pt_update_ops->deferred);
+		pt_op->vma = NULL;	/* skip in xe_pt_update_ops_abort */
+	}
+
 	fence = xe_migrate_update_pgtables(tile->migrate, &update);
 	if (IS_ERR(fence)) {
 		err = PTR_ERR(fence);
-		goto free_rfence;
+		goto kill_vm_tile0;
 	}
 
 	err = xe_range_fence_insert(&vm->rftree[tile->id], rfence,
@@ -1871,10 +1965,15 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
 	if (pt_update_ops->needs_userptr_lock)
 		up_read(&vm->userptr.notifier_lock);
 	dma_fence_put(fence);
-free_rfence:
+kill_vm_tile0:
+	if (!tile->id)
+		xe_vm_kill(vops->vm, false);
 	kfree(rfence);
 free_ifence:
 	kfree(ifence);
+kill_vm_tile1:
+	if (tile->id)
+		xe_vm_kill(vops->vm, false);
 
 	return ERR_PTR(err);
 }
@@ -1895,12 +1994,10 @@ void xe_pt_update_ops_fini(struct xe_tile *tile, struct xe_vma_ops *vops)
 	lockdep_assert_held(&vops->vm->lock);
 	xe_vm_assert_held(vops->vm);
 
-	/* FIXME: Not 100% correct */
 	for (i = 0; i < pt_update_ops->num_ops; ++i) {
 		struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];
 
-		if (pt_op->bind)
-			xe_pt_free_bind(pt_op->entries, pt_op->num_entries);
+		xe_pt_free_bind(pt_op->entries, pt_op->num_entries);
 	}
 	xe_bo_put_commit(&vops->pt_update_ops[tile->id].deferred);
 }
@@ -1914,10 +2011,28 @@ void xe_pt_update_ops_fini(struct xe_tile *tile, struct xe_vma_ops *vops)
  */
 void xe_pt_update_ops_abort(struct xe_tile *tile, struct xe_vma_ops *vops)
 {
+	struct xe_vm_pgtable_update_ops *pt_update_ops =
+		&vops->pt_update_ops[tile->id];
+	int i;
+
 	lockdep_assert_held(&vops->vm->lock);
 	xe_vm_assert_held(vops->vm);
 
-	/* FIXME: Just kill VM for now + cleanup PTs */
+	for (i = pt_update_ops->num_ops - 1; i >= 0; --i) {
+		struct xe_vm_pgtable_update_op *pt_op =
+			&pt_update_ops->ops[i];
+
+		if (!pt_op->vma || i >= pt_update_ops->current_op)
+			continue;
+
+		if (pt_op->bind)
+			xe_pt_abort_bind(pt_op->vma, pt_op->entries,
+					 pt_op->num_entries,
+					 pt_op->rebind);
+		else
+			xe_pt_abort_unbind(pt_op->vma, pt_op->entries,
+					   pt_op->num_entries);
+	}
+
 	xe_bo_put_commit(&vops->pt_update_ops[tile->id].deferred);
-	xe_vm_kill(vops->vm, false);
 }
diff --git a/drivers/gpu/drm/xe/xe_pt_types.h b/drivers/gpu/drm/xe/xe_pt_types.h
index 16252f1be055..384cc04de719 100644
--- a/drivers/gpu/drm/xe/xe_pt_types.h
+++ b/drivers/gpu/drm/xe/xe_pt_types.h
@@ -78,6 +78,8 @@ struct xe_vm_pgtable_update {
 struct xe_vm_pgtable_update_op {
 	/** @entries: entries to update for this operation */
 	struct xe_vm_pgtable_update entries[XE_VM_MAX_LEVEL * 2 + 1];
+	/** @vma: VMA for operation, operation not valid if NULL */
+	struct xe_vma *vma;
 	/** @num_entries: number of entries for this update operation */
 	u32 num_entries;
 	/** @bind: is a bind */
-- 
2.34.1

