From: Felix Kuehling <Felix.Kuehling@amd.com>
To: amd-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org
Cc: alex.sierra@amd.com, Philip Yang <Philip.Yang@amd.com>
Subject: [PATCH 34/35] drm/amdkfd: add svm range validate timestamp
Date: Wed,  6 Jan 2021 22:01:26 -0500
Message-ID: <20210107030127.20393-35-Felix.Kuehling@amd.com>
In-Reply-To: <20210107030127.20393-1-Felix.Kuehling@amd.com>

From: Philip Yang <Philip.Yang@amd.com>

With XNACK on, add a validate timestamp in order to handle GPU VM
faults from multiple GPUs.

If a GPU retry fault requires migrating the range to the best restore
location, use the range validate timestamp to record the system time
after the range has been restored and the GPU page table updated.

Because multiple pages of the same range can trigger multiple retry
faults, define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING as a period long
enough that pending retry faults may still arrive after the page table
update, and use it to skip duplicate retry faults on the same range.

If the difference between the current system timestamp and the range's
last validate timestamp is larger than
AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING, the retry fault came from another
GPU, so continue with retry fault recovery.
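
In code terms, the mechanism reduces to the sketch below (a simplified,
standalone illustration of this patch's logic; the two helper functions
are invented for this example and do not exist in the driver, while
ktime_get() and ktime_to_us() are the real kernel time APIs the patch
uses):

  #include <linux/ktime.h>

  /* Pending window in microseconds (2000 us = 2 ms). */
  #define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING	2000

  /* Record the validation time; called after a range has been
   * successfully validated and its GPU page table updated.
   */
  static void svm_range_stamp_validate(struct svm_range *prange)
  {
  	prange->validate_timestamp = ktime_to_us(ktime_get());
  }

  /* Decide whether a new retry fault on the range is a duplicate
   * that arrived while the pending window was still open.
   */
  static bool svm_range_skip_retry_fault(struct svm_range *prange)
  {
  	uint64_t elapsed = ktime_to_us(ktime_get()) -
  			   prange->validate_timestamp;

  	return elapsed < AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING;
  }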

Signed-off-by: Philip Yang <Philip.Yang@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 27 +++++++++++++++++++++++----
 drivers/gpu/drm/amd/amdkfd/kfd_svm.h |  2 ++
 2 files changed, 25 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 8b57f5a471bd..65f20a72ddcb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -34,6 +34,11 @@
 
 #define AMDGPU_SVM_RANGE_RESTORE_DELAY_MS 1
 
+/* Long enough to ensure no retry fault comes after svm range is restored and
+ * page table is updated.
+ */
+#define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING	2000
+
 static void svm_range_evict_svm_bo_worker(struct work_struct *work);
 /**
  * svm_range_unlink - unlink svm_range from lists and interval tree
@@ -122,6 +127,7 @@ svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
 	INIT_LIST_HEAD(&prange->remove_list);
 	INIT_LIST_HEAD(&prange->svm_bo_list);
 	atomic_set(&prange->invalid, 0);
+	prange->validate_timestamp = ktime_to_us(ktime_get());
 	mutex_init(&prange->mutex);
 	spin_lock_init(&prange->svm_bo_lock);
 	svm_range_set_default_attributes(&prange->preferred_loc,
@@ -482,20 +488,28 @@ static int svm_range_validate_vram(struct svm_range *prange)
 static int
 svm_range_validate(struct mm_struct *mm, struct svm_range *prange)
 {
+	struct kfd_process *p;
 	int r;
 
 	pr_debug("svms 0x%p [0x%lx 0x%lx] actual loc 0x%x\n", prange->svms,
 		 prange->it_node.start, prange->it_node.last,
 		 prange->actual_loc);
 
+	p = container_of(prange->svms, struct kfd_process, svms);
+
 	if (!prange->actual_loc)
 		r = svm_range_validate_ram(mm, prange);
 	else
 		r = svm_range_validate_vram(prange);
 
-	pr_debug("svms 0x%p [0x%lx 0x%lx] ret %d invalid %d\n", prange->svms,
-		 prange->it_node.start, prange->it_node.last,
-		 r, atomic_read(&prange->invalid));
+	if (!r) {
+		if (p->xnack_enabled)
+			atomic_set(&prange->invalid, 0);
+		prange->validate_timestamp = ktime_to_us(ktime_get());
+	}
+
+	pr_debug("svms 0x%p [0x%lx 0x%lx] ret %d\n", prange->svms,
+		 prange->it_node.start, prange->it_node.last, r);
 
 	return r;
 }
@@ -1766,6 +1780,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	struct svm_range_list *svms;
 	struct svm_range *prange;
 	struct kfd_process *p;
+	uint64_t timestamp;
 	int32_t best_loc;
 	int srcu_idx;
 	int r = 0;
@@ -1790,7 +1805,11 @@ svm_range_restore_pages(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		goto out_srcu_unlock;
 	}
 
-	if (!atomic_read(&prange->invalid)) {
+	mutex_lock(&prange->mutex);
+	timestamp = ktime_to_us(ktime_get()) - prange->validate_timestamp;
+	mutex_unlock(&prange->mutex);
+	/* skip duplicate vm fault on different pages of same range */
+	if (timestamp < AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING) {
 		pr_debug("svms 0x%p [0x%lx %lx] already restored\n",
 			 svms, prange->it_node.start, prange->it_node.last);
 		goto out_srcu_unlock;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index 0685eb04b87c..466ec5537bbb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -66,6 +66,7 @@ struct svm_range_bo {
  * @actual_loc: the actual location, 0 for CPU, or GPU id
  * @granularity:migration granularity, log2 num pages
  * @invalid:    not 0 means cpu page table is invalidated
+ * @validate_timestamp: system timestamp when range is validated
  * @bitmap_access: index bitmap of GPUs which can access the range
  * @bitmap_aip: index bitmap of GPUs which can access the range in place
  *
@@ -95,6 +96,7 @@ struct svm_range {
 	uint32_t			actual_loc;
 	uint8_t				granularity;
 	atomic_t			invalid;
+	uint64_t			validate_timestamp;
 	DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
 	DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
 };
-- 
2.29.2
