dri-devel.lists.freedesktop.org archive mirror
 help / color / mirror / Atom feed
* [PATCH 1/5] drm/ttm: move SG flag check into ttm_bo_vm_reserve
@ 2020-09-25 14:55 Christian König
  2020-09-25 14:55 ` [PATCH 2/5] drm/radeon: stop using TTMs fault callback Christian König
                   ` (4 more replies)
  0 siblings, 5 replies; 8+ messages in thread
From: Christian König @ 2020-09-25 14:55 UTC (permalink / raw)
  To: dri-devel, ray.huang, airlied

Just check earlier if a BO can be page faulted in the first place.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/ttm/ttm_bo_vm.c | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 98a006fc30a5..991ef132e108 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -157,6 +157,15 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
 			return VM_FAULT_NOPAGE;
 	}
 
+	/*
+	 * Refuse to fault imported pages. This should be handled
+	 * (if at all) by redirecting mmap to the exporter.
+	 */
+	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+		dma_resv_unlock(bo->base.resv);
+		return VM_FAULT_SIGBUS;
+	}
+
 	return 0;
 }
 EXPORT_SYMBOL(ttm_bo_vm_reserve);
@@ -281,13 +290,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 	vm_fault_t ret = VM_FAULT_NOPAGE;
 	unsigned long address = vmf->address;
 
-	/*
-	 * Refuse to fault imported pages. This should be handled
-	 * (if at all) by redirecting mmap to the exporter.
-	 */
-	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
-		return VM_FAULT_SIGBUS;
-
 	if (bdev->driver->fault_reserve_notify) {
 		struct dma_fence *moving = dma_fence_get(bo->moving);
 
-- 
2.17.1

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH 2/5] drm/radeon: stop using TTMs fault callback
  2020-09-25 14:55 [PATCH 1/5] drm/ttm: move SG flag check into ttm_bo_vm_reserve Christian König
@ 2020-09-25 14:55 ` Christian König
  2020-09-25 14:55 ` [PATCH 3/5] drm/amdgpu: " Christian König
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 8+ messages in thread
From: Christian König @ 2020-09-25 14:55 UTC (permalink / raw)
  To: dri-devel, ray.huang, airlied

We already implemented the fault handler ourselves,
just open code what is necessary here.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/radeon/radeon_object.c | 22 +++++++++++--------
 drivers/gpu/drm/radeon/radeon_object.h |  2 +-
 drivers/gpu/drm/radeon/radeon_ttm.c    | 29 ++++++++++++++++++--------
 3 files changed, 34 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 689426dd8480..8c285eb118f9 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -775,7 +775,7 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo,
 	radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
 }
 
-int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
 	struct ttm_operation_ctx ctx = { false, false };
 	struct radeon_device *rdev;
@@ -798,7 +798,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 
 	/* Can't move a pinned BO to visible VRAM */
 	if (rbo->tbo.pin_count > 0)
-		return -EINVAL;
+		return VM_FAULT_SIGBUS;
 
 	/* hurrah the memory is not visible ! */
 	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
@@ -812,16 +812,20 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 	r = ttm_bo_validate(bo, &rbo->placement, &ctx);
 	if (unlikely(r == -ENOMEM)) {
 		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
-		return ttm_bo_validate(bo, &rbo->placement, &ctx);
-	} else if (unlikely(r != 0)) {
-		return r;
+		r = ttm_bo_validate(bo, &rbo->placement, &ctx);
+	} else if (likely(!r)) {
+		offset = bo->mem.start << PAGE_SHIFT;
+		/* this should never happen */
+		if ((offset + size) > rdev->mc.visible_vram_size)
+			return VM_FAULT_SIGBUS;
 	}
 
-	offset = bo->mem.start << PAGE_SHIFT;
-	/* this should never happen */
-	if ((offset + size) > rdev->mc.visible_vram_size)
-		return -EINVAL;
+	if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
+		return VM_FAULT_NOPAGE;
+	else if (unlikely(r))
+		return VM_FAULT_SIGBUS;
 
+	ttm_bo_move_to_lru_tail_unlocked(bo);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 27cfb64057fe..d606e9a935e3 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -163,7 +163,7 @@ extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
 extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
 				  bool evict,
 				  struct ttm_resource *new_mem);
-extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+extern vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
 extern void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
 			    bool shared);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index df5cedb2b632..63e38b05a5bc 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -803,7 +803,6 @@ static struct ttm_bo_driver radeon_bo_driver = {
 	.move = &radeon_bo_move,
 	.verify_access = &radeon_verify_access,
 	.move_notify = &radeon_bo_move_notify,
-	.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
 	.io_mem_reserve = &radeon_ttm_io_mem_reserve,
 };
 
@@ -904,17 +903,29 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
 
 static vm_fault_t radeon_ttm_fault(struct vm_fault *vmf)
 {
-	struct ttm_buffer_object *bo;
-	struct radeon_device *rdev;
+	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
+	struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
 	vm_fault_t ret;
 
-	bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
-	if (bo == NULL)
-		return VM_FAULT_NOPAGE;
-
-	rdev = radeon_get_rdev(bo->bdev);
 	down_read(&rdev->pm.mclk_lock);
-	ret = ttm_bo_vm_fault(vmf);
+
+	ret = ttm_bo_vm_reserve(bo, vmf);
+	if (ret)
+		goto unlock_mclk;
+
+	ret = radeon_bo_fault_reserve_notify(bo);
+	if (ret)
+		goto unlock_resv;
+
+	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+				       TTM_BO_VM_NUM_PREFAULT, 1);
+	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+		goto unlock_mclk;
+
+unlock_resv:
+	dma_resv_unlock(bo->base.resv);
+
+unlock_mclk:
 	up_read(&rdev->pm.mclk_lock);
 	return ret;
 }
-- 
2.17.1

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH 3/5] drm/amdgpu: stop using TTMs fault callback
  2020-09-25 14:55 [PATCH 1/5] drm/ttm: move SG flag check into ttm_bo_vm_reserve Christian König
  2020-09-25 14:55 ` [PATCH 2/5] drm/radeon: stop using TTMs fault callback Christian König
@ 2020-09-25 14:55 ` Christian König
  2020-09-25 19:08   ` Nirmoy
  2020-09-25 14:55 ` [PATCH 4/5] drm/nouveau: " Christian König
                   ` (2 subsequent siblings)
  4 siblings, 1 reply; 8+ messages in thread
From: Christian König @ 2020-09-25 14:55 UTC (permalink / raw)
  To: dri-devel, ray.huang, airlied

Implement the fault handler ourselves using the provided TTM functions.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 20 +++++------
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.h |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c    | 40 +++++++++++++++++++---
 3 files changed, 46 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 63e9c5793c30..80bc7177cd45 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1341,19 +1341,14 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
  * Returns:
  * 0 for success or a negative error code on failure.
  */
-int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 	struct ttm_operation_ctx ctx = { false, false };
-	struct amdgpu_bo *abo;
+	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
 	unsigned long offset, size;
 	int r;
 
-	if (!amdgpu_bo_is_amdgpu_bo(bo))
-		return 0;
-
-	abo = ttm_to_amdgpu_bo(bo);
-
 	/* Remember that this BO was accessed by the CPU */
 	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 
@@ -1367,7 +1362,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 
 	/* Can't move a pinned BO to visible VRAM */
 	if (abo->tbo.pin_count > 0)
-		return -EINVAL;
+		return VM_FAULT_SIGBUS;
 
 	/* hurrah the memory is not visible ! */
 	atomic64_inc(&adev->num_vram_cpu_page_faults);
@@ -1379,15 +1374,18 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 	abo->placement.busy_placement = &abo->placements[1];
 
 	r = ttm_bo_validate(bo, &abo->placement, &ctx);
-	if (unlikely(r != 0))
-		return r;
+	if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
+		return VM_FAULT_NOPAGE;
+	else if (unlikely(r))
+		return VM_FAULT_SIGBUS;
 
 	offset = bo->mem.start << PAGE_SHIFT;
 	/* this should never happen */
 	if (bo->mem.mem_type == TTM_PL_VRAM &&
 	    (offset + size) > adev->gmc.visible_vram_size)
-		return -EINVAL;
+		return VM_FAULT_SIGBUS;
 
+	ttm_bo_move_to_lru_tail_unlocked(bo);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index e91750e43448..132e5f955180 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -284,7 +284,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 			   bool evict,
 			   struct ttm_resource *new_mem);
 void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
-int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
 		     bool shared);
 int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index d3bd2fd448be..399961035ae6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1708,7 +1708,6 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
 	.verify_access = &amdgpu_verify_access,
 	.move_notify = &amdgpu_bo_move_notify,
 	.release_notify = &amdgpu_bo_release_notify,
-	.fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
 	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
 	.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
 	.access_memory = &amdgpu_ttm_access_memory,
@@ -2088,15 +2087,48 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 	adev->mman.buffer_funcs_enabled = enable;
 }
 
+static vm_fault_t amdgpu_ttm_fault(struct vm_fault *vmf)
+{
+	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
+	vm_fault_t ret;
+
+	ret = ttm_bo_vm_reserve(bo, vmf);
+	if (ret)
+		return ret;
+
+	ret = amdgpu_bo_fault_reserve_notify(bo);
+	if (ret)
+		goto unlock;
+
+	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+				       TTM_BO_VM_NUM_PREFAULT, 1);
+	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+		return ret;
+
+unlock:
+	dma_resv_unlock(bo->base.resv);
+	return ret;
+}
+
+static struct vm_operations_struct amdgpu_ttm_vm_ops = {
+	.fault = amdgpu_ttm_fault,
+	.open = ttm_bo_vm_open,
+	.close = ttm_bo_vm_close,
+	.access = ttm_bo_vm_access
+};
+
 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
 {
 	struct drm_file *file_priv = filp->private_data;
 	struct amdgpu_device *adev = drm_to_adev(file_priv->minor->dev);
+	int r;
 
-	if (adev == NULL)
-		return -EINVAL;
+	r = ttm_bo_mmap(filp, vma, &adev->mman.bdev);
+	if (unlikely(r != 0))
+		return r;
 
-	return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
+	vma->vm_ops = &amdgpu_ttm_vm_ops;
+	return 0;
 }
 
 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
-- 
2.17.1

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH 4/5] drm/nouveau: stop using TTMs fault callback
  2020-09-25 14:55 [PATCH 1/5] drm/ttm: move SG flag check into ttm_bo_vm_reserve Christian König
  2020-09-25 14:55 ` [PATCH 2/5] drm/radeon: stop using TTMs fault callback Christian König
  2020-09-25 14:55 ` [PATCH 3/5] drm/amdgpu: " Christian König
@ 2020-09-25 14:55 ` Christian König
  2020-09-25 19:35   ` Nirmoy
  2020-09-25 14:55 ` [PATCH 5/5] drm/ttm: remove " Christian König
  2020-09-28  5:28 ` [PATCH 1/5] drm/ttm: move SG flag check into ttm_bo_vm_reserve Dave Airlie
  4 siblings, 1 reply; 8+ messages in thread
From: Christian König @ 2020-09-25 14:55 UTC (permalink / raw)
  To: dri-devel, ray.huang, airlied

We already implemented the fault handler ourselves,
just open code what is necessary here.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/nouveau/nouveau_bo.c  | 50 ++++++++++++++-------------
 drivers/gpu/drm/nouveau/nouveau_bo.h  |  1 +
 drivers/gpu/drm/nouveau/nouveau_ttm.c | 10 +++---
 3 files changed, 33 insertions(+), 28 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 8d51cfca07c8..1d4b16c0e353 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1226,8 +1226,7 @@ nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_resource *reg)
 	mutex_unlock(&drm->ttm.io_reserve_mutex);
 }
 
-static int
-nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -1243,34 +1242,38 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 		    !nvbo->kind)
 			return 0;
 
-		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
-			nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
-						 0);
+		if (bo->mem.mem_type != TTM_PL_SYSTEM)
+			return 0;
+
+		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
+
+	} else {
+		/* make sure bo is in mappable vram */
+		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
+		    bo->mem.start + bo->mem.num_pages < mappable)
+			return 0;
 
-			ret = nouveau_bo_validate(nvbo, false, false);
-			if (ret)
-				return ret;
+		for (i = 0; i < nvbo->placement.num_placement; ++i) {
+			nvbo->placements[i].fpfn = 0;
+			nvbo->placements[i].lpfn = mappable;
 		}
-		return 0;
-	}
 
-	/* make sure bo is in mappable vram */
-	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
-	    bo->mem.start + bo->mem.num_pages < mappable)
-		return 0;
+		for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
+			nvbo->busy_placements[i].fpfn = 0;
+			nvbo->busy_placements[i].lpfn = mappable;
+		}
 
-	for (i = 0; i < nvbo->placement.num_placement; ++i) {
-		nvbo->placements[i].fpfn = 0;
-		nvbo->placements[i].lpfn = mappable;
+		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
 	}
 
-	for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
-		nvbo->busy_placements[i].fpfn = 0;
-		nvbo->busy_placements[i].lpfn = mappable;
-	}
+	ret = nouveau_bo_validate(nvbo, false, false);
+	if (unlikely(ret == -EBUSY || ret == -ERESTARTSYS))
+		return VM_FAULT_NOPAGE;
+	else if (unlikely(ret))
+		return VM_FAULT_SIGBUS;
 
-	nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
-	return nouveau_bo_validate(nvbo, false, false);
+	ttm_bo_move_to_lru_tail_unlocked(bo);
+	return 0;
 }
 
 static int
@@ -1381,7 +1384,6 @@ struct ttm_bo_driver nouveau_bo_driver = {
 	.move_notify = nouveau_bo_move_ntfy,
 	.move = nouveau_bo_move,
 	.verify_access = nouveau_bo_verify_access,
-	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
 	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
 	.io_mem_free = &nouveau_ttm_io_mem_free,
 };
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index ff68ded8d590..641ef6298a0e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -89,6 +89,7 @@ void nouveau_bo_placement_set(struct nouveau_bo *, u32 type, u32 busy);
 void nouveau_bo_wr16(struct nouveau_bo *, unsigned index, u16 val);
 u32  nouveau_bo_rd32(struct nouveau_bo *, unsigned index);
 void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val);
+vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo);
 void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *, bool exclusive);
 int  nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
 			 bool no_wait_gpu);
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 427341753441..edf3bb89a47f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -134,17 +134,19 @@ static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf)
 	if (ret)
 		return ret;
 
-	nouveau_bo_del_io_reserve_lru(bo);
+	ret = nouveau_ttm_fault_reserve_notify(bo);
+	if (ret)
+		goto error_unlock;
 
+	nouveau_bo_del_io_reserve_lru(bo);
 	prot = vm_get_page_prot(vma->vm_flags);
 	ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
+	nouveau_bo_add_io_reserve_lru(bo);
 	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
 		return ret;
 
-	nouveau_bo_add_io_reserve_lru(bo);
-
+error_unlock:
 	dma_resv_unlock(bo->base.resv);
-
 	return ret;
 }
 
-- 
2.17.1

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH 5/5] drm/ttm: remove fault callback
  2020-09-25 14:55 [PATCH 1/5] drm/ttm: move SG flag check into ttm_bo_vm_reserve Christian König
                   ` (2 preceding siblings ...)
  2020-09-25 14:55 ` [PATCH 4/5] drm/nouveau: " Christian König
@ 2020-09-25 14:55 ` Christian König
  2020-09-28  5:28 ` [PATCH 1/5] drm/ttm: move SG flag check into ttm_bo_vm_reserve Dave Airlie
  4 siblings, 0 replies; 8+ messages in thread
From: Christian König @ 2020-09-25 14:55 UTC (permalink / raw)
  To: dri-devel, ray.huang, airlied

Another one bites the dust.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/ttm/ttm_bo_vm.c | 22 ----------------------
 include/drm/ttm/ttm_bo_driver.h |  3 ---
 2 files changed, 25 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 991ef132e108..87ee8f0ca08e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -290,28 +290,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 	vm_fault_t ret = VM_FAULT_NOPAGE;
 	unsigned long address = vmf->address;
 
-	if (bdev->driver->fault_reserve_notify) {
-		struct dma_fence *moving = dma_fence_get(bo->moving);
-
-		err = bdev->driver->fault_reserve_notify(bo);
-		switch (err) {
-		case 0:
-			break;
-		case -EBUSY:
-		case -ERESTARTSYS:
-			dma_fence_put(moving);
-			return VM_FAULT_NOPAGE;
-		default:
-			dma_fence_put(moving);
-			return VM_FAULT_SIGBUS;
-		}
-
-		if (bo->moving != moving) {
-			ttm_bo_move_to_lru_tail_unlocked(bo);
-		}
-		dma_fence_put(moving);
-	}
-
 	/*
 	 * Wait for buffer data in transit, due to a pipelined
 	 * move.
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 864afa8f6f18..9897a16c0a9d 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -190,9 +190,6 @@ struct ttm_bo_driver {
 	void (*move_notify)(struct ttm_buffer_object *bo,
 			    bool evict,
 			    struct ttm_resource *new_mem);
-	/* notify the driver we are taking a fault on this BO
-	 * and have reserved it */
-	int (*fault_reserve_notify)(struct ttm_buffer_object *bo);
 
 	/**
 	 * notify the driver that we're about to swap out this bo
-- 
2.17.1

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* Re: [PATCH 3/5] drm/amdgpu: stop using TTMs fault callback
  2020-09-25 14:55 ` [PATCH 3/5] drm/amdgpu: " Christian König
@ 2020-09-25 19:08   ` Nirmoy
  0 siblings, 0 replies; 8+ messages in thread
From: Nirmoy @ 2020-09-25 19:08 UTC (permalink / raw)
  To: dri-devel

Tested-by: Nirmoy Das <nirmoy.das@amd.com>

On 9/25/20 4:55 PM, Christian König wrote:
> Implement the fault handler ourselves using the provided TTM functions.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 20 +++++------
>   drivers/gpu/drm/amd/amdgpu/amdgpu_object.h |  2 +-
>   drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c    | 40 +++++++++++++++++++---
>   3 files changed, 46 insertions(+), 16 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
> index 63e9c5793c30..80bc7177cd45 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
> @@ -1341,19 +1341,14 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
>    * Returns:
>    * 0 for success or a negative error code on failure.
>    */
> -int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
> +vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
>   {
>   	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
>   	struct ttm_operation_ctx ctx = { false, false };
> -	struct amdgpu_bo *abo;
> +	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
>   	unsigned long offset, size;
>   	int r;
>   
> -	if (!amdgpu_bo_is_amdgpu_bo(bo))
> -		return 0;
> -
> -	abo = ttm_to_amdgpu_bo(bo);
> -
>   	/* Remember that this BO was accessed by the CPU */
>   	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
>   
> @@ -1367,7 +1362,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
>   
>   	/* Can't move a pinned BO to visible VRAM */
>   	if (abo->tbo.pin_count > 0)
> -		return -EINVAL;
> +		return VM_FAULT_SIGBUS;
>   
>   	/* hurrah the memory is not visible ! */
>   	atomic64_inc(&adev->num_vram_cpu_page_faults);
> @@ -1379,15 +1374,18 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
>   	abo->placement.busy_placement = &abo->placements[1];
>   
>   	r = ttm_bo_validate(bo, &abo->placement, &ctx);
> -	if (unlikely(r != 0))
> -		return r;
> +	if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
> +		return VM_FAULT_NOPAGE;
> +	else if (unlikely(r))
> +		return VM_FAULT_SIGBUS;
>   
>   	offset = bo->mem.start << PAGE_SHIFT;
>   	/* this should never happen */
>   	if (bo->mem.mem_type == TTM_PL_VRAM &&
>   	    (offset + size) > adev->gmc.visible_vram_size)
> -		return -EINVAL;
> +		return VM_FAULT_SIGBUS;
>   
> +	ttm_bo_move_to_lru_tail_unlocked(bo);
>   	return 0;
>   }
>   
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
> index e91750e43448..132e5f955180 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
> @@ -284,7 +284,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
>   			   bool evict,
>   			   struct ttm_resource *new_mem);
>   void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
> -int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
> +vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
>   void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
>   		     bool shared);
>   int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> index d3bd2fd448be..399961035ae6 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> @@ -1708,7 +1708,6 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
>   	.verify_access = &amdgpu_verify_access,
>   	.move_notify = &amdgpu_bo_move_notify,
>   	.release_notify = &amdgpu_bo_release_notify,
> -	.fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
>   	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
>   	.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
>   	.access_memory = &amdgpu_ttm_access_memory,
> @@ -2088,15 +2087,48 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
>   	adev->mman.buffer_funcs_enabled = enable;
>   }
>   
> +static vm_fault_t amdgpu_ttm_fault(struct vm_fault *vmf)
> +{
> +	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
> +	vm_fault_t ret;
> +
> +	ret = ttm_bo_vm_reserve(bo, vmf);
> +	if (ret)
> +		return ret;
> +
> +	ret = amdgpu_bo_fault_reserve_notify(bo);
> +	if (ret)
> +		goto unlock;
> +
> +	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
> +				       TTM_BO_VM_NUM_PREFAULT, 1);
> +	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
> +		return ret;
> +
> +unlock:
> +	dma_resv_unlock(bo->base.resv);
> +	return ret;
> +}
> +
> +static struct vm_operations_struct amdgpu_ttm_vm_ops = {
> +	.fault = amdgpu_ttm_fault,
> +	.open = ttm_bo_vm_open,
> +	.close = ttm_bo_vm_close,
> +	.access = ttm_bo_vm_access
> +};
> +
>   int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
>   {
>   	struct drm_file *file_priv = filp->private_data;
>   	struct amdgpu_device *adev = drm_to_adev(file_priv->minor->dev);
> +	int r;
>   
> -	if (adev == NULL)
> -		return -EINVAL;
> +	r = ttm_bo_mmap(filp, vma, &adev->mman.bdev);
> +	if (unlikely(r != 0))
> +		return r;
>   
> -	return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
> +	vma->vm_ops = &amdgpu_ttm_vm_ops;
> +	return 0;
>   }
>   
>   int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH 4/5] drm/nouveau: stop using TTMs fault callback
  2020-09-25 14:55 ` [PATCH 4/5] drm/nouveau: " Christian König
@ 2020-09-25 19:35   ` Nirmoy
  0 siblings, 0 replies; 8+ messages in thread
From: Nirmoy @ 2020-09-25 19:35 UTC (permalink / raw)
  To: Christian König, dri-devel, ray.huang, airlied

Tested this on GeForce GT 710

Tested-by: Nirmoy Das <nirmoy.das@amd.com>


On 9/25/20 4:55 PM, Christian König wrote:
> We already implemented the fault handler ourselves,
> just open code what is necessary here.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>   drivers/gpu/drm/nouveau/nouveau_bo.c  | 50 ++++++++++++++-------------
>   drivers/gpu/drm/nouveau/nouveau_bo.h  |  1 +
>   drivers/gpu/drm/nouveau/nouveau_ttm.c | 10 +++---
>   3 files changed, 33 insertions(+), 28 deletions(-)
>
> diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
> index 8d51cfca07c8..1d4b16c0e353 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_bo.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
> @@ -1226,8 +1226,7 @@ nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_resource *reg)
>   	mutex_unlock(&drm->ttm.io_reserve_mutex);
>   }
>   
> -static int
> -nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
> +vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
>   {
>   	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
>   	struct nouveau_bo *nvbo = nouveau_bo(bo);
> @@ -1243,34 +1242,38 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
>   		    !nvbo->kind)
>   			return 0;
>   
> -		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
> -			nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
> -						 0);
> +		if (bo->mem.mem_type != TTM_PL_SYSTEM)
> +			return 0;
> +
> +		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
> +
> +	} else {
> +		/* make sure bo is in mappable vram */
> +		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
> +		    bo->mem.start + bo->mem.num_pages < mappable)
> +			return 0;
>   
> -			ret = nouveau_bo_validate(nvbo, false, false);
> -			if (ret)
> -				return ret;
> +		for (i = 0; i < nvbo->placement.num_placement; ++i) {
> +			nvbo->placements[i].fpfn = 0;
> +			nvbo->placements[i].lpfn = mappable;
>   		}
> -		return 0;
> -	}
>   
> -	/* make sure bo is in mappable vram */
> -	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
> -	    bo->mem.start + bo->mem.num_pages < mappable)
> -		return 0;
> +		for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
> +			nvbo->busy_placements[i].fpfn = 0;
> +			nvbo->busy_placements[i].lpfn = mappable;
> +		}
>   
> -	for (i = 0; i < nvbo->placement.num_placement; ++i) {
> -		nvbo->placements[i].fpfn = 0;
> -		nvbo->placements[i].lpfn = mappable;
> +		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
>   	}
>   
> -	for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
> -		nvbo->busy_placements[i].fpfn = 0;
> -		nvbo->busy_placements[i].lpfn = mappable;
> -	}
> +	ret = nouveau_bo_validate(nvbo, false, false);
> +	if (unlikely(ret == -EBUSY || ret == -ERESTARTSYS))
> +		return VM_FAULT_NOPAGE;
> +	else if (unlikely(ret))
> +		return VM_FAULT_SIGBUS;
>   
> -	nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
> -	return nouveau_bo_validate(nvbo, false, false);
> +	ttm_bo_move_to_lru_tail_unlocked(bo);
> +	return 0;
>   }
>   
>   static int
> @@ -1381,7 +1384,6 @@ struct ttm_bo_driver nouveau_bo_driver = {
>   	.move_notify = nouveau_bo_move_ntfy,
>   	.move = nouveau_bo_move,
>   	.verify_access = nouveau_bo_verify_access,
> -	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
>   	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
>   	.io_mem_free = &nouveau_ttm_io_mem_free,
>   };
> diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
> index ff68ded8d590..641ef6298a0e 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_bo.h
> +++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
> @@ -89,6 +89,7 @@ void nouveau_bo_placement_set(struct nouveau_bo *, u32 type, u32 busy);
>   void nouveau_bo_wr16(struct nouveau_bo *, unsigned index, u16 val);
>   u32  nouveau_bo_rd32(struct nouveau_bo *, unsigned index);
>   void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val);
> +vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo);
>   void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *, bool exclusive);
>   int  nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
>   			 bool no_wait_gpu);
> diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
> index 427341753441..edf3bb89a47f 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
> @@ -134,17 +134,19 @@ static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf)
>   	if (ret)
>   		return ret;
>   
> -	nouveau_bo_del_io_reserve_lru(bo);
> +	ret = nouveau_ttm_fault_reserve_notify(bo);
> +	if (ret)
> +		goto error_unlock;
>   
> +	nouveau_bo_del_io_reserve_lru(bo);
>   	prot = vm_get_page_prot(vma->vm_flags);
>   	ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
> +	nouveau_bo_add_io_reserve_lru(bo);
>   	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
>   		return ret;
>   
> -	nouveau_bo_add_io_reserve_lru(bo);
> -
> +error_unlock:
>   	dma_resv_unlock(bo->base.resv);
> -
>   	return ret;
>   }
>   
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH 1/5] drm/ttm: move SG flag check into ttm_bo_vm_reserve
  2020-09-25 14:55 [PATCH 1/5] drm/ttm: move SG flag check into ttm_bo_vm_reserve Christian König
                   ` (3 preceding siblings ...)
  2020-09-25 14:55 ` [PATCH 5/5] drm/ttm: remove " Christian König
@ 2020-09-28  5:28 ` Dave Airlie
  4 siblings, 0 replies; 8+ messages in thread
From: Dave Airlie @ 2020-09-28  5:28 UTC (permalink / raw)
  To: Christian König; +Cc: Dave Airlie, Huang Rui, dri-devel

On Sat, 26 Sep 2020 at 00:55, Christian König
<ckoenig.leichtzumerken@gmail.com> wrote:
>
> Just check earlier if a BO can be page faulted in the first place.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>

Nice work,

For the series

Reviewed-by: Dave Airlie <airlied@redhat.com>

> ---
>  drivers/gpu/drm/ttm/ttm_bo_vm.c | 16 +++++++++-------
>  1 file changed, 9 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
> index 98a006fc30a5..991ef132e108 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
> @@ -157,6 +157,15 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
>                         return VM_FAULT_NOPAGE;
>         }
>
> +       /*
> +        * Refuse to fault imported pages. This should be handled
> +        * (if at all) by redirecting mmap to the exporter.
> +        */
> +       if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
> +               dma_resv_unlock(bo->base.resv);
> +               return VM_FAULT_SIGBUS;
> +       }
> +
>         return 0;
>  }
>  EXPORT_SYMBOL(ttm_bo_vm_reserve);
> @@ -281,13 +290,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
>         vm_fault_t ret = VM_FAULT_NOPAGE;
>         unsigned long address = vmf->address;
>
> -       /*
> -        * Refuse to fault imported pages. This should be handled
> -        * (if at all) by redirecting mmap to the exporter.
> -        */
> -       if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
> -               return VM_FAULT_SIGBUS;
> -
>         if (bdev->driver->fault_reserve_notify) {
>                 struct dma_fence *moving = dma_fence_get(bo->moving);
>
> --
> 2.17.1
>
> _______________________________________________
> dri-devel mailing list
> dri-devel@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2020-09-28  5:29 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-09-25 14:55 [PATCH 1/5] drm/ttm: move SG flag check into ttm_bo_vm_reserve Christian König
2020-09-25 14:55 ` [PATCH 2/5] drm/radeon: stop using TTM's fault callback Christian König
2020-09-25 14:55 ` [PATCH 3/5] drm/amdgpu: " Christian König
2020-09-25 19:08   ` Nirmoy
2020-09-25 14:55 ` [PATCH 4/5] drm/nouveau: " Christian König
2020-09-25 19:35   ` Nirmoy
2020-09-25 14:55 ` [PATCH 5/5] drm/ttm: remove " Christian König
2020-09-28  5:28 ` [PATCH 1/5] drm/ttm: move SG flag check into ttm_bo_vm_reserve Dave Airlie

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).