From: "Christian König" <deathsimple-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
Subject: [PATCH 3/3] drm/amdgpu: fix PRT teardown on VM fini v2
Date: Wed, 15 Feb 2017 15:57:21 +0100
Message-ID: <1487170641-1927-3-git-send-email-deathsimple@vodafone.de>
In-Reply-To: <1487170641-1927-1-git-send-email-deathsimple@vodafone.de>

From: Christian König <christian.koenig@amd.com>

Make sure PRT support is only torn down once all users of the VM are
actually done with their mappings, not immediately when the VM is destroyed.

v2: new approach, fix this by registering a fence callback for all users
    of the VM on teardown

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 106 +++++++++++++++++++++++++--------
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h |   2 +-
 2 files changed, 82 insertions(+), 26 deletions(-)

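Note: the core mechanism, reduced to a minimal sketch. Only the fence_*
calls are the real (pre dma_fence rename) kernel API, the same ones used in
the patch below; feature_put() and struct prt_user_cb are made-up names for
illustration. The idea is to take one reference per fence and drop it from
the fence callback, running the callback by hand when there is no fence or
it has already signaled, and falling back to a blocking wait when we are
out of memory:

#include <linux/kernel.h>
#include <linux/fence.h>
#include <linux/slab.h>

/* Made-up helper for illustration: drops one reference on the feature
 * (e.g. the PRT user count) that must stay enabled until all fences signal.
 */
void feature_put(void);

struct prt_user_cb {
	struct fence_cb cb;
};

static void feature_put_cb(struct fence *fence, struct fence_cb *_cb)
{
	struct prt_user_cb *cb = container_of(_cb, struct prt_user_cb, cb);

	/* The fence this user was waiting for has signaled, drop the reference. */
	feature_put();
	kfree(cb);
}

static void feature_put_when_done(struct fence *fence)
{
	struct prt_user_cb *cb = kmalloc(sizeof(*cb), GFP_KERNEL);

	if (!cb) {
		/* OOM fallback: block for the fence, then drop the reference. */
		if (fence)
			fence_wait(fence, false);
		feature_put();
		return;
	}

	/* fence_add_callback() returns non-zero if the fence has already
	 * signaled; in that case run the callback directly so the reference
	 * is still dropped exactly once.
	 */
	if (!fence || fence_add_callback(fence, &cb->cb, feature_put_cb))
		feature_put_cb(fence, &cb->cb);
}

The teardown in amdgpu_vm_prt_fini() below then simply walks every fence
protecting the VM page directory (exclusive and shared) and hands each one
to this pattern, so the PRT state only goes away once the last of them has
signaled.
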
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index c11b6b6..d3437ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1188,22 +1188,31 @@ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
 	bool enable;
 
 	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
-	enable = !!atomic_read(&adev->vm_manager.num_prt_mappings);
+	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
 	adev->gart.gart_funcs->set_prt(adev, enable);
 	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
 }
 
 /**
+ * amdgpu_vm_prt_get - add a PRT user
+ */
+static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
+{
+	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
+		amdgpu_vm_update_prt_state(adev);
+}
+
+/**
  * amdgpu_vm_prt_put - drop a PRT user
  */
 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
 {
-	if (atomic_dec_return(&adev->vm_manager.num_prt_mappings) == 0)
+	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
 		amdgpu_vm_update_prt_state(adev);
 }
 
 /**
- * amdgpu_vm_prt - callback for updating the PRT status
+ * amdgpu_vm_prt_cb - callback for updating the PRT status
  */
 static void amdgpu_vm_prt_cb(struct fence *fence, struct fence_cb *_cb)
 {
@@ -1214,6 +1223,29 @@ static void amdgpu_vm_prt_cb(struct fence *fence, struct fence_cb *_cb)
 }
 
 /**
+ * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
+ */
+static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
+				 struct fence *fence)
+{
+	struct amdgpu_prt_cb *cb = kmalloc(sizeof(struct amdgpu_prt_cb),
+					   GFP_KERNEL);
+
+	if (!cb) {
+		/* Last resort when we are OOM */
+		if (fence)
+			fence_wait(fence, false);
+
+		amdgpu_vm_prt_put(adev);
+	} else {
+		cb->adev = adev;
+		if (!fence || fence_add_callback(fence, &cb->cb,
+						 amdgpu_vm_prt_cb))
+			amdgpu_vm_prt_cb(fence, &cb->cb);
+	}
+}
+
+/**
  * amdgpu_vm_free_mapping - free a mapping
  *
  * @adev: amdgpu_device pointer
@@ -1228,24 +1260,47 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
 				   struct amdgpu_bo_va_mapping *mapping,
 				   struct fence *fence)
 {
-	if (mapping->flags & AMDGPU_PTE_PRT) {
-		struct amdgpu_prt_cb *cb = kmalloc(sizeof(struct amdgpu_prt_cb),
-						   GFP_KERNEL);
+	if (mapping->flags & AMDGPU_PTE_PRT)
+		amdgpu_vm_add_prt_cb(adev, fence);
+	kfree(mapping);
+}
 
-		if (!cb) {
-			/* Last resort when we are OOM */
-			if (fence)
-				fence_wait(fence, false);
+/**
+ * amdgpu_vm_prt_fini - finish all prt mappings
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ *
+ * Register a cleanup callback to disable PRT support after the VM dies.
+ */
+static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+{
+	struct reservation_object *resv = vm->page_directory->tbo.resv;
+	struct fence *excl, **shared;
+	unsigned i, shared_count;
+	int r;
 
-			amdgpu_vm_prt_put(cb->adev);
-		} else {
-			cb->adev = adev;
-			if (!fence || fence_add_callback(fence, &cb->cb,
-							 amdgpu_vm_prt_cb))
-				amdgpu_vm_prt_cb(fence, &cb->cb);
-		}
+	r = reservation_object_get_fences_rcu(resv, &excl,
+					      &shared_count, &shared);
+	if (r) {
+		/* Not enough memory to grab the fence list; as a last resort,
+		 * block until all the fences have completed.
+		 */
+		reservation_object_wait_timeout_rcu(resv, true, false,
+						    MAX_SCHEDULE_TIMEOUT);
+		return;
 	}
-	kfree(mapping);
+
+	/* Add a callback for each fence in the reservation object */
+	amdgpu_vm_prt_get(adev);
+	amdgpu_vm_add_prt_cb(adev, excl);
+
+	for (i = 0; i < shared_count; ++i) {
+		amdgpu_vm_prt_get(adev);
+		amdgpu_vm_add_prt_cb(adev, shared[i]);
+	}
+
+	kfree(shared);
 }
 
 /**
@@ -1395,8 +1450,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 		if (!adev->gart.gart_funcs->set_prt)
 			return -EINVAL;
 
-		if (atomic_inc_return(&adev->vm_manager.num_prt_mappings) == 1)
-			amdgpu_vm_update_prt_state(adev);
+		amdgpu_vm_prt_get(adev);
 	}
 
 	/* make sure object fit at this offset */
@@ -1699,6 +1753,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
 	struct amdgpu_bo_va_mapping *mapping, *tmp;
+	bool prt_fini_called = false;
 	int i;
 
 	amd_sched_entity_fini(vm->entity.sched, &vm->entity);
@@ -1712,13 +1767,14 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		kfree(mapping);
 	}
 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
-		if (mapping->flags & AMDGPU_PTE_PRT)
-			continue;
+		if (mapping->flags & AMDGPU_PTE_PRT && !prt_fini_called) {
+			amdgpu_vm_prt_fini(adev, vm);
+			prt_fini_called = true;
+		}
 
 		list_del(&mapping->list);
-		kfree(mapping);
+		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
 	}
-	amdgpu_vm_clear_freed(adev, vm);
 
 	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) {
 		struct amdgpu_bo *pt = vm->page_tables[i].bo;
@@ -1764,7 +1820,7 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
 	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
 	atomic64_set(&adev->vm_manager.client_counter, 0);
 	spin_lock_init(&adev->vm_manager.prt_lock);
-	atomic_set(&adev->vm_manager.num_prt_mappings, 0);
+	atomic_set(&adev->vm_manager.num_prt_users, 0);
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 51fa12f..7fccd486 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -164,7 +164,7 @@ struct amdgpu_vm_manager {
 
 	/* partial resident texture handling */
 	spinlock_t				prt_lock;
-	atomic_t				num_prt_mappings;
+	atomic_t				num_prt_users;
 };
 
 void amdgpu_vm_manager_init(struct amdgpu_device *adev);
-- 
2.5.0

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Thread overview: 7+ messages

2017-02-15 14:57 [PATCH 1/3] drm/amdgpu: minor PRT turnoff fix Christian König
2017-02-15 14:57 ` [PATCH 2/3] drm/amdgpu: add OOM fallback on PRT teardown Christian König
2017-02-15 14:57 ` [PATCH 3/3] drm/amdgpu: fix PRT teardown on VM fini v2 Christian König [this message]
2017-02-15 15:59 ` [PATCH 1/3] drm/amdgpu: minor PRT turnoff fix Christian König
2017-02-15 17:28   ` Nicolai Hähnle
2017-02-16 10:01     ` Christian König
2017-02-16 10:12       ` Nicolai Hähnle
