From: "Thomas Hellström" <thomas.hellstrom@linux.intel.com>
To: intel-xe@lists.freedesktop.org
Cc: "Matthew Brost" <matthew.brost@intel.com>,
	"Thomas Hellström" <thomas.hellstrom@linux.intel.com>,
	dri-devel@lists.freedesktop.org
Subject: [PATCH 2/4] drm/xe/vm: Implement userptr page pinning
Date: Fri, 18 Aug 2023 17:08:43 +0200
Message-ID: <20230818150845.96679-3-thomas.hellstrom@linux.intel.com>
In-Reply-To: <20230818150845.96679-1-thomas.hellstrom@linux.intel.com>

Implement pinning of userptr pages between VM_BIND and VM_UNBIND. Since
pinned pages are never invalidated by the MMU notifier, this helps avoid
long hangs on non-preemptible workloads. Don't hook the functionality up
to userspace just yet.

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
---
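A condensed, self-contained sketch of the pin / unpin lifecycle this
patch wires into xe_vma_userptr_pin_pages() and xe_vma_destroy_late().
The sketch_* helpers are illustrative only and not part of the patch:

static int sketch_pin_userptr(unsigned long uaddr, unsigned long num_pages,
			      bool read_only, struct page ***out_pages)
{
	struct page **pages;
	unsigned long pinned = 0;
	int ret;

	pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/*
	 * pin_user_pages_fast() takes FOLL_PIN references that keep the
	 * pages resident until they are explicitly unpinned.
	 */
	while (pinned < num_pages) {
		ret = pin_user_pages_fast(uaddr + pinned * PAGE_SIZE,
					  num_pages - pinned,
					  read_only ? 0 : FOLL_WRITE,
					  &pages[pinned]);
		if (ret < 0) {
			unpin_user_pages(pages, pinned);
			kvfree(pages);
			return ret;
		}
		pinned += ret;
	}

	*out_pages = pages;
	return 0;
}

static void sketch_unpin_userptr(struct page **pages, unsigned long num_pages,
				 bool read_only)
{
	/* Mark the pages dirty on unpin unless the mapping was read-only. */
	unpin_user_pages_dirty_lock(pages, num_pages, !read_only);
	kvfree(pages);
}

Unlike the get_user_pages_fast() path, which drops its page references
once the sg table is built and relies on the MMU notifier to stop GPU
access on invalidation, the pinned variant holds its FOLL_PIN references
until unbind or vma destruction, so the notifier can return early.
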
 drivers/gpu/drm/xe/xe_vm.c       | 76 ++++++++++++++++++++++----------
 drivers/gpu/drm/xe/xe_vm.h       |  9 ++++
 drivers/gpu/drm/xe/xe_vm_types.h | 12 +++++
 3 files changed, 74 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 8bf7f62e6548..ecbcad696b60 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -74,10 +74,6 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma)
 	if (notifier_seq == vma->userptr.notifier_seq)
 		return 0;
 
-	pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
-	if (!pages)
-		return -ENOMEM;
-
 	if (vma->userptr.sg) {
 		dma_unmap_sgtable(xe->drm.dev,
 				  vma->userptr.sg,
@@ -87,6 +83,17 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma)
 		vma->userptr.sg = NULL;
 	}
 
+	if (vma->userptr.pinned_pages) {
+		unpin_user_pages_dirty_lock(vma->userptr.pinned_pages,
+					    vma->userptr.num_pinned,
+					    !read_only);
+		pages = vma->userptr.pinned_pages;
+	} else {
+		pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
+		if (!pages)
+			return -ENOMEM;
+	}
+
 	pinned = ret = 0;
 	if (in_kthread) {
 		if (!mmget_not_zero(vma->userptr.notifier.mm)) {
@@ -97,11 +104,18 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma)
 	}
 
 	while (pinned < num_pages) {
-		ret = get_user_pages_fast(xe_vma_userptr(vma) +
-					  pinned * PAGE_SIZE,
-					  num_pages - pinned,
-					  read_only ? 0 : FOLL_WRITE,
-					  &pages[pinned]);
+		if (xe_vma_is_pinned(vma))
+			ret = pin_user_pages_fast(xe_vma_userptr(vma) +
+						  pinned * PAGE_SIZE,
+						  num_pages - pinned,
+						  read_only ? 0 : FOLL_WRITE,
+						  &pages[pinned]);
+		else
+			ret = get_user_pages_fast(xe_vma_userptr(vma) +
+						  pinned * PAGE_SIZE,
+						  num_pages - pinned,
+						  read_only ? 0 : FOLL_WRITE,
+						  &pages[pinned]);
 		if (ret < 0) {
 			if (in_kthread)
 				ret = 0;
@@ -137,19 +151,24 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma)
 	if (ret)
 		goto out_free_sg;
 
-	for (i = 0; i < pinned; ++i) {
-		if (!read_only) {
-			lock_page(pages[i]);
-			set_page_dirty(pages[i]);
-			unlock_page(pages[i]);
+	if (!xe_vma_is_pinned(vma)) {
+		for (i = 0; i < pinned; ++i) {
+			if (!read_only) {
+				lock_page(pages[i]);
+				set_page_dirty(pages[i]);
+				unlock_page(pages[i]);
+			}
+
+			mark_page_accessed(pages[i]);
 		}
 
-		mark_page_accessed(pages[i]);
+		release_pages(pages, pinned);
+		kvfree(pages);
+	} else {
+		vma->userptr.pinned_pages = pages;
+		vma->userptr.num_pinned = pinned;
 	}
 
-	release_pages(pages, pinned);
-	kvfree(pages);
-
 	vma->userptr.notifier_seq = notifier_seq;
 	if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
 		goto retry;
@@ -160,9 +179,14 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma)
 	sg_free_table(vma->userptr.sg);
 	vma->userptr.sg = NULL;
 out_release_pages:
-	release_pages(pages, pinned);
+	if (!xe_vma_is_pinned(vma))
+		release_pages(pages, pinned);
+	else
+		unpin_user_pages(pages, pinned);
+	vma->userptr.num_pinned = 0;
 mm_closed:
 	kvfree(pages);
+	vma->userptr.pinned_pages = NULL;
 	return ret;
 }
 
@@ -721,7 +745,7 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
 	mmu_interval_set_seq(mni, cur_seq);
 
 	/* No need to stop gpu access if the userptr is not yet bound. */
-	if (!vma->userptr.initial_bind) {
+	if (xe_vma_is_pinned(vma) || !vma->userptr.initial_bind) {
 		up_write(&vm->userptr.notifier_lock);
 		return true;
 	}
@@ -976,10 +1000,16 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
 			vma->userptr.sg = NULL;
 		}
 
+		if (vma->userptr.pinned_pages) {
+			unpin_user_pages_dirty_lock(vma->userptr.pinned_pages,
+						    vma->userptr.num_pinned,
+						    !read_only);
+			kvfree(vma->userptr.pinned_pages);
+		}
+
 		/*
-		 * Since userptr pages are not pinned, we can't remove
-		 * the notifer until we're sure the GPU is not accessing
-		 * them anymore
+		 * We can't remove the notifier until we're sure the GPU is
+		 * not accessing the pages anymore
 		 */
 		mmu_interval_notifier_remove(&vma->userptr.notifier);
 		xe_vm_put(vm);
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 6de6e3edb24a..913544d7d995 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -139,6 +139,15 @@ static inline bool xe_vma_is_userptr(struct xe_vma *vma)
 	return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma);
 }
 
+/**
+ * xe_vma_is_pinned() - Whether the user has requested the backing store
+ * of this vma to be pinned.
+ */
+static inline bool xe_vma_is_pinned(struct xe_vma *vma)
+{
+	return xe_vma_is_userptr(vma) && (vma->gpuva.flags & XE_VMA_PINNED);
+}
+
 #define xe_vm_assert_held(vm) dma_resv_assert_held(&(vm)->resv)
 
 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile);
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 3681a5ff588b..9b90e649cd69 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -33,6 +33,8 @@ struct xe_vm;
 #define XE_VMA_PTE_4K		(DRM_GPUVA_USERBITS << 5)
 #define XE_VMA_PTE_2M		(DRM_GPUVA_USERBITS << 6)
 #define XE_VMA_PTE_1G		(DRM_GPUVA_USERBITS << 7)
+/* User requested backing store to be pinned */
+#define XE_VMA_PINNED		(DRM_GPUVA_USERBITS << 8)
 
 /** struct xe_userptr - User pointer */
 struct xe_userptr {
@@ -54,6 +56,16 @@ struct xe_userptr {
 	 * read: vm->userptr.notifier_lock in write mode or vm->resv held.
 	 */
 	bool initial_bind;
+	/**
+	 * @pinned_pages: Array of pinned pages if xe_vma_is_pinned(),
+	 * NULL otherwise. Protected by the vm lock.
+	 */
+	struct page **pinned_pages;
+	/**
+	 * @num_pinned: Number of pinned pages in @pinned_pages.
+	 * Protected by the vm lock.
+	 */
+	unsigned long num_pinned;
 #if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
 	u32 divisor;
 #endif
-- 
2.41.0


Thread overview: 29+ messages
2023-08-18 15:08 [PATCH 0/4] drm/xe: Support optional pinning of userptr pages Thomas Hellström
2023-08-18 15:08 ` [PATCH 1/4] drm/xe/vm: Use onion unwind for xe_vma_userptr_pin_pages() Thomas Hellström
2023-08-18 18:15   ` Matthew Brost
2023-08-18 15:08 ` [PATCH 2/4] drm/xe/vm: Implement userptr page pinning Thomas Hellström [this message]
2023-08-20  4:06   ` Matthew Brost
2023-08-22  8:23     ` Thomas Hellström
2023-08-18 15:08 ` [PATCH 3/4] drm/xe/vm: Perform accounting of userptr pinned pages Thomas Hellström
2023-08-20  3:43   ` Matthew Brost
2023-08-22  8:10     ` Thomas Hellström
2023-08-18 15:08 ` [PATCH 4/4] drm/xe/uapi: Support pinning of userptr vmas Thomas Hellström
2023-08-20  3:54   ` Matthew Brost
2023-08-22  8:25     ` Thomas Hellström
2023-08-18 15:12 ` [Intel-xe] ✓ CI.Patch_applied: success for drm/xe: Support optional pinning of userptr pages Patchwork
2023-08-18 15:12 ` [Intel-xe] ✗ CI.checkpatch: warning " Patchwork
2023-08-18 15:14 ` [Intel-xe] ✓ CI.KUnit: success " Patchwork
2023-08-18 15:17 ` [Intel-xe] ✓ CI.Build: " Patchwork
2023-08-18 15:18 ` [Intel-xe] ✗ CI.Hooks: failure " Patchwork
