From: "Thomas Hellström" <thomas.hellstrom@linux.intel.com>
To: intel-xe@lists.freedesktop.org
Cc: "Matthew Brost" <matthew.brost@intel.com>,
	"Thomas Hellström" <thomas.hellstrom@linux.intel.com>,
	dri-devel@lists.freedesktop.org
Subject: [PATCH v2 3/4] drm/xe/vm: Perform accounting of userptr pinned pages
Date: Tue, 22 Aug 2023 18:21:35 +0200	[thread overview]
Message-ID: <20230822162136.25895-4-thomas.hellstrom@linux.intel.com> (raw)
In-Reply-To: <20230822162136.25895-1-thomas.hellstrom@linux.intel.com>

Account these pages against RLIMIT_MEMLOCK in the same way RDMA does,
with CAP_IPC_LOCK bypassing the limit.
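
With this accounting in place, a process without CAP_IPC_LOCK needs enough
RLIMIT_MEMLOCK headroom to cover the userptr VMAs it asks to pin. As a
rough, illustrative userspace sketch (not part of this patch), the current
limit can be inspected with getrlimit() before submitting pinned userptr
bindings:

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl;

	/*
	 * RLIMIT_MEMLOCK is expressed in bytes; the kernel-side check
	 * converts it to pages (>> PAGE_SHIFT) before comparing against
	 * the mm's pinned_vm counter.
	 */
	if (getrlimit(RLIMIT_MEMLOCK, &rl) != 0) {
		perror("getrlimit");
		return 1;
	}

	printf("RLIMIT_MEMLOCK soft=%llu hard=%llu bytes\n",
	       (unsigned long long)rl.rlim_cur,
	       (unsigned long long)rl.rlim_max);
	return 0;
}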

v2:
- Change the naming of the accounting functions and WARN if we try
  to account anything but userptr pages. (Matthew Brost)

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
---
 drivers/gpu/drm/xe/xe_vm.c | 52 ++++++++++++++++++++++++++++++++++++--
 1 file changed, 50 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 037ac42f74a5..a645cfa131ca 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -34,6 +34,41 @@
 
 #define TEST_VM_ASYNC_OPS_ERROR
 
+/*
+ * Perform userptr PIN accounting against RLIMIT_MEMLOCK for now, similarly
+ * to how RDMA does this.
+ */
+static int
+xe_vma_userptr_mlock_reserve(struct xe_vma *vma, unsigned long num_pages)
+{
+	unsigned long lock_limit, new_pinned;
+	struct mm_struct *mm = vma->userptr.notifier.mm;
+
+	/* TODO: Convert to xe_assert() */
+	XE_WARN_ON(!xe_vma_is_userptr(vma));
+
+	if (!can_do_mlock())
+		return -EPERM;
+
+	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+	new_pinned = atomic64_add_return(num_pages, &mm->pinned_vm);
+	if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
+		atomic64_sub(num_pages, &mm->pinned_vm);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void
+xe_vma_userptr_mlock_release(struct xe_vma *vma, unsigned long num_pages)
+{
+	/* TODO: Convert to xe_assert() */
+	XE_WARN_ON(!xe_vma_is_userptr(vma));
+
+	atomic64_sub(num_pages, &vma->userptr.notifier.mm->pinned_vm);
+}
+
 /**
  * xe_vma_userptr_check_repin() - Advisory check for repin needed
  * @vma: The userptr vma
@@ -90,9 +125,17 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma)
 					    !read_only);
 		pages = vma->userptr.pinned_pages;
 	} else {
+		if (xe_vma_is_pinned(vma)) {
+			ret = xe_vma_userptr_mlock_reserve(vma, num_pages);
+			if (ret)
+				return ret;
+		}
+
 		pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
-		if (!pages)
-			return -ENOMEM;
+		if (!pages) {
+			ret = -ENOMEM;
+			goto out_account;
+		}
 	}
 
 	pinned = ret = 0;
@@ -188,6 +231,9 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma)
 mm_closed:
 	kvfree(pages);
 	vma->userptr.pinned_pages = NULL;
+out_account:
+	if (xe_vma_is_pinned(vma))
+		xe_vma_userptr_mlock_release(vma, num_pages);
 	return ret;
 }
 
@@ -1010,6 +1056,8 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
 			unpin_user_pages_dirty_lock(vma->userptr.pinned_pages,
 						    vma->userptr.num_pinned,
 						    !read_only);
+			xe_vma_userptr_mlock_release(vma, xe_vma_size(vma) >>
+						     PAGE_SHIFT);
 			kvfree(vma->userptr.pinned_pages);
 		}
 
-- 
2.41.0


Thread overview: 21+ messages

2023-08-22 16:21 [PATCH v2 0/4] drm/xe: Support optional pinning of userptr pages Thomas Hellström
2023-08-22 16:21 ` [PATCH v2 1/4] drm/xe/vm: Use onion unwind for xe_vma_userptr_pin_pages() Thomas Hellström
2023-08-22 16:21 ` [PATCH v2 2/4] drm/xe/vm: Implement userptr page pinning Thomas Hellström
2023-08-22 23:58   ` Matthew Brost
2023-08-22 16:21 ` [PATCH v2 3/4] drm/xe/vm: Perform accounting of userptr pinned pages Thomas Hellström [this message]
2023-08-22 16:21 ` [PATCH v2 4/4] drm/xe/uapi: Support pinning of userptr vmas Thomas Hellström
2023-08-22 16:24 ` [Intel-xe] ✓ CI.Patch_applied: success for drm/xe: Support optional pinning of userptr pages (rev2) Patchwork
2023-08-22 16:24 ` [Intel-xe] ✗ CI.checkpatch: warning " Patchwork
2023-08-22 16:25 ` [Intel-xe] ✓ CI.KUnit: success " Patchwork
2023-08-22 16:29 ` [Intel-xe] ✓ CI.Build: " Patchwork
2023-08-22 16:30 ` [Intel-xe] ✗ CI.Hooks: failure " Patchwork
2023-09-08  8:44 ` [PATCH v2 0/4] drm/xe: Support optional pinning of userptr pages Joonas Lahtinen
2023-09-15 18:31   ` Thomas Hellström
