From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Subject: [Intel-gfx] [PATCH] drm/i915/gem: Store mmap_offsets in an rbtree rather than a plain list
Date: Fri, 17 Jan 2020 22:00:38 +0000
Message-ID: <20200117220038.3409820-1-chris@chris-wilson.co.uk>

Currently we create a new mmap_offset for every call to
mmap_offset_ioctl. This exposes us to an abusive client that may
simply create new mmap_offsets ad infinitum, eventually exhausting
physical memory and the virtual address space. In addition to the
exhaustion, a very long linear list of mmap_offsets causes other
clients using the object to incur long list walks; such long lists
can also build up simply from many clients each creating their own
mmap_offset for the object.

Switching obj->mmo.offsets to an rbtree lets us binary search for a
duplicate mmap_offset (preventing the exhaustion caused by a trivial
malicious client) and also quickly find the matching mmap_offsets on
object close.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
---
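As an illustration of the behaviour being fixed, below is a rough
userspace sketch -- not part of the patch -- of a client that asks for
a fresh mmap offset for the same object in a loop. The uapi names used
(struct drm_i915_gem_mmap_offset, DRM_IOCTL_I915_GEM_MMAP_OFFSET,
I915_MMAP_OFFSET_WC) are assumed from the contemporaneous i915 uapi
headers. With the list-based store every iteration attaches a new
i915_mmap_offset to the object; with the rbtree, lookup_mmo() finds
the existing node keyed by (file, mmap_type) and hands the same offset
back.

/*
 * mmap-offset-loop.c: hypothetical illustration only, not part of this
 * patch.  Build against libdrm and recent i915 uapi headers, e.g.
 *   gcc mmap-offset-loop.c $(pkg-config --cflags --libs libdrm)
 */
#include <fcntl.h>
#include <stdio.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR);
	struct drm_i915_gem_create create = { .size = 4096 };
	int i;

	if (fd < 0 || drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create))
		return 1;

	/*
	 * Request an mmap offset for the same (object, mmap type) pair
	 * over and over.  Previously each call attached a new
	 * i915_mmap_offset to the object; after this patch the duplicate
	 * is found by lookup_mmo() and the original offset is returned.
	 */
	for (i = 0; i < 1024; i++) {
		struct drm_i915_gem_mmap_offset arg = {
			.handle = create.handle,
			.flags = I915_MMAP_OFFSET_WC,
		};

		if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg))
			break;

		printf("offset[%d] = 0x%llx\n", i,
		       (unsigned long long)arg.offset);
	}

	return 0;
}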
 drivers/gpu/drm/i915/gem/i915_gem_mman.c      | 93 +++++++++++++++++--
 drivers/gpu/drm/i915/gem/i915_gem_object.c    | 46 +++++++--
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |  4 +-
 3 files changed, 124 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index b9fdac2f9003..2908a205faa9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -455,10 +455,11 @@ static void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
 
 void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
 {
-	struct i915_mmap_offset *mmo;
+	struct i915_mmap_offset *mmo, *mn;
 
 	spin_lock(&obj->mmo.lock);
-	list_for_each_entry(mmo, &obj->mmo.offsets, offset) {
+	rbtree_postorder_for_each_entry_safe(mmo, mn,
+					     &obj->mmo.offsets, offset) {
 		/*
 		 * vma_node_unmap for GTT mmaps handled already in
 		 * __i915_gem_object_release_mmap_gtt
@@ -487,6 +488,84 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
 	i915_gem_object_release_mmap_offset(obj);
 }
 
+static struct i915_mmap_offset *
+lookup_mmo(struct drm_i915_gem_object *obj,
+	   enum i915_mmap_type mmap_type,
+	   struct drm_file *file)
+{
+	struct i915_mmap_offset *match = NULL;
+	struct rb_node *rb;
+
+	spin_lock(&obj->mmo.lock);
+	rb = obj->mmo.offsets.rb_node;
+	while (rb) {
+		struct i915_mmap_offset *mmo =
+			rb_entry(rb, typeof(*mmo), offset);
+
+		if (mmo->file < file) {
+			rb = rb->rb_right;
+			continue;
+		}
+
+		if (mmo->file > file) {
+			rb = rb->rb_left;
+			continue;
+		}
+
+		if (mmo->mmap_type < mmap_type) {
+			rb = rb->rb_right;
+			continue;
+		}
+
+		if (mmo->mmap_type > mmap_type) {
+			rb = rb->rb_left;
+			continue;
+		}
+
+		match = mmo;
+		break;
+	}
+	spin_unlock(&obj->mmo.lock);
+
+	return match;
+}
+
+static struct i915_mmap_offset *
+insert_mmo(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo)
+{
+	struct rb_node *rb, **p;
+
+	spin_lock(&obj->mmo.lock);
+	rb = NULL;
+	p = &obj->mmo.offsets.rb_node;
+	while (*p) {
+		struct i915_mmap_offset *pos;
+
+		rb = *p;
+		pos = rb_entry(rb, typeof(*pos), offset);
+
+		if (pos->file < mmo->file) {
+			p = &rb->rb_right;
+			continue;
+		}
+
+		if (pos->file > mmo->file) {
+			p = &rb->rb_left;
+			continue;
+		}
+
+		if (pos->mmap_type < mmo->mmap_type)
+			p = &rb->rb_right;
+		else
+			p = &rb->rb_left;
+	}
+	rb_link_node(&mmo->offset, rb, p);
+	rb_insert_color(&mmo->offset, &obj->mmo.offsets);
+	spin_unlock(&obj->mmo.lock);
+
+	return mmo;
+}
+
 static struct i915_mmap_offset *
 mmap_offset_attach(struct drm_i915_gem_object *obj,
 		   enum i915_mmap_type mmap_type,
@@ -496,6 +575,10 @@ mmap_offset_attach(struct drm_i915_gem_object *obj,
 	struct i915_mmap_offset *mmo;
 	int err;
 
+	mmo = lookup_mmo(obj, mmap_type, file);
+	if (mmo)
+		return mmo;
+
 	mmo = kmalloc(sizeof(*mmo), GFP_KERNEL);
 	if (!mmo)
 		return ERR_PTR(-ENOMEM);
@@ -526,11 +609,7 @@ mmap_offset_attach(struct drm_i915_gem_object *obj,
 	if (file)
 		drm_vma_node_allow(&mmo->vma_node, file);
 
-	spin_lock(&obj->mmo.lock);
-	list_add(&mmo->offset, &obj->mmo.offsets);
-	spin_unlock(&obj->mmo.lock);
-
-	return mmo;
+	return insert_mmo(obj, mmo);
 
 err:
 	kfree(mmo);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 46bacc82ddc4..3cc9f83f0bae 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -63,7 +63,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 	INIT_LIST_HEAD(&obj->lut_list);
 
 	spin_lock_init(&obj->mmo.lock);
-	INIT_LIST_HEAD(&obj->mmo.offsets);
+	obj->mmo.offsets = RB_ROOT;
 
 	init_rcu_head(&obj->rcu);
 
@@ -96,6 +96,37 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
 		!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
 }
 
+static struct i915_mmap_offset *
+first_mmo(struct drm_i915_gem_object *obj, struct drm_file *file)
+{
+	struct i915_mmap_offset *first = NULL;
+	struct rb_node *pos;
+
+	pos = obj->mmo.offsets.rb_node;
+	while (pos) {
+		struct i915_mmap_offset *mmo =
+			rb_entry(pos, typeof(*mmo), offset);
+
+		if (mmo->file < file) {
+			pos = pos->rb_right;
+			continue;
+		}
+
+		pos = pos->rb_left;
+		if (mmo->file == file)
+			first = mmo;
+	}
+
+	return first;
+}
+
+static struct i915_mmap_offset *
+next_mmo(struct i915_mmap_offset *pos, struct drm_file *file)
+{
+	pos = rb_entry_safe(rb_next(&pos->offset), typeof(*pos), offset);
+	return pos && pos->file == file ? pos : NULL;
+}
+
 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
 {
 	struct drm_i915_gem_object *obj = to_intel_bo(gem);
@@ -117,14 +148,8 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
 	i915_gem_object_unlock(obj);
 
 	spin_lock(&obj->mmo.lock);
-	list_for_each_entry(mmo, &obj->mmo.offsets, offset) {
-		if (mmo->file != file)
-			continue;
-
-		spin_unlock(&obj->mmo.lock);
+	for (mmo = first_mmo(obj, file); mmo; mmo = next_mmo(mmo, file))
 		drm_vma_node_revoke(&mmo->vma_node, file);
-		spin_lock(&obj->mmo.lock);
-	}
 	spin_unlock(&obj->mmo.lock);
 
 	list_for_each_entry_safe(lut, ln, &close, obj_link) {
@@ -203,12 +228,13 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 
 		i915_gem_object_release_mmap(obj);
 
-		list_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset) {
+		rbtree_postorder_for_each_entry_safe(mmo, mn,
+						     &obj->mmo.offsets,
+						     offset) {
 			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
 					      &mmo->vma_node);
 			kfree(mmo);
 		}
-		INIT_LIST_HEAD(&obj->mmo.offsets);
 
 		GEM_BUG_ON(atomic_read(&obj->bind_count));
 		GEM_BUG_ON(obj->userfault_count);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 88e268633fdc..124fd2fb1bec 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -77,7 +77,7 @@ struct i915_mmap_offset {
 	struct drm_file *file;
 	enum i915_mmap_type mmap_type;
 
-	struct list_head offset;
+	struct rb_node offset;
 };
 
 struct drm_i915_gem_object {
@@ -137,7 +137,7 @@ struct drm_i915_gem_object {
 
 	struct {
 		spinlock_t lock; /* Protects access to mmo offsets */
-		struct list_head offsets;
+		struct rb_root offsets;
 	} mmo;
 
 	I915_SELFTEST_DECLARE(struct list_head st_link);
-- 
2.25.0
