From: Robert Beckett <bob.beckett@collabora.com>
To: intel-gfx@lists.freedesktop.org,
	Jani Nikula <jani.nikula@linux.intel.com>,
	Joonas Lahtinen <joonas.lahtinen@linux.intel.com>,
	Rodrigo Vivi <rodrigo.vivi@intel.com>,
	Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>,
	David Airlie <airlied@linux.ie>, Daniel Vetter <daniel@ffwll.ch>
Cc: Robert Beckett <bob.beckett@collabora.com>,
	linux-kernel@vger.kernel.org, dri-devel@lists.freedesktop.org
Subject: [RFC PATCH 4/7] drm/i915: stolen memory use ttm backend
Date: Tue, 15 Mar 2022 18:04:41 +0000	[thread overview]
Message-ID: <20220315180444.3327283-5-bob.beckett@collabora.com> (raw)
In-Reply-To: <20220315180444.3327283-1-bob.beckett@collabora.com>
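
Replace the drm_mm based stolen allocator with the TTM backend.
i915_gem_init_stolen() now only sets up the TTM region manager via
intel_region_ttm_init(), and the stolen-specific object ops are dropped
in favour of __i915_gem_ttm_object_init() and
i915_gem_ttm_object_init_in_place() (the latter is exported for use as
the region's init_object_in_place hook). Preallocated and range
restricted stolen objects go through
i915_gem_object_create_region_in_place(), stolen offsets are read from
the TTM resource, and i915_gem_object_is_stolen() now keys off the
memory region type. Stolen objects remain physically contiguous, so
I915_BO_ALLOC_CONTIGUOUS is requested explicitly at creation time.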

Signed-off-by: Robert Beckett <bob.beckett@collabora.com>
---
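Note (kept out of the commit message): the i915_gem_ttm.c hunk below is
clipped by the diff context right after the placement declaration. For
readers less familiar with TTM, a [start, end) restriction is normally
expressed in page-frame units through ttm_place.fpfn/lpfn. The following
is only a rough sketch of that idea, not the literal body of
i915_gem_ttm_object_init_in_place(); it assumes page-aligned start/end,
the usual ttm_place/ttm_placement fields, and the existing
intel_region_to_ttm_type() helper:

	struct ttm_place place = {
		/* first allowed page frame */
		.fpfn = start >> PAGE_SHIFT,
		/* exclusive upper bound in page frames; 0 means no limit */
		.lpfn = end >> PAGE_SHIFT,
		.mem_type = intel_region_to_ttm_type(mem),
		/* stolen allocations stay physically contiguous */
		.flags = TTM_PL_FLAG_CONTIGUOUS,
	};
	struct ttm_placement placement = {
		.num_placement = 1,
		.placement = &place,
		.num_busy_placement = 1,
		.busy_placement = &place,
	};

With a placement like this the TTM range manager ends up doing the same
bounded drm_mm search that the removed
i915_gem_stolen_insert_node_in_range() did by hand.
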
 drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 385 ++-------------------
 drivers/gpu/drm/i915/gem/i915_gem_stolen.h |   9 -
 drivers/gpu/drm/i915/gem/i915_gem_ttm.c    |  14 +-
 drivers/gpu/drm/i915/gem/i915_gem_ttm.h    |   7 +
 4 files changed, 40 insertions(+), 375 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 265133cb2a47..e58f9902ef47 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -4,19 +4,22 @@
  * Copyright © 2008-2012 Intel Corporation
  */
 
+#include "drm/ttm/ttm_placement.h"
+#include "gem/i915_gem_object_types.h"
 #include <linux/errno.h>
 #include <linux/mutex.h>
 
-#include <drm/drm_mm.h>
 #include <drm/i915_drm.h>
 
 #include "gem/i915_gem_lmem.h"
 #include "gem/i915_gem_region.h"
+#include "gem/i915_gem_ttm.h"
 #include "i915_drv.h"
 #include "i915_gem_stolen.h"
 #include "i915_reg.h"
 #include "i915_vgpu.h"
 #include "intel_mchbar_regs.h"
+#include "intel_region_ttm.h"
 
 /*
  * The BIOS typically reserves some of the system's memory for the exclusive
@@ -30,46 +33,6 @@
  * for is a boon.
  */
 
-int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
-					 struct drm_mm_node *node, u64 size,
-					 unsigned alignment, u64 start, u64 end)
-{
-	int ret;
-
-	if (!drm_mm_initialized(&i915->mm.stolen))
-		return -ENODEV;
-
-	/* WaSkipStolenMemoryFirstPage:bdw+ */
-	if (GRAPHICS_VER(i915) >= 8 && start < 4096)
-		start = 4096;
-
-	mutex_lock(&i915->mm.stolen_lock);
-	ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node,
-					  size, alignment, 0,
-					  start, end, DRM_MM_INSERT_BEST);
-	mutex_unlock(&i915->mm.stolen_lock);
-
-	return ret;
-}
-
-int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
-				struct drm_mm_node *node, u64 size,
-				unsigned alignment)
-{
-	return i915_gem_stolen_insert_node_in_range(i915, node,
-						    size, alignment,
-						    I915_GEM_STOLEN_BIAS,
-						    U64_MAX);
-}
-
-void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
-				 struct drm_mm_node *node)
-{
-	mutex_lock(&i915->mm.stolen_lock);
-	drm_mm_remove_node(node);
-	mutex_unlock(&i915->mm.stolen_lock);
-}
-
 static int i915_adjust_stolen(struct drm_i915_private *i915,
 			      struct resource *dsm)
 {
@@ -170,14 +133,6 @@ static int i915_adjust_stolen(struct drm_i915_private *i915,
 	return 0;
 }
 
-static void i915_gem_cleanup_stolen(struct drm_i915_private *i915)
-{
-	if (!drm_mm_initialized(&i915->mm.stolen))
-		return;
-
-	drm_mm_takedown(&i915->mm.stolen);
-}
-
 static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
 				    struct intel_uncore *uncore,
 				    resource_size_t *base,
@@ -510,216 +465,15 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem)
 		return 0;
 
 	/* Basic memrange allocator for stolen space. */
-	drm_mm_init(&i915->mm.stolen, 0, i915->stolen_usable_size);
-
-	return 0;
-}
-
-static void dbg_poison(struct i915_ggtt *ggtt,
-		       dma_addr_t addr, resource_size_t size,
-		       u8 x)
-{
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
-	if (!drm_mm_node_allocated(&ggtt->error_capture))
-		return;
-
-	if (ggtt->vm.bind_async_flags & I915_VMA_GLOBAL_BIND)
-		return; /* beware stop_machine() inversion */
-
-	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
-
-	mutex_lock(&ggtt->error_mutex);
-	while (size) {
-		void __iomem *s;
-
-		ggtt->vm.insert_page(&ggtt->vm, addr,
-				     ggtt->error_capture.start,
-				     I915_CACHE_NONE, 0);
-		mb();
-
-		s = io_mapping_map_wc(&ggtt->iomap,
-				      ggtt->error_capture.start,
-				      PAGE_SIZE);
-		memset_io(s, x, PAGE_SIZE);
-		io_mapping_unmap(s);
-
-		addr += PAGE_SIZE;
-		size -= PAGE_SIZE;
-	}
-	mb();
-	ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);
-	mutex_unlock(&ggtt->error_mutex);
-#endif
-}
-
-static struct sg_table *
-i915_pages_create_for_stolen(struct drm_device *dev,
-			     resource_size_t offset, resource_size_t size)
-{
-	struct drm_i915_private *i915 = to_i915(dev);
-	struct sg_table *st;
-	struct scatterlist *sg;
-
-	GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm)));
-
-	/* We hide that we have no struct page backing our stolen object
-	 * by wrapping the contiguous physical allocation with a fake
-	 * dma mapping in a single scatterlist.
-	 */
-
-	st = kmalloc(sizeof(*st), GFP_KERNEL);
-	if (st == NULL)
-		return ERR_PTR(-ENOMEM);
-
-	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
-		kfree(st);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	sg = st->sgl;
-	sg->offset = 0;
-	sg->length = size;
-
-	sg_dma_address(sg) = (dma_addr_t)i915->dsm.start + offset;
-	sg_dma_len(sg) = size;
-
-	return st;
-}
-
-static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
-{
-	struct drm_i915_private *i915 = to_i915(obj->base.dev);
-	struct sg_table *pages =
-		i915_pages_create_for_stolen(obj->base.dev,
-					     obj->stolen->start,
-					     obj->stolen->size);
-	if (IS_ERR(pages))
-		return PTR_ERR(pages);
-
-	dbg_poison(to_gt(i915)->ggtt,
-		   sg_dma_address(pages->sgl),
-		   sg_dma_len(pages->sgl),
-		   POISON_INUSE);
-
-	__i915_gem_object_set_pages(obj, pages, obj->stolen->size);
-
-	return 0;
-}
-
-static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
-					     struct sg_table *pages)
-{
-	struct drm_i915_private *i915 = to_i915(obj->base.dev);
-	/* Should only be called from i915_gem_object_release_stolen() */
-
-	dbg_poison(to_gt(i915)->ggtt,
-		   sg_dma_address(pages->sgl),
-		   sg_dma_len(pages->sgl),
-		   POISON_FREE);
-
-	sg_free_table(pages);
-	kfree(pages);
-}
-
-static void
-i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
-{
-	struct drm_i915_private *i915 = to_i915(obj->base.dev);
-	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);
-
-	GEM_BUG_ON(!stolen);
-	i915_gem_stolen_remove_node(i915, stolen);
-	kfree(stolen);
-
-	i915_gem_object_release_memory_region(obj);
-}
-
-static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
-	.name = "i915_gem_object_stolen",
-	.get_pages = i915_gem_object_get_pages_stolen,
-	.put_pages = i915_gem_object_put_pages_stolen,
-	.release = i915_gem_object_release_stolen,
-};
-
-static int __i915_gem_object_create_stolen(struct intel_memory_region *mem,
-					   struct drm_i915_gem_object *obj,
-					   struct drm_mm_node *stolen)
-{
-	static struct lock_class_key lock_class;
-	unsigned int cache_level;
-	unsigned int flags;
-	int err;
-
-	/*
-	 * Stolen objects are always physically contiguous since we just
-	 * allocate one big block underneath using the drm_mm range allocator.
-	 */
-	flags = I915_BO_ALLOC_CONTIGUOUS;
-
-	drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
-	i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class, flags);
-
-	obj->stolen = stolen;
-	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
-	cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
-	i915_gem_object_set_cache_coherency(obj, cache_level);
-
-	if (WARN_ON(!i915_gem_object_trylock(obj, NULL)))
-		return -EBUSY;
-
-	i915_gem_object_init_memory_region(obj, mem);
-
-	err = i915_gem_object_pin_pages(obj);
-	if (err)
-		i915_gem_object_release_memory_region(obj);
-	i915_gem_object_unlock(obj);
-
-	return err;
-}
-
-static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
-					struct drm_i915_gem_object *obj,
-					resource_size_t size,
-					resource_size_t page_size,
-					unsigned int flags)
-{
-	struct drm_i915_private *i915 = mem->i915;
-	struct drm_mm_node *stolen;
-	int ret;
-
-	if (!drm_mm_initialized(&i915->mm.stolen))
-		return -ENODEV;
-
-	if (size == 0)
-		return -EINVAL;
-
-	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
-	if (!stolen)
-		return -ENOMEM;
-
-	ret = i915_gem_stolen_insert_node(i915, stolen, size,
-					  mem->min_page_size);
-	if (ret)
-		goto err_free;
-
-	ret = __i915_gem_object_create_stolen(mem, obj, stolen);
-	if (ret)
-		goto err_remove;
-
-	return 0;
-
-err_remove:
-	i915_gem_stolen_remove_node(i915, stolen);
-err_free:
-	kfree(stolen);
-	return ret;
+	return intel_region_ttm_init(mem);
 }
 
 struct drm_i915_gem_object *
 i915_gem_object_create_stolen(struct drm_i915_private *i915,
 			      resource_size_t size)
 {
-	return i915_gem_object_create_region(i915->mm.stolen_region, size, 0, 0);
+	return i915_gem_object_create_region(i915->mm.stolen_region, size, 0,
+					     I915_BO_ALLOC_CONTIGUOUS);
 }
 
 static int init_stolen_smem(struct intel_memory_region *mem)
@@ -731,16 +485,11 @@ static int init_stolen_smem(struct intel_memory_region *mem)
 	return i915_gem_init_stolen(mem);
 }
 
-static int release_stolen_smem(struct intel_memory_region *mem)
-{
-	i915_gem_cleanup_stolen(mem->i915);
-	return 0;
-}
-
 static const struct intel_memory_region_ops i915_region_stolen_smem_ops = {
 	.init = init_stolen_smem,
-	.release = release_stolen_smem,
-	.init_object = _i915_gem_object_stolen_init,
+	.release = intel_region_ttm_fini,
+	.init_object = __i915_gem_ttm_object_init,
+	.init_object_in_place = i915_gem_ttm_object_init_in_place,
 };
 
 static int init_stolen_lmem(struct intel_memory_region *mem)
@@ -774,14 +523,14 @@ static int init_stolen_lmem(struct intel_memory_region *mem)
 static int release_stolen_lmem(struct intel_memory_region *mem)
 {
 	io_mapping_fini(&mem->iomap);
-	i915_gem_cleanup_stolen(mem->i915);
-	return 0;
+	return intel_region_ttm_fini(mem);
 }
 
 static const struct intel_memory_region_ops i915_region_stolen_lmem_ops = {
 	.init = init_stolen_lmem,
 	.release = release_stolen_lmem,
-	.init_object = _i915_gem_object_stolen_init,
+	.init_object = __i915_gem_ttm_object_init,
+	.init_object_in_place = i915_gem_ttm_object_init_in_place,
 };
 
 struct intel_memory_region *
@@ -855,56 +604,10 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
 					       resource_size_t stolen_offset,
 					       resource_size_t size)
 {
-	struct intel_memory_region *mem = i915->mm.stolen_region;
-	struct drm_i915_gem_object *obj;
-	struct drm_mm_node *stolen;
-	int ret;
-
-	if (!drm_mm_initialized(&i915->mm.stolen))
-		return ERR_PTR(-ENODEV);
-
-	drm_dbg(&i915->drm,
-		"creating preallocated stolen object: stolen_offset=%pa, size=%pa\n",
-		&stolen_offset, &size);
-
-	/* KISS and expect everything to be page-aligned */
-	if (GEM_WARN_ON(size == 0) ||
-	    GEM_WARN_ON(!IS_ALIGNED(size, mem->min_page_size)) ||
-	    GEM_WARN_ON(!IS_ALIGNED(stolen_offset, mem->min_page_size)))
-		return ERR_PTR(-EINVAL);
-
-	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
-	if (!stolen)
-		return ERR_PTR(-ENOMEM);
-
-	stolen->start = stolen_offset;
-	stolen->size = size;
-	mutex_lock(&i915->mm.stolen_lock);
-	ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
-	mutex_unlock(&i915->mm.stolen_lock);
-	if (ret)
-		goto err_free;
-
-	obj = i915_gem_object_alloc();
-	if (!obj) {
-		ret = -ENOMEM;
-		goto err_stolen;
-	}
-
-	ret = __i915_gem_object_create_stolen(mem, obj, stolen);
-	if (ret)
-		goto err_object_free;
-
-	i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
-	return obj;
-
-err_object_free:
-	i915_gem_object_free(obj);
-err_stolen:
-	i915_gem_stolen_remove_node(i915, stolen);
-err_free:
-	kfree(stolen);
-	return ERR_PTR(ret);
+	return i915_gem_object_create_region_in_place(i915->mm.stolen_region, size, 0,
+						      I915_BO_ALLOC_CONTIGUOUS,
+						      stolen_offset,
+						      stolen_offset + size);
 }
 
 struct drm_i915_gem_object *
@@ -913,61 +616,25 @@ i915_gem_object_create_stolen_in_range(struct drm_i915_private *i915,
 				       resource_size_t alignment,
 				       u64 start, u64 end)
 {
-	struct intel_memory_region *mem = i915->mm.stolen_region;
-	struct drm_i915_gem_object *obj;
-	struct drm_mm_node *stolen;
-	int ret;
-
-	if (!drm_mm_initialized(&i915->mm.stolen))
-		return ERR_PTR(-ENODEV);
-
-	/* KISS and expect everything to be page-aligned */
-	if (GEM_WARN_ON(size == 0) ||
-	    GEM_WARN_ON(!IS_ALIGNED(size, mem->min_page_size)) ||
-	    GEM_WARN_ON(!IS_ALIGNED(alignment, mem->min_page_size)))
-		return ERR_PTR(-EINVAL);
-
-	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
-	if (!stolen)
-		return ERR_PTR(-ENOMEM);
-
-	ret = i915_gem_stolen_insert_node_in_range(i915, stolen, size,
-						   alignment, start, end);
-	if (ret)
-		goto err_free;
-
-	obj = i915_gem_object_alloc();
-	if (!obj) {
-		ret = -ENOMEM;
-		goto err_stolen;
-	}
-
-	ret = __i915_gem_object_create_stolen(mem, obj, stolen);
-	if (ret)
-		goto err_object_free;
-
-	i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
-	return obj;
-
-err_object_free:
-	i915_gem_object_free(obj);
-err_stolen:
-	i915_gem_stolen_remove_node(i915, stolen);
-err_free:
-	kfree(stolen);
-	return ERR_PTR(ret);
+	return i915_gem_object_create_region_in_place(i915->mm.stolen_region, size, 0,
+						      I915_BO_ALLOC_CONTIGUOUS,
+						      start, end);
 }
 
 u64 i915_gem_object_stolen_offset(const struct drm_i915_gem_object *obj)
 {
+	struct ttm_buffer_object *ttm_obj;
 	if (!obj || !i915_gem_object_is_stolen(obj))
 		return 0;
 
-	return obj->stolen->start;
+	ttm_obj = i915_gem_to_ttm((struct drm_i915_gem_object *)obj);
+
+	return ttm_obj->resource->start;
 }
 
 
 bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj)
 {
-	return obj->ops == &i915_gem_object_stolen_ops;
+	return obj->mm.region->type == INTEL_MEMORY_STOLEN_SYSTEM ||
+	       obj->mm.region->type == INTEL_MEMORY_STOLEN_LOCAL;
 }
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
index 494e90f130f4..921e51a5bbc4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
@@ -12,15 +12,6 @@ struct drm_i915_private;
 struct drm_mm_node;
 struct drm_i915_gem_object;
 
-int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
-				struct drm_mm_node *node, u64 size,
-				unsigned alignment);
-int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
-					 struct drm_mm_node *node, u64 size,
-					 unsigned alignment, u64 start,
-					 u64 end);
-void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
-				 struct drm_mm_node *node);
 struct intel_memory_region *
 i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type,
 			   u16 instance);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 35d1bde19267..b26bde6a4bb9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -1232,13 +1232,13 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
  *
  * Return: 0 on success, negative error code on failure.
  */
-static int i915_gem_ttm_object_init_in_place(struct intel_memory_region *mem,
-					     struct drm_i915_gem_object *obj,
-					     resource_size_t size,
-					     resource_size_t page_size,
-					     unsigned int flags,
-					     u64 start,
-					     u64 end)
+int i915_gem_ttm_object_init_in_place(struct intel_memory_region *mem,
+				      struct drm_i915_gem_object *obj,
+				      resource_size_t size,
+				      resource_size_t page_size,
+				      unsigned int flags,
+				      u64 start,
+				      u64 end)
 {
 	struct ttm_place place;
 	struct ttm_placement placement = {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
index 9d698ad00853..f8ff52d81072 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
@@ -48,6 +48,13 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
 			       resource_size_t size,
 			       resource_size_t page_size,
 			       unsigned int flags);
+int i915_gem_ttm_object_init_in_place(struct intel_memory_region *mem,
+				      struct drm_i915_gem_object *obj,
+				      resource_size_t size,
+				      resource_size_t page_size,
+				      unsigned int flags,
+				      u64 start,
+				      u64 end);
 
 /* Internal I915 TTM declarations and definitions below. */
 
-- 
2.25.1


Thread overview: 45+ messages
2022-03-15 18:04 [Intel-gfx] [RFC PATCH 0/7] drm/i915: ttm for stolen Robert Beckett
2022-03-15 18:04 ` [RFC PATCH 1/7] drm/i915: instantiate ttm range manager for stolen memory Robert Beckett
2022-03-15 18:04   ` [Intel-gfx] " Robert Beckett
2022-03-15 18:04   ` Robert Beckett
2022-03-15 18:04 ` [RFC PATCH 2/7] drm/i915: add ability to create memory region object in place Robert Beckett
2022-03-15 18:04   ` [Intel-gfx] " Robert Beckett
2022-03-15 18:04   ` Robert Beckett
2022-03-15 18:04 ` [RFC PATCH 3/7] drm/i915: use gem objects to track stolen nodes Robert Beckett
2022-03-15 18:04   ` [Intel-gfx] " Robert Beckett
2022-03-15 18:04   ` Robert Beckett
2022-03-15 18:04 ` Robert Beckett [this message]
2022-03-15 18:04   ` [Intel-gfx] [RFC PATCH 4/7] drm/i915: stolen memory use ttm backend Robert Beckett
2022-03-15 18:04   ` Robert Beckett
2022-03-15 18:04 ` [RFC PATCH 5/7] drm/ttm: add range busy check for range manager Robert Beckett
2022-03-15 18:04   ` [Intel-gfx] " Robert Beckett
2022-03-15 18:04   ` Robert Beckett
2022-03-16  9:54   ` Christian König
2022-03-16  9:54     ` Christian König
2022-03-16  9:54     ` [Intel-gfx] " Christian König
2022-03-16 13:19     ` Robert Beckett
2022-03-16 13:19       ` Robert Beckett
2022-03-16 13:19       ` [Intel-gfx] " Robert Beckett
2022-03-16 13:43       ` Christian König
2022-03-16 13:43         ` [Intel-gfx] " Christian König
2022-03-16 13:43         ` Christian König
2022-03-16 14:26         ` Robert Beckett
2022-03-16 14:26           ` [Intel-gfx] " Robert Beckett
2022-03-16 14:26           ` Robert Beckett
2022-03-16 14:39           ` Christian König
2022-03-16 14:39             ` [Intel-gfx] " Christian König
2022-03-16 14:39             ` Christian König
2022-03-16 15:28             ` Robert Beckett
2022-03-16 15:28               ` [Intel-gfx] " Robert Beckett
2022-03-17  7:00               ` Christian König
2022-03-17  7:00                 ` [Intel-gfx] " Christian König
2022-03-15 18:04 ` [RFC PATCH 6/7] drm/i915: add range busy check for ttm region Robert Beckett
2022-03-15 18:04   ` [Intel-gfx] " Robert Beckett
2022-03-15 18:04   ` Robert Beckett
2022-03-15 18:04 ` [RFC PATCH 7/7] drm/i915: cleanup old stolen state Robert Beckett
2022-03-15 18:04   ` [Intel-gfx] " Robert Beckett
2022-03-15 18:04   ` Robert Beckett
2022-03-15 19:28 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for drm/i915: ttm for stolen Patchwork
2022-03-15 19:30 ` [Intel-gfx] ✗ Fi.CI.SPARSE: " Patchwork
2022-03-15 20:09 ` [Intel-gfx] ✗ Fi.CI.BAT: failure " Patchwork
2022-03-15 20:09 ` [Intel-gfx] ✗ Fi.CI.BUILD: warning " Patchwork
