From: Brian Welty <brian.welty-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
To: dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org,
	intel-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org,
	"Daniel Vetter" <daniel-/w4YWyX8dFk@public.gmane.org>,
	"Christian König" <christian.koenig-5C7GfCeVMHo@public.gmane.org>,
	"Joonas Lahtinen"
	<joonas.lahtinen-VuQAYsv1563Yd54FQh9/CA@public.gmane.org>
Subject: [RFC PATCH 1/3] drm: introduce new struct drm_mem_region
Date: Mon, 29 Jul 2019 20:32:23 -0400	[thread overview]
Message-ID: <20190730003225.322-2-brian.welty@intel.com> (raw)
In-Reply-To: <20190730003225.322-1-brian.welty-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>

Move basic members of ttm_mem_type_manager into a new DRM memory region
structure.  The idea is for this base structure to be nested inside
the TTM structure and later in Intel's proposed intel_memory_region.
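
For reference, the intended nesting looks roughly as follows.  The
drm_mem_region and ttm_mem_type_manager fields are taken from this
patch; the intel_memory_region side is shown only schematically (see
patch 3/3):

    struct drm_mem_region {
            resource_size_t start;    /* within GPU physical address space */
            resource_size_t io_start; /* BAR address (CPU accessible) */
            resource_size_t size;
            ...
    };

    struct ttm_mem_type_manager {
            struct drm_mem_region region;
            struct ttm_bo_device *bdev;
            ...
    };

    /* and later, on the i915 side (patch 3/3): */
    struct intel_memory_region {
            struct drm_mem_region region;
            ...
    };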

As the comments in the code suggest, the following future work can extend
the usefulness of this:
- Create common memory region types (next patch)
- Create a common set of memory_region function callbacks (based on
  ttm_mem_type_manager_funcs and intel_memory_region_ops); a rough
  sketch follows this list
- Create common helpers that operate on drm_mem_region, to be leveraged
  by both TTM drivers and i915, reducing code duplication
- The above might start with refactoring ttm_bo_manager.c, as these are
  helpers for using drm_mm's range allocator and could be made to
  operate on DRM structures instead of TTM ones.
- A larger goal might be to make LRU management of GEM objects common, and
  migrate those fields into drm_mem_region and drm_gem_object structures.
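
As a rough sketch only (the names below are illustrative and not part of
this series), common drm_mem_region callbacks could follow the shape of
ttm_mem_type_manager_func, with the parameter types eventually becoming
DRM-level rather than TTM-level:

    struct drm_mem_region_funcs {
            int  (*init)(struct drm_mem_region *region,
                         resource_size_t size);
            int  (*takedown)(struct drm_mem_region *region);
            /* allocate/free backing space for a buffer placement */
            int  (*get_node)(struct drm_mem_region *region,
                             const struct ttm_place *place,
                             struct ttm_mem_reg *mem);
            void (*put_node)(struct drm_mem_region *region,
                             struct ttm_mem_reg *mem);
            void (*debug)(struct drm_mem_region *region,
                          struct drm_printer *printer);
    };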

The vmwgfx changes are included here only as an example of what the
driver updates will look like; they can be moved to a separate patch
later.  Other TTM drivers need to be updated similarly.

Signed-off-by: Brian Welty <brian.welty@intel.com>
---
 drivers/gpu/drm/ttm/ttm_bo.c                  | 34 +++++++++++--------
 drivers/gpu/drm/ttm/ttm_bo_manager.c          | 14 ++++----
 drivers/gpu/drm/ttm/ttm_bo_util.c             | 11 +++---
 drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c |  8 ++---
 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c    |  4 +--
 include/drm/drm_mm.h                          | 31 +++++++++++++++--
 include/drm/ttm/ttm_bo_api.h                  |  2 +-
 include/drm/ttm/ttm_bo_driver.h               | 16 ++++-----
 8 files changed, 75 insertions(+), 45 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 58c403eda04e..45434ea513dd 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -84,8 +84,8 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, struct drm_printer *p
 	drm_printf(p, "    has_type: %d\n", man->has_type);
 	drm_printf(p, "    use_type: %d\n", man->use_type);
 	drm_printf(p, "    flags: 0x%08X\n", man->flags);
-	drm_printf(p, "    gpu_offset: 0x%08llX\n", man->gpu_offset);
-	drm_printf(p, "    size: %llu\n", man->size);
+	drm_printf(p, "    gpu_offset: 0x%08llX\n", man->region.start);
+	drm_printf(p, "    size: %llu\n", man->region.size);
 	drm_printf(p, "    available_caching: 0x%08X\n", man->available_caching);
 	drm_printf(p, "    default_caching: 0x%08X\n", man->default_caching);
 	if (mem_type != TTM_PL_SYSTEM)
@@ -399,7 +399,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
 	if (bo->mem.mm_node)
 		bo->offset = (bo->mem.start << PAGE_SHIFT) +
-		    bdev->man[bo->mem.mem_type].gpu_offset;
+		    bdev->man[bo->mem.mem_type].region.start;
 	else
 		bo->offset = 0;
 
@@ -926,9 +926,9 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
 	struct dma_fence *fence;
 	int ret;
 
-	spin_lock(&man->move_lock);
-	fence = dma_fence_get(man->move);
-	spin_unlock(&man->move_lock);
+	spin_lock(&man->region.move_lock);
+	fence = dma_fence_get(man->region.move);
+	spin_unlock(&man->region.move_lock);
 
 	if (fence) {
 		reservation_object_add_shared_fence(bo->resv, fence);
@@ -1490,9 +1490,9 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 	}
 	spin_unlock(&glob->lru_lock);
 
-	spin_lock(&man->move_lock);
-	fence = dma_fence_get(man->move);
-	spin_unlock(&man->move_lock);
+	spin_lock(&man->region.move_lock);
+	fence = dma_fence_get(man->region.move);
+	spin_unlock(&man->region.move_lock);
 
 	if (fence) {
 		ret = dma_fence_wait(fence, false);
@@ -1535,8 +1535,8 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 		ret = (*man->func->takedown)(man);
 	}
 
-	dma_fence_put(man->move);
-	man->move = NULL;
+	dma_fence_put(man->region.move);
+	man->region.move = NULL;
 
 	return ret;
 }
@@ -1561,7 +1561,7 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 EXPORT_SYMBOL(ttm_bo_evict_mm);
 
 int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
-			unsigned long p_size)
+		   resource_size_t p_size)
 {
 	int ret;
 	struct ttm_mem_type_manager *man;
@@ -1570,10 +1570,16 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 	BUG_ON(type >= TTM_NUM_MEM_TYPES);
 	man = &bdev->man[type];
 	BUG_ON(man->has_type);
+
+	/* FIXME: add call to (new) drm_mem_region_init ? */
+	man->region.size = p_size;
+	man->region.type = type;
+	spin_lock_init(&man->region.move_lock);
+	man->region.move = NULL;
+
 	man->io_reserve_fastpath = true;
 	man->use_io_reserve_lru = false;
 	mutex_init(&man->io_reserve_mutex);
-	spin_lock_init(&man->move_lock);
 	INIT_LIST_HEAD(&man->io_reserve_lru);
 
 	ret = bdev->driver->init_mem_type(bdev, type, man);
@@ -1588,11 +1594,9 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 	}
 	man->has_type = true;
 	man->use_type = true;
-	man->size = p_size;
 
 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
 		INIT_LIST_HEAD(&man->lru[i]);
-	man->move = NULL;
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index 18d3debcc949..0a99b3d5b482 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -53,7 +53,7 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 			       const struct ttm_place *place,
 			       struct ttm_mem_reg *mem)
 {
-	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->region.priv;
 	struct drm_mm *mm = &rman->mm;
 	struct drm_mm_node *node;
 	enum drm_mm_insert_mode mode;
@@ -62,7 +62,7 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 
 	lpfn = place->lpfn;
 	if (!lpfn)
-		lpfn = man->size;
+		lpfn = man->region.size;
 
 	node = kzalloc(sizeof(*node), GFP_KERNEL);
 	if (!node)
@@ -92,7 +92,7 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
 				struct ttm_mem_reg *mem)
 {
-	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->region.priv;
 
 	if (mem->mm_node) {
 		spin_lock(&rman->lock);
@@ -115,13 +115,13 @@ static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
 
 	drm_mm_init(&rman->mm, 0, p_size);
 	spin_lock_init(&rman->lock);
-	man->priv = rman;
+	man->region.priv = rman;
 	return 0;
 }
 
 static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
 {
-	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->region.priv;
 	struct drm_mm *mm = &rman->mm;
 
 	spin_lock(&rman->lock);
@@ -129,7 +129,7 @@ static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
 		drm_mm_takedown(mm);
 		spin_unlock(&rman->lock);
 		kfree(rman);
-		man->priv = NULL;
+		man->region.priv = NULL;
 		return 0;
 	}
 	spin_unlock(&rman->lock);
@@ -139,7 +139,7 @@ static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
 static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
 			     struct drm_printer *printer)
 {
-	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->region.priv;
 
 	spin_lock(&rman->lock);
 	drm_mm_print(&rman->mm, printer);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 9f918b992f7e..e44d0b7d60b4 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -795,12 +795,13 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
 		 * this eviction and free up the allocation
 		 */
 
-		spin_lock(&from->move_lock);
-		if (!from->move || dma_fence_is_later(fence, from->move)) {
-			dma_fence_put(from->move);
-			from->move = dma_fence_get(fence);
+		spin_lock(&from->region.move_lock);
+		if (!from->region.move ||
+		    dma_fence_is_later(fence, from->region.move)) {
+			dma_fence_put(from->region.move);
+			from->region.move = dma_fence_get(fence);
 		}
-		spin_unlock(&from->move_lock);
+		spin_unlock(&from->region.move_lock);
 
 		ttm_bo_free_old_node(bo);
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index 7da752ca1c34..dd4f85accc4e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -50,7 +50,7 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
 				  struct ttm_mem_reg *mem)
 {
 	struct vmwgfx_gmrid_man *gman =
-		(struct vmwgfx_gmrid_man *)man->priv;
+		(struct vmwgfx_gmrid_man *)man->region.priv;
 	int id;
 
 	mem->mm_node = NULL;
@@ -85,7 +85,7 @@ static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
 				   struct ttm_mem_reg *mem)
 {
 	struct vmwgfx_gmrid_man *gman =
-		(struct vmwgfx_gmrid_man *)man->priv;
+		(struct vmwgfx_gmrid_man *)man->region.priv;
 
 	if (mem->mm_node) {
 		ida_free(&gman->gmr_ida, mem->start);
@@ -123,14 +123,14 @@ static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
 	default:
 		BUG();
 	}
-	man->priv = (void *) gman;
+	man->region.priv = (void *) gman;
 	return 0;
 }
 
 static int vmw_gmrid_man_takedown(struct ttm_mem_type_manager *man)
 {
 	struct vmwgfx_gmrid_man *gman =
-		(struct vmwgfx_gmrid_man *)man->priv;
+		(struct vmwgfx_gmrid_man *)man->region.priv;
 
 	if (gman) {
 		ida_destroy(&gman->gmr_ida);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index d8ea3dd10af0..c6e99893e993 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -755,7 +755,7 @@ static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 	case TTM_PL_VRAM:
 		/* "On-card" video ram */
 		man->func = &ttm_bo_manager_func;
-		man->gpu_offset = 0;
+		man->region.start = 0;
 		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
 		man->available_caching = TTM_PL_FLAG_CACHED;
 		man->default_caching = TTM_PL_FLAG_CACHED;
@@ -768,7 +768,7 @@ static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		 *  slots as well as the bo size.
 		 */
 		man->func = &vmw_gmrid_manager_func;
-		man->gpu_offset = 0;
+		man->region.start = 0;
 		man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
 		man->available_caching = TTM_PL_FLAG_CACHED;
 		man->default_caching = TTM_PL_FLAG_CACHED;
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 2c3bbb43c7d1..465f8d10d863 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -38,10 +38,12 @@
  * Generic range manager structs
  */
 #include <linux/bug.h>
-#include <linux/rbtree.h>
+#include <linux/dma-fence.h>
+#include <linux/io-mapping.h>
 #include <linux/kernel.h>
-#include <linux/mm_types.h>
 #include <linux/list.h>
+#include <linux/mm_types.h>
+#include <linux/rbtree.h>
 #include <linux/spinlock.h>
 #ifdef CONFIG_DRM_DEBUG_MM
 #include <linux/stackdepot.h>
@@ -54,6 +56,31 @@
 #define DRM_MM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
 #endif
 
+struct drm_device;
+struct drm_mm;
+
+/**
+ * struct drm_mem_region
+ *
+ * Base memory region structure to be nested inside TTM memory regions
+ * (ttm_mem_type_manager) and i915 memory regions (intel_memory_region).
+ */
+struct drm_mem_region {
+	resource_size_t start; /* within GPU physical address space */
+	resource_size_t io_start; /* BAR address (CPU accessible) */
+	resource_size_t size;
+	struct io_mapping iomap;
+	u8 type;
+
+	union {
+		struct drm_mm *mm;
+		/* FIXME (for i915): struct drm_buddy_mm *buddy_mm; */
+		void *priv;
+	};
+	spinlock_t move_lock;
+	struct dma_fence *move;
+};
+
 /**
  * enum drm_mm_insert_mode - control search and allocation behaviour
  *
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 49d9cdfc58f2..f8cb332f0eeb 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -615,7 +615,7 @@ int ttm_bo_create(struct ttm_bo_device *bdev, unsigned long size,
  * May also return driver-specified errors.
  */
 int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
-		   unsigned long p_size);
+		   resource_size_t p_size);
 
 /**
  * ttm_bo_clean_mm
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index c9b8ba492f24..4066ee315469 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -51,6 +51,12 @@
 
 struct ttm_mem_type_manager;
 
+/* FIXME:
+ * Potentially can rework this as common callbacks for drm_mem_region
+ * instead of ttm_mem_type_manager.
+ * Then the intel_memory_region_ops proposed by LMEM patch series could
+ * be folded into here.
+ */
 struct ttm_mem_type_manager_func {
 	/**
 	 * struct ttm_mem_type_manager member init
@@ -168,6 +174,7 @@ struct ttm_mem_type_manager_func {
 
 
 struct ttm_mem_type_manager {
+	struct drm_mem_region region;
 	struct ttm_bo_device *bdev;
 
 	/*
@@ -177,16 +184,12 @@ struct ttm_mem_type_manager {
 	bool has_type;
 	bool use_type;
 	uint32_t flags;
-	uint64_t gpu_offset; /* GPU address space is independent of CPU word size */
-	uint64_t size;
 	uint32_t available_caching;
 	uint32_t default_caching;
 	const struct ttm_mem_type_manager_func *func;
-	void *priv;
 	struct mutex io_reserve_mutex;
 	bool use_io_reserve_lru;
 	bool io_reserve_fastpath;
-	spinlock_t move_lock;
 
 	/*
 	 * Protected by @io_reserve_mutex:
@@ -199,11 +202,6 @@ struct ttm_mem_type_manager {
 	 */
 
 	struct list_head lru[TTM_MAX_BO_PRIORITY];
-
-	/*
-	 * Protected by @move_lock.
-	 */
-	struct dma_fence *move;
 };
 
 /**
-- 
2.21.0
