From: Matthew Auld <matthew.auld@intel.com>
To: intel-gfx@lists.freedesktop.org
Subject: [RFC PATCH 18/42] drm/i915/lmem: support CPU relocations
Date: Thu, 14 Feb 2019 14:57:16 +0000
Message-ID: <20190214145740.14521-19-matthew.auld@intel.com>
In-Reply-To: <20190214145740.14521-1-matthew.auld@intel.com>

Add support for performing relocations from the CPU when dealing with
LMEM objects, mapping the page to be fixed up directly through the LMEM
io mapping rather than through a GGTT aperture slot.
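
For reference, a minimal sketch (simplified from the reloc_vaddr() hunk
below, with error handling elided) of how the mapping path is now
selected:

	if ((cache->vaddr & KMAP) == 0) {
		/* Local memory: CPU access through the LMEM io mapping. */
		if (i915_gem_object_is_lmem(obj))
			vaddr = reloc_lmem(obj, cache, page);
		else
			/* Otherwise map through a GGTT aperture slot. */
			vaddr = reloc_iomap(obj, cache, page);
	}
	if (!vaddr)
		/* Fall back to kmap for shmem-backed objects. */
		vaddr = reloc_kmap(obj, cache, page);

The LMEM path moves the object to the WC domain, pins its pages, and
then maps one page at a time via i915_gem_object_lmem_io_map_page(),
with the pages unpinned again in reloc_cache_reset().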

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
---
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 67 ++++++++++++++++++----
 1 file changed, 56 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 02adcaf6ebea..390691128c1d 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -265,6 +265,7 @@ struct i915_execbuffer {
 		bool has_llc : 1;
 		bool has_fence : 1;
 		bool needs_unfenced : 1;
+		bool is_lmem : 1;
 
 		struct i915_request *rq;
 		u32 *rq_cmd;
@@ -973,6 +974,7 @@ static void reloc_cache_init(struct reloc_cache *cache,
 	cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
 	cache->has_fence = cache->gen < 4;
 	cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
+	cache->is_lmem = false;
 	cache->node.allocated = false;
 	cache->rq = NULL;
 	cache->rq_size = 0;
@@ -1027,16 +1029,23 @@ static void reloc_cache_reset(struct reloc_cache *cache)
 		i915_gem_obj_finish_shmem_access((struct drm_i915_gem_object *)cache->node.mm);
 	} else {
 		wmb();
-		io_mapping_unmap_atomic((void __iomem *)vaddr);
-		if (cache->node.allocated) {
-			struct i915_ggtt *ggtt = cache_to_ggtt(cache);
-
-			ggtt->vm.clear_range(&ggtt->vm,
-					     cache->node.start,
-					     cache->node.size);
-			drm_mm_remove_node(&cache->node);
+
+		if (cache->is_lmem) {
+			io_mapping_unmap_atomic((void __iomem *)vaddr);
+			i915_gem_object_unpin_pages((struct drm_i915_gem_object *)cache->node.mm);
+			cache->is_lmem = false;
 		} else {
-			i915_vma_unpin((struct i915_vma *)cache->node.mm);
+			io_mapping_unmap_atomic((void __iomem *)vaddr);
+			if (cache->node.allocated) {
+				struct i915_ggtt *ggtt = cache_to_ggtt(cache);
+
+				ggtt->vm.clear_range(&ggtt->vm,
+						     cache->node.start,
+						     cache->node.size);
+				drm_mm_remove_node(&cache->node);
+			} else {
+				i915_vma_unpin((struct i915_vma *)cache->node.mm);
+			}
 		}
 	}
 
@@ -1076,6 +1085,38 @@ static void *reloc_kmap(struct drm_i915_gem_object *obj,
 	return vaddr;
 }
 
+static void *reloc_lmem(struct drm_i915_gem_object *obj,
+			struct reloc_cache *cache,
+			unsigned long page)
+{
+	void *vaddr;
+	int err;
+
+	GEM_BUG_ON(use_cpu_reloc(cache, obj));
+
+	if (cache->vaddr) {
+		io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
+	} else {
+		err = i915_gem_object_set_to_wc_domain(obj, true);
+		if (err)
+			return ERR_PTR(err);
+
+		err = i915_gem_object_pin_pages(obj);
+		if (err)
+			return ERR_PTR(err);
+
+		cache->node.mm = (void *)obj;
+		cache->is_lmem = true;
+	}
+
+	vaddr = i915_gem_object_lmem_io_map_page(obj, page);
+
+	cache->vaddr = (unsigned long)vaddr;
+	cache->page = page;
+
+	return vaddr;
+}
+
 static void *reloc_iomap(struct drm_i915_gem_object *obj,
 			 struct reloc_cache *cache,
 			 unsigned long page)
@@ -1150,8 +1191,12 @@ static void *reloc_vaddr(struct drm_i915_gem_object *obj,
 		vaddr = unmask_page(cache->vaddr);
 	} else {
 		vaddr = NULL;
-		if ((cache->vaddr & KMAP) == 0)
-			vaddr = reloc_iomap(obj, cache, page);
+		if ((cache->vaddr & KMAP) == 0) {
+			if (i915_gem_object_is_lmem(obj))
+				vaddr = reloc_lmem(obj, cache, page);
+			else
+				vaddr = reloc_iomap(obj, cache, page);
+		}
 		if (!vaddr)
 			vaddr = reloc_kmap(obj, cache, page);
 	}
-- 
2.20.1
