* [PATCH 01/11] drm/amdgpu: reserve the first 2x512 pages of GART
@ 2017-07-03  9:44 Christian König
From: Christian König @ 2017-07-03  9:44 UTC
  To: amd-gfx@lists.freedesktop.org

From: Christian König <christian.koenig@amd.com>

We want to use these first pages of the GART as a remap address space for buffer moves.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 5 ++++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h     | 3 +++
 2 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 1ef6255..f46a97d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -43,12 +43,15 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
 			       unsigned long p_size)
 {
 	struct amdgpu_gtt_mgr *mgr;
+	uint64_t start, size;
 
 	mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
 	if (!mgr)
 		return -ENOMEM;
 
-	drm_mm_init(&mgr->mm, 0, p_size);
+	start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
+	size = p_size - start;
+	drm_mm_init(&mgr->mm, start, size);
 	spin_lock_init(&mgr->lock);
 	mgr->available = p_size;
 	man->priv = mgr;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 776a20a..c8059f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -34,6 +34,9 @@
 #define AMDGPU_PL_FLAG_GWS		(TTM_PL_FLAG_PRIV << 1)
 #define AMDGPU_PL_FLAG_OA		(TTM_PL_FLAG_PRIV << 2)
 
+#define AMDGPU_GTT_MAX_TRANSFER_SIZE	512
+#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS	2
+
 struct amdgpu_mman {
 	struct ttm_bo_global_ref        bo_global_ref;
 	struct drm_global_reference	mem_global_ref;
-- 
2.7.4
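
For reference, the reservation above works in GTT pages: the manager skips
AMDGPU_GTT_NUM_TRANSFER_WINDOWS * AMDGPU_GTT_MAX_TRANSFER_SIZE pages before
initializing drm_mm. A minimal sketch (the helper name is made up for
illustration and is not part of the patch; 4 KiB pages assumed):

	/* 2 windows * 512 pages = 1024 GTT pages (4 MiB) kept out of drm_mm */
	static inline u64 amdgpu_gtt_mgr_reserved_pages(void)
	{
		return (u64)AMDGPU_GTT_NUM_TRANSFER_WINDOWS *
		       AMDGPU_GTT_MAX_TRANSFER_SIZE;
	}

drm_mm then manages [start, p_size), so regular GTT allocations can never land
in the remap windows.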


* [PATCH 02/11] drm/amdgpu: add amdgpu_gart_map function v2
From: Christian König @ 2017-07-03  9:44 UTC
  To: amd-gfx@lists.freedesktop.org

From: Christian König <christian.koenig@amd.com>

This allows us to write the mapped PTEs into
an IB instead of directly into the table.

v2: fix build with debugfs enabled, remove unused assignment

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h      |  3 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 62 ++++++++++++++++++++++++--------
 2 files changed, 51 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 810796a..4a2b33d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -572,6 +572,9 @@ int amdgpu_gart_init(struct amdgpu_device *adev);
 void amdgpu_gart_fini(struct amdgpu_device *adev);
 int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 			int pages);
+int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
+		    int pages, dma_addr_t *dma_addr, uint64_t flags,
+		    void *dst);
 int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
 		     int pages, struct page **pagelist,
 		     dma_addr_t *dma_addr, uint64_t flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 8877015..c808388 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -280,6 +280,41 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 }
 
 /**
+ * amdgpu_gart_map - map dma_addresses into GART entries
+ *
+ * @adev: amdgpu_device pointer
+ * @offset: offset into the GPU's gart aperture
+ * @pages: number of pages to bind
+ * @dma_addr: DMA addresses of pages
+ *
+ * Map the dma_addresses into GART entries (all asics).
+ * Returns 0 for success, -EINVAL for failure.
+ */
+int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
+		    int pages, dma_addr_t *dma_addr, uint64_t flags,
+		    void *dst)
+{
+	uint64_t page_base;
+	unsigned i, j, t;
+
+	if (!adev->gart.ready) {
+		WARN(1, "trying to bind memory to uninitialized GART !\n");
+		return -EINVAL;
+	}
+
+	t = offset / AMDGPU_GPU_PAGE_SIZE;
+
+	for (i = 0; i < pages; i++) {
+		page_base = dma_addr[i];
+		for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
+			amdgpu_gart_set_pte_pde(adev, dst, t, page_base, flags);
+			page_base += AMDGPU_GPU_PAGE_SIZE;
+		}
+	}
+	return 0;
+}
+
+/**
  * amdgpu_gart_bind - bind pages into the gart page table
  *
  * @adev: amdgpu_device pointer
@@ -296,31 +331,30 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
 		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
 		     uint64_t flags)
 {
-	unsigned t;
-	unsigned p;
-	uint64_t page_base;
-	int i, j;
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
+	unsigned i,t,p;
+#endif
+	int r;
 
 	if (!adev->gart.ready) {
 		WARN(1, "trying to bind memory to uninitialized GART !\n");
 		return -EINVAL;
 	}
 
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
 	t = offset / AMDGPU_GPU_PAGE_SIZE;
 	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
-
-	for (i = 0; i < pages; i++, p++) {
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
+	for (i = 0; i < pages; i++, p++)
 		adev->gart.pages[p] = pagelist[i];
 #endif
-		if (adev->gart.ptr) {
-			page_base = dma_addr[i];
-			for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
-				amdgpu_gart_set_pte_pde(adev, adev->gart.ptr, t, page_base, flags);
-				page_base += AMDGPU_GPU_PAGE_SIZE;
-			}
-		}
+
+	if (adev->gart.ptr) {
+		r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
+			    adev->gart.ptr);
+		if (r)
+			return r;
 	}
+
 	mb();
 	amdgpu_gart_flush_gpu_tlb(adev, 0);
 	return 0;
-- 
2.7.4
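
To illustrate the difference: amdgpu_gart_bind() keeps writing into the live
table, while a caller can now point the same helper at an IB payload instead.
Both calls below are taken from this patch and the following one (not new
code):

	/* write PTEs into the live GART table (amdgpu_gart_bind) */
	r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
			    adev->gart.ptr);

	/* or build the same PTEs inside an IB, to be copied into the
	 * GART table by the SDMA (amdgpu_map_buffer in patch 03) */
	r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
			    &job->ibs[0].ptr[num_dw]);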


* [PATCH 03/11] drm/amdgpu: use the GTT windows for BO moves v2
From: Christian König @ 2017-07-03  9:44 UTC
  To: amd-gfx@lists.freedesktop.org

From: Christian König <christian.koenig@amd.com>

This way we no longer need to map the full BO at once, only a window-sized chunk at a time.

v2: use fixed windows for src/dst

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 125 +++++++++++++++++++++++++++-----
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h |   2 +
 2 files changed, 108 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 15148f1..1fc9866 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -47,10 +47,15 @@
 
 #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
 
+static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
+			     struct ttm_mem_reg *mem, unsigned num_pages,
+			     uint64_t offset, unsigned window,
+			     struct amdgpu_ring *ring,
+			     uint64_t *addr);
+
 static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
 static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
 
-
 /*
  * Global memory.
  */
@@ -97,6 +102,8 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
 		goto error_bo;
 	}
 
+	mutex_init(&adev->mman.gtt_window_lock);
+
 	ring = adev->mman.buffer_funcs_ring;
 	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
 	r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,
@@ -123,6 +130,7 @@ static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
 	if (adev->mman.mem_global_referenced) {
 		amd_sched_entity_fini(adev->mman.entity.sched,
 				      &adev->mman.entity);
+		mutex_destroy(&adev->mman.gtt_window_lock);
 		drm_global_item_unref(&adev->mman.bo_global_ref.ref);
 		drm_global_item_unref(&adev->mman.mem_global_ref);
 		adev->mman.mem_global_referenced = false;
@@ -256,10 +264,13 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
 				    struct drm_mm_node *mm_node,
 				    struct ttm_mem_reg *mem)
 {
-	uint64_t addr;
+	uint64_t addr = 0;
 
-	addr = mm_node->start << PAGE_SHIFT;
-	addr += bo->bdev->man[mem->mem_type].gpu_offset;
+	if (mem->mem_type != TTM_PL_TT ||
+	    amdgpu_gtt_mgr_is_allocated(mem)) {
+		addr = mm_node->start << PAGE_SHIFT;
+		addr += bo->bdev->man[mem->mem_type].gpu_offset;
+	}
 	return addr;
 }
 
@@ -284,34 +295,41 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 		return -EINVAL;
 	}
 
-	if (old_mem->mem_type == TTM_PL_TT) {
-		r = amdgpu_ttm_bind(bo, old_mem);
-		if (r)
-			return r;
-	}
-
 	old_mm = old_mem->mm_node;
 	old_size = old_mm->size;
 	old_start = amdgpu_mm_node_addr(bo, old_mm, old_mem);
 
-	if (new_mem->mem_type == TTM_PL_TT) {
-		r = amdgpu_ttm_bind(bo, new_mem);
-		if (r)
-			return r;
-	}
-
 	new_mm = new_mem->mm_node;
 	new_size = new_mm->size;
 	new_start = amdgpu_mm_node_addr(bo, new_mm, new_mem);
 
 	num_pages = new_mem->num_pages;
+	mutex_lock(&adev->mman.gtt_window_lock);
 	while (num_pages) {
-		unsigned long cur_pages = min(old_size, new_size);
+		unsigned long cur_pages = min(min(old_size, new_size),
+					      (u64)AMDGPU_GTT_MAX_TRANSFER_SIZE);
+		uint64_t from = old_start, to = new_start;
 		struct dma_fence *next;
 
-		r = amdgpu_copy_buffer(ring, old_start, new_start,
+		if (old_mem->mem_type == TTM_PL_TT &&
+		    !amdgpu_gtt_mgr_is_allocated(old_mem)) {
+			r = amdgpu_map_buffer(bo, old_mem, cur_pages,
+					      old_start, 0, ring, &from);
+			if (r)
+				goto error;
+		}
+
+		if (new_mem->mem_type == TTM_PL_TT &&
+		    !amdgpu_gtt_mgr_is_allocated(new_mem)) {
+			r = amdgpu_map_buffer(bo, new_mem, cur_pages,
+					      new_start, 1, ring, &to);
+			if (r)
+				goto error;
+		}
+
+		r = amdgpu_copy_buffer(ring, from, to,
 				       cur_pages * PAGE_SIZE,
-				       bo->resv, &next, false, false);
+				       bo->resv, &next, false, true);
 		if (r)
 			goto error;
 
@@ -338,12 +356,15 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 			new_start += cur_pages * PAGE_SIZE;
 		}
 	}
+	mutex_unlock(&adev->mman.gtt_window_lock);
 
 	r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
 	dma_fence_put(fence);
 	return r;
 
 error:
+	mutex_unlock(&adev->mman.gtt_window_lock);
+
 	if (fence)
 		dma_fence_wait(fence, false);
 	dma_fence_put(fence);
@@ -1253,6 +1274,72 @@ int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
 	return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
 }
 
+static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
+			     struct ttm_mem_reg *mem, unsigned num_pages,
+			     uint64_t offset, unsigned window,
+			     struct amdgpu_ring *ring,
+			     uint64_t *addr)
+{
+	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
+	struct amdgpu_device *adev = ring->adev;
+	struct ttm_tt *ttm = bo->ttm;
+	struct amdgpu_job *job;
+	unsigned num_dw, num_bytes;
+	dma_addr_t *dma_address;
+	struct dma_fence *fence;
+	uint64_t src_addr, dst_addr;
+	uint64_t flags;
+	int r;
+
+	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
+	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
+
+	*addr = adev->mc.gtt_start;
+	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
+		AMDGPU_GPU_PAGE_SIZE;
+
+	num_dw = adev->mman.buffer_funcs->copy_num_dw;
+	while (num_dw & 0x7)
+		num_dw++;
+
+	num_bytes = num_pages * 8;
+
+	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
+	if (r)
+		return r;
+
+	src_addr = num_dw * 4;
+	src_addr += job->ibs[0].gpu_addr;
+
+	dst_addr = adev->gart.table_addr;
+	dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
+	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
+				dst_addr, num_bytes);
+
+	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
+	WARN_ON(job->ibs[0].length_dw > num_dw);
+
+	dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
+	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
+	r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
+			    &job->ibs[0].ptr[num_dw]);
+	if (r)
+		goto error_free;
+
+	r = amdgpu_job_submit(job, ring, &adev->mman.entity,
+			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
+	if (r)
+		goto error_free;
+
+	dma_fence_put(fence);
+
+	return r;
+
+error_free:
+	amdgpu_job_free(job);
+	return r;
+}
+
 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
 		       uint64_t dst_offset, uint32_t byte_count,
 		       struct reservation_object *resv,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index c8059f0..4f5c1da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -52,6 +52,8 @@ struct amdgpu_mman {
 	/* buffer handling */
 	const struct amdgpu_buffer_funcs	*buffer_funcs;
 	struct amdgpu_ring			*buffer_funcs_ring;
+
+	struct mutex				gtt_window_lock;
 	/* Scheduler entity for buffer moves */
 	struct amd_sched_entity			entity;
 };
-- 
2.7.4
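
A short note on the fixed windows used above: window 0 is the source, window 1
the destination, each AMDGPU_GTT_MAX_TRANSFER_SIZE pages wide, and access is
serialized by gtt_window_lock. The GPU address of a window boils down to
(sketch only; the helper name is invented for illustration):

	static u64 amdgpu_gtt_window_addr(struct amdgpu_device *adev,
					  unsigned window)
	{
		/* window 0 starts at gtt_start, window 1 512 GPU pages later */
		return adev->mc.gtt_start +
		       (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
		       AMDGPU_GPU_PAGE_SIZE;
	}

which matches the *addr computation in amdgpu_map_buffer().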


* [PATCH 04/11] drm/amdgpu: stop mapping BOs to GTT
From: Christian König @ 2017-07-03  9:44 UTC
  To: amd-gfx@lists.freedesktop.org

From: Christian König <christian.koenig@amd.com>

There is no need to map BOs into the GTT for evictions and intermediate transfers any more.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 19 ++-----------------
 1 file changed, 2 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 1fc9866..5c7a6c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -199,7 +199,6 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 		.lpfn = 0,
 		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
 	};
-	unsigned i;
 
 	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
 		placement->placement = &placements;
@@ -217,20 +216,6 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
 		} else {
 			amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
-			for (i = 0; i < abo->placement.num_placement; ++i) {
-				if (!(abo->placements[i].flags &
-				      TTM_PL_FLAG_TT))
-					continue;
-
-				if (abo->placements[i].lpfn)
-					continue;
-
-				/* set an upper limit to force directly
-				 * allocating address space for the BO.
-				 */
-				abo->placements[i].lpfn =
-					adev->mc.gtt_size >> PAGE_SHIFT;
-			}
 		}
 		break;
 	case TTM_PL_TT:
@@ -391,7 +376,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
 	placement.num_busy_placement = 1;
 	placement.busy_placement = &placements;
 	placements.fpfn = 0;
-	placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT;
+	placements.lpfn = 0;
 	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
 	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
 			     interruptible, no_wait_gpu);
@@ -438,7 +423,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
 	placement.num_busy_placement = 1;
 	placement.busy_placement = &placements;
 	placements.fpfn = 0;
-	placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT;
+	placements.lpfn = 0;
 	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
 	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
 			     interruptible, no_wait_gpu);
-- 
2.7.4


* [PATCH 05/11] drm/amdgpu: remove maximum BO size limitation v2
From: Christian König @ 2017-07-03  9:44 UTC
  To: amd-gfx@lists.freedesktop.org

From: Christian König <christian.koenig@amd.com>

Now that BO moves no longer require a GTT mapping of the whole BO, we can finally remove this limitation.

v2: remove now unused max_size variable as well.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 12 ------------
 1 file changed, 12 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 96c4493..917ac5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -49,7 +49,6 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 				struct drm_gem_object **obj)
 {
 	struct amdgpu_bo *robj;
-	unsigned long max_size;
 	int r;
 
 	*obj = NULL;
@@ -58,17 +57,6 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 		alignment = PAGE_SIZE;
 	}
 
-	if (!(initial_domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
-		/* Maximum bo size is the unpinned gtt size since we use the gtt to
-		 * handle vram to system pool migrations.
-		 */
-		max_size = adev->mc.gtt_size - adev->gart_pin_size;
-		if (size > max_size) {
-			DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
-				  size >> 20, max_size >> 20);
-			return -ENOMEM;
-		}
-	}
 retry:
 	r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
 			     flags, NULL, NULL, &robj);
-- 
2.7.4


* [PATCH 06/11] drm/amdgpu: use TTM values instead of MC values for the info queries
From: Christian König @ 2017-07-03  9:44 UTC
  To: amd-gfx@lists.freedesktop.org

From: Christian König <christian.koenig@amd.com>

Use the TTM values instead of the hardware config here.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 00ef2fc..7a8da32 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -484,7 +484,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		vram_gtt.vram_size -= adev->vram_pin_size;
 		vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size;
 		vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
-		vram_gtt.gtt_size  = adev->mc.gtt_size;
+		vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
+		vram_gtt.gtt_size *= PAGE_SIZE;
 		vram_gtt.gtt_size -= adev->gart_pin_size;
 		return copy_to_user(out, &vram_gtt,
 				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
@@ -509,9 +510,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		mem.cpu_accessible_vram.max_allocation =
 			mem.cpu_accessible_vram.usable_heap_size * 3 / 4;
 
-		mem.gtt.total_heap_size = adev->mc.gtt_size;
-		mem.gtt.usable_heap_size =
-			adev->mc.gtt_size - adev->gart_pin_size;
+		mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
+		mem.gtt.total_heap_size *= PAGE_SIZE;
+		mem.gtt.usable_heap_size = mem.gtt.total_heap_size
+			- adev->gart_pin_size;
 		mem.gtt.heap_usage = atomic64_read(&adev->gtt_usage);
 		mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
 
-- 
2.7.4
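
Worth noting: the TTM manager size is kept in pages, which is why both new
call sites multiply by PAGE_SIZE before subtracting the pinned size. Roughly
(a sketch of the same computation, not additional patch code):

	u64 gtt_bytes  = (u64)adev->mman.bdev.man[TTM_PL_TT].size * PAGE_SIZE;
	u64 gtt_usable = gtt_bytes - adev->gart_pin_size;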


* [PATCH 07/11] drm/amdgpu: rename GART to SYSVM
From: Christian König @ 2017-07-03  9:44 UTC
  To: amd-gfx@lists.freedesktop.org

From: Christian König <christian.koenig@amd.com>

Just mass-rename everything related to the hardware GART/GTT functions to SYSVM.

The names of symbols related to the TTM TT domain stay the same.

This should improve the distinction between the two.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/Kconfig         |   9 +-
 drivers/gpu/drm/amd/amdgpu/Makefile        |   2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu.h        |  58 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  48 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c   | 423 -----------------------------
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c    |   2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c  | 423 +++++++++++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_test.c   |  84 +++---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c    |  76 +++---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h    |   2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c     |  30 +-
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c      |   4 +-
 drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c   |  16 +-
 drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h   |   4 +-
 drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c      |  66 ++---
 drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c      |  70 ++---
 drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c      |  70 ++---
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c      |  66 ++---
 drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c    |  16 +-
 drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h    |   4 +-
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c     |   4 +-
 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c      |   8 +-
 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c      |   4 +-
 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c      |   8 +-
 24 files changed, 749 insertions(+), 748 deletions(-)
 delete mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
 create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c

diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index e8af1f5..ebbac01 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -31,14 +31,15 @@ config DRM_AMDGPU_USERPTR
 	  This option selects CONFIG_MMU_NOTIFIER if it isn't already
 	  selected to enabled full userptr support.
 
-config DRM_AMDGPU_GART_DEBUGFS
-	bool "Allow GART access through debugfs"
+config DRM_AMDGPU_SYSVM_DEBUGFS
+	bool "Allow SYSVM access through debugfs"
 	depends on DRM_AMDGPU
 	depends on DEBUG_FS
 	default n
 	help
-	  Selecting this option creates a debugfs file to inspect the mapped
-	  pages. Uses more memory for housekeeping, enable only for debugging.
+	  Selecting this option creates a debugfs file to inspect the SYSVM
+	  mapped pages. Uses more memory for housekeeping, enable only for
+	  debugging.
 
 source "drivers/gpu/drm/amd/acp/Kconfig"
 source "drivers/gpu/drm/amd/display/Kconfig"
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 3661110..d80d49f 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -22,7 +22,7 @@ amdgpu-y := amdgpu_drv.o
 # add KMS driver
 amdgpu-y += amdgpu_device.o amdgpu_kms.o \
 	amdgpu_atombios.o atombios_crtc.o amdgpu_connectors.o \
-	atom.o amdgpu_fence.o amdgpu_ttm.o amdgpu_object.o amdgpu_gart.o \
+	atom.o amdgpu_fence.o amdgpu_ttm.o amdgpu_object.o amdgpu_sysvm.o \
 	amdgpu_encoders.o amdgpu_display.o amdgpu_i2c.o \
 	amdgpu_fb.o amdgpu_gem.o amdgpu_ring.o \
 	amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o amdgpu_test.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 4a2b33d..abe191f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -305,7 +305,7 @@ struct amdgpu_vm_pte_funcs {
 };
 
 /* provided by the gmc block */
-struct amdgpu_gart_funcs {
+struct amdgpu_sysvm_funcs {
 	/* flush the vm tlb via mmio */
 	void (*flush_gpu_tlb)(struct amdgpu_device *adev,
 			      uint32_t vmid);
@@ -543,39 +543,39 @@ struct amdgpu_mc;
 #define AMDGPU_GPU_PAGE_SHIFT 12
 #define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
 
-struct amdgpu_gart {
+struct amdgpu_sysvm {
 	dma_addr_t			table_addr;
 	struct amdgpu_bo		*robj;
 	void				*ptr;
 	unsigned			num_gpu_pages;
 	unsigned			num_cpu_pages;
 	unsigned			table_size;
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
+#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
 	struct page			**pages;
 #endif
 	bool				ready;
 
 	/* Asic default pte flags */
-	uint64_t			gart_pte_flags;
+	uint64_t			sysvm_pte_flags;
 
-	const struct amdgpu_gart_funcs *gart_funcs;
+	const struct amdgpu_sysvm_funcs *sysvm_funcs;
 };
 
-void amdgpu_gart_set_defaults(struct amdgpu_device *adev);
-int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
-void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
-int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
-void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
-int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
-void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
-int amdgpu_gart_init(struct amdgpu_device *adev);
-void amdgpu_gart_fini(struct amdgpu_device *adev);
-int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
+void amdgpu_sysvm_set_defaults(struct amdgpu_device *adev);
+int amdgpu_sysvm_table_ram_alloc(struct amdgpu_device *adev);
+void amdgpu_sysvm_table_ram_free(struct amdgpu_device *adev);
+int amdgpu_sysvm_table_vram_alloc(struct amdgpu_device *adev);
+void amdgpu_sysvm_table_vram_free(struct amdgpu_device *adev);
+int amdgpu_sysvm_table_vram_pin(struct amdgpu_device *adev);
+void amdgpu_sysvm_table_vram_unpin(struct amdgpu_device *adev);
+int amdgpu_sysvm_init(struct amdgpu_device *adev);
+void amdgpu_sysvm_fini(struct amdgpu_device *adev);
+int amdgpu_sysvm_unbind(struct amdgpu_device *adev, uint64_t offset,
 			int pages);
-int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
+int amdgpu_sysvm_map(struct amdgpu_device *adev, uint64_t offset,
 		    int pages, dma_addr_t *dma_addr, uint64_t flags,
 		    void *dst);
-int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
+int amdgpu_sysvm_bind(struct amdgpu_device *adev, uint64_t offset,
 		     int pages, struct page **pagelist,
 		     dma_addr_t *dma_addr, uint64_t flags);
 int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
@@ -604,15 +604,15 @@ struct amdgpu_mc {
 	 * about vram size near mc fb location */
 	u64			mc_vram_size;
 	u64			visible_vram_size;
-	u64			gtt_size;
-	u64			gtt_start;
-	u64			gtt_end;
+	u64			sysvm_size;
+	u64			sysvm_start;
+	u64			sysvm_end;
 	u64			vram_start;
 	u64			vram_end;
 	unsigned		vram_width;
 	u64			real_vram_size;
 	int			vram_mtrr;
-	u64                     gtt_base_align;
+	u64                     sysvm_base_align;
 	u64                     mc_mask;
 	const struct firmware   *fw;	/* MC firmware */
 	uint32_t                fw_version;
@@ -1575,7 +1575,7 @@ struct amdgpu_device {
 
 	/* MC */
 	struct amdgpu_mc		mc;
-	struct amdgpu_gart		gart;
+	struct amdgpu_sysvm		sysvm;
 	struct amdgpu_dummy_page	dummy_page;
 	struct amdgpu_vm_manager	vm_manager;
 	struct amdgpu_vmhub             vmhub[AMDGPU_MAX_VMHUBS];
@@ -1686,8 +1686,8 @@ struct amdgpu_device {
 	struct list_head                shadow_list;
 	struct mutex                    shadow_list_lock;
 	/* link all gtt */
-	spinlock_t			gtt_list_lock;
-	struct list_head                gtt_list;
+	spinlock_t			sysvm_list_lock;
+	struct list_head                sysvm_list;
 	/* keep an lru list of rings by HW IP */
 	struct list_head		ring_lru_list;
 	spinlock_t			ring_lru_list_lock;
@@ -1835,13 +1835,13 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
 #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
 #define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
-#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
-#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
-#define amdgpu_gart_get_vm_pde(adev, addr) (adev)->gart.gart_funcs->get_vm_pde((adev), (addr))
+#define amdgpu_sysvm_flush_gpu_tlb(adev, vmid) (adev)->sysvm.sysvm_funcs->flush_gpu_tlb((adev), (vmid))
+#define amdgpu_sysvm_set_pte_pde(adev, pt, idx, addr, flags) (adev)->sysvm.sysvm_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
+#define amdgpu_sysvm_get_vm_pde(adev, addr) (adev)->sysvm.sysvm_funcs->get_vm_pde((adev), (addr))
 #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
 #define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
 #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
-#define amdgpu_vm_get_pte_flags(adev, flags) (adev)->gart.gart_funcs->get_vm_pte_flags((adev),(flags))
+#define amdgpu_vm_get_pte_flags(adev, flags) (adev)->sysvm.sysvm_funcs->get_vm_pte_flags((adev),(flags))
 #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
 #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
 #define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
@@ -1908,7 +1908,7 @@ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
 uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
 				 struct ttm_mem_reg *mem);
 void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
-void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
+void amdgpu_sysvm_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
 void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
 int amdgpu_ttm_init(struct amdgpu_device *adev);
 void amdgpu_ttm_fini(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 5b1220f..46a82d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -667,39 +667,39 @@ void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64
 }
 
 /**
- * amdgpu_gtt_location - try to find GTT location
+ * amdgpu_sysvm_location - try to find SYSVM location
  * @adev: amdgpu device structure holding all necessary informations
  * @mc: memory controller structure holding memory informations
  *
- * Function will place try to place GTT before or after VRAM.
+ * Function will place try to place SYSVM before or after VRAM.
  *
- * If GTT size is bigger than space left then we ajust GTT size.
+ * If SYSVM size is bigger than space left then we ajust SYSVM size.
  * Thus function will never fails.
  *
- * FIXME: when reducing GTT size align new size on power of 2.
+ * FIXME: when reducing SYSVM size align new size on power of 2.
  */
-void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
+void amdgpu_sysvm_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
 {
 	u64 size_af, size_bf;
 
-	size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
-	size_bf = mc->vram_start & ~mc->gtt_base_align;
+	size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->sysvm_base_align) & ~mc->sysvm_base_align;
+	size_bf = mc->vram_start & ~mc->sysvm_base_align;
 	if (size_bf > size_af) {
-		if (mc->gtt_size > size_bf) {
-			dev_warn(adev->dev, "limiting GTT\n");
-			mc->gtt_size = size_bf;
+		if (mc->sysvm_size > size_bf) {
+			dev_warn(adev->dev, "limiting SYSVM\n");
+			mc->sysvm_size = size_bf;
 		}
-		mc->gtt_start = 0;
+		mc->sysvm_start = 0;
 	} else {
-		if (mc->gtt_size > size_af) {
-			dev_warn(adev->dev, "limiting GTT\n");
-			mc->gtt_size = size_af;
+		if (mc->sysvm_size > size_af) {
+			dev_warn(adev->dev, "limiting SYSVM\n");
+			mc->sysvm_size = size_af;
 		}
-		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
+		mc->sysvm_start = (mc->vram_end + 1 + mc->sysvm_base_align) & ~mc->sysvm_base_align;
 	}
-	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
-	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
-			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
+	mc->sysvm_end = mc->sysvm_start + mc->sysvm_size - 1;
+	dev_info(adev->dev, "SYSVM: %lluM 0x%016llX - 0x%016llX\n",
+			mc->sysvm_size >> 20, mc->sysvm_start, mc->sysvm_end);
 }
 
 /*
@@ -1663,12 +1663,12 @@ static int amdgpu_init(struct amdgpu_device *adev)
 
 static void amdgpu_fill_reset_magic(struct amdgpu_device *adev)
 {
-	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
+	memcpy(adev->reset_magic, adev->sysvm.ptr, AMDGPU_RESET_MAGIC_NUM);
 }
 
 static bool amdgpu_check_vram_lost(struct amdgpu_device *adev)
 {
-	return !!memcmp(adev->gart.ptr, adev->reset_magic,
+	return !!memcmp(adev->sysvm.ptr, adev->reset_magic,
 			AMDGPU_RESET_MAGIC_NUM);
 }
 
@@ -2052,14 +2052,14 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	adev->flags = flags;
 	adev->asic_type = flags & AMD_ASIC_MASK;
 	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
-	adev->mc.gtt_size = 512 * 1024 * 1024;
+	adev->mc.sysvm_size = 512 * 1024 * 1024;
 	adev->accel_working = false;
 	adev->num_rings = 0;
 	adev->mman.buffer_funcs = NULL;
 	adev->mman.buffer_funcs_ring = NULL;
 	adev->vm_manager.vm_pte_funcs = NULL;
 	adev->vm_manager.vm_pte_num_rings = 0;
-	adev->gart.gart_funcs = NULL;
+	adev->sysvm.sysvm_funcs = NULL;
 	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
 
 	adev->smc_rreg = &amdgpu_invalid_rreg;
@@ -2110,8 +2110,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	INIT_LIST_HEAD(&adev->shadow_list);
 	mutex_init(&adev->shadow_list_lock);
 
-	INIT_LIST_HEAD(&adev->gtt_list);
-	spin_lock_init(&adev->gtt_list_lock);
+	INIT_LIST_HEAD(&adev->sysvm_list);
+	spin_lock_init(&adev->sysvm_list_lock);
 
 	INIT_LIST_HEAD(&adev->ring_lru_list);
 	spin_lock_init(&adev->ring_lru_list_lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
deleted file mode 100644
index c808388..0000000
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ /dev/null
@@ -1,423 +0,0 @@
-/*
- * Copyright 2008 Advanced Micro Devices, Inc.
- * Copyright 2008 Red Hat Inc.
- * Copyright 2009 Jerome Glisse.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Dave Airlie
- *          Alex Deucher
- *          Jerome Glisse
- */
-#include <drm/drmP.h>
-#include <drm/amdgpu_drm.h>
-#include "amdgpu.h"
-
-/*
- * GART
- * The GART (Graphics Aperture Remapping Table) is an aperture
- * in the GPU's address space.  System pages can be mapped into
- * the aperture and look like contiguous pages from the GPU's
- * perspective.  A page table maps the pages in the aperture
- * to the actual backing pages in system memory.
- *
- * Radeon GPUs support both an internal GART, as described above,
- * and AGP.  AGP works similarly, but the GART table is configured
- * and maintained by the northbridge rather than the driver.
- * Radeon hw has a separate AGP aperture that is programmed to
- * point to the AGP aperture provided by the northbridge and the
- * requests are passed through to the northbridge aperture.
- * Both AGP and internal GART can be used at the same time, however
- * that is not currently supported by the driver.
- *
- * This file handles the common internal GART management.
- */
-
-/*
- * Common GART table functions.
- */
-
-/**
- * amdgpu_gart_set_defaults - set the default gtt_size
- *
- * @adev: amdgpu_device pointer
- *
- * Set the default gtt_size based on parameters and available VRAM.
- */
-void amdgpu_gart_set_defaults(struct amdgpu_device *adev)
-{
-	/* unless the user had overridden it, set the gart
-	 * size equal to the 1024 or vram, whichever is larger.
-	 */
-	if (amdgpu_gart_size == -1)
-		adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
-					adev->mc.mc_vram_size);
-	else
-		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
-}
-
-/**
- * amdgpu_gart_table_ram_alloc - allocate system ram for gart page table
- *
- * @adev: amdgpu_device pointer
- *
- * Allocate system memory for GART page table
- * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
- * gart table to be in system memory.
- * Returns 0 for success, -ENOMEM for failure.
- */
-int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev)
-{
-	void *ptr;
-
-	ptr = pci_alloc_consistent(adev->pdev, adev->gart.table_size,
-				   &adev->gart.table_addr);
-	if (ptr == NULL) {
-		return -ENOMEM;
-	}
-#ifdef CONFIG_X86
-	if (0) {
-		set_memory_uc((unsigned long)ptr,
-			      adev->gart.table_size >> PAGE_SHIFT);
-	}
-#endif
-	adev->gart.ptr = ptr;
-	memset((void *)adev->gart.ptr, 0, adev->gart.table_size);
-	return 0;
-}
-
-/**
- * amdgpu_gart_table_ram_free - free system ram for gart page table
- *
- * @adev: amdgpu_device pointer
- *
- * Free system memory for GART page table
- * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
- * gart table to be in system memory.
- */
-void amdgpu_gart_table_ram_free(struct amdgpu_device *adev)
-{
-	if (adev->gart.ptr == NULL) {
-		return;
-	}
-#ifdef CONFIG_X86
-	if (0) {
-		set_memory_wb((unsigned long)adev->gart.ptr,
-			      adev->gart.table_size >> PAGE_SHIFT);
-	}
-#endif
-	pci_free_consistent(adev->pdev, adev->gart.table_size,
-			    (void *)adev->gart.ptr,
-			    adev->gart.table_addr);
-	adev->gart.ptr = NULL;
-	adev->gart.table_addr = 0;
-}
-
-/**
- * amdgpu_gart_table_vram_alloc - allocate vram for gart page table
- *
- * @adev: amdgpu_device pointer
- *
- * Allocate video memory for GART page table
- * (pcie r4xx, r5xx+).  These asics require the
- * gart table to be in video memory.
- * Returns 0 for success, error for failure.
- */
-int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
-{
-	int r;
-
-	if (adev->gart.robj == NULL) {
-		r = amdgpu_bo_create(adev, adev->gart.table_size,
-				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
-				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-				     NULL, NULL, &adev->gart.robj);
-		if (r) {
-			return r;
-		}
-	}
-	return 0;
-}
-
-/**
- * amdgpu_gart_table_vram_pin - pin gart page table in vram
- *
- * @adev: amdgpu_device pointer
- *
- * Pin the GART page table in vram so it will not be moved
- * by the memory manager (pcie r4xx, r5xx+).  These asics require the
- * gart table to be in video memory.
- * Returns 0 for success, error for failure.
- */
-int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
-{
-	uint64_t gpu_addr;
-	int r;
-
-	r = amdgpu_bo_reserve(adev->gart.robj, false);
-	if (unlikely(r != 0))
-		return r;
-	r = amdgpu_bo_pin(adev->gart.robj,
-				AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
-	if (r) {
-		amdgpu_bo_unreserve(adev->gart.robj);
-		return r;
-	}
-	r = amdgpu_bo_kmap(adev->gart.robj, &adev->gart.ptr);
-	if (r)
-		amdgpu_bo_unpin(adev->gart.robj);
-	amdgpu_bo_unreserve(adev->gart.robj);
-	adev->gart.table_addr = gpu_addr;
-	return r;
-}
-
-/**
- * amdgpu_gart_table_vram_unpin - unpin gart page table in vram
- *
- * @adev: amdgpu_device pointer
- *
- * Unpin the GART page table in vram (pcie r4xx, r5xx+).
- * These asics require the gart table to be in video memory.
- */
-void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
-{
-	int r;
-
-	if (adev->gart.robj == NULL) {
-		return;
-	}
-	r = amdgpu_bo_reserve(adev->gart.robj, true);
-	if (likely(r == 0)) {
-		amdgpu_bo_kunmap(adev->gart.robj);
-		amdgpu_bo_unpin(adev->gart.robj);
-		amdgpu_bo_unreserve(adev->gart.robj);
-		adev->gart.ptr = NULL;
-	}
-}
-
-/**
- * amdgpu_gart_table_vram_free - free gart page table vram
- *
- * @adev: amdgpu_device pointer
- *
- * Free the video memory used for the GART page table
- * (pcie r4xx, r5xx+).  These asics require the gart table to
- * be in video memory.
- */
-void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
-{
-	if (adev->gart.robj == NULL) {
-		return;
-	}
-	amdgpu_bo_unref(&adev->gart.robj);
-}
-
-/*
- * Common gart functions.
- */
-/**
- * amdgpu_gart_unbind - unbind pages from the gart page table
- *
- * @adev: amdgpu_device pointer
- * @offset: offset into the GPU's gart aperture
- * @pages: number of pages to unbind
- *
- * Unbinds the requested pages from the gart page table and
- * replaces them with the dummy page (all asics).
- * Returns 0 for success, -EINVAL for failure.
- */
-int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
-			int pages)
-{
-	unsigned t;
-	unsigned p;
-	int i, j;
-	u64 page_base;
-	/* Starting from VEGA10, system bit must be 0 to mean invalid. */
-	uint64_t flags = 0;
-
-	if (!adev->gart.ready) {
-		WARN(1, "trying to unbind memory from uninitialized GART !\n");
-		return -EINVAL;
-	}
-
-	t = offset / AMDGPU_GPU_PAGE_SIZE;
-	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
-	for (i = 0; i < pages; i++, p++) {
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-		adev->gart.pages[p] = NULL;
-#endif
-		page_base = adev->dummy_page.addr;
-		if (!adev->gart.ptr)
-			continue;
-
-		for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
-			amdgpu_gart_set_pte_pde(adev, adev->gart.ptr,
-						t, page_base, flags);
-			page_base += AMDGPU_GPU_PAGE_SIZE;
-		}
-	}
-	mb();
-	amdgpu_gart_flush_gpu_tlb(adev, 0);
-	return 0;
-}
-
-/**
- * amdgpu_gart_map - map dma_addresses into GART entries
- *
- * @adev: amdgpu_device pointer
- * @offset: offset into the GPU's gart aperture
- * @pages: number of pages to bind
- * @dma_addr: DMA addresses of pages
- *
- * Map the dma_addresses into GART entries (all asics).
- * Returns 0 for success, -EINVAL for failure.
- */
-int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
-		    int pages, dma_addr_t *dma_addr, uint64_t flags,
-		    void *dst)
-{
-	uint64_t page_base;
-	unsigned i, j, t;
-
-	if (!adev->gart.ready) {
-		WARN(1, "trying to bind memory to uninitialized GART !\n");
-		return -EINVAL;
-	}
-
-	t = offset / AMDGPU_GPU_PAGE_SIZE;
-
-	for (i = 0; i < pages; i++) {
-		page_base = dma_addr[i];
-		for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
-			amdgpu_gart_set_pte_pde(adev, dst, t, page_base, flags);
-			page_base += AMDGPU_GPU_PAGE_SIZE;
-		}
-	}
-	return 0;
-}
-
-/**
- * amdgpu_gart_bind - bind pages into the gart page table
- *
- * @adev: amdgpu_device pointer
- * @offset: offset into the GPU's gart aperture
- * @pages: number of pages to bind
- * @pagelist: pages to bind
- * @dma_addr: DMA addresses of pages
- *
- * Binds the requested pages to the gart page table
- * (all asics).
- * Returns 0 for success, -EINVAL for failure.
- */
-int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
-		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
-		     uint64_t flags)
-{
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-	unsigned i,t,p;
-#endif
-	int r;
-
-	if (!adev->gart.ready) {
-		WARN(1, "trying to bind memory to uninitialized GART !\n");
-		return -EINVAL;
-	}
-
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-	t = offset / AMDGPU_GPU_PAGE_SIZE;
-	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
-	for (i = 0; i < pages; i++, p++)
-		adev->gart.pages[p] = pagelist[i];
-#endif
-
-	if (adev->gart.ptr) {
-		r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
-			    adev->gart.ptr);
-		if (r)
-			return r;
-	}
-
-	mb();
-	amdgpu_gart_flush_gpu_tlb(adev, 0);
-	return 0;
-}
-
-/**
- * amdgpu_gart_init - init the driver info for managing the gart
- *
- * @adev: amdgpu_device pointer
- *
- * Allocate the dummy page and init the gart driver info (all asics).
- * Returns 0 for success, error for failure.
- */
-int amdgpu_gart_init(struct amdgpu_device *adev)
-{
-	int r;
-
-	if (adev->dummy_page.page)
-		return 0;
-
-	/* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
-	if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) {
-		DRM_ERROR("Page size is smaller than GPU page size!\n");
-		return -EINVAL;
-	}
-	r = amdgpu_dummy_page_init(adev);
-	if (r)
-		return r;
-	/* Compute table size */
-	adev->gart.num_cpu_pages = adev->mc.gtt_size / PAGE_SIZE;
-	adev->gart.num_gpu_pages = adev->mc.gtt_size / AMDGPU_GPU_PAGE_SIZE;
-	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
-		 adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
-
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-	/* Allocate pages table */
-	adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages);
-	if (adev->gart.pages == NULL) {
-		amdgpu_gart_fini(adev);
-		return -ENOMEM;
-	}
-#endif
-
-	return 0;
-}
-
-/**
- * amdgpu_gart_fini - tear down the driver info for managing the gart
- *
- * @adev: amdgpu_device pointer
- *
- * Tear down the gart driver info and free the dummy page (all asics).
- */
-void amdgpu_gart_fini(struct amdgpu_device *adev)
-{
-	if (adev->gart.ready) {
-		/* unbind pages */
-		amdgpu_gart_unbind(adev, 0, adev->gart.num_cpu_pages);
-	}
-	adev->gart.ready = false;
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-	vfree(adev->gart.pages);
-	adev->gart.pages = NULL;
-#endif
-	amdgpu_dummy_page_fini(adev);
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 4510627..73a1c64 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -82,7 +82,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
 	if (r)
 		kfree(*job);
 	else
-		(*job)->vm_pd_addr = adev->gart.table_addr;
+		(*job)->vm_pd_addr = adev->sysvm.table_addr;
 
 	return r;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c
new file mode 100644
index 0000000..50fc8d7
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c
@@ -0,0 +1,423 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include <drm/amdgpu_drm.h>
+#include "amdgpu.h"
+
+/*
+ * SYSVM
+ * The system VM (previously called GART) is an aperture
+ * in the GPU's address space.  System pages can be mapped into
+ * the aperture and look like contiguous pages from the GPU's
+ * perspective.  A page table maps the pages in the aperture
+ * to the actual backing pages in system memory.
+ *
+ * Radeon GPUs support both an internal SYSVM based GART, as described above,
+ * and AGP.  AGP works similarly, but the GART table is configured
+ * and maintained by the northbridge rather than the driver.
+ * Radeon hw has a separate AGP aperture that is programmed to
+ * point to the AGP aperture provided by the northbridge and the
+ * requests are passed through to the northbridge aperture.
+ * Both AGP and internal GART can be used at the same time, however
+ * that is not currently supported by the driver.
+ *
+ * This file handles the common internal SYSVM management.
+ */
+
+/*
+ * Common SYSVM table functions.
+ */
+
+/**
+ * amdgpu_sysvm_set_defaults - set the default sysvm_size
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set the default sysvm_size based on parameters and available VRAM.
+ */
+void amdgpu_sysvm_set_defaults(struct amdgpu_device *adev)
+{
+	/* unless the user had overridden it, set the gart
+	 * size equal to the 1024 or vram, whichever is larger.
+	 */
+	if (amdgpu_gart_size == -1)
+		adev->mc.sysvm_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
+					adev->mc.mc_vram_size);
+	else
+		adev->mc.sysvm_size = (uint64_t)amdgpu_gart_size << 20;
+}
+
+/**
+ * amdgpu_sysvm_table_ram_alloc - allocate system ram for gart page table
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate system memory for SYSVM page table
+ * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
+ * gart table to be in system memory.
+ * Returns 0 for success, -ENOMEM for failure.
+ */
+int amdgpu_sysvm_table_ram_alloc(struct amdgpu_device *adev)
+{
+	void *ptr;
+
+	ptr = pci_alloc_consistent(adev->pdev, adev->sysvm.table_size,
+				   &adev->sysvm.table_addr);
+	if (ptr == NULL) {
+		return -ENOMEM;
+	}
+#ifdef CONFIG_X86
+	if (0) {
+		set_memory_uc((unsigned long)ptr,
+			      adev->sysvm.table_size >> PAGE_SHIFT);
+	}
+#endif
+	adev->sysvm.ptr = ptr;
+	memset((void *)adev->sysvm.ptr, 0, adev->sysvm.table_size);
+	return 0;
+}
+
+/**
+ * amdgpu_sysvm_table_ram_free - free system ram for gart page table
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Free system memory for SYSVM page table
+ * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
+ * gart table to be in system memory.
+ */
+void amdgpu_sysvm_table_ram_free(struct amdgpu_device *adev)
+{
+	if (adev->sysvm.ptr == NULL) {
+		return;
+	}
+#ifdef CONFIG_X86
+	if (0) {
+		set_memory_wb((unsigned long)adev->sysvm.ptr,
+			      adev->sysvm.table_size >> PAGE_SHIFT);
+	}
+#endif
+	pci_free_consistent(adev->pdev, adev->sysvm.table_size,
+			    (void *)adev->sysvm.ptr,
+			    adev->sysvm.table_addr);
+	adev->sysvm.ptr = NULL;
+	adev->sysvm.table_addr = 0;
+}
+
+/**
+ * amdgpu_sysvm_table_vram_alloc - allocate vram for gart page table
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate video memory for SYSVM page table
+ * (pcie r4xx, r5xx+).  These asics require the
+ * gart table to be in video memory.
+ * Returns 0 for success, error for failure.
+ */
+int amdgpu_sysvm_table_vram_alloc(struct amdgpu_device *adev)
+{
+	int r;
+
+	if (adev->sysvm.robj == NULL) {
+		r = amdgpu_bo_create(adev, adev->sysvm.table_size,
+				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
+				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
+				     NULL, NULL, &adev->sysvm.robj);
+		if (r) {
+			return r;
+		}
+	}
+	return 0;
+}
+
+/**
+ * amdgpu_sysvm_table_vram_pin - pin gart page table in vram
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Pin the SYSVM page table in vram so it will not be moved
+ * by the memory manager (pcie r4xx, r5xx+).  These asics require the
+ * gart table to be in video memory.
+ * Returns 0 for success, error for failure.
+ */
+int amdgpu_sysvm_table_vram_pin(struct amdgpu_device *adev)
+{
+	uint64_t gpu_addr;
+	int r;
+
+	r = amdgpu_bo_reserve(adev->sysvm.robj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = amdgpu_bo_pin(adev->sysvm.robj,
+				AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
+	if (r) {
+		amdgpu_bo_unreserve(adev->sysvm.robj);
+		return r;
+	}
+	r = amdgpu_bo_kmap(adev->sysvm.robj, &adev->sysvm.ptr);
+	if (r)
+		amdgpu_bo_unpin(adev->sysvm.robj);
+	amdgpu_bo_unreserve(adev->sysvm.robj);
+	adev->sysvm.table_addr = gpu_addr;
+	return r;
+}
+
+/**
+ * amdgpu_sysvm_table_vram_unpin - unpin gart page table in vram
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Unpin the GART page table in vram (pcie r4xx, r5xx+).
+ * These asics require the gart table to be in video memory.
+ */
+void amdgpu_sysvm_table_vram_unpin(struct amdgpu_device *adev)
+{
+	int r;
+
+	if (adev->sysvm.robj == NULL) {
+		return;
+	}
+	r = amdgpu_bo_reserve(adev->sysvm.robj, true);
+	if (likely(r == 0)) {
+		amdgpu_bo_kunmap(adev->sysvm.robj);
+		amdgpu_bo_unpin(adev->sysvm.robj);
+		amdgpu_bo_unreserve(adev->sysvm.robj);
+		adev->sysvm.ptr = NULL;
+	}
+}
+
+/**
+ * amdgpu_sysvm_table_vram_free - free gart page table vram
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Free the video memory used for the GART page table
+ * (pcie r4xx, r5xx+).  These asics require the gart table to
+ * be in video memory.
+ */
+void amdgpu_sysvm_table_vram_free(struct amdgpu_device *adev)
+{
+	if (adev->sysvm.robj == NULL) {
+		return;
+	}
+	amdgpu_bo_unref(&adev->sysvm.robj);
+}
+
+/*
+ * Common gart functions.
+ */
+/**
+ * amdgpu_sysvm_unbind - unbind pages from the gart page table
+ *
+ * @adev: amdgpu_device pointer
+ * @offset: offset into the GPU's gart aperture
+ * @pages: number of pages to unbind
+ *
+ * Unbinds the requested pages from the gart page table and
+ * replaces them with the dummy page (all asics).
+ * Returns 0 for success, -EINVAL for failure.
+ */
+int amdgpu_sysvm_unbind(struct amdgpu_device *adev, uint64_t offset,
+			int pages)
+{
+	unsigned t;
+	unsigned p;
+	int i, j;
+	u64 page_base;
+	/* Starting from VEGA10, system bit must be 0 to mean invalid. */
+	uint64_t flags = 0;
+
+	if (!adev->sysvm.ready) {
+		WARN(1, "trying to unbind memory from uninitialized GART!\n");
+		return -EINVAL;
+	}
+
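+	/* t indexes GPU-sized pages, p indexes the CPU pages that back them */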
+	t = offset / AMDGPU_GPU_PAGE_SIZE;
+	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+	for (i = 0; i < pages; i++, p++) {
+#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
+		adev->sysvm.pages[p] = NULL;
+#endif
+		page_base = adev->dummy_page.addr;
+		if (!adev->sysvm.ptr)
+			continue;
+
+		for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
+			amdgpu_sysvm_set_pte_pde(adev, adev->sysvm.ptr,
+						t, page_base, flags);
+			page_base += AMDGPU_GPU_PAGE_SIZE;
+		}
+	}
+	mb();
+	amdgpu_sysvm_flush_gpu_tlb(adev, 0);
+	return 0;
+}
+
+/**
+ * amdgpu_sysvm_map - map dma_addresses into GART entries
+ *
+ * @adev: amdgpu_device pointer
+ * @offset: offset into the GPU's gart aperture
+ * @pages: number of pages to bind
+ * @dma_addr: DMA addresses of pages
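+ * @flags: page table entry flags to use for the mapping
+ * @dst: CPU address of the page table entries to write (the GART table or an IB)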
+ *
+ * Map the dma_addresses into GART entries (all asics).
+ * Returns 0 for success, -EINVAL for failure.
+ */
+int amdgpu_sysvm_map(struct amdgpu_device *adev, uint64_t offset,
+		    int pages, dma_addr_t *dma_addr, uint64_t flags,
+		    void *dst)
+{
+	uint64_t page_base;
+	unsigned i, j, t;
+
+	if (!adev->sysvm.ready) {
+		WARN(1, "trying to bind memory to uninitialized GART!\n");
+		return -EINVAL;
+	}
+
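+	/* first page table entry to write, counted in GPU pages */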
+	t = offset / AMDGPU_GPU_PAGE_SIZE;
+
+	for (i = 0; i < pages; i++) {
+		page_base = dma_addr[i];
+		for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
+			amdgpu_sysvm_set_pte_pde(adev, dst, t, page_base, flags);
+			page_base += AMDGPU_GPU_PAGE_SIZE;
+		}
+	}
+	return 0;
+}
+
+/**
+ * amdgpu_sysvm_bind - bind pages into the gart page table
+ *
+ * @adev: amdgpu_device pointer
+ * @offset: offset into the GPU's gart aperture
+ * @pages: number of pages to bind
+ * @pagelist: pages to bind
+ * @dma_addr: DMA addresses of pages
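+ * @flags: page table entry flags to use for the mapping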
+ *
+ * Binds the requested pages to the gart page table
+ * (all asics).
+ * Returns 0 for success, -EINVAL for failure.
+ */
+int amdgpu_sysvm_bind(struct amdgpu_device *adev, uint64_t offset,
+		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
+		     uint64_t flags)
+{
+#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
+	unsigned i, t, p;
+#endif
+	int r;
+
+	if (!adev->sysvm.ready) {
+		WARN(1, "trying to bind memory to uninitialized GART!\n");
+		return -EINVAL;
+	}
+
+#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
+	t = offset / AMDGPU_GPU_PAGE_SIZE;
+	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+	for (i = 0; i < pages; i++, p++)
+		adev->sysvm.pages[p] = pagelist[i];
+#endif
+
+	if (adev->sysvm.ptr) {
+		r = amdgpu_sysvm_map(adev, offset, pages, dma_addr, flags,
+			    adev->sysvm.ptr);
+		if (r)
+			return r;
+	}
+
+	mb();
+	amdgpu_sysvm_flush_gpu_tlb(adev, 0);
+	return 0;
+}
+
+/**
+ * amdgpu_sysvm_init - init the driver info for managing the gart
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate the dummy page and init the gart driver info (all asics).
+ * Returns 0 for success, error for failure.
+ */
+int amdgpu_sysvm_init(struct amdgpu_device *adev)
+{
+	int r;
+
+	if (adev->dummy_page.page)
+		return 0;
+
+	/* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
+	if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) {
+		DRM_ERROR("Page size is smaller than GPU page size!\n");
+		return -EINVAL;
+	}
+	r = amdgpu_dummy_page_init(adev);
+	if (r)
+		return r;
+	/* Compute table size */
+	adev->sysvm.num_cpu_pages = adev->mc.sysvm_size / PAGE_SIZE;
+	adev->sysvm.num_gpu_pages = adev->mc.sysvm_size / AMDGPU_GPU_PAGE_SIZE;
+	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
+		 adev->sysvm.num_cpu_pages, adev->sysvm.num_gpu_pages);
+
+#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
+	/* Allocate pages table */
+	adev->sysvm.pages = vzalloc(sizeof(void *) * adev->sysvm.num_cpu_pages);
+	if (adev->sysvm.pages == NULL) {
+		amdgpu_sysvm_fini(adev);
+		return -ENOMEM;
+	}
+#endif
+
+	return 0;
+}
+
+/**
+ * amdgpu_sysvm_fini - tear down the driver info for managing the gart
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Tear down the gart driver info and free the dummy page (all asics).
+ */
+void amdgpu_sysvm_fini(struct amdgpu_device *adev)
+{
+	if (adev->sysvm.ready) {
+		/* unbind pages */
+		amdgpu_sysvm_unbind(adev, 0, adev->sysvm.num_cpu_pages);
+	}
+	adev->sysvm.ready = false;
+#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
+	vfree(adev->sysvm.pages);
+	adev->sysvm.pages = NULL;
+#endif
+	amdgpu_dummy_page_fini(adev);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index d02e611..651712e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -32,8 +32,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 {
 	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 	struct amdgpu_bo *vram_obj = NULL;
-	struct amdgpu_bo **gtt_obj = NULL;
-	uint64_t gtt_addr, vram_addr;
+	struct amdgpu_bo **sysvm_obj = NULL;
+	uint64_t sysvm_addr, vram_addr;
 	unsigned n, size;
 	int i, r;
 
@@ -42,7 +42,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 	/* Number of tests =
 	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
 	 */
-	n = adev->mc.gtt_size - AMDGPU_IB_POOL_SIZE*64*1024;
+	n = adev->mc.sysvm_size - AMDGPU_IB_POOL_SIZE*64*1024;
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
 		if (adev->rings[i])
 			n -= adev->rings[i]->ring_size;
@@ -52,8 +52,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 		n -= adev->irq.ih.ring_size;
 	n /= size;
 
-	gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
-	if (!gtt_obj) {
+	sysvm_obj = kzalloc(n * sizeof(*sysvm_obj), GFP_KERNEL);
+	if (!sysvm_obj) {
 		DRM_ERROR("Failed to allocate %d pointers\n", n);
 		r = 1;
 		goto out_cleanup;
@@ -75,42 +75,42 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 		goto out_unres;
 	}
 	for (i = 0; i < n; i++) {
-		void *gtt_map, *vram_map;
-		void **gtt_start, **gtt_end;
+		void *sysvm_map, *vram_map;
+		void **sysvm_start, **sysvm_end;
 		void **vram_start, **vram_end;
 		struct dma_fence *fence = NULL;
 
 		r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
 				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
-				     NULL, gtt_obj + i);
+				     NULL, sysvm_obj + i);
 		if (r) {
 			DRM_ERROR("Failed to create GTT object %d\n", i);
 			goto out_lclean;
 		}
 
-		r = amdgpu_bo_reserve(gtt_obj[i], false);
+		r = amdgpu_bo_reserve(sysvm_obj[i], false);
 		if (unlikely(r != 0))
 			goto out_lclean_unref;
-		r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, &gtt_addr);
+		r = amdgpu_bo_pin(sysvm_obj[i], AMDGPU_GEM_DOMAIN_GTT, &sysvm_addr);
 		if (r) {
 			DRM_ERROR("Failed to pin GTT object %d\n", i);
 			goto out_lclean_unres;
 		}
 
-		r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
+		r = amdgpu_bo_kmap(sysvm_obj[i], &sysvm_map);
 		if (r) {
 			DRM_ERROR("Failed to map GTT object %d\n", i);
 			goto out_lclean_unpin;
 		}
 
-		for (gtt_start = gtt_map, gtt_end = gtt_map + size;
-		     gtt_start < gtt_end;
-		     gtt_start++)
-			*gtt_start = gtt_start;
+		for (sysvm_start = sysvm_map, sysvm_end = sysvm_map + size;
+		     sysvm_start < sysvm_end;
+		     sysvm_start++)
+			*sysvm_start = sysvm_start;
 
-		amdgpu_bo_kunmap(gtt_obj[i]);
+		amdgpu_bo_kunmap(sysvm_obj[i]);
 
-		r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
+		r = amdgpu_copy_buffer(ring, sysvm_addr, vram_addr,
 				       size, NULL, &fence, false, false);
 
 		if (r) {
@@ -132,21 +132,21 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 			goto out_lclean_unpin;
 		}
 
-		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
+		for (sysvm_start = sysvm_map, sysvm_end = sysvm_map + size,
 		     vram_start = vram_map, vram_end = vram_map + size;
 		     vram_start < vram_end;
-		     gtt_start++, vram_start++) {
-			if (*vram_start != gtt_start) {
+		     sysvm_start++, vram_start++) {
+			if (*vram_start != sysvm_start) {
 				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
 					  "expected 0x%p (GTT/VRAM offset "
 					  "0x%16llx/0x%16llx)\n",
-					  i, *vram_start, gtt_start,
+					  i, *vram_start, sysvm_start,
 					  (unsigned long long)
-					  (gtt_addr - adev->mc.gtt_start +
-					   (void*)gtt_start - gtt_map),
+					  (sysvm_addr - adev->mc.sysvm_start +
+					   (void*)sysvm_start - sysvm_map),
 					  (unsigned long long)
 					  (vram_addr - adev->mc.vram_start +
-					   (void*)gtt_start - gtt_map));
+					   (void*)sysvm_start - sysvm_map));
 				amdgpu_bo_kunmap(vram_obj);
 				goto out_lclean_unpin;
 			}
@@ -155,7 +155,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 
 		amdgpu_bo_kunmap(vram_obj);
 
-		r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr,
+		r = amdgpu_copy_buffer(ring, vram_addr, sysvm_addr,
 				       size, NULL, &fence, false, false);
 
 		if (r) {
@@ -171,49 +171,49 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 
 		dma_fence_put(fence);
 
-		r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
+		r = amdgpu_bo_kmap(sysvm_obj[i], &sysvm_map);
 		if (r) {
 			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
 			goto out_lclean_unpin;
 		}
 
-		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
+		for (sysvm_start = sysvm_map, sysvm_end = sysvm_map + size,
 		     vram_start = vram_map, vram_end = vram_map + size;
-		     gtt_start < gtt_end;
-		     gtt_start++, vram_start++) {
-			if (*gtt_start != vram_start) {
+		     sysvm_start < sysvm_end;
+		     sysvm_start++, vram_start++) {
+			if (*sysvm_start != vram_start) {
 				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
 					  "expected 0x%p (VRAM/GTT offset "
 					  "0x%16llx/0x%16llx)\n",
-					  i, *gtt_start, vram_start,
+					  i, *sysvm_start, vram_start,
 					  (unsigned long long)
 					  (vram_addr - adev->mc.vram_start +
 					   (void*)vram_start - vram_map),
 					  (unsigned long long)
-					  (gtt_addr - adev->mc.gtt_start +
+					  (sysvm_addr - adev->mc.sysvm_start +
 					   (void*)vram_start - vram_map));
-				amdgpu_bo_kunmap(gtt_obj[i]);
+				amdgpu_bo_kunmap(sysvm_obj[i]);
 				goto out_lclean_unpin;
 			}
 		}
 
-		amdgpu_bo_kunmap(gtt_obj[i]);
+		amdgpu_bo_kunmap(sysvm_obj[i]);
 
 		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
-			 gtt_addr - adev->mc.gtt_start);
+			 sysvm_addr - adev->mc.sysvm_start);
 		continue;
 
 out_lclean_unpin:
-		amdgpu_bo_unpin(gtt_obj[i]);
+		amdgpu_bo_unpin(sysvm_obj[i]);
 out_lclean_unres:
-		amdgpu_bo_unreserve(gtt_obj[i]);
+		amdgpu_bo_unreserve(sysvm_obj[i]);
 out_lclean_unref:
-		amdgpu_bo_unref(&gtt_obj[i]);
+		amdgpu_bo_unref(&sysvm_obj[i]);
 out_lclean:
 		for (--i; i >= 0; --i) {
-			amdgpu_bo_unpin(gtt_obj[i]);
-			amdgpu_bo_unreserve(gtt_obj[i]);
-			amdgpu_bo_unref(&gtt_obj[i]);
+			amdgpu_bo_unpin(sysvm_obj[i]);
+			amdgpu_bo_unreserve(sysvm_obj[i]);
+			amdgpu_bo_unref(&sysvm_obj[i]);
 		}
 		if (fence)
 			dma_fence_put(fence);
@@ -226,7 +226,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 out_unref:
 	amdgpu_bo_unref(&vram_obj);
 out_cleanup:
-	kfree(gtt_obj);
+	kfree(sysvm_obj);
 	if (r) {
 		pr_warn("Error while testing BO move\n");
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 5c7a6c5..9240357 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -102,7 +102,7 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
 		goto error_bo;
 	}
 
-	mutex_init(&adev->mman.gtt_window_lock);
+	mutex_init(&adev->mman.sysvm_window_lock);
 
 	ring = adev->mman.buffer_funcs_ring;
 	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
@@ -130,7 +130,7 @@ static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
 	if (adev->mman.mem_global_referenced) {
 		amd_sched_entity_fini(adev->mman.entity.sched,
 				      &adev->mman.entity);
-		mutex_destroy(&adev->mman.gtt_window_lock);
+		mutex_destroy(&adev->mman.sysvm_window_lock);
 		drm_global_item_unref(&adev->mman.bo_global_ref.ref);
 		drm_global_item_unref(&adev->mman.mem_global_ref);
 		adev->mman.mem_global_referenced = false;
@@ -158,7 +158,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		break;
 	case TTM_PL_TT:
 		man->func = &amdgpu_gtt_mgr_func;
-		man->gpu_offset = adev->mc.gtt_start;
+		man->gpu_offset = adev->mc.sysvm_start;
 		man->available_caching = TTM_PL_MASK_CACHING;
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
@@ -289,7 +289,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 	new_start = amdgpu_mm_node_addr(bo, new_mm, new_mem);
 
 	num_pages = new_mem->num_pages;
-	mutex_lock(&adev->mman.gtt_window_lock);
+	mutex_lock(&adev->mman.sysvm_window_lock);
 	while (num_pages) {
 		unsigned long cur_pages = min(min(old_size, new_size),
 					      (u64)AMDGPU_GTT_MAX_TRANSFER_SIZE);
@@ -341,14 +341,14 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 			new_start += cur_pages * PAGE_SIZE;
 		}
 	}
-	mutex_unlock(&adev->mman.gtt_window_lock);
+	mutex_unlock(&adev->mman.sysvm_window_lock);
 
 	r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
 	dma_fence_put(fence);
 	return r;
 
 error:
-	mutex_unlock(&adev->mman.gtt_window_lock);
+	mutex_unlock(&adev->mman.sysvm_window_lock);
 
 	if (fence)
 		dma_fence_wait(fence, false);
@@ -693,10 +693,10 @@ static int amdgpu_ttm_do_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 	uint64_t flags;
 	int r;
 
-	spin_lock(&gtt->adev->gtt_list_lock);
+	spin_lock(&gtt->adev->sysvm_list_lock);
 	flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, mem);
 	gtt->offset = (u64)mem->start << PAGE_SHIFT;
-	r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
+	r = amdgpu_sysvm_bind(gtt->adev, gtt->offset, ttm->num_pages,
 		ttm->pages, gtt->ttm.dma_address, flags);
 
 	if (r) {
@@ -705,9 +705,9 @@ static int amdgpu_ttm_do_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 		goto error_gart_bind;
 	}
 
-	list_add_tail(&gtt->list, &gtt->adev->gtt_list);
+	list_add_tail(&gtt->list, &gtt->adev->sysvm_list);
 error_gart_bind:
-	spin_unlock(&gtt->adev->gtt_list_lock);
+	spin_unlock(&gtt->adev->sysvm_list_lock);
 	return r;
 
 }
@@ -774,20 +774,20 @@ int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
 	int r;
 
 	bo_mem.mem_type = TTM_PL_TT;
-	spin_lock(&adev->gtt_list_lock);
-	list_for_each_entry_safe(gtt, tmp, &adev->gtt_list, list) {
+	spin_lock(&adev->sysvm_list_lock);
+	list_for_each_entry_safe(gtt, tmp, &adev->sysvm_list, list) {
 		flags = amdgpu_ttm_tt_pte_flags(gtt->adev, &gtt->ttm.ttm, &bo_mem);
-		r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
+		r = amdgpu_sysvm_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
 				     gtt->ttm.ttm.pages, gtt->ttm.dma_address,
 				     flags);
 		if (r) {
-			spin_unlock(&adev->gtt_list_lock);
+			spin_unlock(&adev->sysvm_list_lock);
 			DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
 				  gtt->ttm.ttm.num_pages, gtt->offset);
 			return r;
 		}
 	}
-	spin_unlock(&adev->gtt_list_lock);
+	spin_unlock(&adev->sysvm_list_lock);
 	return 0;
 }
 
@@ -803,8 +803,8 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
 		return 0;
 
 	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
-	spin_lock(&gtt->adev->gtt_list_lock);
-	r = amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages);
+	spin_lock(&gtt->adev->sysvm_list_lock);
+	r = amdgpu_sysvm_unbind(gtt->adev, gtt->offset, ttm->num_pages);
 	if (r) {
 		DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
 			  gtt->ttm.ttm.num_pages, gtt->offset);
@@ -812,7 +812,7 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
 	}
 	list_del_init(&gtt->list);
 error_unbind:
-	spin_unlock(&gtt->adev->gtt_list_lock);
+	spin_unlock(&gtt->adev->sysvm_list_lock);
 	return r;
 }
 
@@ -1037,7 +1037,7 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
 			flags |= AMDGPU_PTE_SNOOPED;
 	}
 
-	flags |= adev->gart.gart_pte_flags;
+	flags |= adev->sysvm.sysvm_pte_flags;
 	flags |= AMDGPU_PTE_READABLE;
 
 	if (!amdgpu_ttm_tt_is_readonly(ttm))
@@ -1144,13 +1144,13 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
 		 (unsigned) (adev->mc.real_vram_size / (1024 * 1024)));
 	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT,
-				adev->mc.gtt_size >> PAGE_SHIFT);
+				adev->mc.sysvm_size >> PAGE_SHIFT);
 	if (r) {
 		DRM_ERROR("Failed initializing GTT heap.\n");
 		return r;
 	}
 	DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
-		 (unsigned)(adev->mc.gtt_size / (1024 * 1024)));
+		 (unsigned)(adev->mc.sysvm_size / (1024 * 1024)));
 
 	adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
 	adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
@@ -1223,7 +1223,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
 	if (adev->gds.oa.total_size)
 		ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
 	ttm_bo_device_release(&adev->mman.bdev);
-	amdgpu_gart_fini(adev);
+	amdgpu_sysvm_fini(adev);
 	amdgpu_ttm_global_fini(adev);
 	adev->mman.initialized = false;
 	DRM_INFO("amdgpu: ttm finalized\n");
@@ -1279,7 +1279,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
 	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
 	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
 
-	*addr = adev->mc.gtt_start;
+	*addr = adev->mc.sysvm_start;
 	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
 		AMDGPU_GPU_PAGE_SIZE;
 
@@ -1296,7 +1296,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
 	src_addr = num_dw * 4;
 	src_addr += job->ibs[0].gpu_addr;
 
-	dst_addr = adev->gart.table_addr;
+	dst_addr = adev->sysvm.table_addr;
 	dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
 	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
 				dst_addr, num_bytes);
@@ -1306,7 +1306,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
 
 	dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
 	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
-	r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
+	r = amdgpu_sysvm_map(adev, 0, num_pages, dma_address, flags,
 			    &job->ibs[0].ptr[num_dw]);
 	if (r)
 		goto error_free;
@@ -1522,7 +1522,7 @@ static int ttm_pl_tt = TTM_PL_TT;
 
 static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
 	{"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram},
-	{"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
+	{"amdgpu_sysvm_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
 	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
 #ifdef CONFIG_SWIOTLB
 	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
@@ -1574,9 +1574,9 @@ static const struct file_operations amdgpu_ttm_vram_fops = {
 	.llseek = default_llseek
 };
 
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
+#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
 
-static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
+static ssize_t amdgpu_ttm_sysvm_read(struct file *f, char __user *buf,
 				   size_t size, loff_t *pos)
 {
 	struct amdgpu_device *adev = file_inode(f)->i_private;
@@ -1590,16 +1590,16 @@ static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
 		struct page *page;
 		void *ptr;
 
-		if (p >= adev->gart.num_cpu_pages)
+		if (p >= adev->sysvm.num_cpu_pages)
 			return result;
 
-		page = adev->gart.pages[p];
+		page = adev->sysvm.pages[p];
 		if (page) {
 			ptr = kmap(page);
 			ptr += off;
 
 			r = copy_to_user(buf, ptr, cur_size);
-			kunmap(adev->gart.pages[p]);
+			kunmap(adev->sysvm.pages[p]);
 		} else
 			r = clear_user(buf, cur_size);
 
@@ -1615,9 +1615,9 @@ static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
 	return result;
 }
 
-static const struct file_operations amdgpu_ttm_gtt_fops = {
+static const struct file_operations amdgpu_ttm_sysvm_fops = {
 	.owner = THIS_MODULE,
-	.read = amdgpu_ttm_gtt_read,
+	.read = amdgpu_ttm_sysvm_read,
 	.llseek = default_llseek
 };
 
@@ -1640,12 +1640,12 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
 	i_size_write(ent->d_inode, adev->mc.mc_vram_size);
 	adev->mman.vram = ent;
 
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-	ent = debugfs_create_file("amdgpu_gtt", S_IFREG | S_IRUGO, root,
-				  adev, &amdgpu_ttm_gtt_fops);
+#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
+	ent = debugfs_create_file("amdgpu_sysvm", S_IFREG | S_IRUGO, root,
+				  adev, &amdgpu_ttm_sysvm_fops);
 	if (IS_ERR(ent))
 		return PTR_ERR(ent);
-	i_size_write(ent->d_inode, adev->mc.gtt_size);
+	i_size_write(ent->d_inode, adev->mc.sysvm_size);
 	adev->mman.gtt = ent;
 
 #endif
@@ -1670,7 +1670,7 @@ static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
 	debugfs_remove(adev->mman.vram);
 	adev->mman.vram = NULL;
 
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
+#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
 	debugfs_remove(adev->mman.gtt);
 	adev->mman.gtt = NULL;
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 4f5c1da..1443038 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -53,7 +53,7 @@ struct amdgpu_mman {
 	const struct amdgpu_buffer_funcs	*buffer_funcs;
 	struct amdgpu_ring			*buffer_funcs_ring;
 
-	struct mutex				gtt_window_lock;
+	struct mutex				sysvm_window_lock;
 	/* Scheduler entity for buffer moves */
 	struct amd_sched_entity			entity;
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 1d1810d..8dbacec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -982,14 +982,14 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
 		value = params->pages_addr ?
 			amdgpu_vm_map_gart(params->pages_addr, addr) :
 			addr;
-		amdgpu_gart_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
+		amdgpu_sysvm_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
 					i, value, flags);
 		addr += incr;
 	}
 
 	/* Flush HDP */
 	mb();
-	amdgpu_gart_flush_gpu_tlb(params->adev, 0);
+	amdgpu_sysvm_flush_gpu_tlb(params->adev, 0);
 }
 
 static int amdgpu_vm_bo_wait(struct amdgpu_device *adev, struct amdgpu_bo *bo)
@@ -1101,7 +1101,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
 		}
 
 		pt = amdgpu_bo_gpu_offset(bo);
-		pt = amdgpu_gart_get_vm_pde(adev, pt);
+		pt = amdgpu_sysvm_get_vm_pde(adev, pt);
 		if (parent->entries[pt_idx].addr == pt)
 			continue;
 
@@ -1564,7 +1564,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
  *
  * @adev: amdgpu_device pointer
  * @exclusive: fence we need to sync to
- * @gtt_flags: flags as they are used for GTT
+ * @sysvm_flags: flags as they are used in the SYSVM
  * @pages_addr: DMA addresses to use for mapping
  * @vm: requested vm
  * @mapping: mapped range and flags to use for the update
@@ -1578,7 +1578,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
  */
 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 				      struct dma_fence *exclusive,
-				      uint64_t gtt_flags,
+				      uint64_t sysvm_flags,
 				      dma_addr_t *pages_addr,
 				      struct amdgpu_vm *vm,
 				      struct amdgpu_bo_va_mapping *mapping,
@@ -1633,8 +1633,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 		}
 
 		if (pages_addr) {
-			if (flags == gtt_flags)
-				src = adev->gart.table_addr +
+			if (flags == sysvm_flags)
+				src = adev->sysvm.table_addr +
 					(addr >> AMDGPU_GPU_PAGE_SHIFT) * 8;
 			else
 				max_entries = min(max_entries, 16ull * 1024ull);
@@ -1681,7 +1681,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	struct amdgpu_vm *vm = bo_va->vm;
 	struct amdgpu_bo_va_mapping *mapping;
 	dma_addr_t *pages_addr = NULL;
-	uint64_t gtt_flags, flags;
+	uint64_t sysvm_flags, flags;
 	struct ttm_mem_reg *mem;
 	struct drm_mm_node *nodes;
 	struct dma_fence *exclusive;
@@ -1706,12 +1706,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 
 	if (bo_va->bo) {
 		flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
-		gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
+		sysvm_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
 			adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ?
 			flags : 0;
 	} else {
 		flags = 0x0;
-		gtt_flags = ~0x0;
+		sysvm_flags = ~0x0;
 	}
 
 	spin_lock(&vm->status_lock);
@@ -1721,7 +1721,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 
 	list_for_each_entry(mapping, &bo_va->invalids, list) {
 		r = amdgpu_vm_bo_split_mapping(adev, exclusive,
-					       gtt_flags, pages_addr, vm,
+					       sysvm_flags, pages_addr, vm,
 					       mapping, flags, nodes,
 					       &bo_va->last_pt_update);
 		if (r)
@@ -1756,7 +1756,7 @@ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
 
 	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
 	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
-	adev->gart.gart_funcs->set_prt(adev, enable);
+	adev->sysvm.sysvm_funcs->set_prt(adev, enable);
 	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
 }
 
@@ -1765,7 +1765,7 @@ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
  */
 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
 {
-	if (!adev->gart.gart_funcs->set_prt)
+	if (!adev->sysvm.sysvm_funcs->set_prt)
 		return;
 
 	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
@@ -1800,7 +1800,7 @@ static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
 {
 	struct amdgpu_prt_cb *cb;
 
-	if (!adev->gart.gart_funcs->set_prt)
+	if (!adev->sysvm.sysvm_funcs->set_prt)
 		return;
 
 	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
@@ -2498,7 +2498,7 @@ static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level)
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
 	struct amdgpu_bo_va_mapping *mapping, *tmp;
-	bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
+	bool prt_fini_needed = !!adev->sysvm.sysvm_funcs->set_prt;
 	int i;
 
 	amd_sched_entity_fini(vm->entity.sched, &vm->entity);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 6986285..708fb84 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -3731,10 +3731,10 @@ static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 {
 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
-	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+	uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
 	unsigned eng = ring->vm_inv_eng;
 
-	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
+	pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
 	pd_addr |= AMDGPU_PTE_VALID;
 
 	gfx_v9_0_write_data_to_reg(ring, usepfp, true,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index a42f483..1290434 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -40,8 +40,8 @@ static void gfxhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
 {
 	uint64_t value;
 
-	BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL));
-	value = adev->gart.table_addr - adev->mc.vram_start
+	BUG_ON(adev->sysvm.table_addr & (~0x0000FFFFFFFFF000ULL));
+	value = adev->sysvm.table_addr - adev->mc.vram_start
 		+ adev->vm_manager.vram_base_offset;
 	value &= 0x0000FFFFFFFFF000ULL;
 	value |= 0x1; /*valid bit*/
@@ -58,14 +58,14 @@ static void gfxhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
 	gfxhub_v1_0_init_gart_pt_regs(adev);
 
 	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
-		     (u32)(adev->mc.gtt_start >> 12));
+		     (u32)(adev->mc.sysvm_start >> 12));
 	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
-		     (u32)(adev->mc.gtt_start >> 44));
+		     (u32)(adev->mc.sysvm_start >> 44));
 
 	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
-		     (u32)(adev->mc.gtt_end >> 12));
+		     (u32)(adev->mc.sysvm_end >> 12));
 	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
-		     (u32)(adev->mc.gtt_end >> 44));
+		     (u32)(adev->mc.sysvm_end >> 44));
 }
 
 static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
@@ -228,7 +228,7 @@ static void gfxhub_v1_0_program_invalidation(struct amdgpu_device *adev)
 	}
 }
 
-int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
+int gfxhub_v1_0_sysvm_enable(struct amdgpu_device *adev)
 {
 	if (amdgpu_sriov_vf(adev)) {
 		/*
@@ -256,7 +256,7 @@ int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
 	return 0;
 }
 
-void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev)
+void gfxhub_v1_0_sysvm_disable(struct amdgpu_device *adev)
 {
 	u32 tmp;
 	u32 i;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
index d2dbb08..d194b7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
@@ -24,8 +24,8 @@
 #ifndef __GFXHUB_V1_0_H__
 #define __GFXHUB_V1_0_H__
 
-int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev);
-void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev);
+int gfxhub_v1_0_sysvm_enable(struct amdgpu_device *adev);
+void gfxhub_v1_0_sysvm_disable(struct amdgpu_device *adev);
 void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
 					  bool value);
 void gfxhub_v1_0_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 5ed6788f..53c3b8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -36,7 +36,7 @@
 #include "dce/dce_6_0_sh_mask.h"
 #include "si_enums.h"
 
-static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev);
+static void gmc_v6_0_set_sysvm_funcs(struct amdgpu_device *adev);
 static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
 static int gmc_v6_0_wait_for_idle(void *handle);
 
@@ -216,7 +216,7 @@ static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
 	return 0;
 }
 
-static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
+static void gmc_v6_0_vram_sysvm_location(struct amdgpu_device *adev,
 				       struct amdgpu_mc *mc)
 {
 	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
@@ -228,8 +228,8 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
 		mc->mc_vram_size = 0xFFC0000000ULL;
 	}
 	amdgpu_vram_location(adev, &adev->mc, base);
-	adev->mc.gtt_base_align = 0;
-	amdgpu_gtt_location(adev, mc);
+	adev->mc.sysvm_base_align = 0;
+	amdgpu_sysvm_location(adev, mc);
 }
 
 static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
@@ -321,8 +321,8 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
 	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
 	adev->mc.visible_vram_size = adev->mc.aper_size;
 
-	amdgpu_gart_set_defaults(adev);
-	gmc_v6_0_vram_gtt_location(adev, &adev->mc);
+	amdgpu_sysvm_set_defaults(adev);
+	gmc_v6_0_vram_sysvm_location(adev, &adev->mc);
 
 	return 0;
 }
@@ -447,15 +447,15 @@ static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
 	}
 }
 
-static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
+static int gmc_v6_0_sysvm_enable(struct amdgpu_device *adev)
 {
 	int r, i;
 
-	if (adev->gart.robj == NULL) {
+	if (adev->sysvm.robj == NULL) {
 		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
 		return -EINVAL;
 	}
-	r = amdgpu_gart_table_vram_pin(adev);
+	r = amdgpu_sysvm_table_vram_pin(adev);
 	if (r)
 		return r;
 	/* Setup TLB control */
@@ -482,9 +482,9 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
 	       (4UL << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
 	       (4UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
 	/* setup context0 */
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.sysvm_start >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.sysvm_end >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->sysvm.table_addr >> 12);
 	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
 			(u32)(adev->dummy_page.addr >> 12));
 	WREG32(mmVM_CONTEXT0_CNTL2, 0);
@@ -508,10 +508,10 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
 	for (i = 1; i < 16; i++) {
 		if (i < 8)
 			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
-			       adev->gart.table_addr >> 12);
+			       adev->sysvm.table_addr >> 12);
 		else
 			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
-			       adev->gart.table_addr >> 12);
+			       adev->sysvm.table_addr >> 12);
 	}
 
 	/* enable context1-15 */
@@ -530,9 +530,9 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
 
 	gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
 	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
-		 (unsigned)(adev->mc.gtt_size >> 20),
-		 (unsigned long long)adev->gart.table_addr);
-	adev->gart.ready = true;
+		 (unsigned)(adev->mc.sysvm_size >> 20),
+		 (unsigned long long)adev->sysvm.table_addr);
+	adev->sysvm.ready = true;
 	return 0;
 }
 
@@ -540,19 +540,19 @@ static int gmc_v6_0_gart_init(struct amdgpu_device *adev)
 {
 	int r;
 
-	if (adev->gart.robj) {
+	if (adev->sysvm.robj) {
 		dev_warn(adev->dev, "gmc_v6_0 PCIE GART already initialized\n");
 		return 0;
 	}
-	r = amdgpu_gart_init(adev);
+	r = amdgpu_sysvm_init(adev);
 	if (r)
 		return r;
-	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
-	adev->gart.gart_pte_flags = 0;
-	return amdgpu_gart_table_vram_alloc(adev);
+	adev->sysvm.table_size = adev->sysvm.num_gpu_pages * 8;
+	adev->sysvm.sysvm_pte_flags = 0;
+	return amdgpu_sysvm_table_vram_alloc(adev);
 }
 
-static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
+static void gmc_v6_0_sysvm_disable(struct amdgpu_device *adev)
 {
 	/*unsigned i;
 
@@ -582,13 +582,13 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
 	WREG32(mmVM_L2_CNTL3,
 	       VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
 	       (0UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
-	amdgpu_gart_table_vram_unpin(adev);
+	amdgpu_sysvm_table_vram_unpin(adev);
 }
 
 static void gmc_v6_0_gart_fini(struct amdgpu_device *adev)
 {
-	amdgpu_gart_table_vram_free(adev);
-	amdgpu_gart_fini(adev);
+	amdgpu_sysvm_table_vram_free(adev);
+	amdgpu_sysvm_fini(adev);
 }
 
 static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
@@ -762,7 +762,7 @@ static int gmc_v6_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	gmc_v6_0_set_gart_funcs(adev);
+	gmc_v6_0_set_sysvm_funcs(adev);
 	gmc_v6_0_set_irq_funcs(adev);
 
 	return 0;
@@ -889,7 +889,7 @@ static int gmc_v6_0_hw_init(void *handle)
 		}
 	}
 
-	r = gmc_v6_0_gart_enable(adev);
+	r = gmc_v6_0_sysvm_enable(adev);
 	if (r)
 		return r;
 
@@ -901,7 +901,7 @@ static int gmc_v6_0_hw_fini(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
-	gmc_v6_0_gart_disable(adev);
+	gmc_v6_0_sysvm_disable(adev);
 
 	return 0;
 }
@@ -1095,7 +1095,7 @@ static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
 	.set_powergating_state = gmc_v6_0_set_powergating_state,
 };
 
-static const struct amdgpu_gart_funcs gmc_v6_0_gart_funcs = {
+static const struct amdgpu_sysvm_funcs gmc_v6_0_sysvm_funcs = {
 	.flush_gpu_tlb = gmc_v6_0_gart_flush_gpu_tlb,
 	.set_pte_pde = gmc_v6_0_gart_set_pte_pde,
 	.set_prt = gmc_v6_0_set_prt,
@@ -1108,10 +1108,10 @@ static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
 	.process = gmc_v6_0_process_interrupt,
 };
 
-static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev)
+static void gmc_v6_0_set_sysvm_funcs(struct amdgpu_device *adev)
 {
-	if (adev->gart.gart_funcs == NULL)
-		adev->gart.gart_funcs = &gmc_v6_0_gart_funcs;
+	if (adev->sysvm.sysvm_funcs == NULL)
+		adev->sysvm.sysvm_funcs = &gmc_v6_0_sysvm_funcs;
 }
 
 static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 15f2c0f..2329bdb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -39,7 +39,7 @@
 
 #include "amdgpu_atombios.h"
 
-static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
+static void gmc_v7_0_set_sysvm_funcs(struct amdgpu_device *adev);
 static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
 static int gmc_v7_0_wait_for_idle(void *handle);
 
@@ -231,7 +231,7 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
 	return 0;
 }
 
-static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
+static void gmc_v7_0_vram_sysvm_location(struct amdgpu_device *adev,
 				       struct amdgpu_mc *mc)
 {
 	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
@@ -244,8 +244,8 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
 		mc->mc_vram_size = 0xFFC0000000ULL;
 	}
 	amdgpu_vram_location(adev, &adev->mc, base);
-	adev->mc.gtt_base_align = 0;
-	amdgpu_gtt_location(adev, mc);
+	adev->mc.sysvm_base_align = 0;
+	amdgpu_sysvm_location(adev, mc);
 }
 
 /**
@@ -373,8 +373,8 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
 	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
 		adev->mc.visible_vram_size = adev->mc.real_vram_size;
 
-	amdgpu_gart_set_defaults(adev);
-	gmc_v7_0_vram_gtt_location(adev, &adev->mc);
+	amdgpu_sysvm_set_defaults(adev);
+	gmc_v7_0_vram_sysvm_location(adev, &adev->mc);
 
 	return 0;
 }
@@ -536,7 +536,7 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
 }
 
 /**
- * gmc_v7_0_gart_enable - gart enable
+ * gmc_v7_0_sysvm_enable - gart enable
  *
  * @adev: amdgpu_device pointer
  *
@@ -546,16 +546,16 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
  * and GPUVM for FSA64 clients (CIK).
  * Returns 0 for success, errors for failure.
  */
-static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
+static int gmc_v7_0_sysvm_enable(struct amdgpu_device *adev)
 {
 	int r, i;
 	u32 tmp;
 
-	if (adev->gart.robj == NULL) {
+	if (adev->sysvm.robj == NULL) {
 		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
 		return -EINVAL;
 	}
-	r = amdgpu_gart_table_vram_pin(adev);
+	r = amdgpu_sysvm_table_vram_pin(adev);
 	if (r)
 		return r;
 	/* Setup TLB control */
@@ -585,9 +585,9 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
 	WREG32(mmVM_L2_CNTL3, tmp);
 	/* setup context0 */
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.sysvm_start >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.sysvm_end >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->sysvm.table_addr >> 12);
 	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
 			(u32)(adev->dummy_page.addr >> 12));
 	WREG32(mmVM_CONTEXT0_CNTL2, 0);
@@ -611,10 +611,10 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
 	for (i = 1; i < 16; i++) {
 		if (i < 8)
 			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
-			       adev->gart.table_addr >> 12);
+			       adev->sysvm.table_addr >> 12);
 		else
 			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
-			       adev->gart.table_addr >> 12);
+			       adev->sysvm.table_addr >> 12);
 	}
 
 	/* enable context1-15 */
@@ -640,9 +640,9 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
 
 	gmc_v7_0_gart_flush_gpu_tlb(adev, 0);
 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
-		 (unsigned)(adev->mc.gtt_size >> 20),
-		 (unsigned long long)adev->gart.table_addr);
-	adev->gart.ready = true;
+		 (unsigned)(adev->mc.sysvm_size >> 20),
+		 (unsigned long long)adev->sysvm.table_addr);
+	adev->sysvm.ready = true;
 	return 0;
 }
 
@@ -650,27 +650,27 @@ static int gmc_v7_0_gart_init(struct amdgpu_device *adev)
 {
 	int r;
 
-	if (adev->gart.robj) {
+	if (adev->sysvm.robj) {
 		WARN(1, "R600 PCIE GART already initialized\n");
 		return 0;
 	}
 	/* Initialize common gart structure */
-	r = amdgpu_gart_init(adev);
+	r = amdgpu_sysvm_init(adev);
 	if (r)
 		return r;
-	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
-	adev->gart.gart_pte_flags = 0;
-	return amdgpu_gart_table_vram_alloc(adev);
+	adev->sysvm.table_size = adev->sysvm.num_gpu_pages * 8;
+	adev->sysvm.sysvm_pte_flags = 0;
+	return amdgpu_sysvm_table_vram_alloc(adev);
 }
 
 /**
- * gmc_v7_0_gart_disable - gart disable
+ * gmc_v7_0_sysvm_disable - gart disable
  *
  * @adev: amdgpu_device pointer
  *
  * This disables all VM page table (CIK).
  */
-static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
+static void gmc_v7_0_sysvm_disable(struct amdgpu_device *adev)
 {
 	u32 tmp;
 
@@ -688,7 +688,7 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
 	WREG32(mmVM_L2_CNTL, tmp);
 	WREG32(mmVM_L2_CNTL2, 0);
-	amdgpu_gart_table_vram_unpin(adev);
+	amdgpu_sysvm_table_vram_unpin(adev);
 }
 
 /**
@@ -700,8 +700,8 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
  */
 static void gmc_v7_0_gart_fini(struct amdgpu_device *adev)
 {
-	amdgpu_gart_table_vram_free(adev);
-	amdgpu_gart_fini(adev);
+	amdgpu_sysvm_table_vram_free(adev);
+	amdgpu_sysvm_fini(adev);
 }
 
 /**
@@ -885,7 +885,7 @@ static int gmc_v7_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	gmc_v7_0_set_gart_funcs(adev);
+	gmc_v7_0_set_sysvm_funcs(adev);
 	gmc_v7_0_set_irq_funcs(adev);
 
 	adev->mc.shared_aperture_start = 0x2000000000000000ULL;
@@ -1036,7 +1036,7 @@ static int gmc_v7_0_hw_init(void *handle)
 		}
 	}
 
-	r = gmc_v7_0_gart_enable(adev);
+	r = gmc_v7_0_sysvm_enable(adev);
 	if (r)
 		return r;
 
@@ -1048,7 +1048,7 @@ static int gmc_v7_0_hw_fini(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
-	gmc_v7_0_gart_disable(adev);
+	gmc_v7_0_sysvm_disable(adev);
 
 	return 0;
 }
@@ -1270,7 +1270,7 @@ static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
 	.set_powergating_state = gmc_v7_0_set_powergating_state,
 };
 
-static const struct amdgpu_gart_funcs gmc_v7_0_gart_funcs = {
+static const struct amdgpu_sysvm_funcs gmc_v7_0_sysvm_funcs = {
 	.flush_gpu_tlb = gmc_v7_0_gart_flush_gpu_tlb,
 	.set_pte_pde = gmc_v7_0_gart_set_pte_pde,
 	.set_prt = gmc_v7_0_set_prt,
@@ -1283,10 +1283,10 @@ static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
 	.process = gmc_v7_0_process_interrupt,
 };
 
-static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev)
+static void gmc_v7_0_set_sysvm_funcs(struct amdgpu_device *adev)
 {
-	if (adev->gart.gart_funcs == NULL)
-		adev->gart.gart_funcs = &gmc_v7_0_gart_funcs;
+	if (adev->sysvm.sysvm_funcs == NULL)
+		adev->sysvm.sysvm_funcs = &gmc_v7_0_sysvm_funcs;
 }
 
 static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 213af65..cf8f8d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -41,7 +41,7 @@
 #include "amdgpu_atombios.h"
 
 
-static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
+static void gmc_v8_0_set_sysvm_funcs(struct amdgpu_device *adev);
 static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
 static int gmc_v8_0_wait_for_idle(void *handle);
 
@@ -393,7 +393,7 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
 	return 0;
 }
 
-static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
+static void gmc_v8_0_vram_sysvm_location(struct amdgpu_device *adev,
 				       struct amdgpu_mc *mc)
 {
 	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
@@ -406,8 +406,8 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
 		mc->mc_vram_size = 0xFFC0000000ULL;
 	}
 	amdgpu_vram_location(adev, &adev->mc, base);
-	adev->mc.gtt_base_align = 0;
-	amdgpu_gtt_location(adev, mc);
+	adev->mc.sysvm_base_align = 0;
+	amdgpu_sysvm_location(adev, mc);
 }
 
 /**
@@ -535,8 +535,8 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
 	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
 		adev->mc.visible_vram_size = adev->mc.real_vram_size;
 
-	amdgpu_gart_set_defaults(adev);
-	gmc_v8_0_vram_gtt_location(adev, &adev->mc);
+	amdgpu_sysvm_set_defaults(adev);
+	gmc_v8_0_vram_sysvm_location(adev, &adev->mc);
 
 	return 0;
 }
@@ -722,7 +722,7 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
 }
 
 /**
- * gmc_v8_0_gart_enable - gart enable
+ * gmc_v8_0_sysvm_enable - gart enable
  *
  * @adev: amdgpu_device pointer
  *
@@ -732,16 +732,16 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
  * and GPUVM for FSA64 clients (CIK).
  * Returns 0 for success, errors for failure.
  */
-static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
+static int gmc_v8_0_sysvm_enable(struct amdgpu_device *adev)
 {
 	int r, i;
 	u32 tmp;
 
-	if (adev->gart.robj == NULL) {
+	if (adev->sysvm.robj == NULL) {
 		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
 		return -EINVAL;
 	}
-	r = amdgpu_gart_table_vram_pin(adev);
+	r = amdgpu_sysvm_table_vram_pin(adev);
 	if (r)
 		return r;
 	/* Setup TLB control */
@@ -787,9 +787,9 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
 	WREG32(mmVM_L2_CNTL4, tmp);
 	/* setup context0 */
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.sysvm_start >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.sysvm_end >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->sysvm.table_addr >> 12);
 	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
 			(u32)(adev->dummy_page.addr >> 12));
 	WREG32(mmVM_CONTEXT0_CNTL2, 0);
@@ -813,10 +813,10 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
 	for (i = 1; i < 16; i++) {
 		if (i < 8)
 			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
-			       adev->gart.table_addr >> 12);
+			       adev->sysvm.table_addr >> 12);
 		else
 			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
-			       adev->gart.table_addr >> 12);
+			       adev->sysvm.table_addr >> 12);
 	}
 
 	/* enable context1-15 */
@@ -843,9 +843,9 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
 
 	gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
-		 (unsigned)(adev->mc.gtt_size >> 20),
-		 (unsigned long long)adev->gart.table_addr);
-	adev->gart.ready = true;
+		 (unsigned)(adev->mc.sysvm_size >> 20),
+		 (unsigned long long)adev->sysvm.table_addr);
+	adev->sysvm.ready = true;
 	return 0;
 }
 
@@ -853,27 +853,27 @@ static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
 {
 	int r;
 
-	if (adev->gart.robj) {
+	if (adev->sysvm.robj) {
 		WARN(1, "R600 PCIE GART already initialized\n");
 		return 0;
 	}
 	/* Initialize common gart structure */
-	r = amdgpu_gart_init(adev);
+	r = amdgpu_sysvm_init(adev);
 	if (r)
 		return r;
-	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
-	adev->gart.gart_pte_flags = AMDGPU_PTE_EXECUTABLE;
-	return amdgpu_gart_table_vram_alloc(adev);
+	adev->sysvm.table_size = adev->sysvm.num_gpu_pages * 8;
+	adev->sysvm.sysvm_pte_flags = AMDGPU_PTE_EXECUTABLE;
+	return amdgpu_sysvm_table_vram_alloc(adev);
 }
 
 /**
- * gmc_v8_0_gart_disable - gart disable
+ * gmc_v8_0_sysvm_disable - gart disable
  *
  * @adev: amdgpu_device pointer
  *
  * This disables all VM page table (CIK).
  */
-static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
+static void gmc_v8_0_sysvm_disable(struct amdgpu_device *adev)
 {
 	u32 tmp;
 
@@ -891,7 +891,7 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
 	WREG32(mmVM_L2_CNTL, tmp);
 	WREG32(mmVM_L2_CNTL2, 0);
-	amdgpu_gart_table_vram_unpin(adev);
+	amdgpu_sysvm_table_vram_unpin(adev);
 }
 
 /**
@@ -903,8 +903,8 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
  */
 static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
 {
-	amdgpu_gart_table_vram_free(adev);
-	amdgpu_gart_fini(adev);
+	amdgpu_sysvm_table_vram_free(adev);
+	amdgpu_sysvm_fini(adev);
 }
 
 /**
@@ -962,7 +962,7 @@ static int gmc_v8_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	gmc_v8_0_set_gart_funcs(adev);
+	gmc_v8_0_set_sysvm_funcs(adev);
 	gmc_v8_0_set_irq_funcs(adev);
 
 	adev->mc.shared_aperture_start = 0x2000000000000000ULL;
@@ -1128,7 +1128,7 @@ static int gmc_v8_0_hw_init(void *handle)
 		}
 	}
 
-	r = gmc_v8_0_gart_enable(adev);
+	r = gmc_v8_0_sysvm_enable(adev);
 	if (r)
 		return r;
 
@@ -1140,7 +1140,7 @@ static int gmc_v8_0_hw_fini(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
-	gmc_v8_0_gart_disable(adev);
+	gmc_v8_0_sysvm_disable(adev);
 
 	return 0;
 }
@@ -1590,7 +1590,7 @@ static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
 	.get_clockgating_state = gmc_v8_0_get_clockgating_state,
 };
 
-static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = {
+static const struct amdgpu_sysvm_funcs gmc_v8_0_sysvm_funcs = {
 	.flush_gpu_tlb = gmc_v8_0_gart_flush_gpu_tlb,
 	.set_pte_pde = gmc_v8_0_gart_set_pte_pde,
 	.set_prt = gmc_v8_0_set_prt,
@@ -1603,10 +1603,10 @@ static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
 	.process = gmc_v8_0_process_interrupt,
 };
 
-static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev)
+static void gmc_v8_0_set_sysvm_funcs(struct amdgpu_device *adev)
 {
-	if (adev->gart.gart_funcs == NULL)
-		adev->gart.gart_funcs = &gmc_v8_0_gart_funcs;
+	if (adev->sysvm.sysvm_funcs == NULL)
+		adev->sysvm.sysvm_funcs = &gmc_v8_0_sysvm_funcs;
 }
 
 static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index dbb43d9..f067465 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -365,7 +365,7 @@ static u64 gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, u64 addr)
 	return addr;
 }
 
-static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
+static const struct amdgpu_sysvm_funcs gmc_v9_0_sysvm_funcs = {
 	.flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
 	.set_pte_pde = gmc_v9_0_gart_set_pte_pde,
 	.get_invalidate_req = gmc_v9_0_get_invalidate_req,
@@ -373,17 +373,17 @@ static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
 	.get_vm_pde = gmc_v9_0_get_vm_pde
 };
 
-static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
+static void gmc_v9_0_set_sysvm_funcs(struct amdgpu_device *adev)
 {
-	if (adev->gart.gart_funcs == NULL)
-		adev->gart.gart_funcs = &gmc_v9_0_gart_funcs;
+	if (adev->sysvm.sysvm_funcs == NULL)
+		adev->sysvm.sysvm_funcs = &gmc_v9_0_sysvm_funcs;
 }
 
 static int gmc_v9_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	gmc_v9_0_set_gart_funcs(adev);
+	gmc_v9_0_set_sysvm_funcs(adev);
 	gmc_v9_0_set_irq_funcs(adev);
 
 	return 0;
@@ -412,15 +412,15 @@ static int gmc_v9_0_late_init(void *handle)
 	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
 }
 
-static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
+static void gmc_v9_0_vram_sysvm_location(struct amdgpu_device *adev,
 					struct amdgpu_mc *mc)
 {
 	u64 base = 0;
 	if (!amdgpu_sriov_vf(adev))
 		base = mmhub_v1_0_get_fb_location(adev);
 	amdgpu_vram_location(adev, &adev->mc, base);
-	adev->mc.gtt_base_align = 0;
-	amdgpu_gtt_location(adev, mc);
+	adev->mc.sysvm_base_align = 0;
+	amdgpu_sysvm_location(adev, mc);
 	/* base offset of vram pages */
 	if (adev->flags & AMD_IS_APU)
 		adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
@@ -494,8 +494,8 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
 	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
 		adev->mc.visible_vram_size = adev->mc.real_vram_size;
 
-	amdgpu_gart_set_defaults(adev);
-	gmc_v9_0_vram_gtt_location(adev, &adev->mc);
+	amdgpu_sysvm_set_defaults(adev);
+	gmc_v9_0_vram_sysvm_location(adev, &adev->mc);
 
 	return 0;
 }
@@ -504,18 +504,18 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
 {
 	int r;
 
-	if (adev->gart.robj) {
+	if (adev->sysvm.robj) {
 		WARN(1, "VEGA10 PCIE GART already initialized\n");
 		return 0;
 	}
 	/* Initialize common gart structure */
-	r = amdgpu_gart_init(adev);
+	r = amdgpu_sysvm_init(adev);
 	if (r)
 		return r;
-	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
-	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
+	adev->sysvm.table_size = adev->sysvm.num_gpu_pages * 8;
+	adev->sysvm.sysvm_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
 				 AMDGPU_PTE_EXECUTABLE;
-	return amdgpu_gart_table_vram_alloc(adev);
+	return amdgpu_sysvm_table_vram_alloc(adev);
 }
 
 static int gmc_v9_0_sw_init(void *handle)
@@ -640,8 +640,8 @@ static int gmc_v9_0_sw_init(void *handle)
  */
 static void gmc_v9_0_gart_fini(struct amdgpu_device *adev)
 {
-	amdgpu_gart_table_vram_free(adev);
-	amdgpu_gart_fini(adev);
+	amdgpu_sysvm_table_vram_free(adev);
+	amdgpu_sysvm_fini(adev);
 }
 
 static int gmc_v9_0_sw_fini(void *handle)
@@ -669,11 +669,11 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
 }
 
 /**
- * gmc_v9_0_gart_enable - gart enable
+ * gmc_v9_0_sysvm_enable - gart enable
  *
  * @adev: amdgpu_device pointer
  */
-static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
+static int gmc_v9_0_sysvm_enable(struct amdgpu_device *adev)
 {
 	int r;
 	bool value;
@@ -683,11 +683,11 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
 		golden_settings_vega10_hdp,
 		(const u32)ARRAY_SIZE(golden_settings_vega10_hdp));
 
-	if (adev->gart.robj == NULL) {
+	if (adev->sysvm.robj == NULL) {
 		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
 		return -EINVAL;
 	}
-	r = amdgpu_gart_table_vram_pin(adev);
+	r = amdgpu_sysvm_table_vram_pin(adev);
 	if (r)
 		return r;
 
@@ -706,11 +706,11 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
 		break;
 	}
 
-	r = gfxhub_v1_0_gart_enable(adev);
+	r = gfxhub_v1_0_sysvm_enable(adev);
 	if (r)
 		return r;
 
-	r = mmhub_v1_0_gart_enable(adev);
+	r = mmhub_v1_0_sysvm_enable(adev);
 	if (r)
 		return r;
 
@@ -733,9 +733,9 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
 	gmc_v9_0_gart_flush_gpu_tlb(adev, 0);
 
 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
-		 (unsigned)(adev->mc.gtt_size >> 20),
-		 (unsigned long long)adev->gart.table_addr);
-	adev->gart.ready = true;
+		 (unsigned)(adev->mc.sysvm_size >> 20),
+		 (unsigned long long)adev->sysvm.table_addr);
+	adev->sysvm.ready = true;
 	return 0;
 }
 
@@ -747,23 +747,23 @@ static int gmc_v9_0_hw_init(void *handle)
 	/* The sequence of these two function calls matters.*/
 	gmc_v9_0_init_golden_registers(adev);
 
-	r = gmc_v9_0_gart_enable(adev);
+	r = gmc_v9_0_sysvm_enable(adev);
 
 	return r;
 }
 
 /**
- * gmc_v9_0_gart_disable - gart disable
+ * gmc_v9_0_sysvm_disable - gart disable
  *
  * @adev: amdgpu_device pointer
  *
  * This disables all VM page table.
  */
-static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
+static void gmc_v9_0_sysvm_disable(struct amdgpu_device *adev)
 {
-	gfxhub_v1_0_gart_disable(adev);
-	mmhub_v1_0_gart_disable(adev);
-	amdgpu_gart_table_vram_unpin(adev);
+	gfxhub_v1_0_sysvm_disable(adev);
+	mmhub_v1_0_sysvm_disable(adev);
+	amdgpu_sysvm_table_vram_unpin(adev);
 }
 
 static int gmc_v9_0_hw_fini(void *handle)
@@ -777,7 +777,7 @@ static int gmc_v9_0_hw_fini(void *handle)
 	}
 
 	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
-	gmc_v9_0_gart_disable(adev);
+	gmc_v9_0_sysvm_disable(adev);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 9804318..fbc8f6e 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -51,8 +51,8 @@ static void mmhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
 {
 	uint64_t value;
 
-	BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL));
-	value = adev->gart.table_addr - adev->mc.vram_start +
+	BUG_ON(adev->sysvm.table_addr & (~0x0000FFFFFFFFF000ULL));
+	value = adev->sysvm.table_addr - adev->mc.vram_start +
 		adev->vm_manager.vram_base_offset;
 	value &= 0x0000FFFFFFFFF000ULL;
 	value |= 0x1; /* valid bit */
@@ -69,14 +69,14 @@ static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
 	mmhub_v1_0_init_gart_pt_regs(adev);
 
 	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
-		     (u32)(adev->mc.gtt_start >> 12));
+		     (u32)(adev->mc.sysvm_start >> 12));
 	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
-		     (u32)(adev->mc.gtt_start >> 44));
+		     (u32)(adev->mc.sysvm_start >> 44));
 
 	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
-		     (u32)(adev->mc.gtt_end >> 12));
+		     (u32)(adev->mc.sysvm_end >> 12));
 	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
-		     (u32)(adev->mc.gtt_end >> 44));
+		     (u32)(adev->mc.sysvm_end >> 44));
 }
 
 static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
@@ -462,7 +462,7 @@ void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
 	}
 }
 
-int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
+int mmhub_v1_0_sysvm_enable(struct amdgpu_device *adev)
 {
 	if (amdgpu_sriov_vf(adev)) {
 		/*
@@ -490,7 +490,7 @@ int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
 	return 0;
 }
 
-void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
+void mmhub_v1_0_sysvm_disable(struct amdgpu_device *adev)
 {
 	u32 tmp;
 	u32 i;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
index 57bb940..23128e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
@@ -24,8 +24,8 @@
 #define __MMHUB_V1_0_H__
 
 u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev);
-int mmhub_v1_0_gart_enable(struct amdgpu_device *adev);
-void mmhub_v1_0_gart_disable(struct amdgpu_device *adev);
+int mmhub_v1_0_sysvm_enable(struct amdgpu_device *adev);
+void mmhub_v1_0_sysvm_disable(struct amdgpu_device *adev);
 void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
 					 bool value);
 void mmhub_v1_0_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 4a65697..056b169 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1121,10 +1121,10 @@ static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 					 unsigned vm_id, uint64_t pd_addr)
 {
 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+	uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
 	unsigned eng = ring->vm_inv_eng;
 
-	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
+	pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
 	pd_addr |= AMDGPU_PTE_VALID;
 
 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 987b958..95913fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -1312,11 +1312,11 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 					unsigned vm_id, uint64_t pd_addr)
 {
 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+	uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
 	uint32_t data0, data1, mask;
 	unsigned eng = ring->vm_inv_eng;
 
-	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
+	pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
 	pd_addr |= AMDGPU_PTE_VALID;
 
 	data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
@@ -1353,10 +1353,10 @@ static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
 			 unsigned int vm_id, uint64_t pd_addr)
 {
 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+	uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
 	unsigned eng = ring->vm_inv_eng;
 
-	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
+	pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
 	pd_addr |= AMDGPU_PTE_VALID;
 
 	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 1ecd6bb..b869423 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -952,10 +952,10 @@ static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
 			 unsigned int vm_id, uint64_t pd_addr)
 {
 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+	uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
 	unsigned eng = ring->vm_inv_eng;
 
-	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
+	pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
 	pd_addr |= AMDGPU_PTE_VALID;
 
 	amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 21e7b88..2ca49af 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -878,11 +878,11 @@ static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
 					unsigned vm_id, uint64_t pd_addr)
 {
 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+	uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
 	uint32_t data0, data1, mask;
 	unsigned eng = ring->vm_inv_eng;
 
-	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
+	pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
 	pd_addr |= AMDGPU_PTE_VALID;
 
 	data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
@@ -1010,10 +1010,10 @@ static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
 			 unsigned int vm_id, uint64_t pd_addr)
 {
 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+	uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
 	unsigned eng = ring->vm_inv_eng;
 
-	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
+	pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
 	pd_addr |= AMDGPU_PTE_VALID;
 
 	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
-- 
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 30+ messages in thread

* [PATCH 08/11] drm/amdgpu: move SYSVM struct and function into amdgpu_sysvm.h
       [not found] ` <1499075076-1851-1-git-send-email-deathsimple-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
                     ` (5 preceding siblings ...)
  2017-07-03  9:44   ` [PATCH 07/11] drm/amdgpu: rename GART to SYSVM Christian König
@ 2017-07-03  9:44   ` Christian König
       [not found]     ` <1499075076-1851-8-git-send-email-deathsimple-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
  2017-07-03  9:44   ` [PATCH 09/11] drm/amdgpu: move amdgpu_sysvm_location into amdgpu_sysvm.c as well Christian König
                     ` (3 subsequent siblings)
  10 siblings, 1 reply; 30+ messages in thread
From: Christian König @ 2017-07-03  9:44 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

From: Christian König <christian.koenig@amd.com>

No functional change.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h       | 48 +------------------
 drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.h | 77 +++++++++++++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h   |  1 +
 3 files changed, 79 insertions(+), 47 deletions(-)
 create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.h

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index abe191f..a2c0eac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -69,6 +69,7 @@
 
 #include "gpu_scheduler.h"
 #include "amdgpu_virt.h"
+#include "amdgpu_sysvm.h"
 
 /*
  * Modules parameters.
@@ -534,53 +535,6 @@ int amdgpu_fence_slab_init(void);
 void amdgpu_fence_slab_fini(void);
 
 /*
- * GART structures, functions & helpers
- */
-struct amdgpu_mc;
-
-#define AMDGPU_GPU_PAGE_SIZE 4096
-#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
-#define AMDGPU_GPU_PAGE_SHIFT 12
-#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
-
-struct amdgpu_sysvm {
-	dma_addr_t			table_addr;
-	struct amdgpu_bo		*robj;
-	void				*ptr;
-	unsigned			num_gpu_pages;
-	unsigned			num_cpu_pages;
-	unsigned			table_size;
-#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
-	struct page			**pages;
-#endif
-	bool				ready;
-
-	/* Asic default pte flags */
-	uint64_t			sysvm_pte_flags;
-
-	const struct amdgpu_sysvm_funcs *sysvm_funcs;
-};
-
-void amdgpu_sysvm_set_defaults(struct amdgpu_device *adev);
-int amdgpu_sysvm_table_ram_alloc(struct amdgpu_device *adev);
-void amdgpu_sysvm_table_ram_free(struct amdgpu_device *adev);
-int amdgpu_sysvm_table_vram_alloc(struct amdgpu_device *adev);
-void amdgpu_sysvm_table_vram_free(struct amdgpu_device *adev);
-int amdgpu_sysvm_table_vram_pin(struct amdgpu_device *adev);
-void amdgpu_sysvm_table_vram_unpin(struct amdgpu_device *adev);
-int amdgpu_sysvm_init(struct amdgpu_device *adev);
-void amdgpu_sysvm_fini(struct amdgpu_device *adev);
-int amdgpu_sysvm_unbind(struct amdgpu_device *adev, uint64_t offset,
-			int pages);
-int amdgpu_sysvm_map(struct amdgpu_device *adev, uint64_t offset,
-		    int pages, dma_addr_t *dma_addr, uint64_t flags,
-		    void *dst);
-int amdgpu_sysvm_bind(struct amdgpu_device *adev, uint64_t offset,
-		     int pages, struct page **pagelist,
-		     dma_addr_t *dma_addr, uint64_t flags);
-int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
-
-/*
  * VMHUB structures, functions & helpers
  */
 struct amdgpu_vmhub {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.h
new file mode 100644
index 0000000..7846765
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_SYSVM_H__
+#define __AMDGPU_SYSVM_H__
+
+#include <linux/types.h>
+
+/*
+ * SYSVM structures, functions & helpers
+ */
+struct amdgpu_device;
+struct amdgpu_bo;
+struct amdgpu_sysvm_funcs;
+
+#define AMDGPU_GPU_PAGE_SIZE 4096
+#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
+#define AMDGPU_GPU_PAGE_SHIFT 12
+#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
+
+struct amdgpu_sysvm {
+	dma_addr_t			table_addr;
+	struct amdgpu_bo		*robj;
+	void				*ptr;
+	unsigned			num_gpu_pages;
+	unsigned			num_cpu_pages;
+	unsigned			table_size;
+#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
+	struct page			**pages;
+#endif
+	bool				ready;
+
+	/* Asic default pte flags */
+	uint64_t			sysvm_pte_flags;
+
+	const struct amdgpu_sysvm_funcs *sysvm_funcs;
+};
+
+void amdgpu_sysvm_set_defaults(struct amdgpu_device *adev);
+int amdgpu_sysvm_table_ram_alloc(struct amdgpu_device *adev);
+void amdgpu_sysvm_table_ram_free(struct amdgpu_device *adev);
+int amdgpu_sysvm_table_vram_alloc(struct amdgpu_device *adev);
+void amdgpu_sysvm_table_vram_free(struct amdgpu_device *adev);
+int amdgpu_sysvm_table_vram_pin(struct amdgpu_device *adev);
+void amdgpu_sysvm_table_vram_unpin(struct amdgpu_device *adev);
+int amdgpu_sysvm_init(struct amdgpu_device *adev);
+void amdgpu_sysvm_fini(struct amdgpu_device *adev);
+int amdgpu_sysvm_unbind(struct amdgpu_device *adev, uint64_t offset,
+			int pages);
+int amdgpu_sysvm_map(struct amdgpu_device *adev, uint64_t offset,
+		    int pages, dma_addr_t *dma_addr, uint64_t flags,
+		    void *dst);
+int amdgpu_sysvm_bind(struct amdgpu_device *adev, uint64_t offset,
+		     int pages, struct page **pagelist,
+		     dma_addr_t *dma_addr, uint64_t flags);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 1443038..9cd435c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -80,5 +80,6 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
 bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
 int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem);
+int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
 
 #endif
-- 
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 30+ messages in thread

* [PATCH 09/11] drm/amdgpu: move amdgpu_sysvm_location into amdgpu_sysvm.c as well
       [not found] ` <1499075076-1851-1-git-send-email-deathsimple-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
                     ` (6 preceding siblings ...)
  2017-07-03  9:44   ` [PATCH 08/11] drm/amdgpu: move SYSVM struct and function into amdgpu_sysvm.h Christian König
@ 2017-07-03  9:44   ` Christian König
       [not found]     ` <1499075076-1851-9-git-send-email-deathsimple-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
  2017-07-03  9:44   ` [PATCH 10/11] drm/amdgpu: setup GTT size directly from module parameter Christian König
                     ` (2 subsequent siblings)
  10 siblings, 1 reply; 30+ messages in thread
From: Christian König @ 2017-07-03  9:44 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

From: Christian König <christian.koenig@amd.com>

No intended functional change.
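
For reviewers who want to sanity-check the logic being moved, here is a
minimal user-space sketch of the placement decision amdgpu_sysvm_location()
makes. The values are made up for illustration and the sysvm_base_align
handling is omitted; this is not the driver code itself.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Assumed example values: 40-bit MC address space, 8 GiB of VRAM
	 * mapped at offset 0, and a 256 MiB SYSVM aperture request. */
	uint64_t mc_mask    = (1ULL << 40) - 1;
	uint64_t vram_start = 0;
	uint64_t vram_end   = (8ULL << 30) - 1;
	uint64_t sysvm_size = 256ULL << 20;
	uint64_t size_af    = mc_mask - vram_end;  /* room after VRAM */
	uint64_t size_bf    = vram_start;          /* room before VRAM */
	uint64_t sysvm_start;

	if (size_bf > size_af) {
		if (sysvm_size > size_bf)
			sysvm_size = size_bf;      /* shrink to fit */
		sysvm_start = 0;                   /* place before VRAM */
	} else {
		if (sysvm_size > size_af)
			sysvm_size = size_af;
		sysvm_start = vram_end + 1;        /* place after VRAM */
	}

	printf("SYSVM: %lluM 0x%016llX - 0x%016llX\n",
	       (unsigned long long)(sysvm_size >> 20),
	       (unsigned long long)sysvm_start,
	       (unsigned long long)(sysvm_start + sysvm_size - 1));
	return 0;
}

With these inputs the aperture lands directly after VRAM and the sketch prints
"SYSVM: 256M 0x0000000200000000 - 0x000000020FFFFFFF", the same shape of
message the dev_info() in the real function emits.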

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h        |  1 -
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 36 ----------------------------
 drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c  | 38 ++++++++++++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.h  |  2 ++
 4 files changed, 40 insertions(+), 37 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index a2c0eac..1ed6b7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1862,7 +1862,6 @@ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
 uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
 				 struct ttm_mem_reg *mem);
 void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
-void amdgpu_sysvm_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
 void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
 int amdgpu_ttm_init(struct amdgpu_device *adev);
 void amdgpu_ttm_fini(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 46a82d3..228b262 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -666,42 +666,6 @@ void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64
 			mc->vram_end, mc->real_vram_size >> 20);
 }
 
-/**
- * amdgpu_sysvm_location - try to find SYSVM location
- * @adev: amdgpu device structure holding all necessary informations
- * @mc: memory controller structure holding memory informations
- *
- * Function will place try to place SYSVM before or after VRAM.
- *
- * If SYSVM size is bigger than space left then we ajust SYSVM size.
- * Thus function will never fails.
- *
- * FIXME: when reducing SYSVM size align new size on power of 2.
- */
-void amdgpu_sysvm_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
-{
-	u64 size_af, size_bf;
-
-	size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->sysvm_base_align) & ~mc->sysvm_base_align;
-	size_bf = mc->vram_start & ~mc->sysvm_base_align;
-	if (size_bf > size_af) {
-		if (mc->sysvm_size > size_bf) {
-			dev_warn(adev->dev, "limiting SYSVM\n");
-			mc->sysvm_size = size_bf;
-		}
-		mc->sysvm_start = 0;
-	} else {
-		if (mc->sysvm_size > size_af) {
-			dev_warn(adev->dev, "limiting SYSVM\n");
-			mc->sysvm_size = size_af;
-		}
-		mc->sysvm_start = (mc->vram_end + 1 + mc->sysvm_base_align) & ~mc->sysvm_base_align;
-	}
-	mc->sysvm_end = mc->sysvm_start + mc->sysvm_size - 1;
-	dev_info(adev->dev, "SYSVM: %lluM 0x%016llX - 0x%016llX\n",
-			mc->sysvm_size >> 20, mc->sysvm_start, mc->sysvm_end);
-}
-
 /*
  * GPU helpers function.
  */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c
index 50fc8d7..ff436ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c
@@ -73,6 +73,44 @@ void amdgpu_sysvm_set_defaults(struct amdgpu_device *adev)
 }
 
 /**
+ * amdgpu_sysvm_location - try to find SYSVM location
+ * @adev: amdgpu device structure holding all necessary informations
+ * @mc: memory controller structure holding memory informations
+ *
+ * Function will place try to place SYSVM before or after VRAM.
+ *
+ * If SYSVM size is bigger than space left then we ajust SYSVM size.
+ * Thus function will never fails.
+ *
+ * FIXME: when reducing SYSVM size align new size on power of 2.
+ */
+void amdgpu_sysvm_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
+{
+	u64 size_af, size_bf;
+
+	size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->sysvm_base_align) &
+		~mc->sysvm_base_align;
+	size_bf = mc->vram_start & ~mc->sysvm_base_align;
+	if (size_bf > size_af) {
+		if (mc->sysvm_size > size_bf) {
+			dev_warn(adev->dev, "limiting SYSVM\n");
+			mc->sysvm_size = size_bf;
+		}
+		mc->sysvm_start = 0;
+	} else {
+		if (mc->sysvm_size > size_af) {
+			dev_warn(adev->dev, "limiting SYSVM\n");
+			mc->sysvm_size = size_af;
+		}
+		mc->sysvm_start = (mc->vram_end + 1 + mc->sysvm_base_align) &
+			~mc->sysvm_base_align;
+	}
+	mc->sysvm_end = mc->sysvm_start + mc->sysvm_size - 1;
+	dev_info(adev->dev, "SYSVM: %lluM 0x%016llX - 0x%016llX\n",
+			mc->sysvm_size >> 20, mc->sysvm_start, mc->sysvm_end);
+}
+
+/**
  * amdgpu_sysvm_table_ram_alloc - allocate system ram for gart page table
  *
  * @adev: amdgpu_device pointer
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.h
index 7846765..2336ece 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.h
@@ -32,6 +32,7 @@
 struct amdgpu_device;
 struct amdgpu_bo;
 struct amdgpu_sysvm_funcs;
+struct amdgpu_mc;
 
 #define AMDGPU_GPU_PAGE_SIZE 4096
 #define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
@@ -57,6 +58,7 @@ struct amdgpu_sysvm {
 };
 
 void amdgpu_sysvm_set_defaults(struct amdgpu_device *adev);
+void amdgpu_sysvm_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
 int amdgpu_sysvm_table_ram_alloc(struct amdgpu_device *adev);
 void amdgpu_sysvm_table_ram_free(struct amdgpu_device *adev);
 int amdgpu_sysvm_table_vram_alloc(struct amdgpu_device *adev);
-- 
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 30+ messages in thread

* [PATCH 10/11] drm/amdgpu: setup GTT size directly from module parameter
       [not found] ` <1499075076-1851-1-git-send-email-deathsimple-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
                     ` (7 preceding siblings ...)
  2017-07-03  9:44   ` [PATCH 09/11] drm/amdgpu: move amdgpu_sysvm_location into amdgpu_sysvm.c as well Christian König
@ 2017-07-03  9:44   ` Christian König
       [not found]     ` <1499075076-1851-10-git-send-email-deathsimple-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
  2017-07-03  9:44   ` [PATCH 11/11] drm/amdgpu: add sysvm_size Christian König
  2017-07-06 16:15   ` [PATCH 01/11] drm/amdgpu: reserve the first 2x512 of GART Alex Deucher
  10 siblings, 1 reply; 30+ messages in thread
From: Christian König @ 2017-07-03  9:44 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

From: Christian König <christian.koenig@amd.com>

Set up the TTM GTT domain size directly from the module parameter instead of
relying on sysvm_size being the same value.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 9240357..72dd83e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1097,6 +1097,7 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
 
 int amdgpu_ttm_init(struct amdgpu_device *adev)
 {
+	uint64_t gtt_size;
 	int r;
 
 	r = amdgpu_ttm_global_init(adev);
@@ -1143,14 +1144,19 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 	}
 	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
 		 (unsigned) (adev->mc.real_vram_size / (1024 * 1024)));
-	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT,
-				adev->mc.sysvm_size >> PAGE_SHIFT);
+
+	if (amdgpu_gart_size == -1)
+		gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
+			       adev->mc.mc_vram_size);
+	else
+		gtt_size = (uint64_t)amdgpu_gart_size << 20;
+	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
 	if (r) {
 		DRM_ERROR("Failed initializing GTT heap.\n");
 		return r;
 	}
 	DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
-		 (unsigned)(adev->mc.sysvm_size / (1024 * 1024)));
+		 (unsigned)(gtt_size / (1024 * 1024)));
 
 	adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
 	adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
-- 
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 30+ messages in thread

* [PATCH 11/11] drm/amdgpu: add sysvm_size
       [not found] ` <1499075076-1851-1-git-send-email-deathsimple-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
                     ` (8 preceding siblings ...)
  2017-07-03  9:44   ` [PATCH 10/11] drm/amdgpu: setup GTT size directly from module parameter Christian König
@ 2017-07-03  9:44   ` Christian König
       [not found]     ` <1499075076-1851-11-git-send-email-deathsimple-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
  2017-07-06 16:15   ` [PATCH 01/11] drm/amdgpu: reserve the first 2x512 of GART Alex Deucher
  10 siblings, 1 reply; 30+ messages in thread
From: Christian König @ 2017-07-03  9:44 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

From: Christian König <christian.koenig@amd.com>

Limit the size of the SYSVM. This saves us a bunch of visible VRAM,
but also limits the maximum BO size we can swap out.

v2: rebased and cleaned up after GART to SYSVM rename.
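
As a rough illustration of the savings (assumed numbers, not taken from the
patch): gmc_v9_0 uses 8-byte PTEs, so the SYSVM page table costs
sysvm_size / 4096 * 8 bytes of CPU-visible VRAM. A sketch of that arithmetic:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Example aperture sizes in MiB; 256 is the new module default. */
	uint64_t sizes_mb[] = { 256, 1024, 16384 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes_mb) / sizeof(sizes_mb[0]); i++) {
		uint64_t sysvm_size = sizes_mb[i] << 20;
		/* 8 bytes per 4 KiB GPU page, as in gmc_v9_0_gart_init() */
		uint64_t table_size = sysvm_size / 4096 * 8;

		printf("sysvm %6lluM -> page table %6lluK of visible VRAM\n",
		       (unsigned long long)sizes_mb[i],
		       (unsigned long long)(table_size >> 10));
	}
	return 0;
}

So the 256M default needs only 512K for the table, while sizing the SYSVM to
cover a 16G card would need 32M. If the default turns out to be too small for
a workload, it can be raised at load time, e.g. amdgpu.sysvmsize=512.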

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h         | 1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  | 6 ++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c     | 4 ++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 6 ++++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c   | 9 +--------
 5 files changed, 16 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 1ed6b7a..81de31a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -77,6 +77,7 @@
 extern int amdgpu_modeset;
 extern int amdgpu_vram_limit;
 extern int amdgpu_gart_size;
+extern unsigned amdgpu_sysvm_size;
 extern int amdgpu_moverate;
 extern int amdgpu_benchmarking;
 extern int amdgpu_testing;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 228b262..daded9c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1086,6 +1086,12 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
 		}
 	}
 
+	if (amdgpu_sysvm_size < 32) {
+		dev_warn(adev->dev, "sysvm size (%d) too small\n",
+				 amdgpu_sysvm_size);
+		amdgpu_sysvm_size = 32;
+	}
+
 	amdgpu_check_vm_size(adev);
 
 	amdgpu_check_block_size(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 4bf4a80..56f9867 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -75,6 +75,7 @@
 
 int amdgpu_vram_limit = 0;
 int amdgpu_gart_size = -1; /* auto */
+unsigned amdgpu_sysvm_size = 256;
 int amdgpu_moverate = -1; /* auto */
 int amdgpu_benchmarking = 0;
 int amdgpu_testing = 0;
@@ -124,6 +125,9 @@ module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
 MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc., -1 = auto)");
 module_param_named(gartsize, amdgpu_gart_size, int, 0600);
 
+MODULE_PARM_DESC(sysvmsize, "Size of the system VM in megabytes (default 256)");
+module_param_named(sysvmsize, amdgpu_sysvm_size, int, 0600);
+
 MODULE_PARM_DESC(moverate, "Maximum buffer migration rate in MB/s. (32, 64, etc., -1=auto, 0=1=disabled)");
 module_param_named(moverate, amdgpu_moverate, int, 0600);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index f46a97d..bbf6bd0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -42,6 +42,7 @@ struct amdgpu_gtt_mgr {
 static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
 			       unsigned long p_size)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
 	struct amdgpu_gtt_mgr *mgr;
 	uint64_t start, size;
 
@@ -50,7 +51,7 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
 		return -ENOMEM;
 
 	start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
-	size = p_size - start;
+	size = (adev->mc.sysvm_size >> PAGE_SHIFT) - start;
 	drm_mm_init(&mgr->mm, start, size);
 	spin_lock_init(&mgr->lock);
 	mgr->available = p_size;
@@ -112,6 +113,7 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
 			 const struct ttm_place *place,
 			 struct ttm_mem_reg *mem)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
 	struct amdgpu_gtt_mgr *mgr = man->priv;
 	struct drm_mm_node *node = mem->mm_node;
 	enum drm_mm_insert_mode mode;
@@ -129,7 +131,7 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
 	if (place && place->lpfn)
 		lpfn = place->lpfn;
 	else
-		lpfn = man->size;
+		lpfn = adev->sysvm.num_cpu_pages;
 
 	mode = DRM_MM_INSERT_BEST;
 	if (place && place->flags & TTM_PL_FLAG_TOPDOWN)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c
index ff436ad..711e4b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c
@@ -62,14 +62,7 @@
  */
 void amdgpu_sysvm_set_defaults(struct amdgpu_device *adev)
 {
-	/* unless the user had overridden it, set the gart
-	 * size equal to the 1024 or vram, whichever is larger.
-	 */
-	if (amdgpu_gart_size == -1)
-		adev->mc.sysvm_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
-					adev->mc.mc_vram_size);
-	else
-		adev->mc.sysvm_size = (uint64_t)amdgpu_gart_size << 20;
+	adev->mc.sysvm_size = (uint64_t)amdgpu_sysvm_size << 20;
 }
 
 /**
-- 
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 30+ messages in thread

* RE: [PATCH 07/11] drm/amdgpu: rename GART to SYSVM
       [not found]     ` <1499075076-1851-7-git-send-email-deathsimple-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
@ 2017-07-04  3:13       ` Zhou, David(ChunMing)
       [not found]         ` <MWHPR1201MB0206D4883B42434777D43C12B4D70-3iK1xFAIwjrUF/YbdlDdgWrFom/aUZj6nBOFsp37pqbUKgpGm//BTAC/G2K4zDHf@public.gmane.org>
  2017-07-04  8:09       ` Huang Rui
  2017-07-04 21:11       ` Felix Kuehling
  2 siblings, 1 reply; 30+ messages in thread
From: Zhou, David(ChunMing) @ 2017-07-04  3:13 UTC (permalink / raw)
  To: Christian König, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Distinguishing the system VM and the general VM is a good idea, but I'm not sure about the part that renames GTT to SYSVM, especially since TTM TT stays there. Maybe we just need to rename the GART functions to SYSVM.

Regards,
David Zhou

-----Original Message-----
From: amd-gfx [mailto:amd-gfx-bounces@lists.freedesktop.org] On Behalf Of Christian König
Sent: Monday, July 03, 2017 5:45 PM
To: amd-gfx@lists.freedesktop.org
Subject: [PATCH 07/11] drm/amdgpu: rename GART to SYSVM

From: Christian König <christian.koenig@amd.com>

Just mass rename all names related to the hardware GART/GTT functions to SYSVM.

The names of symbols related to the TTM TT domain stay the same.

This should improve the distinction between the two.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/Kconfig         |   9 +-
 drivers/gpu/drm/amd/amdgpu/Makefile        |   2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu.h        |  58 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  48 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c   | 423 -----------------------------
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c    |   2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c  | 423 +++++++++++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_test.c   |  84 +++---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c    |  76 +++---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h    |   2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c     |  30 +-
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c      |   4 +-
 drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c   |  16 +-
 drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h   |   4 +-
 drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c      |  66 ++---
 drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c      |  70 ++---
 drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c      |  70 ++---
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c      |  66 ++---
 drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c    |  16 +-
 drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h    |   4 +-
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c     |   4 +-
 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c      |   8 +-
 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c      |   4 +-
 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c      |   8 +-
 24 files changed, 749 insertions(+), 748 deletions(-)
 delete mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
 create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c

diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index e8af1f5..ebbac01 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -31,14 +31,15 @@ config DRM_AMDGPU_USERPTR
 	  This option selects CONFIG_MMU_NOTIFIER if it isn't already
 	  selected to enabled full userptr support.
 
-config DRM_AMDGPU_GART_DEBUGFS
-	bool "Allow GART access through debugfs"
+config DRM_AMDGPU_SYSVM_DEBUGFS
+	bool "Allow SYSVM access through debugfs"
 	depends on DRM_AMDGPU
 	depends on DEBUG_FS
 	default n
 	help
-	  Selecting this option creates a debugfs file to inspect the mapped
-	  pages. Uses more memory for housekeeping, enable only for debugging.
+	  Selecting this option creates a debugfs file to inspect the SYSVM
+	  mapped pages. Uses more memory for housekeeping, enable only for
+	  debugging.
 
 source "drivers/gpu/drm/amd/acp/Kconfig"
 source "drivers/gpu/drm/amd/display/Kconfig"
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 3661110..d80d49f 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -22,7 +22,7 @@ amdgpu-y := amdgpu_drv.o
 # add KMS driver
 amdgpu-y += amdgpu_device.o amdgpu_kms.o \
 	amdgpu_atombios.o atombios_crtc.o amdgpu_connectors.o \
-	atom.o amdgpu_fence.o amdgpu_ttm.o amdgpu_object.o amdgpu_gart.o \
+	atom.o amdgpu_fence.o amdgpu_ttm.o amdgpu_object.o amdgpu_sysvm.o \
 	amdgpu_encoders.o amdgpu_display.o amdgpu_i2c.o \
 	amdgpu_fb.o amdgpu_gem.o amdgpu_ring.o \
 	amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o amdgpu_test.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 4a2b33d..abe191f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -305,7 +305,7 @@ struct amdgpu_vm_pte_funcs {
 };
 
 /* provided by the gmc block */
-struct amdgpu_gart_funcs {
+struct amdgpu_sysvm_funcs {
 	/* flush the vm tlb via mmio */
 	void (*flush_gpu_tlb)(struct amdgpu_device *adev,
 			      uint32_t vmid);
@@ -543,39 +543,39 @@ struct amdgpu_mc;
 #define AMDGPU_GPU_PAGE_SHIFT 12
 #define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
 
-struct amdgpu_gart {
+struct amdgpu_sysvm {
 	dma_addr_t			table_addr;
 	struct amdgpu_bo		*robj;
 	void				*ptr;
 	unsigned			num_gpu_pages;
 	unsigned			num_cpu_pages;
 	unsigned			table_size;
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
+#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
 	struct page			**pages;
 #endif
 	bool				ready;
 
 	/* Asic default pte flags */
-	uint64_t			gart_pte_flags;
+	uint64_t			sysvm_pte_flags;
 
-	const struct amdgpu_gart_funcs *gart_funcs;
+	const struct amdgpu_sysvm_funcs *sysvm_funcs;
 };
 
-void amdgpu_gart_set_defaults(struct amdgpu_device *adev);
-int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
-void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
-int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
-void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
-int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
-void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
-int amdgpu_gart_init(struct amdgpu_device *adev);
-void amdgpu_gart_fini(struct amdgpu_device *adev);
-int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
+void amdgpu_sysvm_set_defaults(struct amdgpu_device *adev);
+int amdgpu_sysvm_table_ram_alloc(struct amdgpu_device *adev);
+void amdgpu_sysvm_table_ram_free(struct amdgpu_device *adev);
+int amdgpu_sysvm_table_vram_alloc(struct amdgpu_device *adev);
+void amdgpu_sysvm_table_vram_free(struct amdgpu_device *adev);
+int amdgpu_sysvm_table_vram_pin(struct amdgpu_device *adev);
+void amdgpu_sysvm_table_vram_unpin(struct amdgpu_device *adev);
+int amdgpu_sysvm_init(struct amdgpu_device *adev);
+void amdgpu_sysvm_fini(struct amdgpu_device *adev);
+int amdgpu_sysvm_unbind(struct amdgpu_device *adev, uint64_t offset,
 			int pages);
-int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
+int amdgpu_sysvm_map(struct amdgpu_device *adev, uint64_t offset,
 		    int pages, dma_addr_t *dma_addr, uint64_t flags,
 		    void *dst);
-int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
+int amdgpu_sysvm_bind(struct amdgpu_device *adev, uint64_t offset,
 		     int pages, struct page **pagelist,
 		     dma_addr_t *dma_addr, uint64_t flags);
 int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
@@ -604,15 +604,15 @@ struct amdgpu_mc {
 	 * about vram size near mc fb location */
 	u64			mc_vram_size;
 	u64			visible_vram_size;
-	u64			gtt_size;
-	u64			gtt_start;
-	u64			gtt_end;
+	u64			sysvm_size;
+	u64			sysvm_start;
+	u64			sysvm_end;
 	u64			vram_start;
 	u64			vram_end;
 	unsigned		vram_width;
 	u64			real_vram_size;
 	int			vram_mtrr;
-	u64                     gtt_base_align;
+	u64                     sysvm_base_align;
 	u64                     mc_mask;
 	const struct firmware   *fw;	/* MC firmware */
 	uint32_t                fw_version;
@@ -1575,7 +1575,7 @@ struct amdgpu_device {
 
 	/* MC */
 	struct amdgpu_mc		mc;
-	struct amdgpu_gart		gart;
+	struct amdgpu_sysvm		sysvm;
 	struct amdgpu_dummy_page	dummy_page;
 	struct amdgpu_vm_manager	vm_manager;
 	struct amdgpu_vmhub             vmhub[AMDGPU_MAX_VMHUBS];
@@ -1686,8 +1686,8 @@ struct amdgpu_device {
 	struct list_head                shadow_list;
 	struct mutex                    shadow_list_lock;
 	/* link all gtt */
-	spinlock_t			gtt_list_lock;
-	struct list_head                gtt_list;
+	spinlock_t			sysvm_list_lock;
+	struct list_head                sysvm_list;
 	/* keep an lru list of rings by HW IP */
 	struct list_head		ring_lru_list;
 	spinlock_t			ring_lru_list_lock;
@@ -1835,13 +1835,13 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
 #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
 #define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
-#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
-#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
-#define amdgpu_gart_get_vm_pde(adev, addr) (adev)->gart.gart_funcs->get_vm_pde((adev), (addr))
+#define amdgpu_sysvm_flush_gpu_tlb(adev, vmid) (adev)->sysvm.sysvm_funcs->flush_gpu_tlb((adev), (vmid))
+#define amdgpu_sysvm_set_pte_pde(adev, pt, idx, addr, flags) (adev)->sysvm.sysvm_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
+#define amdgpu_sysvm_get_vm_pde(adev, addr) (adev)->sysvm.sysvm_funcs->get_vm_pde((adev), (addr))
 #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
 #define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
 #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
-#define amdgpu_vm_get_pte_flags(adev, flags) (adev)->gart.gart_funcs->get_vm_pte_flags((adev),(flags))
+#define amdgpu_vm_get_pte_flags(adev, flags) (adev)->sysvm.sysvm_funcs->get_vm_pte_flags((adev),(flags))
 #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
 #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
 #define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
@@ -1908,7 +1908,7 @@ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
 uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
 				 struct ttm_mem_reg *mem);
 void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
-void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
+void amdgpu_sysvm_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
 void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
 int amdgpu_ttm_init(struct amdgpu_device *adev);
 void amdgpu_ttm_fini(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 5b1220f..46a82d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -667,39 +667,39 @@ void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64
 }
 
 /**
- * amdgpu_gtt_location - try to find GTT location
+ * amdgpu_sysvm_location - try to find SYSVM location
  * @adev: amdgpu device structure holding all necessary informations
  * @mc: memory controller structure holding memory informations
  *
- * Function will place try to place GTT before or after VRAM.
+ * Function will place try to place SYSVM before or after VRAM.
  *
- * If GTT size is bigger than space left then we ajust GTT size.
+ * If SYSVM size is bigger than space left then we ajust SYSVM size.
  * Thus function will never fails.
  *
- * FIXME: when reducing GTT size align new size on power of 2.
+ * FIXME: when reducing SYSVM size align new size on power of 2.
  */
-void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
+void amdgpu_sysvm_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
 {
 	u64 size_af, size_bf;
 
-	size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
-	size_bf = mc->vram_start & ~mc->gtt_base_align;
+	size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->sysvm_base_align) & ~mc->sysvm_base_align;
+	size_bf = mc->vram_start & ~mc->sysvm_base_align;
 	if (size_bf > size_af) {
-		if (mc->gtt_size > size_bf) {
-			dev_warn(adev->dev, "limiting GTT\n");
-			mc->gtt_size = size_bf;
+		if (mc->sysvm_size > size_bf) {
+			dev_warn(adev->dev, "limiting SYSVM\n");
+			mc->sysvm_size = size_bf;
 		}
-		mc->gtt_start = 0;
+		mc->sysvm_start = 0;
 	} else {
-		if (mc->gtt_size > size_af) {
-			dev_warn(adev->dev, "limiting GTT\n");
-			mc->gtt_size = size_af;
+		if (mc->sysvm_size > size_af) {
+			dev_warn(adev->dev, "limiting SYSVM\n");
+			mc->sysvm_size = size_af;
 		}
-		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
+		mc->sysvm_start = (mc->vram_end + 1 + mc->sysvm_base_align) & ~mc->sysvm_base_align;
 	}
-	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
-	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
-			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
+	mc->sysvm_end = mc->sysvm_start + mc->sysvm_size - 1;
+	dev_info(adev->dev, "SYSVM: %lluM 0x%016llX - 0x%016llX\n",
+			mc->sysvm_size >> 20, mc->sysvm_start, mc->sysvm_end);
 }
 
 /*
@@ -1663,12 +1663,12 @@ static int amdgpu_init(struct amdgpu_device *adev)
 
 static void amdgpu_fill_reset_magic(struct amdgpu_device *adev)
 {
-	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
+	memcpy(adev->reset_magic, adev->sysvm.ptr, AMDGPU_RESET_MAGIC_NUM);
 }
 
 static bool amdgpu_check_vram_lost(struct amdgpu_device *adev)
 {
-	return !!memcmp(adev->gart.ptr, adev->reset_magic,
+	return !!memcmp(adev->sysvm.ptr, adev->reset_magic,
 			AMDGPU_RESET_MAGIC_NUM);
 }
 
@@ -2052,14 +2052,14 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	adev->flags = flags;
 	adev->asic_type = flags & AMD_ASIC_MASK;
 	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
-	adev->mc.gtt_size = 512 * 1024 * 1024;
+	adev->mc.sysvm_size = 512 * 1024 * 1024;
 	adev->accel_working = false;
 	adev->num_rings = 0;
 	adev->mman.buffer_funcs = NULL;
 	adev->mman.buffer_funcs_ring = NULL;
 	adev->vm_manager.vm_pte_funcs = NULL;
 	adev->vm_manager.vm_pte_num_rings = 0;
-	adev->gart.gart_funcs = NULL;
+	adev->sysvm.sysvm_funcs = NULL;
 	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
 
 	adev->smc_rreg = &amdgpu_invalid_rreg;
@@ -2110,8 +2110,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	INIT_LIST_HEAD(&adev->shadow_list);
 	mutex_init(&adev->shadow_list_lock);
 
-	INIT_LIST_HEAD(&adev->gtt_list);
-	spin_lock_init(&adev->gtt_list_lock);
+	INIT_LIST_HEAD(&adev->sysvm_list);
+	spin_lock_init(&adev->sysvm_list_lock);
 
 	INIT_LIST_HEAD(&adev->ring_lru_list);
 	spin_lock_init(&adev->ring_lru_list_lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
deleted file mode 100644
index c808388..0000000
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ /dev/null
@@ -1,423 +0,0 @@
-/*
- * Copyright 2008 Advanced Micro Devices, Inc.
- * Copyright 2008 Red Hat Inc.
- * Copyright 2009 Jerome Glisse.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Dave Airlie
- *          Alex Deucher
- *          Jerome Glisse
- */
-#include <drm/drmP.h>
-#include <drm/amdgpu_drm.h>
-#include "amdgpu.h"
-
-/*
- * GART
- * The GART (Graphics Aperture Remapping Table) is an aperture
- * in the GPU's address space.  System pages can be mapped into
- * the aperture and look like contiguous pages from the GPU's
- * perspective.  A page table maps the pages in the aperture
- * to the actual backing pages in system memory.
- *
- * Radeon GPUs support both an internal GART, as described above,
- * and AGP.  AGP works similarly, but the GART table is configured
- * and maintained by the northbridge rather than the driver.
- * Radeon hw has a separate AGP aperture that is programmed to
- * point to the AGP aperture provided by the northbridge and the
- * requests are passed through to the northbridge aperture.
- * Both AGP and internal GART can be used at the same time, however
- * that is not currently supported by the driver.
- *
- * This file handles the common internal GART management.
- */
-
-/*
- * Common GART table functions.
- */
-
-/**
- * amdgpu_gart_set_defaults - set the default gtt_size
- *
- * @adev: amdgpu_device pointer
- *
- * Set the default gtt_size based on parameters and available VRAM.
- */
-void amdgpu_gart_set_defaults(struct amdgpu_device *adev)
-{
-	/* unless the user had overridden it, set the gart
-	 * size equal to the 1024 or vram, whichever is larger.
-	 */
-	if (amdgpu_gart_size == -1)
-		adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
-					adev->mc.mc_vram_size);
-	else
-		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
-}
-
-/**
- * amdgpu_gart_table_ram_alloc - allocate system ram for gart page table
- *
- * @adev: amdgpu_device pointer
- *
- * Allocate system memory for GART page table
- * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
- * gart table to be in system memory.
- * Returns 0 for success, -ENOMEM for failure.
- */
-int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev)
-{
-	void *ptr;
-
-	ptr = pci_alloc_consistent(adev->pdev, adev->gart.table_size,
-				   &adev->gart.table_addr);
-	if (ptr == NULL) {
-		return -ENOMEM;
-	}
-#ifdef CONFIG_X86
-	if (0) {
-		set_memory_uc((unsigned long)ptr,
-			      adev->gart.table_size >> PAGE_SHIFT);
-	}
-#endif
-	adev->gart.ptr = ptr;
-	memset((void *)adev->gart.ptr, 0, adev->gart.table_size);
-	return 0;
-}
-
-/**
- * amdgpu_gart_table_ram_free - free system ram for gart page table
- *
- * @adev: amdgpu_device pointer
- *
- * Free system memory for GART page table
- * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
- * gart table to be in system memory.
- */
-void amdgpu_gart_table_ram_free(struct amdgpu_device *adev)
-{
-	if (adev->gart.ptr == NULL) {
-		return;
-	}
-#ifdef CONFIG_X86
-	if (0) {
-		set_memory_wb((unsigned long)adev->gart.ptr,
-			      adev->gart.table_size >> PAGE_SHIFT);
-	}
-#endif
-	pci_free_consistent(adev->pdev, adev->gart.table_size,
-			    (void *)adev->gart.ptr,
-			    adev->gart.table_addr);
-	adev->gart.ptr = NULL;
-	adev->gart.table_addr = 0;
-}
-
-/**
- * amdgpu_gart_table_vram_alloc - allocate vram for gart page table
- *
- * @adev: amdgpu_device pointer
- *
- * Allocate video memory for GART page table
- * (pcie r4xx, r5xx+).  These asics require the
- * gart table to be in video memory.
- * Returns 0 for success, error for failure.
- */
-int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
-{
-	int r;
-
-	if (adev->gart.robj == NULL) {
-		r = amdgpu_bo_create(adev, adev->gart.table_size,
-				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
-				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-				     NULL, NULL, &adev->gart.robj);
-		if (r) {
-			return r;
-		}
-	}
-	return 0;
-}
-
-/**
- * amdgpu_gart_table_vram_pin - pin gart page table in vram
- *
- * @adev: amdgpu_device pointer
- *
- * Pin the GART page table in vram so it will not be moved
- * by the memory manager (pcie r4xx, r5xx+).  These asics require the
- * gart table to be in video memory.
- * Returns 0 for success, error for failure.
- */
-int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
-{
-	uint64_t gpu_addr;
-	int r;
-
-	r = amdgpu_bo_reserve(adev->gart.robj, false);
-	if (unlikely(r != 0))
-		return r;
-	r = amdgpu_bo_pin(adev->gart.robj,
-				AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
-	if (r) {
-		amdgpu_bo_unreserve(adev->gart.robj);
-		return r;
-	}
-	r = amdgpu_bo_kmap(adev->gart.robj, &adev->gart.ptr);
-	if (r)
-		amdgpu_bo_unpin(adev->gart.robj);
-	amdgpu_bo_unreserve(adev->gart.robj);
-	adev->gart.table_addr = gpu_addr;
-	return r;
-}
-
-/**
- * amdgpu_gart_table_vram_unpin - unpin gart page table in vram
- *
- * @adev: amdgpu_device pointer
- *
- * Unpin the GART page table in vram (pcie r4xx, r5xx+).
- * These asics require the gart table to be in video memory.
- */
-void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
-{
-	int r;
-
-	if (adev->gart.robj == NULL) {
-		return;
-	}
-	r = amdgpu_bo_reserve(adev->gart.robj, true);
-	if (likely(r == 0)) {
-		amdgpu_bo_kunmap(adev->gart.robj);
-		amdgpu_bo_unpin(adev->gart.robj);
-		amdgpu_bo_unreserve(adev->gart.robj);
-		adev->gart.ptr = NULL;
-	}
-}
-
-/**
- * amdgpu_gart_table_vram_free - free gart page table vram
- *
- * @adev: amdgpu_device pointer
- *
- * Free the video memory used for the GART page table
- * (pcie r4xx, r5xx+).  These asics require the gart table to
- * be in video memory.
- */
-void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
-{
-	if (adev->gart.robj == NULL) {
-		return;
-	}
-	amdgpu_bo_unref(&adev->gart.robj);
-}
-
-/*
- * Common gart functions.
- */
-/**
- * amdgpu_gart_unbind - unbind pages from the gart page table
- *
- * @adev: amdgpu_device pointer
- * @offset: offset into the GPU's gart aperture
- * @pages: number of pages to unbind
- *
- * Unbinds the requested pages from the gart page table and
- * replaces them with the dummy page (all asics).
- * Returns 0 for success, -EINVAL for failure.
- */
-int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
-			int pages)
-{
-	unsigned t;
-	unsigned p;
-	int i, j;
-	u64 page_base;
-	/* Starting from VEGA10, system bit must be 0 to mean invalid. */
-	uint64_t flags = 0;
-
-	if (!adev->gart.ready) {
-		WARN(1, "trying to unbind memory from uninitialized GART !\n");
-		return -EINVAL;
-	}
-
-	t = offset / AMDGPU_GPU_PAGE_SIZE;
-	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
-	for (i = 0; i < pages; i++, p++) {
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-		adev->gart.pages[p] = NULL;
-#endif
-		page_base = adev->dummy_page.addr;
-		if (!adev->gart.ptr)
-			continue;
-
-		for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
-			amdgpu_gart_set_pte_pde(adev, adev->gart.ptr,
-						t, page_base, flags);
-			page_base += AMDGPU_GPU_PAGE_SIZE;
-		}
-	}
-	mb();
-	amdgpu_gart_flush_gpu_tlb(adev, 0);
-	return 0;
-}
-
-/**
- * amdgpu_gart_map - map dma_addresses into GART entries
- *
- * @adev: amdgpu_device pointer
- * @offset: offset into the GPU's gart aperture
- * @pages: number of pages to bind
- * @dma_addr: DMA addresses of pages
- *
- * Map the dma_addresses into GART entries (all asics).
- * Returns 0 for success, -EINVAL for failure.
- */
-int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
-		    int pages, dma_addr_t *dma_addr, uint64_t flags,
-		    void *dst)
-{
-	uint64_t page_base;
-	unsigned i, j, t;
-
-	if (!adev->gart.ready) {
-		WARN(1, "trying to bind memory to uninitialized GART !\n");
-		return -EINVAL;
-	}
-
-	t = offset / AMDGPU_GPU_PAGE_SIZE;
-
-	for (i = 0; i < pages; i++) {
-		page_base = dma_addr[i];
-		for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
-			amdgpu_gart_set_pte_pde(adev, dst, t, page_base, flags);
-			page_base += AMDGPU_GPU_PAGE_SIZE;
-		}
-	}
-	return 0;
-}
-
-/**
- * amdgpu_gart_bind - bind pages into the gart page table
- *
- * @adev: amdgpu_device pointer
- * @offset: offset into the GPU's gart aperture
- * @pages: number of pages to bind
- * @pagelist: pages to bind
- * @dma_addr: DMA addresses of pages
- *
- * Binds the requested pages to the gart page table
- * (all asics).
- * Returns 0 for success, -EINVAL for failure.
- */
-int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
-		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
-		     uint64_t flags)
-{
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-	unsigned i,t,p;
-#endif
-	int r;
-
-	if (!adev->gart.ready) {
-		WARN(1, "trying to bind memory to uninitialized GART !\n");
-		return -EINVAL;
-	}
-
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-	t = offset / AMDGPU_GPU_PAGE_SIZE;
-	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
-	for (i = 0; i < pages; i++, p++)
-		adev->gart.pages[p] = pagelist[i];
-#endif
-
-	if (adev->gart.ptr) {
-		r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
-			    adev->gart.ptr);
-		if (r)
-			return r;
-	}
-
-	mb();
-	amdgpu_gart_flush_gpu_tlb(adev, 0);
-	return 0;
-}
-
-/**
- * amdgpu_gart_init - init the driver info for managing the gart
- *
- * @adev: amdgpu_device pointer
- *
- * Allocate the dummy page and init the gart driver info (all asics).
- * Returns 0 for success, error for failure.
- */
-int amdgpu_gart_init(struct amdgpu_device *adev)
-{
-	int r;
-
-	if (adev->dummy_page.page)
-		return 0;
-
-	/* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
-	if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) {
-		DRM_ERROR("Page size is smaller than GPU page size!\n");
-		return -EINVAL;
-	}
-	r = amdgpu_dummy_page_init(adev);
-	if (r)
-		return r;
-	/* Compute table size */
-	adev->gart.num_cpu_pages = adev->mc.gtt_size / PAGE_SIZE;
-	adev->gart.num_gpu_pages = adev->mc.gtt_size / AMDGPU_GPU_PAGE_SIZE;
-	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
-		 adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
-
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-	/* Allocate pages table */
-	adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages);
-	if (adev->gart.pages == NULL) {
-		amdgpu_gart_fini(adev);
-		return -ENOMEM;
-	}
-#endif
-
-	return 0;
-}
-
-/**
- * amdgpu_gart_fini - tear down the driver info for managing the gart
- *
- * @adev: amdgpu_device pointer
- *
- * Tear down the gart driver info and free the dummy page (all asics).
- */
-void amdgpu_gart_fini(struct amdgpu_device *adev)
-{
-	if (adev->gart.ready) {
-		/* unbind pages */
-		amdgpu_gart_unbind(adev, 0, adev->gart.num_cpu_pages);
-	}
-	adev->gart.ready = false;
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-	vfree(adev->gart.pages);
-	adev->gart.pages = NULL;
-#endif
-	amdgpu_dummy_page_fini(adev);
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 4510627..73a1c64 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -82,7 +82,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
 	if (r)
 		kfree(*job);
 	else
-		(*job)->vm_pd_addr = adev->gart.table_addr;
+		(*job)->vm_pd_addr = adev->sysvm.table_addr;
 
 	return r;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c
new file mode 100644
index 0000000..50fc8d7
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c
@@ -0,0 +1,423 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ *          Jerome Glisse
+ */
+#include <drm/drmP.h>
+#include <drm/amdgpu_drm.h>
+#include "amdgpu.h"
+
+/*
+ * SYSVM
+ * The system VM (previously called GART) is an aperture
+ * in the GPU's address space.  System pages can be mapped into
+ * the aperture and look like contiguous pages from the GPU's
+ * perspective.  A page table maps the pages in the aperture
+ * to the actual backing pages in system memory.
+ *
+ * Radeon GPUs support both an internal SYSVM based GART, as described above,
+ * and AGP.  AGP works similarly, but the GART table is configured
+ * and maintained by the northbridge rather than the driver.
+ * Radeon hw has a separate AGP aperture that is programmed to
+ * point to the AGP aperture provided by the northbridge and the
+ * requests are passed through to the northbridge aperture.
+ * Both AGP and the internal GART can be used at the same time; however,
+ * that is not currently supported by the driver.
+ *
+ * This file handles the common internal SYSVM management.
+ */
+
+/*
+ * Common SYSVM table functions.
+ */
+
+/**
+ * amdgpu_sysvm_set_defaults - set the default sysvm_size
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set the default sysvm_size based on parameters and available VRAM.
+ */
+void amdgpu_sysvm_set_defaults(struct amdgpu_device *adev)
+{
+	/* unless the user has overridden it, set the sysvm size to the
+	 * default GTT size or the VRAM size, whichever is larger.
+	 */
+	if (amdgpu_gart_size == -1)
+		adev->mc.sysvm_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
+					adev->mc.mc_vram_size);
+	else
+		adev->mc.sysvm_size = (uint64_t)amdgpu_gart_size << 20;
+}
+
+/**
+ * amdgpu_sysvm_table_ram_alloc - allocate system ram for gart page table
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate system memory for SYSVM page table
+ * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
+ * gart table to be in system memory.
+ * Returns 0 for success, -ENOMEM for failure.
+ */
+int amdgpu_sysvm_table_ram_alloc(struct amdgpu_device *adev)
+{
+	void *ptr;
+
+	ptr = pci_alloc_consistent(adev->pdev, adev->sysvm.table_size,
+				   &adev->sysvm.table_addr);
+	if (ptr == NULL) {
+		return -ENOMEM;
+	}
+#ifdef CONFIG_X86
+	if (0) {
+		set_memory_uc((unsigned long)ptr,
+			      adev->sysvm.table_size >> PAGE_SHIFT);
+	}
+#endif
+	adev->sysvm.ptr = ptr;
+	memset((void *)adev->sysvm.ptr, 0, adev->sysvm.table_size);
+	return 0;
+}
+
+/**
+ * amdgpu_sysvm_table_ram_free - free system ram for gart page table
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Free system memory for SYSVM page table
+ * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
+ * gart table to be in system memory.
+ */
+void amdgpu_sysvm_table_ram_free(struct amdgpu_device *adev)
+{
+	if (adev->sysvm.ptr == NULL) {
+		return;
+	}
+#ifdef CONFIG_X86
+	if (0) {
+		set_memory_wb((unsigned long)adev->sysvm.ptr,
+			      adev->sysvm.table_size >> PAGE_SHIFT);
+	}
+#endif
+	pci_free_consistent(adev->pdev, adev->sysvm.table_size,
+			    (void *)adev->sysvm.ptr,
+			    adev->sysvm.table_addr);
+	adev->sysvm.ptr = NULL;
+	adev->sysvm.table_addr = 0;
+}
+
+/**
+ * amdgpu_sysvm_table_vram_alloc - allocate vram for gart page table
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate video memory for SYSVM page table
+ * (pcie r4xx, r5xx+).  These asics require the
+ * gart table to be in video memory.
+ * Returns 0 for success, error for failure.
+ */
+int amdgpu_sysvm_table_vram_alloc(struct amdgpu_device *adev)
+{
+	int r;
+
+	if (adev->sysvm.robj == NULL) {
+		r = amdgpu_bo_create(adev, adev->sysvm.table_size,
+				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
+				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
+				     NULL, NULL, &adev->sysvm.robj);
+		if (r) {
+			return r;
+		}
+	}
+	return 0;
+}
+
+/**
+ * amdgpu_sysvm_table_vram_pin - pin gart page table in vram
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Pin the SYSVM page table in vram so it will not be moved
+ * by the memory manager (pcie r4xx, r5xx+).  These asics require the
+ * gart table to be in video memory.
+ * Returns 0 for success, error for failure.
+ */
+int amdgpu_sysvm_table_vram_pin(struct amdgpu_device *adev)
+{
+	uint64_t gpu_addr;
+	int r;
+
+	r = amdgpu_bo_reserve(adev->sysvm.robj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = amdgpu_bo_pin(adev->sysvm.robj,
+				AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
+	if (r) {
+		amdgpu_bo_unreserve(adev->sysvm.robj);
+		return r;
+	}
+	r = amdgpu_bo_kmap(adev->sysvm.robj, &adev->sysvm.ptr);
+	if (r)
+		amdgpu_bo_unpin(adev->sysvm.robj);
+	amdgpu_bo_unreserve(adev->sysvm.robj);
+	adev->sysvm.table_addr = gpu_addr;
+	return r;
+}
+
+/**
+ * amdgpu_sysvm_table_vram_unpin - unpin gart page table in vram
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Unpin the GART page table in vram (pcie r4xx, r5xx+).
+ * These asics require the gart table to be in video memory.
+ */
+void amdgpu_sysvm_table_vram_unpin(struct amdgpu_device *adev)
+{
+	int r;
+
+	if (adev->sysvm.robj == NULL) {
+		return;
+	}
+	r = amdgpu_bo_reserve(adev->sysvm.robj, true);
+	if (likely(r == 0)) {
+		amdgpu_bo_kunmap(adev->sysvm.robj);
+		amdgpu_bo_unpin(adev->sysvm.robj);
+		amdgpu_bo_unreserve(adev->sysvm.robj);
+		adev->sysvm.ptr = NULL;
+	}
+}
+
+/**
+ * amdgpu_sysvm_table_vram_free - free gart page table vram
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Free the video memory used for the GART page table
+ * (pcie r4xx, r5xx+).  These asics require the gart table to
+ * be in video memory.
+ */
+void amdgpu_sysvm_table_vram_free(struct amdgpu_device *adev)
+{
+	if (adev->sysvm.robj == NULL) {
+		return;
+	}
+	amdgpu_bo_unref(&adev->sysvm.robj);
+}
+
+/*
+ * Common gart functions.
+ */
+/**
+ * amdgpu_sysvm_unbind - unbind pages from the gart page table
+ *
+ * @adev: amdgpu_device pointer
+ * @offset: offset into the GPU's gart aperture
+ * @pages: number of pages to unbind
+ *
+ * Unbinds the requested pages from the gart page table and
+ * replaces them with the dummy page (all asics).
+ * Returns 0 for success, -EINVAL for failure.
+ */
+int amdgpu_sysvm_unbind(struct amdgpu_device *adev, uint64_t offset,
+			int pages)
+{
+	unsigned t;
+	unsigned p;
+	int i, j;
+	u64 page_base;
+	/* Starting from VEGA10, system bit must be 0 to mean invalid. */
+	uint64_t flags = 0;
+
+	if (!adev->sysvm.ready) {
+		WARN(1, "trying to unbind memory from uninitialized GART !\n");
+		return -EINVAL;
+	}
+
+	t = offset / AMDGPU_GPU_PAGE_SIZE;
+	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+	for (i = 0; i < pages; i++, p++) {
+#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
+		adev->sysvm.pages[p] = NULL;
+#endif
+		page_base = adev->dummy_page.addr;
+		if (!adev->sysvm.ptr)
+			continue;
+
+		for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
+			amdgpu_sysvm_set_pte_pde(adev, adev->sysvm.ptr,
+						t, page_base, flags);
+			page_base += AMDGPU_GPU_PAGE_SIZE;
+		}
+	}
+	mb();
+	amdgpu_sysvm_flush_gpu_tlb(adev, 0);
+	return 0;
+}
+
+/**
+ * amdgpu_sysvm_map - map dma_addresses into GART entries
+ *
+ * @adev: amdgpu_device pointer
+ * @offset: offset into the GPU's gart aperture
+ * @pages: number of pages to bind
+ * @dma_addr: DMA addresses of pages
+ *
+ * Map the dma_addresses into GART entries (all asics).
+ * Returns 0 for success, -EINVAL for failure.
+ */
+int amdgpu_sysvm_map(struct amdgpu_device *adev, uint64_t offset,
+		    int pages, dma_addr_t *dma_addr, uint64_t flags,
+		    void *dst)
+{
+	uint64_t page_base;
+	unsigned i, j, t;
+
+	if (!adev->sysvm.ready) {
+		WARN(1, "trying to bind memory to uninitialized GART !\n");
+		return -EINVAL;
+	}
+
+	t = offset / AMDGPU_GPU_PAGE_SIZE;
+
+	for (i = 0; i < pages; i++) {
+		page_base = dma_addr[i];
+		for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
+			amdgpu_sysvm_set_pte_pde(adev, dst, t, page_base, flags);
+			page_base += AMDGPU_GPU_PAGE_SIZE;
+		}
+	}
+	return 0;
+}
+
+/**
+ * amdgpu_sysvm_bind - bind pages into the gart page table
+ *
+ * @adev: amdgpu_device pointer
+ * @offset: offset into the GPU's gart aperture
+ * @pages: number of pages to bind
+ * @pagelist: pages to bind
+ * @dma_addr: DMA addresses of pages
+ *
+ * Binds the requested pages to the gart page table
+ * (all asics).
+ * Returns 0 for success, -EINVAL for failure.
+ */
+int amdgpu_sysvm_bind(struct amdgpu_device *adev, uint64_t offset,
+		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
+		     uint64_t flags)
+{
+#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
+	unsigned i,t,p;
+#endif
+	int r;
+
+	if (!adev->sysvm.ready) {
+		WARN(1, "trying to bind memory to uninitialized GART !\n");
+		return -EINVAL;
+	}
+
+#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
+	t = offset / AMDGPU_GPU_PAGE_SIZE;
+	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+	for (i = 0; i < pages; i++, p++)
+		adev->sysvm.pages[p] = pagelist[i];
+#endif
+
+	if (adev->sysvm.ptr) {
+		r = amdgpu_sysvm_map(adev, offset, pages, dma_addr, flags,
+			    adev->sysvm.ptr);
+		if (r)
+			return r;
+	}
+
+	mb();
+	amdgpu_sysvm_flush_gpu_tlb(adev, 0);
+	return 0;
+}
+
+/**
+ * amdgpu_sysvm_init - init the driver info for managing the gart
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate the dummy page and init the gart driver info (all asics).
+ * Returns 0 for success, error for failure.
+ */
+int amdgpu_sysvm_init(struct amdgpu_device *adev)
+{
+	int r;
+
+	if (adev->dummy_page.page)
+		return 0;
+
+	/* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
+	if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) {
+		DRM_ERROR("Page size is smaller than GPU page size!\n");
+		return -EINVAL;
+	}
+	r = amdgpu_dummy_page_init(adev);
+	if (r)
+		return r;
+	/* Compute table size */
+	adev->sysvm.num_cpu_pages = adev->mc.sysvm_size / PAGE_SIZE;
+	adev->sysvm.num_gpu_pages = adev->mc.sysvm_size / AMDGPU_GPU_PAGE_SIZE;
+	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
+		 adev->sysvm.num_cpu_pages, adev->sysvm.num_gpu_pages);
+
+#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
+	/* Allocate pages table */
+	adev->sysvm.pages = vzalloc(sizeof(void *) * adev->sysvm.num_cpu_pages);
+	if (adev->sysvm.pages == NULL) {
+		amdgpu_sysvm_fini(adev);
+		return -ENOMEM;
+	}
+#endif
+
+	return 0;
+}
+
+/**
+ * amdgpu_sysvm_fini - tear down the driver info for managing the gart
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Tear down the gart driver info and free the dummy page (all asics).
+ */
+void amdgpu_sysvm_fini(struct amdgpu_device *adev)
+{
+	if (adev->sysvm.ready) {
+		/* unbind pages */
+		amdgpu_sysvm_unbind(adev, 0, adev->sysvm.num_cpu_pages);
+	}
+	adev->sysvm.ready = false;
+#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
+	vfree(adev->sysvm.pages);
+	adev->sysvm.pages = NULL;
+#endif
+	amdgpu_dummy_page_fini(adev);
+}
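
Side note for readers new to this code: the bind/unbind/map helpers above juggle
two page sizes. The caller supplies CPU pages, while the SYSVM page table is
indexed in GPU pages. A minimal sketch of that arithmetic, assuming PAGE_SIZE is
a multiple of AMDGPU_GPU_PAGE_SIZE, as the PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE
check in amdgpu_sysvm_init() expects:

	/* Sketch only: the CPU page backing aperture byte offset 'offset'
	 * expands into a run of GPU-page-sized PTEs. */
	unsigned int gpu_per_cpu = PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE;
	unsigned int first_pte   = offset / AMDGPU_GPU_PAGE_SIZE;
	unsigned int cpu_page    = first_pte / gpu_per_cpu;

	/* PTEs first_pte .. first_pte + gpu_per_cpu - 1 all target the same
	 * CPU page, at addresses AMDGPU_GPU_PAGE_SIZE apart. */
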
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index d02e611..651712e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -32,8 +32,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 {
 	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 	struct amdgpu_bo *vram_obj = NULL;
-	struct amdgpu_bo **gtt_obj = NULL;
-	uint64_t gtt_addr, vram_addr;
+	struct amdgpu_bo **sysvm_obj = NULL;
+	uint64_t sysvm_addr, vram_addr;
 	unsigned n, size;
 	int i, r;
 
@@ -42,7 +42,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 	/* Number of tests =
 	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
 	 */
-	n = adev->mc.gtt_size - AMDGPU_IB_POOL_SIZE*64*1024;
+	n = adev->mc.sysvm_size - AMDGPU_IB_POOL_SIZE*64*1024;
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
 		if (adev->rings[i])
 			n -= adev->rings[i]->ring_size;
@@ -52,8 +52,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 		n -= adev->irq.ih.ring_size;
 	n /= size;
 
-	gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
-	if (!gtt_obj) {
+	sysvm_obj = kzalloc(n * sizeof(*sysvm_obj), GFP_KERNEL);
+	if (!sysvm_obj) {
 		DRM_ERROR("Failed to allocate %d pointers\n", n);
 		r = 1;
 		goto out_cleanup;
@@ -75,42 +75,42 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 		goto out_unres;
 	}
 	for (i = 0; i < n; i++) {
-		void *gtt_map, *vram_map;
-		void **gtt_start, **gtt_end;
+		void *sysvm_map, *vram_map;
+		void **sysvm_start, **sysvm_end;
 		void **vram_start, **vram_end;
 		struct dma_fence *fence = NULL;
 
 		r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
 				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
-				     NULL, gtt_obj + i);
+				     NULL, sysvm_obj + i);
 		if (r) {
 			DRM_ERROR("Failed to create GTT object %d\n", i);
 			goto out_lclean;
 		}
 
-		r = amdgpu_bo_reserve(gtt_obj[i], false);
+		r = amdgpu_bo_reserve(sysvm_obj[i], false);
 		if (unlikely(r != 0))
 			goto out_lclean_unref;
-		r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, &gtt_addr);
+		r = amdgpu_bo_pin(sysvm_obj[i], AMDGPU_GEM_DOMAIN_GTT, &sysvm_addr);
 		if (r) {
 			DRM_ERROR("Failed to pin GTT object %d\n", i);
 			goto out_lclean_unres;
 		}
 
-		r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
+		r = amdgpu_bo_kmap(sysvm_obj[i], &sysvm_map);
 		if (r) {
 			DRM_ERROR("Failed to map GTT object %d\n", i);
 			goto out_lclean_unpin;
 		}
 
-		for (gtt_start = gtt_map, gtt_end = gtt_map + size;
-		     gtt_start < gtt_end;
-		     gtt_start++)
-			*gtt_start = gtt_start;
+		for (sysvm_start = sysvm_map, sysvm_end = sysvm_map + size;
+		     sysvm_start < sysvm_end;
+		     sysvm_start++)
+			*sysvm_start = sysvm_start;
 
-		amdgpu_bo_kunmap(gtt_obj[i]);
+		amdgpu_bo_kunmap(sysvm_obj[i]);
 
-		r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
+		r = amdgpu_copy_buffer(ring, sysvm_addr, vram_addr,
 				       size, NULL, &fence, false, false);
 
 		if (r) {
@@ -132,21 +132,21 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 			goto out_lclean_unpin;
 		}
 
-		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
+		for (sysvm_start = sysvm_map, sysvm_end = sysvm_map + size,
 		     vram_start = vram_map, vram_end = vram_map + size;
 		     vram_start < vram_end;
-		     gtt_start++, vram_start++) {
-			if (*vram_start != gtt_start) {
+		     sysvm_start++, vram_start++) {
+			if (*vram_start != sysvm_start) {
 				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
 					  "expected 0x%p (GTT/VRAM offset "
 					  "0x%16llx/0x%16llx)\n",
-					  i, *vram_start, gtt_start,
+					  i, *vram_start, sysvm_start,
 					  (unsigned long long)
-					  (gtt_addr - adev->mc.gtt_start +
-					   (void*)gtt_start - gtt_map),
+					  (sysvm_addr - adev->mc.sysvm_start +
+					   (void*)sysvm_start - sysvm_map),
 					  (unsigned long long)
 					  (vram_addr - adev->mc.vram_start +
-					   (void*)gtt_start - gtt_map));
+					   (void*)sysvm_start - sysvm_map));
 				amdgpu_bo_kunmap(vram_obj);
 				goto out_lclean_unpin;
 			}
@@ -155,7 +155,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 
 		amdgpu_bo_kunmap(vram_obj);
 
-		r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr,
+		r = amdgpu_copy_buffer(ring, vram_addr, sysvm_addr,
 				       size, NULL, &fence, false, false);
 
 		if (r) {
@@ -171,49 +171,49 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 
 		dma_fence_put(fence);
 
-		r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
+		r = amdgpu_bo_kmap(sysvm_obj[i], &sysvm_map);
 		if (r) {
 			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
 			goto out_lclean_unpin;
 		}
 
-		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
+		for (sysvm_start = sysvm_map, sysvm_end = sysvm_map + size,
 		     vram_start = vram_map, vram_end = vram_map + size;
-		     gtt_start < gtt_end;
-		     gtt_start++, vram_start++) {
-			if (*gtt_start != vram_start) {
+		     sysvm_start < sysvm_end;
+		     sysvm_start++, vram_start++) {
+			if (*sysvm_start != vram_start) {
 				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
 					  "expected 0x%p (VRAM/GTT offset "
 					  "0x%16llx/0x%16llx)\n",
-					  i, *gtt_start, vram_start,
+					  i, *sysvm_start, vram_start,
 					  (unsigned long long)
 					  (vram_addr - adev->mc.vram_start +
 					   (void*)vram_start - vram_map),
 					  (unsigned long long)
-					  (gtt_addr - adev->mc.gtt_start +
+					  (sysvm_addr - adev->mc.sysvm_start +
 					   (void*)vram_start - vram_map));
-				amdgpu_bo_kunmap(gtt_obj[i]);
+				amdgpu_bo_kunmap(sysvm_obj[i]);
 				goto out_lclean_unpin;
 			}
 		}
 
-		amdgpu_bo_kunmap(gtt_obj[i]);
+		amdgpu_bo_kunmap(sysvm_obj[i]);
 
 		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
-			 gtt_addr - adev->mc.gtt_start);
+			 sysvm_addr - adev->mc.sysvm_start);
 		continue;
 
 out_lclean_unpin:
-		amdgpu_bo_unpin(gtt_obj[i]);
+		amdgpu_bo_unpin(sysvm_obj[i]);
 out_lclean_unres:
-		amdgpu_bo_unreserve(gtt_obj[i]);
+		amdgpu_bo_unreserve(sysvm_obj[i]);
 out_lclean_unref:
-		amdgpu_bo_unref(&gtt_obj[i]);
+		amdgpu_bo_unref(&sysvm_obj[i]);
 out_lclean:
 		for (--i; i >= 0; --i) {
-			amdgpu_bo_unpin(gtt_obj[i]);
-			amdgpu_bo_unreserve(gtt_obj[i]);
-			amdgpu_bo_unref(&gtt_obj[i]);
+			amdgpu_bo_unpin(sysvm_obj[i]);
+			amdgpu_bo_unreserve(sysvm_obj[i]);
+			amdgpu_bo_unref(&sysvm_obj[i]);
 		}
 		if (fence)
 			dma_fence_put(fence);
@@ -226,7 +226,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 out_unref:
 	amdgpu_bo_unref(&vram_obj);
 out_cleanup:
-	kfree(gtt_obj);
+	kfree(sysvm_obj);
 	if (r) {
 		pr_warn("Error while testing BO move\n");
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 5c7a6c5..9240357 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -102,7 +102,7 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
 		goto error_bo;
 	}
 
-	mutex_init(&adev->mman.gtt_window_lock);
+	mutex_init(&adev->mman.sysvm_window_lock);
 
 	ring = adev->mman.buffer_funcs_ring;
 	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
@@ -130,7 +130,7 @@ static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
 	if (adev->mman.mem_global_referenced) {
 		amd_sched_entity_fini(adev->mman.entity.sched,
 				      &adev->mman.entity);
-		mutex_destroy(&adev->mman.gtt_window_lock);
+		mutex_destroy(&adev->mman.sysvm_window_lock);
 		drm_global_item_unref(&adev->mman.bo_global_ref.ref);
 		drm_global_item_unref(&adev->mman.mem_global_ref);
 		adev->mman.mem_global_referenced = false;
@@ -158,7 +158,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		break;
 	case TTM_PL_TT:
 		man->func = &amdgpu_gtt_mgr_func;
-		man->gpu_offset = adev->mc.gtt_start;
+		man->gpu_offset = adev->mc.sysvm_start;
 		man->available_caching = TTM_PL_MASK_CACHING;
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
@@ -289,7 +289,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 	new_start = amdgpu_mm_node_addr(bo, new_mm, new_mem);
 
 	num_pages = new_mem->num_pages;
-	mutex_lock(&adev->mman.gtt_window_lock);
+	mutex_lock(&adev->mman.sysvm_window_lock);
 	while (num_pages) {
 		unsigned long cur_pages = min(min(old_size, new_size),
 					      (u64)AMDGPU_GTT_MAX_TRANSFER_SIZE);
@@ -341,14 +341,14 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 			new_start += cur_pages * PAGE_SIZE;
 		}
 	}
-	mutex_unlock(&adev->mman.gtt_window_lock);
+	mutex_unlock(&adev->mman.sysvm_window_lock);
 
 	r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
 	dma_fence_put(fence);
 	return r;
 
 error:
-	mutex_unlock(&adev->mman.gtt_window_lock);
+	mutex_unlock(&adev->mman.sysvm_window_lock);
 
 	if (fence)
 		dma_fence_wait(fence, false);
@@ -693,10 +693,10 @@ static int amdgpu_ttm_do_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 	uint64_t flags;
 	int r;
 
-	spin_lock(&gtt->adev->gtt_list_lock);
+	spin_lock(&gtt->adev->sysvm_list_lock);
 	flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, mem);
 	gtt->offset = (u64)mem->start << PAGE_SHIFT;
-	r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
+	r = amdgpu_sysvm_bind(gtt->adev, gtt->offset, ttm->num_pages,
 		ttm->pages, gtt->ttm.dma_address, flags);
 
 	if (r) {
@@ -705,9 +705,9 @@ static int amdgpu_ttm_do_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 		goto error_gart_bind;
 	}
 
-	list_add_tail(&gtt->list, &gtt->adev->gtt_list);
+	list_add_tail(&gtt->list, &gtt->adev->sysvm_list);
 error_gart_bind:
-	spin_unlock(&gtt->adev->gtt_list_lock);
+	spin_unlock(&gtt->adev->sysvm_list_lock);
 	return r;
 
 }
@@ -774,20 +774,20 @@ int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
 	int r;
 
 	bo_mem.mem_type = TTM_PL_TT;
-	spin_lock(&adev->gtt_list_lock);
-	list_for_each_entry_safe(gtt, tmp, &adev->gtt_list, list) {
+	spin_lock(&adev->sysvm_list_lock);
+	list_for_each_entry_safe(gtt, tmp, &adev->sysvm_list, list) {
 		flags = amdgpu_ttm_tt_pte_flags(gtt->adev, &gtt->ttm.ttm, &bo_mem);
-		r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
+		r = amdgpu_sysvm_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
 				     gtt->ttm.ttm.pages, gtt->ttm.dma_address,
 				     flags);
 		if (r) {
-			spin_unlock(&adev->gtt_list_lock);
+			spin_unlock(&adev->sysvm_list_lock);
 			DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
 				  gtt->ttm.ttm.num_pages, gtt->offset);
 			return r;
 		}
 	}
-	spin_unlock(&adev->gtt_list_lock);
+	spin_unlock(&adev->sysvm_list_lock);
 	return 0;
 }
 
@@ -803,8 +803,8 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
 		return 0;
 
 	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
-	spin_lock(&gtt->adev->gtt_list_lock);
-	r = amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages);
+	spin_lock(&gtt->adev->sysvm_list_lock);
+	r = amdgpu_sysvm_unbind(gtt->adev, gtt->offset, ttm->num_pages);
 	if (r) {
 		DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
 			  gtt->ttm.ttm.num_pages, gtt->offset);
@@ -812,7 +812,7 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
 	}
 	list_del_init(&gtt->list);
 error_unbind:
-	spin_unlock(&gtt->adev->gtt_list_lock);
+	spin_unlock(&gtt->adev->sysvm_list_lock);
 	return r;
 }
 
@@ -1037,7 +1037,7 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
 			flags |= AMDGPU_PTE_SNOOPED;
 	}
 
-	flags |= adev->gart.gart_pte_flags;
+	flags |= adev->sysvm.sysvm_pte_flags;
 	flags |= AMDGPU_PTE_READABLE;
 
 	if (!amdgpu_ttm_tt_is_readonly(ttm))
@@ -1144,13 +1144,13 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
 		 (unsigned) (adev->mc.real_vram_size / (1024 * 1024)));
 	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT,
-				adev->mc.gtt_size >> PAGE_SHIFT);
+				adev->mc.sysvm_size >> PAGE_SHIFT);
 	if (r) {
 		DRM_ERROR("Failed initializing GTT heap.\n");
 		return r;
 	}
 	DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
-		 (unsigned)(adev->mc.gtt_size / (1024 * 1024)));
+		 (unsigned)(adev->mc.sysvm_size / (1024 * 1024)));
 
 	adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
 	adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
@@ -1223,7 +1223,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
 	if (adev->gds.oa.total_size)
 		ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
 	ttm_bo_device_release(&adev->mman.bdev);
-	amdgpu_gart_fini(adev);
+	amdgpu_sysvm_fini(adev);
 	amdgpu_ttm_global_fini(adev);
 	adev->mman.initialized = false;
 	DRM_INFO("amdgpu: ttm finalized\n");
@@ -1279,7 +1279,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
 	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
 	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
 
-	*addr = adev->mc.gtt_start;
+	*addr = adev->mc.sysvm_start;
 	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
 		AMDGPU_GPU_PAGE_SIZE;
 
@@ -1296,7 +1296,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
 	src_addr = num_dw * 4;
 	src_addr += job->ibs[0].gpu_addr;
 
-	dst_addr = adev->gart.table_addr;
+	dst_addr = adev->sysvm.table_addr;
 	dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
 	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
 				dst_addr, num_bytes);
@@ -1306,7 +1306,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
 
 	dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
 	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
-	r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
+	r = amdgpu_sysvm_map(adev, 0, num_pages, dma_address, flags,
 			    &job->ibs[0].ptr[num_dw]);
 	if (r)
 		goto error_free;
@@ -1522,7 +1522,7 @@ static int ttm_pl_tt = TTM_PL_TT;
 
 static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
 	{"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram},
-	{"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
+	{"amdgpu_sysvm_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
 	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
 #ifdef CONFIG_SWIOTLB
 	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
@@ -1574,9 +1574,9 @@ static const struct file_operations amdgpu_ttm_vram_fops = {
 	.llseek = default_llseek
 };
 
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
+#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
 
-static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
+static ssize_t amdgpu_ttm_sysvm_read(struct file *f, char __user *buf,
 				   size_t size, loff_t *pos)
 {
 	struct amdgpu_device *adev = file_inode(f)->i_private;
@@ -1590,16 +1590,16 @@ static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
 		struct page *page;
 		void *ptr;
 
-		if (p >= adev->gart.num_cpu_pages)
+		if (p >= adev->sysvm.num_cpu_pages)
 			return result;
 
-		page = adev->gart.pages[p];
+		page = adev->sysvm.pages[p];
 		if (page) {
 			ptr = kmap(page);
 			ptr += off;
 
 			r = copy_to_user(buf, ptr, cur_size);
-			kunmap(adev->gart.pages[p]);
+			kunmap(adev->sysvm.pages[p]);
 		} else
 			r = clear_user(buf, cur_size);
 
@@ -1615,9 +1615,9 @@ static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
 	return result;
 }
 
-static const struct file_operations amdgpu_ttm_gtt_fops = {
+static const struct file_operations amdgpu_ttm_sysvm_fops = {
 	.owner = THIS_MODULE,
-	.read = amdgpu_ttm_gtt_read,
+	.read = amdgpu_ttm_sysvm_read,
 	.llseek = default_llseek
 };
 
@@ -1640,12 +1640,12 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
 	i_size_write(ent->d_inode, adev->mc.mc_vram_size);
 	adev->mman.vram = ent;
 
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-	ent = debugfs_create_file("amdgpu_gtt", S_IFREG | S_IRUGO, root,
-				  adev, &amdgpu_ttm_gtt_fops);
+#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
+	ent = debugfs_create_file("amdgpu_sysvm", S_IFREG | S_IRUGO, root,
+				  adev, &amdgpu_ttm_sysvm_fops);
 	if (IS_ERR(ent))
 		return PTR_ERR(ent);
-	i_size_write(ent->d_inode, adev->mc.gtt_size);
+	i_size_write(ent->d_inode, adev->mc.sysvm_size);
 	adev->mman.gtt = ent;
 
 #endif
@@ -1670,7 +1670,7 @@ static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
 	debugfs_remove(adev->mman.vram);
 	adev->mman.vram = NULL;
 
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
+#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
 	debugfs_remove(adev->mman.gtt);
 	adev->mman.gtt = NULL;
 #endif
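
The new amdgpu_map_buffer() path above relies on the transfer windows at the
very start of the SYSVM aperture (AMDGPU_GTT_NUM_TRANSFER_WINDOWS windows of
AMDGPU_GTT_MAX_TRANSFER_SIZE pages each). Two hypothetical helpers, given here
only to spell out the address arithmetic that function uses:

/* GPU virtual address of transfer window 'window' inside the aperture */
static u64 sysvm_window_addr(struct amdgpu_device *adev, unsigned window)
{
	return adev->mc.sysvm_start +
	       (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
	       AMDGPU_GPU_PAGE_SIZE;
}

/* Matching location of that window's PTEs in the SYSVM page table;
 * every entry is 8 bytes. */
static u64 sysvm_window_ptes(struct amdgpu_device *adev, unsigned window)
{
	return adev->sysvm.table_addr +
	       (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
}
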
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 4f5c1da..1443038 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -53,7 +53,7 @@ struct amdgpu_mman {
 	const struct amdgpu_buffer_funcs	*buffer_funcs;
 	struct amdgpu_ring			*buffer_funcs_ring;
 
-	struct mutex				gtt_window_lock;
+	struct mutex				sysvm_window_lock;
 	/* Scheduler entity for buffer moves */
 	struct amd_sched_entity			entity;
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 1d1810d..8dbacec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -982,14 +982,14 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
 		value = params->pages_addr ?
 			amdgpu_vm_map_gart(params->pages_addr, addr) :
 			addr;
-		amdgpu_gart_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
+		amdgpu_sysvm_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
 					i, value, flags);
 		addr += incr;
 	}
 
 	/* Flush HDP */
 	mb();
-	amdgpu_gart_flush_gpu_tlb(params->adev, 0);
+	amdgpu_sysvm_flush_gpu_tlb(params->adev, 0);
 }
 
 static int amdgpu_vm_bo_wait(struct amdgpu_device *adev, struct amdgpu_bo *bo)
@@ -1101,7 +1101,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
 		}
 
 		pt = amdgpu_bo_gpu_offset(bo);
-		pt = amdgpu_gart_get_vm_pde(adev, pt);
+		pt = amdgpu_sysvm_get_vm_pde(adev, pt);
 		if (parent->entries[pt_idx].addr == pt)
 			continue;
 
@@ -1564,7 +1564,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
  *
  * @adev: amdgpu_device pointer
  * @exclusive: fence we need to sync to
- * @gtt_flags: flags as they are used for GTT
+ * @sysvm_flags: flags as they are used in the SYSVM
  * @pages_addr: DMA addresses to use for mapping
  * @vm: requested vm
  * @mapping: mapped range and flags to use for the update
@@ -1578,7 +1578,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
  */
 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 				      struct dma_fence *exclusive,
-				      uint64_t gtt_flags,
+				      uint64_t sysvm_flags,
 				      dma_addr_t *pages_addr,
 				      struct amdgpu_vm *vm,
 				      struct amdgpu_bo_va_mapping *mapping,
@@ -1633,8 +1633,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 		}
 
 		if (pages_addr) {
-			if (flags == gtt_flags)
-				src = adev->gart.table_addr +
+			if (flags == sysvm_flags)
+				src = adev->sysvm.table_addr +
 					(addr >> AMDGPU_GPU_PAGE_SHIFT) * 8;
 			else
 				max_entries = min(max_entries, 16ull * 1024ull);
@@ -1681,7 +1681,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	struct amdgpu_vm *vm = bo_va->vm;
 	struct amdgpu_bo_va_mapping *mapping;
 	dma_addr_t *pages_addr = NULL;
-	uint64_t gtt_flags, flags;
+	uint64_t sysvm_flags, flags;
 	struct ttm_mem_reg *mem;
 	struct drm_mm_node *nodes;
 	struct dma_fence *exclusive;
@@ -1706,12 +1706,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 
 	if (bo_va->bo) {
 		flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
-		gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
+		sysvm_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
 			adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ?
 			flags : 0;
 	} else {
 		flags = 0x0;
-		gtt_flags = ~0x0;
+		sysvm_flags = ~0x0;
 	}
 
 	spin_lock(&vm->status_lock);
@@ -1721,7 +1721,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 
 	list_for_each_entry(mapping, &bo_va->invalids, list) {
 		r = amdgpu_vm_bo_split_mapping(adev, exclusive,
-					       gtt_flags, pages_addr, vm,
+					       sysvm_flags, pages_addr, vm,
 					       mapping, flags, nodes,
 					       &bo_va->last_pt_update);
 		if (r)
@@ -1756,7 +1756,7 @@ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
 
 	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
 	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
-	adev->gart.gart_funcs->set_prt(adev, enable);
+	adev->sysvm.sysvm_funcs->set_prt(adev, enable);
 	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
 }
 
@@ -1765,7 +1765,7 @@ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
  */
 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
 {
-	if (!adev->gart.gart_funcs->set_prt)
+	if (!adev->sysvm.sysvm_funcs->set_prt)
 		return;
 
 	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
@@ -1800,7 +1800,7 @@ static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
 {
 	struct amdgpu_prt_cb *cb;
 
-	if (!adev->gart.gart_funcs->set_prt)
+	if (!adev->sysvm.sysvm_funcs->set_prt)
 		return;
 
 	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
@@ -2498,7 +2498,7 @@ static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level)
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
 	struct amdgpu_bo_va_mapping *mapping, *tmp;
-	bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
+	bool prt_fini_needed = !!adev->sysvm.sysvm_funcs->set_prt;
 	int i;
 
 	amd_sched_entity_fini(vm->entity.sched, &vm->entity);
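
One subtlety in amdgpu_vm_bo_split_mapping() above: when the mapping's flags
equal sysvm_flags, i.e. the pages are already bound into the SYSVM with exactly
these flags, the PTE update is sourced straight from the SYSVM page table
instead of re-translating every DMA address. A sketch of that source address,
under the same assumption:

	/* 'addr' is a byte offset into the SYSVM aperture; with 8-byte PTEs
	 * the already-written entry for it lives at: */
	u64 src = adev->sysvm.table_addr +
		  (addr >> AMDGPU_GPU_PAGE_SHIFT) * 8;
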
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 6986285..708fb84 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -3731,10 +3731,10 @@ static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 {
 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
-	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+	uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
 	unsigned eng = ring->vm_inv_eng;
 
-	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
+	pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
 	pd_addr |= AMDGPU_PTE_VALID;
 
 	gfx_v9_0_write_data_to_reg(ring, usepfp, true,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index a42f483..1290434 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -40,8 +40,8 @@ static void gfxhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
 {
 	uint64_t value;
 
-	BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL));
-	value = adev->gart.table_addr - adev->mc.vram_start
+	BUG_ON(adev->sysvm.table_addr & (~0x0000FFFFFFFFF000ULL));
+	value = adev->sysvm.table_addr - adev->mc.vram_start
 		+ adev->vm_manager.vram_base_offset;
 	value &= 0x0000FFFFFFFFF000ULL;
 	value |= 0x1; /*valid bit*/
@@ -58,14 +58,14 @@ static void gfxhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
 	gfxhub_v1_0_init_gart_pt_regs(adev);
 
 	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
-		     (u32)(adev->mc.gtt_start >> 12));
+		     (u32)(adev->mc.sysvm_start >> 12));
 	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
-		     (u32)(adev->mc.gtt_start >> 44));
+		     (u32)(adev->mc.sysvm_start >> 44));
 
 	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
-		     (u32)(adev->mc.gtt_end >> 12));
+		     (u32)(adev->mc.sysvm_end >> 12));
 	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
-		     (u32)(adev->mc.gtt_end >> 44));
+		     (u32)(adev->mc.sysvm_end >> 44));
 }
 
 static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
@@ -228,7 +228,7 @@ static void gfxhub_v1_0_program_invalidation(struct amdgpu_device *adev)
 	}
 }
 
-int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
+int gfxhub_v1_0_sysvm_enable(struct amdgpu_device *adev)
 {
 	if (amdgpu_sriov_vf(adev)) {
 		/*
@@ -256,7 +256,7 @@ int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
 	return 0;
 }
 
-void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev)
+void gfxhub_v1_0_sysvm_disable(struct amdgpu_device *adev)
 {
 	u32 tmp;
 	u32 i;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
index d2dbb08..d194b7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
@@ -24,8 +24,8 @@
 #ifndef __GFXHUB_V1_0_H__
 #define __GFXHUB_V1_0_H__
 
-int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev);
-void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev);
+int gfxhub_v1_0_sysvm_enable(struct amdgpu_device *adev);
+void gfxhub_v1_0_sysvm_disable(struct amdgpu_device *adev);
 void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
 					  bool value);
 void gfxhub_v1_0_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 5ed6788f..53c3b8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -36,7 +36,7 @@
 #include "dce/dce_6_0_sh_mask.h"
 #include "si_enums.h"
 
-static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev);
+static void gmc_v6_0_set_sysvm_funcs(struct amdgpu_device *adev);
 static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
 static int gmc_v6_0_wait_for_idle(void *handle);
 
@@ -216,7 +216,7 @@ static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
 	return 0;
 }
 
-static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
+static void gmc_v6_0_vram_sysvm_location(struct amdgpu_device *adev,
 				       struct amdgpu_mc *mc)
 {
 	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
@@ -228,8 +228,8 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
 		mc->mc_vram_size = 0xFFC0000000ULL;
 	}
 	amdgpu_vram_location(adev, &adev->mc, base);
-	adev->mc.gtt_base_align = 0;
-	amdgpu_gtt_location(adev, mc);
+	adev->mc.sysvm_base_align = 0;
+	amdgpu_sysvm_location(adev, mc);
 }
 
 static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
@@ -321,8 +321,8 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
 	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
 	adev->mc.visible_vram_size = adev->mc.aper_size;
 
-	amdgpu_gart_set_defaults(adev);
-	gmc_v6_0_vram_gtt_location(adev, &adev->mc);
+	amdgpu_sysvm_set_defaults(adev);
+	gmc_v6_0_vram_sysvm_location(adev, &adev->mc);
 
 	return 0;
 }
@@ -447,15 +447,15 @@ static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
 	}
 }
 
-static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
+static int gmc_v6_0_sysvm_enable(struct amdgpu_device *adev)
 {
 	int r, i;
 
-	if (adev->gart.robj == NULL) {
+	if (adev->sysvm.robj == NULL) {
 		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
 		return -EINVAL;
 	}
-	r = amdgpu_gart_table_vram_pin(adev);
+	r = amdgpu_sysvm_table_vram_pin(adev);
 	if (r)
 		return r;
 	/* Setup TLB control */
@@ -482,9 +482,9 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
 	       (4UL << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
 	       (4UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
 	/* setup context0 */
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.sysvm_start >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.sysvm_end >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->sysvm.table_addr >> 12);
 	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
 			(u32)(adev->dummy_page.addr >> 12));
 	WREG32(mmVM_CONTEXT0_CNTL2, 0);
@@ -508,10 +508,10 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
 	for (i = 1; i < 16; i++) {
 		if (i < 8)
 			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
-			       adev->gart.table_addr >> 12);
+			       adev->sysvm.table_addr >> 12);
 		else
 			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
-			       adev->gart.table_addr >> 12);
+			       adev->sysvm.table_addr >> 12);
 	}
 
 	/* enable context1-15 */
@@ -530,9 +530,9 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
 
 	gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
 	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
-		 (unsigned)(adev->mc.gtt_size >> 20),
-		 (unsigned long long)adev->gart.table_addr);
-	adev->gart.ready = true;
+		 (unsigned)(adev->mc.sysvm_size >> 20),
+		 (unsigned long long)adev->sysvm.table_addr);
+	adev->sysvm.ready = true;
 	return 0;
 }
 
@@ -540,19 +540,19 @@ static int gmc_v6_0_gart_init(struct amdgpu_device *adev)
 {
 	int r;
 
-	if (adev->gart.robj) {
+	if (adev->sysvm.robj) {
 		dev_warn(adev->dev, "gmc_v6_0 PCIE GART already initialized\n");
 		return 0;
 	}
-	r = amdgpu_gart_init(adev);
+	r = amdgpu_sysvm_init(adev);
 	if (r)
 		return r;
-	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
-	adev->gart.gart_pte_flags = 0;
-	return amdgpu_gart_table_vram_alloc(adev);
+	adev->sysvm.table_size = adev->sysvm.num_gpu_pages * 8;
+	adev->sysvm.sysvm_pte_flags = 0;
+	return amdgpu_sysvm_table_vram_alloc(adev);
 }
 
-static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
+static void gmc_v6_0_sysvm_disable(struct amdgpu_device *adev)
 {
 	/*unsigned i;
 
@@ -582,13 +582,13 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
 	WREG32(mmVM_L2_CNTL3,
 	       VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
 	       (0UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
-	amdgpu_gart_table_vram_unpin(adev);
+	amdgpu_sysvm_table_vram_unpin(adev);
 }
 
 static void gmc_v6_0_gart_fini(struct amdgpu_device *adev)
 {
-	amdgpu_gart_table_vram_free(adev);
-	amdgpu_gart_fini(adev);
+	amdgpu_sysvm_table_vram_free(adev);
+	amdgpu_sysvm_fini(adev);
 }
 
 static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
@@ -762,7 +762,7 @@ static int gmc_v6_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	gmc_v6_0_set_gart_funcs(adev);
+	gmc_v6_0_set_sysvm_funcs(adev);
 	gmc_v6_0_set_irq_funcs(adev);
 
 	return 0;
@@ -889,7 +889,7 @@ static int gmc_v6_0_hw_init(void *handle)
 		}
 	}
 
-	r = gmc_v6_0_gart_enable(adev);
+	r = gmc_v6_0_sysvm_enable(adev);
 	if (r)
 		return r;
 
@@ -901,7 +901,7 @@ static int gmc_v6_0_hw_fini(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
-	gmc_v6_0_gart_disable(adev);
+	gmc_v6_0_sysvm_disable(adev);
 
 	return 0;
 }
@@ -1095,7 +1095,7 @@ static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
 	.set_powergating_state = gmc_v6_0_set_powergating_state,
 };
 
-static const struct amdgpu_gart_funcs gmc_v6_0_gart_funcs = {
+static const struct amdgpu_sysvm_funcs gmc_v6_0_sysvm_funcs = {
 	.flush_gpu_tlb = gmc_v6_0_gart_flush_gpu_tlb,
 	.set_pte_pde = gmc_v6_0_gart_set_pte_pde,
 	.set_prt = gmc_v6_0_set_prt,
@@ -1108,10 +1108,10 @@ static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
 	.process = gmc_v6_0_process_interrupt,
 };
 
-static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev)
+static void gmc_v6_0_set_sysvm_funcs(struct amdgpu_device *adev)
 {
-	if (adev->gart.gart_funcs == NULL)
-		adev->gart.gart_funcs = &gmc_v6_0_gart_funcs;
+	if (adev->sysvm.sysvm_funcs == NULL)
+		adev->sysvm.sysvm_funcs = &gmc_v6_0_sysvm_funcs;
 }
 
 static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
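
The table sizing in gmc_v6_0_gart_init() above falls out of the 8-byte PTE
format: one entry per GPU page of SYSVM aperture. A hypothetical helper, only
to make the relationship explicit (the gmc_v7_0 and gmc_v8_0 init paths below
use the same formula):

static u64 sysvm_table_bytes(const struct amdgpu_device *adev)
{
	/* one 8-byte PTE per AMDGPU_GPU_PAGE_SIZE of aperture, i.e.
	 * adev->sysvm.num_gpu_pages * 8 */
	return (adev->mc.sysvm_size / AMDGPU_GPU_PAGE_SIZE) * 8;
}
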
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 15f2c0f..2329bdb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -39,7 +39,7 @@
 
 #include "amdgpu_atombios.h"
 
-static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
+static void gmc_v7_0_set_sysvm_funcs(struct amdgpu_device *adev);
 static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
 static int gmc_v7_0_wait_for_idle(void *handle);
 
@@ -231,7 +231,7 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
 	return 0;
 }
 
-static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
+static void gmc_v7_0_vram_sysvm_location(struct amdgpu_device *adev,
 				       struct amdgpu_mc *mc)
 {
 	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
@@ -244,8 +244,8 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
 		mc->mc_vram_size = 0xFFC0000000ULL;
 	}
 	amdgpu_vram_location(adev, &adev->mc, base);
-	adev->mc.gtt_base_align = 0;
-	amdgpu_gtt_location(adev, mc);
+	adev->mc.sysvm_base_align = 0;
+	amdgpu_sysvm_location(adev, mc);
 }
 
 /**
@@ -373,8 +373,8 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
 	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
 		adev->mc.visible_vram_size = adev->mc.real_vram_size;
 
-	amdgpu_gart_set_defaults(adev);
-	gmc_v7_0_vram_gtt_location(adev, &adev->mc);
+	amdgpu_sysvm_set_defaults(adev);
+	gmc_v7_0_vram_sysvm_location(adev, &adev->mc);
 
 	return 0;
 }
@@ -536,7 +536,7 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
 }
 
 /**
- * gmc_v7_0_gart_enable - gart enable
+ * gmc_v7_0_sysvm_enable - gart enable
  *
  * @adev: amdgpu_device pointer
  *
@@ -546,16 +546,16 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
  * and GPUVM for FSA64 clients (CIK).
  * Returns 0 for success, errors for failure.
  */
-static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
+static int gmc_v7_0_sysvm_enable(struct amdgpu_device *adev)
 {
 	int r, i;
 	u32 tmp;
 
-	if (adev->gart.robj == NULL) {
+	if (adev->sysvm.robj == NULL) {
 		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
 		return -EINVAL;
 	}
-	r = amdgpu_gart_table_vram_pin(adev);
+	r = amdgpu_sysvm_table_vram_pin(adev);
 	if (r)
 		return r;
 	/* Setup TLB control */
@@ -585,9 +585,9 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
 	WREG32(mmVM_L2_CNTL3, tmp);
 	/* setup context0 */
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.sysvm_start >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.sysvm_end >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->sysvm.table_addr >> 12);
 	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
 			(u32)(adev->dummy_page.addr >> 12));
 	WREG32(mmVM_CONTEXT0_CNTL2, 0);
@@ -611,10 +611,10 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
 	for (i = 1; i < 16; i++) {
 		if (i < 8)
 			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
-			       adev->gart.table_addr >> 12);
+			       adev->sysvm.table_addr >> 12);
 		else
 			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
-			       adev->gart.table_addr >> 12);
+			       adev->sysvm.table_addr >> 12);
 	}
 
 	/* enable context1-15 */
@@ -640,9 +640,9 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
 
 	gmc_v7_0_gart_flush_gpu_tlb(adev, 0);
 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
-		 (unsigned)(adev->mc.gtt_size >> 20),
-		 (unsigned long long)adev->gart.table_addr);
-	adev->gart.ready = true;
+		 (unsigned)(adev->mc.sysvm_size >> 20),
+		 (unsigned long long)adev->sysvm.table_addr);
+	adev->sysvm.ready = true;
 	return 0;
 }
 
@@ -650,27 +650,27 @@ static int gmc_v7_0_gart_init(struct amdgpu_device *adev)
 {
 	int r;
 
-	if (adev->gart.robj) {
+	if (adev->sysvm.robj) {
 		WARN(1, "R600 PCIE GART already initialized\n");
 		return 0;
 	}
 	/* Initialize common gart structure */
-	r = amdgpu_gart_init(adev);
+	r = amdgpu_sysvm_init(adev);
 	if (r)
 		return r;
-	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
-	adev->gart.gart_pte_flags = 0;
-	return amdgpu_gart_table_vram_alloc(adev);
+	adev->sysvm.table_size = adev->sysvm.num_gpu_pages * 8;
+	adev->sysvm.sysvm_pte_flags = 0;
+	return amdgpu_sysvm_table_vram_alloc(adev);
 }
 
 /**
- * gmc_v7_0_gart_disable - gart disable
+ * gmc_v7_0_sysvm_disable - gart disable
  *
  * @adev: amdgpu_device pointer
  *
  * This disables all VM page table (CIK).
  */
-static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
+static void gmc_v7_0_sysvm_disable(struct amdgpu_device *adev)
 {
 	u32 tmp;
 
@@ -688,7 +688,7 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
 	WREG32(mmVM_L2_CNTL, tmp);
 	WREG32(mmVM_L2_CNTL2, 0);
-	amdgpu_gart_table_vram_unpin(adev);
+	amdgpu_sysvm_table_vram_unpin(adev);
 }
 
 /**
@@ -700,8 +700,8 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
  */
 static void gmc_v7_0_gart_fini(struct amdgpu_device *adev)
 {
-	amdgpu_gart_table_vram_free(adev);
-	amdgpu_gart_fini(adev);
+	amdgpu_sysvm_table_vram_free(adev);
+	amdgpu_sysvm_fini(adev);
 }
 
 /**
@@ -885,7 +885,7 @@ static int gmc_v7_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	gmc_v7_0_set_gart_funcs(adev);
+	gmc_v7_0_set_sysvm_funcs(adev);
 	gmc_v7_0_set_irq_funcs(adev);
 
 	adev->mc.shared_aperture_start = 0x2000000000000000ULL;
@@ -1036,7 +1036,7 @@ static int gmc_v7_0_hw_init(void *handle)
 		}
 	}
 
-	r = gmc_v7_0_gart_enable(adev);
+	r = gmc_v7_0_sysvm_enable(adev);
 	if (r)
 		return r;
 
@@ -1048,7 +1048,7 @@ static int gmc_v7_0_hw_fini(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
-	gmc_v7_0_gart_disable(adev);
+	gmc_v7_0_sysvm_disable(adev);
 
 	return 0;
 }
@@ -1270,7 +1270,7 @@ static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
 	.set_powergating_state = gmc_v7_0_set_powergating_state,
 };
 
-static const struct amdgpu_gart_funcs gmc_v7_0_gart_funcs = {
+static const struct amdgpu_sysvm_funcs gmc_v7_0_sysvm_funcs = {
 	.flush_gpu_tlb = gmc_v7_0_gart_flush_gpu_tlb,
 	.set_pte_pde = gmc_v7_0_gart_set_pte_pde,
 	.set_prt = gmc_v7_0_set_prt,
@@ -1283,10 +1283,10 @@ static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
 	.process = gmc_v7_0_process_interrupt,
 };
 
-static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev)
+static void gmc_v7_0_set_sysvm_funcs(struct amdgpu_device *adev)
 {
-	if (adev->gart.gart_funcs == NULL)
-		adev->gart.gart_funcs = &gmc_v7_0_gart_funcs;
+	if (adev->sysvm.sysvm_funcs == NULL)
+		adev->sysvm.sysvm_funcs = &gmc_v7_0_sysvm_funcs;
 }
 
 static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 213af65..cf8f8d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -41,7 +41,7 @@
 #include "amdgpu_atombios.h"
 
 
-static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
+static void gmc_v8_0_set_sysvm_funcs(struct amdgpu_device *adev);
 static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
 static int gmc_v8_0_wait_for_idle(void *handle);
 
@@ -393,7 +393,7 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
 	return 0;
 }
 
-static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
+static void gmc_v8_0_vram_sysvm_location(struct amdgpu_device *adev,
 				       struct amdgpu_mc *mc)
 {
 	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
@@ -406,8 +406,8 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
 		mc->mc_vram_size = 0xFFC0000000ULL;
 	}
 	amdgpu_vram_location(adev, &adev->mc, base);
-	adev->mc.gtt_base_align = 0;
-	amdgpu_gtt_location(adev, mc);
+	adev->mc.sysvm_base_align = 0;
+	amdgpu_sysvm_location(adev, mc);
 }
 
 /**
@@ -535,8 +535,8 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
 	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
 		adev->mc.visible_vram_size = adev->mc.real_vram_size;
 
-	amdgpu_gart_set_defaults(adev);
-	gmc_v8_0_vram_gtt_location(adev, &adev->mc);
+	amdgpu_sysvm_set_defaults(adev);
+	gmc_v8_0_vram_sysvm_location(adev, &adev->mc);
 
 	return 0;
 }
@@ -722,7 +722,7 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
 }
 
 /**
- * gmc_v8_0_gart_enable - gart enable
+ * gmc_v8_0_sysvm_enable - gart enable
  *
  * @adev: amdgpu_device pointer
  *
@@ -732,16 +732,16 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
  * and GPUVM for FSA64 clients (CIK).
  * Returns 0 for success, errors for failure.
  */
-static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
+static int gmc_v8_0_sysvm_enable(struct amdgpu_device *adev)
 {
 	int r, i;
 	u32 tmp;
 
-	if (adev->gart.robj == NULL) {
+	if (adev->sysvm.robj == NULL) {
 		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
 		return -EINVAL;
 	}
-	r = amdgpu_gart_table_vram_pin(adev);
+	r = amdgpu_sysvm_table_vram_pin(adev);
 	if (r)
 		return r;
 	/* Setup TLB control */
@@ -787,9 +787,9 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
 	WREG32(mmVM_L2_CNTL4, tmp);
 	/* setup context0 */
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.sysvm_start >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.sysvm_end >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->sysvm.table_addr >> 12);
 	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
 			(u32)(adev->dummy_page.addr >> 12));
 	WREG32(mmVM_CONTEXT0_CNTL2, 0);
@@ -813,10 +813,10 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
 	for (i = 1; i < 16; i++) {
 		if (i < 8)
 			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
-			       adev->gart.table_addr >> 12);
+			       adev->sysvm.table_addr >> 12);
 		else
 			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
-			       adev->gart.table_addr >> 12);
+			       adev->sysvm.table_addr >> 12);
 	}
 
 	/* enable context1-15 */
@@ -843,9 +843,9 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
 
 	gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
-		 (unsigned)(adev->mc.gtt_size >> 20),
-		 (unsigned long long)adev->gart.table_addr);
-	adev->gart.ready = true;
+		 (unsigned)(adev->mc.sysvm_size >> 20),
+		 (unsigned long long)adev->sysvm.table_addr);
+	adev->sysvm.ready = true;
 	return 0;
 }
 
@@ -853,27 +853,27 @@ static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
 {
 	int r;
 
-	if (adev->gart.robj) {
+	if (adev->sysvm.robj) {
 		WARN(1, "R600 PCIE GART already initialized\n");
 		return 0;
 	}
 	/* Initialize common gart structure */
-	r = amdgpu_gart_init(adev);
+	r = amdgpu_sysvm_init(adev);
 	if (r)
 		return r;
-	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
-	adev->gart.gart_pte_flags = AMDGPU_PTE_EXECUTABLE;
-	return amdgpu_gart_table_vram_alloc(adev);
+	adev->sysvm.table_size = adev->sysvm.num_gpu_pages * 8;
+	adev->sysvm.sysvm_pte_flags = AMDGPU_PTE_EXECUTABLE;
+	return amdgpu_sysvm_table_vram_alloc(adev);
 }
 
 /**
- * gmc_v8_0_gart_disable - gart disable
+ * gmc_v8_0_sysvm_disable - gart disable
  *
  * @adev: amdgpu_device pointer
  *
  * This disables all VM page table (CIK).
  */
-static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
+static void gmc_v8_0_sysvm_disable(struct amdgpu_device *adev)
 {
 	u32 tmp;
 
@@ -891,7 +891,7 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
 	WREG32(mmVM_L2_CNTL, tmp);
 	WREG32(mmVM_L2_CNTL2, 0);
-	amdgpu_gart_table_vram_unpin(adev);
+	amdgpu_sysvm_table_vram_unpin(adev);
 }
 
 /**
@@ -903,8 +903,8 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
  */
 static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
 {
-	amdgpu_gart_table_vram_free(adev);
-	amdgpu_gart_fini(adev);
+	amdgpu_sysvm_table_vram_free(adev);
+	amdgpu_sysvm_fini(adev);
 }
 
 /**
@@ -962,7 +962,7 @@ static int gmc_v8_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	gmc_v8_0_set_gart_funcs(adev);
+	gmc_v8_0_set_sysvm_funcs(adev);
 	gmc_v8_0_set_irq_funcs(adev);
 
 	adev->mc.shared_aperture_start = 0x2000000000000000ULL;
@@ -1128,7 +1128,7 @@ static int gmc_v8_0_hw_init(void *handle)
 		}
 	}
 
-	r = gmc_v8_0_gart_enable(adev);
+	r = gmc_v8_0_sysvm_enable(adev);
 	if (r)
 		return r;
 
@@ -1140,7 +1140,7 @@ static int gmc_v8_0_hw_fini(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
-	gmc_v8_0_gart_disable(adev);
+	gmc_v8_0_sysvm_disable(adev);
 
 	return 0;
 }
@@ -1590,7 +1590,7 @@ static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
 	.get_clockgating_state = gmc_v8_0_get_clockgating_state,
 };
 
-static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = {
+static const struct amdgpu_sysvm_funcs gmc_v8_0_sysvm_funcs = {
 	.flush_gpu_tlb = gmc_v8_0_gart_flush_gpu_tlb,
 	.set_pte_pde = gmc_v8_0_gart_set_pte_pde,
 	.set_prt = gmc_v8_0_set_prt,
@@ -1603,10 +1603,10 @@ static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
 	.process = gmc_v8_0_process_interrupt,
 };
 
-static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev)
+static void gmc_v8_0_set_sysvm_funcs(struct amdgpu_device *adev)
 {
-	if (adev->gart.gart_funcs == NULL)
-		adev->gart.gart_funcs = &gmc_v8_0_gart_funcs;
+	if (adev->sysvm.sysvm_funcs == NULL)
+		adev->sysvm.sysvm_funcs = &gmc_v8_0_sysvm_funcs;
 }
 
 static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index dbb43d9..f067465 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -365,7 +365,7 @@ static u64 gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, u64 addr)
 	return addr;
 }
 
-static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
+static const struct amdgpu_sysvm_funcs gmc_v9_0_sysvm_funcs = {
 	.flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
 	.set_pte_pde = gmc_v9_0_gart_set_pte_pde,
 	.get_invalidate_req = gmc_v9_0_get_invalidate_req,
@@ -373,17 +373,17 @@ static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
 	.get_vm_pde = gmc_v9_0_get_vm_pde
 };
 
-static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
+static void gmc_v9_0_set_sysvm_funcs(struct amdgpu_device *adev)
 {
-	if (adev->gart.gart_funcs == NULL)
-		adev->gart.gart_funcs = &gmc_v9_0_gart_funcs;
+	if (adev->sysvm.sysvm_funcs == NULL)
+		adev->sysvm.sysvm_funcs = &gmc_v9_0_sysvm_funcs;
 }
 
 static int gmc_v9_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	gmc_v9_0_set_gart_funcs(adev);
+	gmc_v9_0_set_sysvm_funcs(adev);
 	gmc_v9_0_set_irq_funcs(adev);
 
 	return 0;
@@ -412,15 +412,15 @@ static int gmc_v9_0_late_init(void *handle)
 	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
 }
 
-static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
+static void gmc_v9_0_vram_sysvm_location(struct amdgpu_device *adev,
 					struct amdgpu_mc *mc)
 {
 	u64 base = 0;
 	if (!amdgpu_sriov_vf(adev))
 		base = mmhub_v1_0_get_fb_location(adev);
 	amdgpu_vram_location(adev, &adev->mc, base);
-	adev->mc.gtt_base_align = 0;
-	amdgpu_gtt_location(adev, mc);
+	adev->mc.sysvm_base_align = 0;
+	amdgpu_sysvm_location(adev, mc);
 	/* base offset of vram pages */
 	if (adev->flags & AMD_IS_APU)
 		adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
@@ -494,8 +494,8 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
 	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
 		adev->mc.visible_vram_size = adev->mc.real_vram_size;
 
-	amdgpu_gart_set_defaults(adev);
-	gmc_v9_0_vram_gtt_location(adev, &adev->mc);
+	amdgpu_sysvm_set_defaults(adev);
+	gmc_v9_0_vram_sysvm_location(adev, &adev->mc);
 
 	return 0;
 }
@@ -504,18 +504,18 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
 {
 	int r;
 
-	if (adev->gart.robj) {
+	if (adev->sysvm.robj) {
 		WARN(1, "VEGA10 PCIE GART already initialized\n");
 		return 0;
 	}
 	/* Initialize common gart structure */
-	r = amdgpu_gart_init(adev);
+	r = amdgpu_sysvm_init(adev);
 	if (r)
 		return r;
-	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
-	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
+	adev->sysvm.table_size = adev->sysvm.num_gpu_pages * 8;
+	adev->sysvm.sysvm_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
 				 AMDGPU_PTE_EXECUTABLE;
-	return amdgpu_gart_table_vram_alloc(adev);
+	return amdgpu_sysvm_table_vram_alloc(adev);
 }
 
 static int gmc_v9_0_sw_init(void *handle)
@@ -640,8 +640,8 @@ static int gmc_v9_0_sw_init(void *handle)
  */
 static void gmc_v9_0_gart_fini(struct amdgpu_device *adev)
 {
-	amdgpu_gart_table_vram_free(adev);
-	amdgpu_gart_fini(adev);
+	amdgpu_sysvm_table_vram_free(adev);
+	amdgpu_sysvm_fini(adev);
 }
 
 static int gmc_v9_0_sw_fini(void *handle)
@@ -669,11 +669,11 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
 }
 
 /**
- * gmc_v9_0_gart_enable - gart enable
+ * gmc_v9_0_sysvm_enable - gart enable
  *
  * @adev: amdgpu_device pointer
  */
-static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
+static int gmc_v9_0_sysvm_enable(struct amdgpu_device *adev)
 {
 	int r;
 	bool value;
@@ -683,11 +683,11 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
 		golden_settings_vega10_hdp,
 		(const u32)ARRAY_SIZE(golden_settings_vega10_hdp));
 
-	if (adev->gart.robj == NULL) {
+	if (adev->sysvm.robj == NULL) {
 		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
 		return -EINVAL;
 	}
-	r = amdgpu_gart_table_vram_pin(adev);
+	r = amdgpu_sysvm_table_vram_pin(adev);
 	if (r)
 		return r;
 
@@ -706,11 +706,11 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
 		break;
 	}
 
-	r = gfxhub_v1_0_gart_enable(adev);
+	r = gfxhub_v1_0_sysvm_enable(adev);
 	if (r)
 		return r;
 
-	r = mmhub_v1_0_gart_enable(adev);
+	r = mmhub_v1_0_sysvm_enable(adev);
 	if (r)
 		return r;
 
@@ -733,9 +733,9 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
 	gmc_v9_0_gart_flush_gpu_tlb(adev, 0);
 
 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
-		 (unsigned)(adev->mc.gtt_size >> 20),
-		 (unsigned long long)adev->gart.table_addr);
-	adev->gart.ready = true;
+		 (unsigned)(adev->mc.sysvm_size >> 20),
+		 (unsigned long long)adev->sysvm.table_addr);
+	adev->sysvm.ready = true;
 	return 0;
 }
 
@@ -747,23 +747,23 @@ static int gmc_v9_0_hw_init(void *handle)
 	/* The sequence of these two function calls matters.*/
 	gmc_v9_0_init_golden_registers(adev);
 
-	r = gmc_v9_0_gart_enable(adev);
+	r = gmc_v9_0_sysvm_enable(adev);
 
 	return r;
 }
 
 /**
- * gmc_v9_0_gart_disable - gart disable
+ * gmc_v9_0_sysvm_disable - gart disable
  *
  * @adev: amdgpu_device pointer
  *
  * This disables all VM page table.
  */
-static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
+static void gmc_v9_0_sysvm_disable(struct amdgpu_device *adev)
 {
-	gfxhub_v1_0_gart_disable(adev);
-	mmhub_v1_0_gart_disable(adev);
-	amdgpu_gart_table_vram_unpin(adev);
+	gfxhub_v1_0_sysvm_disable(adev);
+	mmhub_v1_0_sysvm_disable(adev);
+	amdgpu_sysvm_table_vram_unpin(adev);
 }
 
 static int gmc_v9_0_hw_fini(void *handle)
@@ -777,7 +777,7 @@ static int gmc_v9_0_hw_fini(void *handle)
 	}
 
 	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
-	gmc_v9_0_gart_disable(adev);
+	gmc_v9_0_sysvm_disable(adev);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 9804318..fbc8f6e 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -51,8 +51,8 @@ static void mmhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
 {
 	uint64_t value;
 
-	BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL));
-	value = adev->gart.table_addr - adev->mc.vram_start +
+	BUG_ON(adev->sysvm.table_addr & (~0x0000FFFFFFFFF000ULL));
+	value = adev->sysvm.table_addr - adev->mc.vram_start +
 		adev->vm_manager.vram_base_offset;
 	value &= 0x0000FFFFFFFFF000ULL;
 	value |= 0x1; /* valid bit */
@@ -69,14 +69,14 @@ static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
 	mmhub_v1_0_init_gart_pt_regs(adev);
 
 	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
-		     (u32)(adev->mc.gtt_start >> 12));
+		     (u32)(adev->mc.sysvm_start >> 12));
 	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
-		     (u32)(adev->mc.gtt_start >> 44));
+		     (u32)(adev->mc.sysvm_start >> 44));
 
 	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
-		     (u32)(adev->mc.gtt_end >> 12));
+		     (u32)(adev->mc.sysvm_end >> 12));
 	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
-		     (u32)(adev->mc.gtt_end >> 44));
+		     (u32)(adev->mc.sysvm_end >> 44));
 }
 
 static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
@@ -462,7 +462,7 @@ void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
 	}
 }
 
-int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
+int mmhub_v1_0_sysvm_enable(struct amdgpu_device *adev)
 {
 	if (amdgpu_sriov_vf(adev)) {
 		/*
@@ -490,7 +490,7 @@ int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
 	return 0;
 }
 
-void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
+void mmhub_v1_0_sysvm_disable(struct amdgpu_device *adev)
 {
 	u32 tmp;
 	u32 i;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
index 57bb940..23128e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
@@ -24,8 +24,8 @@
 #define __MMHUB_V1_0_H__
 
 u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev);
-int mmhub_v1_0_gart_enable(struct amdgpu_device *adev);
-void mmhub_v1_0_gart_disable(struct amdgpu_device *adev);
+int mmhub_v1_0_sysvm_enable(struct amdgpu_device *adev);
+void mmhub_v1_0_sysvm_disable(struct amdgpu_device *adev);
 void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
 					 bool value);
 void mmhub_v1_0_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 4a65697..056b169 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1121,10 +1121,10 @@ static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 					 unsigned vm_id, uint64_t pd_addr)
 {
 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+	uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
 	unsigned eng = ring->vm_inv_eng;
 
-	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
+	pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
 	pd_addr |= AMDGPU_PTE_VALID;
 
 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 987b958..95913fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -1312,11 +1312,11 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 					unsigned vm_id, uint64_t pd_addr)
 {
 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+	uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
 	uint32_t data0, data1, mask;
 	unsigned eng = ring->vm_inv_eng;
 
-	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
+	pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
 	pd_addr |= AMDGPU_PTE_VALID;
 
 	data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
@@ -1353,10 +1353,10 @@ static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
 			 unsigned int vm_id, uint64_t pd_addr)
 {
 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+	uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
 	unsigned eng = ring->vm_inv_eng;
 
-	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
+	pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
 	pd_addr |= AMDGPU_PTE_VALID;
 
 	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 1ecd6bb..b869423 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -952,10 +952,10 @@ static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
 			 unsigned int vm_id, uint64_t pd_addr)
 {
 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+	uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
 	unsigned eng = ring->vm_inv_eng;
 
-	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
+	pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
 	pd_addr |= AMDGPU_PTE_VALID;
 
 	amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 21e7b88..2ca49af 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -878,11 +878,11 @@ static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
 					unsigned vm_id, uint64_t pd_addr)
 {
 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+	uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
 	uint32_t data0, data1, mask;
 	unsigned eng = ring->vm_inv_eng;
 
-	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
+	pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
 	pd_addr |= AMDGPU_PTE_VALID;
 
 	data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
@@ -1010,10 +1010,10 @@ static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
 			 unsigned int vm_id, uint64_t pd_addr)
 {
 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+	uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
 	unsigned eng = ring->vm_inv_eng;
 
-	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
+	pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
 	pd_addr |= AMDGPU_PTE_VALID;
 
 	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
-- 
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 30+ messages in thread

* Re: [PATCH 07/11] drm/amdgpu: rename GART to SYSVM
       [not found]         ` <MWHPR1201MB0206D4883B42434777D43C12B4D70-3iK1xFAIwjrUF/YbdlDdgWrFom/aUZj6nBOFsp37pqbUKgpGm//BTAC/G2K4zDHf@public.gmane.org>
@ 2017-07-04  7:43           ` Christian König
       [not found]             ` <b838bbac-38df-5ee4-5447-9edba988ea8a-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
  0 siblings, 1 reply; 30+ messages in thread
From: Christian König @ 2017-07-04  7:43 UTC (permalink / raw)
  To: Zhou, David(ChunMing), amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

> Maybe we just need to rename the GART functions to SYSVM.
That is essentially what I did.

In recent discussions we have run into the problem a couple of times 
that we need to distinguish between the system VM and the GART/GTT 
domain from TTM.

See, starting with R6xx I think we used the system VM to implement the 
old GART functionality from the AGP days.

The name stayed the same over the years, but we ran into the problem 
that we wanted to reduce the size of the system VM to save VRAM while 
still having a large GTT domain in TTM to map all system memory at the 
same time.

So it is intentional that the TTM parts stay the same.
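
To put some rough numbers on it, here is a small standalone illustration 
(not driver code; the sizes below are made-up examples, only the 8 bytes 
of page table per GPU page comes from the gart/sysvm init code):

#include <stdio.h>

#define GPU_PAGE_SIZE 4096ULL	/* AMDGPU_GPU_PAGE_SIZE */

int main(void)
{
	/* example sizes only, not defaults proposed anywhere in this series */
	unsigned long long sysvm_size = 256ULL << 20;	/* small system VM */
	unsigned long long gtt_size = 64ULL << 30;	/* large TTM GTT domain */

	/* the system VM needs a real page table in VRAM,
	 * 8 bytes per GPU page (table_size = num_gpu_pages * 8) */
	unsigned long long table_size = sysvm_size / GPU_PAGE_SIZE * 8;

	/* the TTM GTT domain is only an address space used for buffer
	 * placement and needs no page table of its own in VRAM */
	printf("system VM page table: %llu KiB of VRAM\n", table_size >> 10);
	printf("TTM GTT domain: %llu GiB of address space, no VRAM cost\n",
	       gtt_size >> 30);
	return 0;
}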

Regards,
Christian.

On 04.07.2017 at 05:13, Zhou, David(ChunMing) wrote:
> Distinguishing the system VM and the general VM is a good idea, but I'm not sure about the part renaming GTT to sysvm, especially since TTM TT stays there. Maybe we just need to rename the GART functions to SYSVM.
>
> Regards,
> David Zhou
>
> -----Original Message-----
> From: amd-gfx [mailto:amd-gfx-bounces@lists.freedesktop.org] On Behalf Of Christian König
> Sent: Monday, July 03, 2017 5:45 PM
> To: amd-gfx@lists.freedesktop.org
> Subject: [PATCH 07/11] drm/amdgpu: rename GART to SYSVM
>
> From: Christian König <christian.koenig@amd.com>
>
> Just mass rename all names related to the hardware GART/GTT functions to SYSVM.
>
> The name of symbols related to the TTM TT domain stay the same.
>
> This should improve the distinction between the two.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 07/11] drm/amdgpu: rename GART to SYSVM
       [not found]             ` <b838bbac-38df-5ee4-5447-9edba988ea8a-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
@ 2017-07-04  7:59               ` zhoucm1
  0 siblings, 0 replies; 30+ messages in thread
From: zhoucm1 @ 2017-07-04  7:59 UTC (permalink / raw)
  To: Christian König, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

I have no strong opinion on that; this patch is Acked-by: Chunming 
Zhou <david1.zhou@amd.com>

On 2017-07-04 15:43, Christian König wrote:
>> Maybe we just need to rename the GART functions to SYSVM.
> That is essentially what I did.
>
> In recent discussions we have run into the problem a couple of times 
> that we need to distinguish between the system VM and the GART/GTT 
> domain from TTM.
>
> See, starting with R6xx I think we used the system VM to implement the 
> old GART functionality from the AGP days.
>
> The name stayed the same over the years, but we ran into the problem 
> that we wanted to reduce the size of the system VM to save VRAM while 
> still having a large GTT domain in TTM to map all system memory at the 
> same time.
>
> So it is intentional that the TTM parts stay the same.
>
> Regards,
> Christian.
>
> Am 04.07.2017 um 05:13 schrieb Zhou, David(ChunMing):
>> Distinguishing the system VM and the general VM is a good idea, but I'm 
>> not sure about the part renaming GTT to sysvm, especially since TTM TT 
>> stays there. Maybe we just need to rename the GART functions to SYSVM.
>>
>> Regards,
>> David Zhou
>>
>> -----Original Message-----
>> From: amd-gfx [mailto:amd-gfx-bounces@lists.freedesktop.org] On 
>> Behalf Of Christian König
>> Sent: Monday, July 03, 2017 5:45 PM
>> To: amd-gfx@lists.freedesktop.org
>> Subject: [PATCH 07/11] drm/amdgpu: rename GART to SYSVM
>>
>> From: Christian König <christian.koenig@amd.com>
>>
>> Just mass rename all names related to the hardware GART/GTT functions 
>> to SYSVM.
>>
>> The name of symbols related to the TTM TT domain stay the same.
>>
>> This should improve the distinction between the two.
>>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
>> ---
>

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 07/11] drm/amdgpu: rename GART to SYSVM
       [not found]     ` <1499075076-1851-7-git-send-email-deathsimple-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
  2017-07-04  3:13       ` Zhou, David(ChunMing)
@ 2017-07-04  8:09       ` Huang Rui
  2017-07-04  8:46         ` Christian König
  2017-07-04 21:11       ` Felix Kuehling
  2 siblings, 1 reply; 30+ messages in thread
From: Huang Rui @ 2017-07-04  8:09 UTC (permalink / raw)
  To: Christian König; +Cc: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

On Mon, Jul 03, 2017 at 11:44:32AM +0200, Christian König wrote:
> From: Christian König <christian.koenig@amd.com>
> 
> Just mass rename all names related to the hardware GART/GTT functions to SYSVM.
> 
> The name of symbols related to the TTM TT domain stay the same.
> 
> This should improve the distinction between the two.

Christian, we are just renaming the GART_DEBUGFS interface, right?
You know, amdgpu_gtt_mm and amdgpu_gtt are very useful when we are
debugging and checking the content of GART memory. Is there any functional
change in this patch set?

Thanks,
Ray

> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/gpu/drm/amd/amdgpu/Kconfig         |   9 +-
>  drivers/gpu/drm/amd/amdgpu/Makefile        |   2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu.h        |  58 ++--
>  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  48 ++--
>  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c   | 423 -----------------------------
>  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c    |   2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c  | 423 +++++++++++++++++++++++++++++
>  drivers/gpu/drm/amd/amdgpu/amdgpu_test.c   |  84 +++---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c    |  76 +++---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h    |   2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c     |  30 +-
>  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c      |   4 +-
>  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c   |  16 +-
>  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h   |   4 +-
>  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c      |  66 ++---
>  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c      |  70 ++---
>  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c      |  70 ++---
>  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c      |  66 ++---
>  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c    |  16 +-
>  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h    |   4 +-
>  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c     |   4 +-
>  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c      |   8 +-
>  drivers/gpu/drm/amd/amdgpu/vce_v4_0.c      |   4 +-
>  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c      |   8 +-
>  24 files changed, 749 insertions(+), 748 deletions(-)
>  delete mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
>  create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
> index e8af1f5..ebbac01 100644
> --- a/drivers/gpu/drm/amd/amdgpu/Kconfig
> +++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
> @@ -31,14 +31,15 @@ config DRM_AMDGPU_USERPTR
>  	  This option selects CONFIG_MMU_NOTIFIER if it isn't already
>  	  selected to enabled full userptr support.
>  
> -config DRM_AMDGPU_GART_DEBUGFS
> -	bool "Allow GART access through debugfs"
> +config DRM_AMDGPU_SYSVM_DEBUGFS
> +	bool "Allow SYSVM access through debugfs"
>  	depends on DRM_AMDGPU
>  	depends on DEBUG_FS
>  	default n
>  	help
> -	  Selecting this option creates a debugfs file to inspect the mapped
> -	  pages. Uses more memory for housekeeping, enable only for debugging.
> +	  Selecting this option creates a debugfs file to inspect the SYSVM
> +	  mapped pages. Uses more memory for housekeeping, enable only for
> +	  debugging.
>  
>  source "drivers/gpu/drm/amd/acp/Kconfig"
>  source "drivers/gpu/drm/amd/display/Kconfig"
> diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
> index 3661110..d80d49f 100644
> --- a/drivers/gpu/drm/amd/amdgpu/Makefile
> +++ b/drivers/gpu/drm/amd/amdgpu/Makefile
> @@ -22,7 +22,7 @@ amdgpu-y := amdgpu_drv.o
>  # add KMS driver
>  amdgpu-y += amdgpu_device.o amdgpu_kms.o \
>  	amdgpu_atombios.o atombios_crtc.o amdgpu_connectors.o \
> -	atom.o amdgpu_fence.o amdgpu_ttm.o amdgpu_object.o amdgpu_gart.o \
> +	atom.o amdgpu_fence.o amdgpu_ttm.o amdgpu_object.o amdgpu_sysvm.o \
>  	amdgpu_encoders.o amdgpu_display.o amdgpu_i2c.o \
>  	amdgpu_fb.o amdgpu_gem.o amdgpu_ring.o \
>  	amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o amdgpu_test.o \
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index 4a2b33d..abe191f 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -305,7 +305,7 @@ struct amdgpu_vm_pte_funcs {
>  };
>  
>  /* provided by the gmc block */
> -struct amdgpu_gart_funcs {
> +struct amdgpu_sysvm_funcs {
>  	/* flush the vm tlb via mmio */
>  	void (*flush_gpu_tlb)(struct amdgpu_device *adev,
>  			      uint32_t vmid);
> @@ -543,39 +543,39 @@ struct amdgpu_mc;
>  #define AMDGPU_GPU_PAGE_SHIFT 12
>  #define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
>  
> -struct amdgpu_gart {
> +struct amdgpu_sysvm {
>  	dma_addr_t			table_addr;
>  	struct amdgpu_bo		*robj;
>  	void				*ptr;
>  	unsigned			num_gpu_pages;
>  	unsigned			num_cpu_pages;
>  	unsigned			table_size;
> -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
> +#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
>  	struct page			**pages;
>  #endif
>  	bool				ready;
>  
>  	/* Asic default pte flags */
> -	uint64_t			gart_pte_flags;
> +	uint64_t			sysvm_pte_flags;
>  
> -	const struct amdgpu_gart_funcs *gart_funcs;
> +	const struct amdgpu_sysvm_funcs *sysvm_funcs;
>  };
>  
> -void amdgpu_gart_set_defaults(struct amdgpu_device *adev);
> -int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
> -void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
> -int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
> -void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
> -int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
> -void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
> -int amdgpu_gart_init(struct amdgpu_device *adev);
> -void amdgpu_gart_fini(struct amdgpu_device *adev);
> -int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
> +void amdgpu_sysvm_set_defaults(struct amdgpu_device *adev);
> +int amdgpu_sysvm_table_ram_alloc(struct amdgpu_device *adev);
> +void amdgpu_sysvm_table_ram_free(struct amdgpu_device *adev);
> +int amdgpu_sysvm_table_vram_alloc(struct amdgpu_device *adev);
> +void amdgpu_sysvm_table_vram_free(struct amdgpu_device *adev);
> +int amdgpu_sysvm_table_vram_pin(struct amdgpu_device *adev);
> +void amdgpu_sysvm_table_vram_unpin(struct amdgpu_device *adev);
> +int amdgpu_sysvm_init(struct amdgpu_device *adev);
> +void amdgpu_sysvm_fini(struct amdgpu_device *adev);
> +int amdgpu_sysvm_unbind(struct amdgpu_device *adev, uint64_t offset,
>  			int pages);
> -int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
> +int amdgpu_sysvm_map(struct amdgpu_device *adev, uint64_t offset,
>  		    int pages, dma_addr_t *dma_addr, uint64_t flags,
>  		    void *dst);
> -int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
> +int amdgpu_sysvm_bind(struct amdgpu_device *adev, uint64_t offset,
>  		     int pages, struct page **pagelist,
>  		     dma_addr_t *dma_addr, uint64_t flags);
>  int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
> @@ -604,15 +604,15 @@ struct amdgpu_mc {
>  	 * about vram size near mc fb location */
>  	u64			mc_vram_size;
>  	u64			visible_vram_size;
> -	u64			gtt_size;
> -	u64			gtt_start;
> -	u64			gtt_end;
> +	u64			sysvm_size;
> +	u64			sysvm_start;
> +	u64			sysvm_end;
>  	u64			vram_start;
>  	u64			vram_end;
>  	unsigned		vram_width;
>  	u64			real_vram_size;
>  	int			vram_mtrr;
> -	u64                     gtt_base_align;
> +	u64                     sysvm_base_align;
>  	u64                     mc_mask;
>  	const struct firmware   *fw;	/* MC firmware */
>  	uint32_t                fw_version;
> @@ -1575,7 +1575,7 @@ struct amdgpu_device {
>  
>  	/* MC */
>  	struct amdgpu_mc		mc;
> -	struct amdgpu_gart		gart;
> +	struct amdgpu_sysvm		sysvm;
>  	struct amdgpu_dummy_page	dummy_page;
>  	struct amdgpu_vm_manager	vm_manager;
>  	struct amdgpu_vmhub             vmhub[AMDGPU_MAX_VMHUBS];
> @@ -1686,8 +1686,8 @@ struct amdgpu_device {
>  	struct list_head                shadow_list;
>  	struct mutex                    shadow_list_lock;
>  	/* link all gtt */
> -	spinlock_t			gtt_list_lock;
> -	struct list_head                gtt_list;
> +	spinlock_t			sysvm_list_lock;
> +	struct list_head                sysvm_list;
>  	/* keep an lru list of rings by HW IP */
>  	struct list_head		ring_lru_list;
>  	spinlock_t			ring_lru_list_lock;
> @@ -1835,13 +1835,13 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
>  #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
>  #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
>  #define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
> -#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
> -#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
> -#define amdgpu_gart_get_vm_pde(adev, addr) (adev)->gart.gart_funcs->get_vm_pde((adev), (addr))
> +#define amdgpu_sysvm_flush_gpu_tlb(adev, vmid) (adev)->sysvm.sysvm_funcs->flush_gpu_tlb((adev), (vmid))
> +#define amdgpu_sysvm_set_pte_pde(adev, pt, idx, addr, flags) (adev)->sysvm.sysvm_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
> +#define amdgpu_sysvm_get_vm_pde(adev, addr) (adev)->sysvm.sysvm_funcs->get_vm_pde((adev), (addr))
>  #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
>  #define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
>  #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
> -#define amdgpu_vm_get_pte_flags(adev, flags) (adev)->gart.gart_funcs->get_vm_pte_flags((adev),(flags))
> +#define amdgpu_vm_get_pte_flags(adev, flags) (adev)->sysvm.sysvm_funcs->get_vm_pte_flags((adev),(flags))
>  #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
>  #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
>  #define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
> @@ -1908,7 +1908,7 @@ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
>  uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
>  				 struct ttm_mem_reg *mem);
>  void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
> -void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
> +void amdgpu_sysvm_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
>  void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
>  int amdgpu_ttm_init(struct amdgpu_device *adev);
>  void amdgpu_ttm_fini(struct amdgpu_device *adev);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index 5b1220f..46a82d3 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -667,39 +667,39 @@ void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64
>  }
>  
>  /**
> - * amdgpu_gtt_location - try to find GTT location
> + * amdgpu_sysvm_location - try to find SYSVM location
>   * @adev: amdgpu device structure holding all necessary informations
>   * @mc: memory controller structure holding memory informations
>   *
> - * Function will place try to place GTT before or after VRAM.
> + * Function will place try to place SYSVM before or after VRAM.
>   *
> - * If GTT size is bigger than space left then we ajust GTT size.
> + * If SYSVM size is bigger than space left then we ajust SYSVM size.
>   * Thus function will never fails.
>   *
> - * FIXME: when reducing GTT size align new size on power of 2.
> + * FIXME: when reducing SYSVM size align new size on power of 2.
>   */
> -void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
> +void amdgpu_sysvm_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
>  {
>  	u64 size_af, size_bf;
>  
> -	size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
> -	size_bf = mc->vram_start & ~mc->gtt_base_align;
> +	size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->sysvm_base_align) & ~mc->sysvm_base_align;
> +	size_bf = mc->vram_start & ~mc->sysvm_base_align;
>  	if (size_bf > size_af) {
> -		if (mc->gtt_size > size_bf) {
> -			dev_warn(adev->dev, "limiting GTT\n");
> -			mc->gtt_size = size_bf;
> +		if (mc->sysvm_size > size_bf) {
> +			dev_warn(adev->dev, "limiting SYSVM\n");
> +			mc->sysvm_size = size_bf;
>  		}
> -		mc->gtt_start = 0;
> +		mc->sysvm_start = 0;
>  	} else {
> -		if (mc->gtt_size > size_af) {
> -			dev_warn(adev->dev, "limiting GTT\n");
> -			mc->gtt_size = size_af;
> +		if (mc->sysvm_size > size_af) {
> +			dev_warn(adev->dev, "limiting SYSVM\n");
> +			mc->sysvm_size = size_af;
>  		}
> -		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
> +		mc->sysvm_start = (mc->vram_end + 1 + mc->sysvm_base_align) & ~mc->sysvm_base_align;
>  	}
> -	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
> -	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
> -			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
> +	mc->sysvm_end = mc->sysvm_start + mc->sysvm_size - 1;
> +	dev_info(adev->dev, "SYSVM: %lluM 0x%016llX - 0x%016llX\n",
> +			mc->sysvm_size >> 20, mc->sysvm_start, mc->sysvm_end);
>  }
>  
>  /*
> @@ -1663,12 +1663,12 @@ static int amdgpu_init(struct amdgpu_device *adev)
>  
>  static void amdgpu_fill_reset_magic(struct amdgpu_device *adev)
>  {
> -	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
> +	memcpy(adev->reset_magic, adev->sysvm.ptr, AMDGPU_RESET_MAGIC_NUM);
>  }
>  
>  static bool amdgpu_check_vram_lost(struct amdgpu_device *adev)
>  {
> -	return !!memcmp(adev->gart.ptr, adev->reset_magic,
> +	return !!memcmp(adev->sysvm.ptr, adev->reset_magic,
>  			AMDGPU_RESET_MAGIC_NUM);
>  }
>  
> @@ -2052,14 +2052,14 @@ int amdgpu_device_init(struct amdgpu_device *adev,
>  	adev->flags = flags;
>  	adev->asic_type = flags & AMD_ASIC_MASK;
>  	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
> -	adev->mc.gtt_size = 512 * 1024 * 1024;
> +	adev->mc.sysvm_size = 512 * 1024 * 1024;
>  	adev->accel_working = false;
>  	adev->num_rings = 0;
>  	adev->mman.buffer_funcs = NULL;
>  	adev->mman.buffer_funcs_ring = NULL;
>  	adev->vm_manager.vm_pte_funcs = NULL;
>  	adev->vm_manager.vm_pte_num_rings = 0;
> -	adev->gart.gart_funcs = NULL;
> +	adev->sysvm.sysvm_funcs = NULL;
>  	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
>  
>  	adev->smc_rreg = &amdgpu_invalid_rreg;
> @@ -2110,8 +2110,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
>  	INIT_LIST_HEAD(&adev->shadow_list);
>  	mutex_init(&adev->shadow_list_lock);
>  
> -	INIT_LIST_HEAD(&adev->gtt_list);
> -	spin_lock_init(&adev->gtt_list_lock);
> +	INIT_LIST_HEAD(&adev->sysvm_list);
> +	spin_lock_init(&adev->sysvm_list_lock);
>  
>  	INIT_LIST_HEAD(&adev->ring_lru_list);
>  	spin_lock_init(&adev->ring_lru_list_lock);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
> deleted file mode 100644
> index c808388..0000000
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
> +++ /dev/null
> @@ -1,423 +0,0 @@
> -/*
> - * Copyright 2008 Advanced Micro Devices, Inc.
> - * Copyright 2008 Red Hat Inc.
> - * Copyright 2009 Jerome Glisse.
> - *
> - * Permission is hereby granted, free of charge, to any person obtaining a
> - * copy of this software and associated documentation files (the "Software"),
> - * to deal in the Software without restriction, including without limitation
> - * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> - * and/or sell copies of the Software, and to permit persons to whom the
> - * Software is furnished to do so, subject to the following conditions:
> - *
> - * The above copyright notice and this permission notice shall be included in
> - * all copies or substantial portions of the Software.
> - *
> - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
> - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
> - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
> - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
> - * OTHER DEALINGS IN THE SOFTWARE.
> - *
> - * Authors: Dave Airlie
> - *          Alex Deucher
> - *          Jerome Glisse
> - */
> -#include <drm/drmP.h>
> -#include <drm/amdgpu_drm.h>
> -#include "amdgpu.h"
> -
> -/*
> - * GART
> - * The GART (Graphics Aperture Remapping Table) is an aperture
> - * in the GPU's address space.  System pages can be mapped into
> - * the aperture and look like contiguous pages from the GPU's
> - * perspective.  A page table maps the pages in the aperture
> - * to the actual backing pages in system memory.
> - *
> - * Radeon GPUs support both an internal GART, as described above,
> - * and AGP.  AGP works similarly, but the GART table is configured
> - * and maintained by the northbridge rather than the driver.
> - * Radeon hw has a separate AGP aperture that is programmed to
> - * point to the AGP aperture provided by the northbridge and the
> - * requests are passed through to the northbridge aperture.
> - * Both AGP and internal GART can be used at the same time, however
> - * that is not currently supported by the driver.
> - *
> - * This file handles the common internal GART management.
> - */
> -
> -/*
> - * Common GART table functions.
> - */
> -
> -/**
> - * amdgpu_gart_set_defaults - set the default gtt_size
> - *
> - * @adev: amdgpu_device pointer
> - *
> - * Set the default gtt_size based on parameters and available VRAM.
> - */
> -void amdgpu_gart_set_defaults(struct amdgpu_device *adev)
> -{
> -	/* unless the user had overridden it, set the gart
> -	 * size equal to the 1024 or vram, whichever is larger.
> -	 */
> -	if (amdgpu_gart_size == -1)
> -		adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
> -					adev->mc.mc_vram_size);
> -	else
> -		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
> -}
> -
> -/**
> - * amdgpu_gart_table_ram_alloc - allocate system ram for gart page table
> - *
> - * @adev: amdgpu_device pointer
> - *
> - * Allocate system memory for GART page table
> - * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
> - * gart table to be in system memory.
> - * Returns 0 for success, -ENOMEM for failure.
> - */
> -int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev)
> -{
> -	void *ptr;
> -
> -	ptr = pci_alloc_consistent(adev->pdev, adev->gart.table_size,
> -				   &adev->gart.table_addr);
> -	if (ptr == NULL) {
> -		return -ENOMEM;
> -	}
> -#ifdef CONFIG_X86
> -	if (0) {
> -		set_memory_uc((unsigned long)ptr,
> -			      adev->gart.table_size >> PAGE_SHIFT);
> -	}
> -#endif
> -	adev->gart.ptr = ptr;
> -	memset((void *)adev->gart.ptr, 0, adev->gart.table_size);
> -	return 0;
> -}
> -
> -/**
> - * amdgpu_gart_table_ram_free - free system ram for gart page table
> - *
> - * @adev: amdgpu_device pointer
> - *
> - * Free system memory for GART page table
> - * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
> - * gart table to be in system memory.
> - */
> -void amdgpu_gart_table_ram_free(struct amdgpu_device *adev)
> -{
> -	if (adev->gart.ptr == NULL) {
> -		return;
> -	}
> -#ifdef CONFIG_X86
> -	if (0) {
> -		set_memory_wb((unsigned long)adev->gart.ptr,
> -			      adev->gart.table_size >> PAGE_SHIFT);
> -	}
> -#endif
> -	pci_free_consistent(adev->pdev, adev->gart.table_size,
> -			    (void *)adev->gart.ptr,
> -			    adev->gart.table_addr);
> -	adev->gart.ptr = NULL;
> -	adev->gart.table_addr = 0;
> -}
> -
> -/**
> - * amdgpu_gart_table_vram_alloc - allocate vram for gart page table
> - *
> - * @adev: amdgpu_device pointer
> - *
> - * Allocate video memory for GART page table
> - * (pcie r4xx, r5xx+).  These asics require the
> - * gart table to be in video memory.
> - * Returns 0 for success, error for failure.
> - */
> -int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
> -{
> -	int r;
> -
> -	if (adev->gart.robj == NULL) {
> -		r = amdgpu_bo_create(adev, adev->gart.table_size,
> -				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
> -				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
> -				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
> -				     NULL, NULL, &adev->gart.robj);
> -		if (r) {
> -			return r;
> -		}
> -	}
> -	return 0;
> -}
> -
> -/**
> - * amdgpu_gart_table_vram_pin - pin gart page table in vram
> - *
> - * @adev: amdgpu_device pointer
> - *
> - * Pin the GART page table in vram so it will not be moved
> - * by the memory manager (pcie r4xx, r5xx+).  These asics require the
> - * gart table to be in video memory.
> - * Returns 0 for success, error for failure.
> - */
> -int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
> -{
> -	uint64_t gpu_addr;
> -	int r;
> -
> -	r = amdgpu_bo_reserve(adev->gart.robj, false);
> -	if (unlikely(r != 0))
> -		return r;
> -	r = amdgpu_bo_pin(adev->gart.robj,
> -				AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
> -	if (r) {
> -		amdgpu_bo_unreserve(adev->gart.robj);
> -		return r;
> -	}
> -	r = amdgpu_bo_kmap(adev->gart.robj, &adev->gart.ptr);
> -	if (r)
> -		amdgpu_bo_unpin(adev->gart.robj);
> -	amdgpu_bo_unreserve(adev->gart.robj);
> -	adev->gart.table_addr = gpu_addr;
> -	return r;
> -}
> -
> -/**
> - * amdgpu_gart_table_vram_unpin - unpin gart page table in vram
> - *
> - * @adev: amdgpu_device pointer
> - *
> - * Unpin the GART page table in vram (pcie r4xx, r5xx+).
> - * These asics require the gart table to be in video memory.
> - */
> -void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
> -{
> -	int r;
> -
> -	if (adev->gart.robj == NULL) {
> -		return;
> -	}
> -	r = amdgpu_bo_reserve(adev->gart.robj, true);
> -	if (likely(r == 0)) {
> -		amdgpu_bo_kunmap(adev->gart.robj);
> -		amdgpu_bo_unpin(adev->gart.robj);
> -		amdgpu_bo_unreserve(adev->gart.robj);
> -		adev->gart.ptr = NULL;
> -	}
> -}
> -
> -/**
> - * amdgpu_gart_table_vram_free - free gart page table vram
> - *
> - * @adev: amdgpu_device pointer
> - *
> - * Free the video memory used for the GART page table
> - * (pcie r4xx, r5xx+).  These asics require the gart table to
> - * be in video memory.
> - */
> -void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
> -{
> -	if (adev->gart.robj == NULL) {
> -		return;
> -	}
> -	amdgpu_bo_unref(&adev->gart.robj);
> -}
> -
> -/*
> - * Common gart functions.
> - */
> -/**
> - * amdgpu_gart_unbind - unbind pages from the gart page table
> - *
> - * @adev: amdgpu_device pointer
> - * @offset: offset into the GPU's gart aperture
> - * @pages: number of pages to unbind
> - *
> - * Unbinds the requested pages from the gart page table and
> - * replaces them with the dummy page (all asics).
> - * Returns 0 for success, -EINVAL for failure.
> - */
> -int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
> -			int pages)
> -{
> -	unsigned t;
> -	unsigned p;
> -	int i, j;
> -	u64 page_base;
> -	/* Starting from VEGA10, system bit must be 0 to mean invalid. */
> -	uint64_t flags = 0;
> -
> -	if (!adev->gart.ready) {
> -		WARN(1, "trying to unbind memory from uninitialized GART !\n");
> -		return -EINVAL;
> -	}
> -
> -	t = offset / AMDGPU_GPU_PAGE_SIZE;
> -	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
> -	for (i = 0; i < pages; i++, p++) {
> -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
> -		adev->gart.pages[p] = NULL;
> -#endif
> -		page_base = adev->dummy_page.addr;
> -		if (!adev->gart.ptr)
> -			continue;
> -
> -		for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
> -			amdgpu_gart_set_pte_pde(adev, adev->gart.ptr,
> -						t, page_base, flags);
> -			page_base += AMDGPU_GPU_PAGE_SIZE;
> -		}
> -	}
> -	mb();
> -	amdgpu_gart_flush_gpu_tlb(adev, 0);
> -	return 0;
> -}
> -
> -/**
> - * amdgpu_gart_map - map dma_addresses into GART entries
> - *
> - * @adev: amdgpu_device pointer
> - * @offset: offset into the GPU's gart aperture
> - * @pages: number of pages to bind
> - * @dma_addr: DMA addresses of pages
> - *
> - * Map the dma_addresses into GART entries (all asics).
> - * Returns 0 for success, -EINVAL for failure.
> - */
> -int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
> -		    int pages, dma_addr_t *dma_addr, uint64_t flags,
> -		    void *dst)
> -{
> -	uint64_t page_base;
> -	unsigned i, j, t;
> -
> -	if (!adev->gart.ready) {
> -		WARN(1, "trying to bind memory to uninitialized GART !\n");
> -		return -EINVAL;
> -	}
> -
> -	t = offset / AMDGPU_GPU_PAGE_SIZE;
> -
> -	for (i = 0; i < pages; i++) {
> -		page_base = dma_addr[i];
> -		for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
> -			amdgpu_gart_set_pte_pde(adev, dst, t, page_base, flags);
> -			page_base += AMDGPU_GPU_PAGE_SIZE;
> -		}
> -	}
> -	return 0;
> -}
> -
> -/**
> - * amdgpu_gart_bind - bind pages into the gart page table
> - *
> - * @adev: amdgpu_device pointer
> - * @offset: offset into the GPU's gart aperture
> - * @pages: number of pages to bind
> - * @pagelist: pages to bind
> - * @dma_addr: DMA addresses of pages
> - *
> - * Binds the requested pages to the gart page table
> - * (all asics).
> - * Returns 0 for success, -EINVAL for failure.
> - */
> -int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
> -		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
> -		     uint64_t flags)
> -{
> -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
> -	unsigned i,t,p;
> -#endif
> -	int r;
> -
> -	if (!adev->gart.ready) {
> -		WARN(1, "trying to bind memory to uninitialized GART !\n");
> -		return -EINVAL;
> -	}
> -
> -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
> -	t = offset / AMDGPU_GPU_PAGE_SIZE;
> -	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
> -	for (i = 0; i < pages; i++, p++)
> -		adev->gart.pages[p] = pagelist[i];
> -#endif
> -
> -	if (adev->gart.ptr) {
> -		r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
> -			    adev->gart.ptr);
> -		if (r)
> -			return r;
> -	}
> -
> -	mb();
> -	amdgpu_gart_flush_gpu_tlb(adev, 0);
> -	return 0;
> -}
> -
> -/**
> - * amdgpu_gart_init - init the driver info for managing the gart
> - *
> - * @adev: amdgpu_device pointer
> - *
> - * Allocate the dummy page and init the gart driver info (all asics).
> - * Returns 0 for success, error for failure.
> - */
> -int amdgpu_gart_init(struct amdgpu_device *adev)
> -{
> -	int r;
> -
> -	if (adev->dummy_page.page)
> -		return 0;
> -
> -	/* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
> -	if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) {
> -		DRM_ERROR("Page size is smaller than GPU page size!\n");
> -		return -EINVAL;
> -	}
> -	r = amdgpu_dummy_page_init(adev);
> -	if (r)
> -		return r;
> -	/* Compute table size */
> -	adev->gart.num_cpu_pages = adev->mc.gtt_size / PAGE_SIZE;
> -	adev->gart.num_gpu_pages = adev->mc.gtt_size / AMDGPU_GPU_PAGE_SIZE;
> -	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
> -		 adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
> -
> -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
> -	/* Allocate pages table */
> -	adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages);
> -	if (adev->gart.pages == NULL) {
> -		amdgpu_gart_fini(adev);
> -		return -ENOMEM;
> -	}
> -#endif
> -
> -	return 0;
> -}
> -
> -/**
> - * amdgpu_gart_fini - tear down the driver info for managing the gart
> - *
> - * @adev: amdgpu_device pointer
> - *
> - * Tear down the gart driver info and free the dummy page (all asics).
> - */
> -void amdgpu_gart_fini(struct amdgpu_device *adev)
> -{
> -	if (adev->gart.ready) {
> -		/* unbind pages */
> -		amdgpu_gart_unbind(adev, 0, adev->gart.num_cpu_pages);
> -	}
> -	adev->gart.ready = false;
> -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
> -	vfree(adev->gart.pages);
> -	adev->gart.pages = NULL;
> -#endif
> -	amdgpu_dummy_page_fini(adev);
> -}
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> index 4510627..73a1c64 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> @@ -82,7 +82,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
>  	if (r)
>  		kfree(*job);
>  	else
> -		(*job)->vm_pd_addr = adev->gart.table_addr;
> +		(*job)->vm_pd_addr = adev->sysvm.table_addr;
>  
>  	return r;
>  }
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c
> new file mode 100644
> index 0000000..50fc8d7
> --- /dev/null
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c
> @@ -0,0 +1,423 @@
> +/*
> + * Copyright 2008 Advanced Micro Devices, Inc.
> + * Copyright 2008 Red Hat Inc.
> + * Copyright 2009 Jerome Glisse.
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the "Software"),
> + * to deal in the Software without restriction, including without limitation
> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> + * and/or sell copies of the Software, and to permit persons to whom the
> + * Software is furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice shall be included in
> + * all copies or substantial portions of the Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
> + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
> + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
> + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
> + * OTHER DEALINGS IN THE SOFTWARE.
> + *
> + * Authors: Dave Airlie
> + *          Alex Deucher
> + *          Jerome Glisse
> + */
> +#include <drm/drmP.h>
> +#include <drm/amdgpu_drm.h>
> +#include "amdgpu.h"
> +
> +/*
> + * SYSVM
> + * The system VM (previously called GART) is an aperture
> + * in the GPU's address space.  System pages can be mapped into
> + * the aperture and look like contiguous pages from the GPU's
> + * perspective.  A page table maps the pages in the aperture
> + * to the actual backing pages in system memory.
> + *
> + * Radeon GPUs support both an internal SYSVM based GART, as described above,
> + * and AGP.  AGP works similarly, but the GART table is configured
> + * and maintained by the northbridge rather than the driver.
> + * Radeon hw has a separate AGP aperture that is programmed to
> + * point to the AGP aperture provided by the northbridge and the
> + * requests are passed through to the northbridge aperture.
> + * Both AGP and internal GART can be used at the same time, however
> + * that is not currently supported by the driver.
> + *
> + * This file handles the common internal SYSVM management.
> + */
> +
> +/*
> + * Common SYSVM table functions.
> + */
> +
> +/**
> + * amdgpu_sysvm_set_defaults - set the default sysvm_size
> + *
> + * @adev: amdgpu_device pointer
> + *
> + * Set the default sysvm_size based on parameters and available VRAM.
> + */
> +void amdgpu_sysvm_set_defaults(struct amdgpu_device *adev)
> +{
> +	/* unless the user had overridden it, set the gart
> +	 * size equal to the 1024 or vram, whichever is larger.
> +	 */
> +	if (amdgpu_gart_size == -1)
> +		adev->mc.sysvm_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
> +					adev->mc.mc_vram_size);
> +	else
> +		adev->mc.sysvm_size = (uint64_t)amdgpu_gart_size << 20;
> +}
> +
> +/**
> + * amdgpu_sysvm_table_ram_alloc - allocate system ram for gart page table
> + *
> + * @adev: amdgpu_device pointer
> + *
> + * Allocate system memory for SYSVM page table
> + * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
> + * gart table to be in system memory.
> + * Returns 0 for success, -ENOMEM for failure.
> + */
> +int amdgpu_sysvm_table_ram_alloc(struct amdgpu_device *adev)
> +{
> +	void *ptr;
> +
> +	ptr = pci_alloc_consistent(adev->pdev, adev->sysvm.table_size,
> +				   &adev->sysvm.table_addr);
> +	if (ptr == NULL) {
> +		return -ENOMEM;
> +	}
> +#ifdef CONFIG_X86
> +	if (0) {
> +		set_memory_uc((unsigned long)ptr,
> +			      adev->sysvm.table_size >> PAGE_SHIFT);
> +	}
> +#endif
> +	adev->sysvm.ptr = ptr;
> +	memset((void *)adev->sysvm.ptr, 0, adev->sysvm.table_size);
> +	return 0;
> +}
> +
> +/**
> + * amdgpu_sysvm_table_ram_free - free system ram for gart page table
> + *
> + * @adev: amdgpu_device pointer
> + *
> + * Free system memory for SYSVM page table
> + * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
> + * gart table to be in system memory.
> + */
> +void amdgpu_sysvm_table_ram_free(struct amdgpu_device *adev)
> +{
> +	if (adev->sysvm.ptr == NULL) {
> +		return;
> +	}
> +#ifdef CONFIG_X86
> +	if (0) {
> +		set_memory_wb((unsigned long)adev->sysvm.ptr,
> +			      adev->sysvm.table_size >> PAGE_SHIFT);
> +	}
> +#endif
> +	pci_free_consistent(adev->pdev, adev->sysvm.table_size,
> +			    (void *)adev->sysvm.ptr,
> +			    adev->sysvm.table_addr);
> +	adev->sysvm.ptr = NULL;
> +	adev->sysvm.table_addr = 0;
> +}
> +
> +/**
> + * amdgpu_sysvm_table_vram_alloc - allocate vram for gart page table
> + *
> + * @adev: amdgpu_device pointer
> + *
> + * Allocate video memory for SYSVM page table
> + * (pcie r4xx, r5xx+).  These asics require the
> + * gart table to be in video memory.
> + * Returns 0 for success, error for failure.
> + */
> +int amdgpu_sysvm_table_vram_alloc(struct amdgpu_device *adev)
> +{
> +	int r;
> +
> +	if (adev->sysvm.robj == NULL) {
> +		r = amdgpu_bo_create(adev, adev->sysvm.table_size,
> +				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
> +				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
> +				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
> +				     NULL, NULL, &adev->sysvm.robj);
> +		if (r) {
> +			return r;
> +		}
> +	}
> +	return 0;
> +}
> +
> +/**
> + * amdgpu_sysvm_table_vram_pin - pin gart page table in vram
> + *
> + * @adev: amdgpu_device pointer
> + *
> + * Pin the SYSVM page table in vram so it will not be moved
> + * by the memory manager (pcie r4xx, r5xx+).  These asics require the
> + * gart table to be in video memory.
> + * Returns 0 for success, error for failure.
> + */
> +int amdgpu_sysvm_table_vram_pin(struct amdgpu_device *adev)
> +{
> +	uint64_t gpu_addr;
> +	int r;
> +
> +	r = amdgpu_bo_reserve(adev->sysvm.robj, false);
> +	if (unlikely(r != 0))
> +		return r;
> +	r = amdgpu_bo_pin(adev->sysvm.robj,
> +				AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
> +	if (r) {
> +		amdgpu_bo_unreserve(adev->sysvm.robj);
> +		return r;
> +	}
> +	r = amdgpu_bo_kmap(adev->sysvm.robj, &adev->sysvm.ptr);
> +	if (r)
> +		amdgpu_bo_unpin(adev->sysvm.robj);
> +	amdgpu_bo_unreserve(adev->sysvm.robj);
> +	adev->sysvm.table_addr = gpu_addr;
> +	return r;
> +}
> +
> +/**
> + * amdgpu_sysvm_table_vram_unpin - unpin gart page table in vram
> + *
> + * @adev: amdgpu_device pointer
> + *
> + * Unpin the GART page table in vram (pcie r4xx, r5xx+).
> + * These asics require the gart table to be in video memory.
> + */
> +void amdgpu_sysvm_table_vram_unpin(struct amdgpu_device *adev)
> +{
> +	int r;
> +
> +	if (adev->sysvm.robj == NULL) {
> +		return;
> +	}
> +	r = amdgpu_bo_reserve(adev->sysvm.robj, true);
> +	if (likely(r == 0)) {
> +		amdgpu_bo_kunmap(adev->sysvm.robj);
> +		amdgpu_bo_unpin(adev->sysvm.robj);
> +		amdgpu_bo_unreserve(adev->sysvm.robj);
> +		adev->sysvm.ptr = NULL;
> +	}
> +}
> +
> +/**
> + * amdgpu_sysvm_table_vram_free - free gart page table vram
> + *
> + * @adev: amdgpu_device pointer
> + *
> + * Free the video memory used for the GART page table
> + * (pcie r4xx, r5xx+).  These asics require the gart table to
> + * be in video memory.
> + */
> +void amdgpu_sysvm_table_vram_free(struct amdgpu_device *adev)
> +{
> +	if (adev->sysvm.robj == NULL) {
> +		return;
> +	}
> +	amdgpu_bo_unref(&adev->sysvm.robj);
> +}
> +
> +/*
> + * Common gart functions.
> + */
> +/**
> + * amdgpu_sysvm_unbind - unbind pages from the gart page table
> + *
> + * @adev: amdgpu_device pointer
> + * @offset: offset into the GPU's gart aperture
> + * @pages: number of pages to unbind
> + *
> + * Unbinds the requested pages from the gart page table and
> + * replaces them with the dummy page (all asics).
> + * Returns 0 for success, -EINVAL for failure.
> + */
> +int amdgpu_sysvm_unbind(struct amdgpu_device *adev, uint64_t offset,
> +			int pages)
> +{
> +	unsigned t;
> +	unsigned p;
> +	int i, j;
> +	u64 page_base;
> +	/* Starting from VEGA10, system bit must be 0 to mean invalid. */
> +	uint64_t flags = 0;
> +
> +	if (!adev->sysvm.ready) {
> +		WARN(1, "trying to unbind memory from uninitialized GART !\n");
> +		return -EINVAL;
> +	}
> +
> +	t = offset / AMDGPU_GPU_PAGE_SIZE;
> +	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
> +	for (i = 0; i < pages; i++, p++) {
> +#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
> +		adev->sysvm.pages[p] = NULL;
> +#endif
> +		page_base = adev->dummy_page.addr;
> +		if (!adev->sysvm.ptr)
> +			continue;
> +
> +		for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
> +			amdgpu_sysvm_set_pte_pde(adev, adev->sysvm.ptr,
> +						t, page_base, flags);
> +			page_base += AMDGPU_GPU_PAGE_SIZE;
> +		}
> +	}
> +	mb();
> +	amdgpu_sysvm_flush_gpu_tlb(adev, 0);
> +	return 0;
> +}
> +
> +/**
> + * amdgpu_sysvm_map - map dma_addresses into GART entries
> + *
> + * @adev: amdgpu_device pointer
> + * @offset: offset into the GPU's gart aperture
> + * @pages: number of pages to bind
> + * @dma_addr: DMA addresses of pages
> + *
> + * Map the dma_addresses into GART entries (all asics).
> + * Returns 0 for success, -EINVAL for failure.
> + */
> +int amdgpu_sysvm_map(struct amdgpu_device *adev, uint64_t offset,
> +		    int pages, dma_addr_t *dma_addr, uint64_t flags,
> +		    void *dst)
> +{
> +	uint64_t page_base;
> +	unsigned i, j, t;
> +
> +	if (!adev->sysvm.ready) {
> +		WARN(1, "trying to bind memory to uninitialized GART !\n");
> +		return -EINVAL;
> +	}
> +
> +	t = offset / AMDGPU_GPU_PAGE_SIZE;
> +
> +	for (i = 0; i < pages; i++) {
> +		page_base = dma_addr[i];
> +		for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
> +			amdgpu_sysvm_set_pte_pde(adev, dst, t, page_base, flags);
> +			page_base += AMDGPU_GPU_PAGE_SIZE;
> +		}
> +	}
> +	return 0;
> +}
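
Note that the map helper above only computes the PTEs and writes them through
amdgpu_sysvm_set_pte_pde() into whatever destination the caller passes in, so
the destination does not have to be the live table.  A minimal caller sketch,
assuming a prepared SDMA job whose IB still has room behind the num_dw command
dwords (this mirrors the buffer-move path changed later in this patch):

	r = amdgpu_sysvm_map(adev, 0, num_pages, dma_address, flags,
			     &job->ibs[0].ptr[num_dw]);
	if (r)
		goto error_free;

Passing adev->sysvm.ptr as the destination instead updates the CPU-visible
table directly, which is what amdgpu_sysvm_bind() below does.
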
> +
> +/**
> + * amdgpu_sysvm_bind - bind pages into the gart page table
> + *
> + * @adev: amdgpu_device pointer
> + * @offset: offset into the GPU's gart aperture
> + * @pages: number of pages to bind
> + * @pagelist: pages to bind
> + * @dma_addr: DMA addresses of pages
> + *
> + * Binds the requested pages to the gart page table
> + * (all asics).
> + * Returns 0 for success, -EINVAL for failure.
> + */
> +int amdgpu_sysvm_bind(struct amdgpu_device *adev, uint64_t offset,
> +		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
> +		     uint64_t flags)
> +{
> +#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
> +	unsigned i,t,p;
> +#endif
> +	int r;
> +
> +	if (!adev->sysvm.ready) {
> +		WARN(1, "trying to bind memory to uninitialized GART !\n");
> +		return -EINVAL;
> +	}
> +
> +#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
> +	t = offset / AMDGPU_GPU_PAGE_SIZE;
> +	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
> +	for (i = 0; i < pages; i++, p++)
> +		adev->sysvm.pages[p] = pagelist[i];
> +#endif
> +
> +	if (adev->sysvm.ptr) {
> +		r = amdgpu_sysvm_map(adev, offset, pages, dma_addr, flags,
> +			    adev->sysvm.ptr);
> +		if (r)
> +			return r;
> +	}
> +
> +	mb();
> +	amdgpu_sysvm_flush_gpu_tlb(adev, 0);
> +	return 0;
> +}
> +
> +/**
> + * amdgpu_sysvm_init - init the driver info for managing the gart
> + *
> + * @adev: amdgpu_device pointer
> + *
> + * Allocate the dummy page and init the gart driver info (all asics).
> + * Returns 0 for success, error for failure.
> + */
> +int amdgpu_sysvm_init(struct amdgpu_device *adev)
> +{
> +	int r;
> +
> +	if (adev->dummy_page.page)
> +		return 0;
> +
> +	/* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
> +	if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) {
> +		DRM_ERROR("Page size is smaller than GPU page size!\n");
> +		return -EINVAL;
> +	}
> +	r = amdgpu_dummy_page_init(adev);
> +	if (r)
> +		return r;
> +	/* Compute table size */
> +	adev->sysvm.num_cpu_pages = adev->mc.sysvm_size / PAGE_SIZE;
> +	adev->sysvm.num_gpu_pages = adev->mc.sysvm_size / AMDGPU_GPU_PAGE_SIZE;
> +	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
> +		 adev->sysvm.num_cpu_pages, adev->sysvm.num_gpu_pages);
> +
> +#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
> +	/* Allocate pages table */
> +	adev->sysvm.pages = vzalloc(sizeof(void *) * adev->sysvm.num_cpu_pages);
> +	if (adev->sysvm.pages == NULL) {
> +		amdgpu_sysvm_fini(adev);
> +		return -ENOMEM;
> +	}
> +#endif
> +
> +	return 0;
> +}
> +
> +/**
> + * amdgpu_sysvm_fini - tear down the driver info for managing the gart
> + *
> + * @adev: amdgpu_device pointer
> + *
> + * Tear down the gart driver info and free the dummy page (all asics).
> + */
> +void amdgpu_sysvm_fini(struct amdgpu_device *adev)
> +{
> +	if (adev->sysvm.ready) {
> +		/* unbind pages */
> +		amdgpu_sysvm_unbind(adev, 0, adev->sysvm.num_cpu_pages);
> +	}
> +	adev->sysvm.ready = false;
> +#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
> +	vfree(adev->sysvm.pages);
> +	adev->sysvm.pages = NULL;
> +#endif
> +	amdgpu_dummy_page_fini(adev);
> +}
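
Taken together, the helpers above imply roughly the following bring-up order
for an ASIC that keeps its table in VRAM.  This is only a sketch; the per-ASIC
gmc_v*_gart_init() and gmc_v*_sysvm_enable() functions further down are the
authoritative callers:

	r = amdgpu_sysvm_init(adev);			/* dummy page, page counts */
	if (r)
		return r;
	adev->sysvm.table_size = adev->sysvm.num_gpu_pages * 8;
	r = amdgpu_sysvm_table_vram_alloc(adev);	/* create the table BO */
	if (r)
		return r;
	r = amdgpu_sysvm_table_vram_pin(adev);		/* pin + kmap, sets table_addr */

Teardown runs in reverse: amdgpu_sysvm_table_vram_unpin(),
amdgpu_sysvm_table_vram_free() and finally amdgpu_sysvm_fini().
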
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
> index d02e611..651712e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
> @@ -32,8 +32,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
>  {
>  	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
>  	struct amdgpu_bo *vram_obj = NULL;
> -	struct amdgpu_bo **gtt_obj = NULL;
> -	uint64_t gtt_addr, vram_addr;
> +	struct amdgpu_bo **sysvm_obj = NULL;
> +	uint64_t sysvm_addr, vram_addr;
>  	unsigned n, size;
>  	int i, r;
>  
> @@ -42,7 +42,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
>  	/* Number of tests =
>  	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
>  	 */
> -	n = adev->mc.gtt_size - AMDGPU_IB_POOL_SIZE*64*1024;
> +	n = adev->mc.sysvm_size - AMDGPU_IB_POOL_SIZE*64*1024;
>  	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
>  		if (adev->rings[i])
>  			n -= adev->rings[i]->ring_size;
> @@ -52,8 +52,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
>  		n -= adev->irq.ih.ring_size;
>  	n /= size;
>  
> -	gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
> -	if (!gtt_obj) {
> +	sysvm_obj = kzalloc(n * sizeof(*sysvm_obj), GFP_KERNEL);
> +	if (!sysvm_obj) {
>  		DRM_ERROR("Failed to allocate %d pointers\n", n);
>  		r = 1;
>  		goto out_cleanup;
> @@ -75,42 +75,42 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
>  		goto out_unres;
>  	}
>  	for (i = 0; i < n; i++) {
> -		void *gtt_map, *vram_map;
> -		void **gtt_start, **gtt_end;
> +		void *sysvm_map, *vram_map;
> +		void **sysvm_start, **sysvm_end;
>  		void **vram_start, **vram_end;
>  		struct dma_fence *fence = NULL;
>  
>  		r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
>  				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
> -				     NULL, gtt_obj + i);
> +				     NULL, sysvm_obj + i);
>  		if (r) {
>  			DRM_ERROR("Failed to create GTT object %d\n", i);
>  			goto out_lclean;
>  		}
>  
> -		r = amdgpu_bo_reserve(gtt_obj[i], false);
> +		r = amdgpu_bo_reserve(sysvm_obj[i], false);
>  		if (unlikely(r != 0))
>  			goto out_lclean_unref;
> -		r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, &gtt_addr);
> +		r = amdgpu_bo_pin(sysvm_obj[i], AMDGPU_GEM_DOMAIN_GTT, &sysvm_addr);
>  		if (r) {
>  			DRM_ERROR("Failed to pin GTT object %d\n", i);
>  			goto out_lclean_unres;
>  		}
>  
> -		r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
> +		r = amdgpu_bo_kmap(sysvm_obj[i], &sysvm_map);
>  		if (r) {
>  			DRM_ERROR("Failed to map GTT object %d\n", i);
>  			goto out_lclean_unpin;
>  		}
>  
> -		for (gtt_start = gtt_map, gtt_end = gtt_map + size;
> -		     gtt_start < gtt_end;
> -		     gtt_start++)
> -			*gtt_start = gtt_start;
> +		for (sysvm_start = sysvm_map, sysvm_end = sysvm_map + size;
> +		     sysvm_start < sysvm_end;
> +		     sysvm_start++)
> +			*sysvm_start = sysvm_start;
>  
> -		amdgpu_bo_kunmap(gtt_obj[i]);
> +		amdgpu_bo_kunmap(sysvm_obj[i]);
>  
> -		r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
> +		r = amdgpu_copy_buffer(ring, sysvm_addr, vram_addr,
>  				       size, NULL, &fence, false, false);
>  
>  		if (r) {
> @@ -132,21 +132,21 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
>  			goto out_lclean_unpin;
>  		}
>  
> -		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
> +		for (sysvm_start = sysvm_map, sysvm_end = sysvm_map + size,
>  		     vram_start = vram_map, vram_end = vram_map + size;
>  		     vram_start < vram_end;
> -		     gtt_start++, vram_start++) {
> -			if (*vram_start != gtt_start) {
> +		     sysvm_start++, vram_start++) {
> +			if (*vram_start != sysvm_start) {
>  				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
>  					  "expected 0x%p (GTT/VRAM offset "
>  					  "0x%16llx/0x%16llx)\n",
> -					  i, *vram_start, gtt_start,
> +					  i, *vram_start, sysvm_start,
>  					  (unsigned long long)
> -					  (gtt_addr - adev->mc.gtt_start +
> -					   (void*)gtt_start - gtt_map),
> +					  (sysvm_addr - adev->mc.sysvm_start +
> +					   (void*)sysvm_start - sysvm_map),
>  					  (unsigned long long)
>  					  (vram_addr - adev->mc.vram_start +
> -					   (void*)gtt_start - gtt_map));
> +					   (void*)sysvm_start - sysvm_map));
>  				amdgpu_bo_kunmap(vram_obj);
>  				goto out_lclean_unpin;
>  			}
> @@ -155,7 +155,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
>  
>  		amdgpu_bo_kunmap(vram_obj);
>  
> -		r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr,
> +		r = amdgpu_copy_buffer(ring, vram_addr, sysvm_addr,
>  				       size, NULL, &fence, false, false);
>  
>  		if (r) {
> @@ -171,49 +171,49 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
>  
>  		dma_fence_put(fence);
>  
> -		r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
> +		r = amdgpu_bo_kmap(sysvm_obj[i], &sysvm_map);
>  		if (r) {
>  			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
>  			goto out_lclean_unpin;
>  		}
>  
> -		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
> +		for (sysvm_start = sysvm_map, sysvm_end = sysvm_map + size,
>  		     vram_start = vram_map, vram_end = vram_map + size;
> -		     gtt_start < gtt_end;
> -		     gtt_start++, vram_start++) {
> -			if (*gtt_start != vram_start) {
> +		     sysvm_start < sysvm_end;
> +		     sysvm_start++, vram_start++) {
> +			if (*sysvm_start != vram_start) {
>  				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
>  					  "expected 0x%p (VRAM/GTT offset "
>  					  "0x%16llx/0x%16llx)\n",
> -					  i, *gtt_start, vram_start,
> +					  i, *sysvm_start, vram_start,
>  					  (unsigned long long)
>  					  (vram_addr - adev->mc.vram_start +
>  					   (void*)vram_start - vram_map),
>  					  (unsigned long long)
> -					  (gtt_addr - adev->mc.gtt_start +
> +					  (sysvm_addr - adev->mc.sysvm_start +
>  					   (void*)vram_start - vram_map));
> -				amdgpu_bo_kunmap(gtt_obj[i]);
> +				amdgpu_bo_kunmap(sysvm_obj[i]);
>  				goto out_lclean_unpin;
>  			}
>  		}
>  
> -		amdgpu_bo_kunmap(gtt_obj[i]);
> +		amdgpu_bo_kunmap(sysvm_obj[i]);
>  
>  		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
> -			 gtt_addr - adev->mc.gtt_start);
> +			 sysvm_addr - adev->mc.sysvm_start);
>  		continue;
>  
>  out_lclean_unpin:
> -		amdgpu_bo_unpin(gtt_obj[i]);
> +		amdgpu_bo_unpin(sysvm_obj[i]);
>  out_lclean_unres:
> -		amdgpu_bo_unreserve(gtt_obj[i]);
> +		amdgpu_bo_unreserve(sysvm_obj[i]);
>  out_lclean_unref:
> -		amdgpu_bo_unref(&gtt_obj[i]);
> +		amdgpu_bo_unref(&sysvm_obj[i]);
>  out_lclean:
>  		for (--i; i >= 0; --i) {
> -			amdgpu_bo_unpin(gtt_obj[i]);
> -			amdgpu_bo_unreserve(gtt_obj[i]);
> -			amdgpu_bo_unref(&gtt_obj[i]);
> +			amdgpu_bo_unpin(sysvm_obj[i]);
> +			amdgpu_bo_unreserve(sysvm_obj[i]);
> +			amdgpu_bo_unref(&sysvm_obj[i]);
>  		}
>  		if (fence)
>  			dma_fence_put(fence);
> @@ -226,7 +226,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
>  out_unref:
>  	amdgpu_bo_unref(&vram_obj);
>  out_cleanup:
> -	kfree(gtt_obj);
> +	kfree(sysvm_obj);
>  	if (r) {
>  		pr_warn("Error while testing BO move\n");
>  	}
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> index 5c7a6c5..9240357 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> @@ -102,7 +102,7 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
>  		goto error_bo;
>  	}
>  
> -	mutex_init(&adev->mman.gtt_window_lock);
> +	mutex_init(&adev->mman.sysvm_window_lock);
>  
>  	ring = adev->mman.buffer_funcs_ring;
>  	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
> @@ -130,7 +130,7 @@ static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
>  	if (adev->mman.mem_global_referenced) {
>  		amd_sched_entity_fini(adev->mman.entity.sched,
>  				      &adev->mman.entity);
> -		mutex_destroy(&adev->mman.gtt_window_lock);
> +		mutex_destroy(&adev->mman.sysvm_window_lock);
>  		drm_global_item_unref(&adev->mman.bo_global_ref.ref);
>  		drm_global_item_unref(&adev->mman.mem_global_ref);
>  		adev->mman.mem_global_referenced = false;
> @@ -158,7 +158,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
>  		break;
>  	case TTM_PL_TT:
>  		man->func = &amdgpu_gtt_mgr_func;
> -		man->gpu_offset = adev->mc.gtt_start;
> +		man->gpu_offset = adev->mc.sysvm_start;
>  		man->available_caching = TTM_PL_MASK_CACHING;
>  		man->default_caching = TTM_PL_FLAG_CACHED;
>  		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
> @@ -289,7 +289,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
>  	new_start = amdgpu_mm_node_addr(bo, new_mm, new_mem);
>  
>  	num_pages = new_mem->num_pages;
> -	mutex_lock(&adev->mman.gtt_window_lock);
> +	mutex_lock(&adev->mman.sysvm_window_lock);
>  	while (num_pages) {
>  		unsigned long cur_pages = min(min(old_size, new_size),
>  					      (u64)AMDGPU_GTT_MAX_TRANSFER_SIZE);
> @@ -341,14 +341,14 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
>  			new_start += cur_pages * PAGE_SIZE;
>  		}
>  	}
> -	mutex_unlock(&adev->mman.gtt_window_lock);
> +	mutex_unlock(&adev->mman.sysvm_window_lock);
>  
>  	r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
>  	dma_fence_put(fence);
>  	return r;
>  
>  error:
> -	mutex_unlock(&adev->mman.gtt_window_lock);
> +	mutex_unlock(&adev->mman.sysvm_window_lock);
>  
>  	if (fence)
>  		dma_fence_wait(fence, false);
> @@ -693,10 +693,10 @@ static int amdgpu_ttm_do_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
>  	uint64_t flags;
>  	int r;
>  
> -	spin_lock(&gtt->adev->gtt_list_lock);
> +	spin_lock(&gtt->adev->sysvm_list_lock);
>  	flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, mem);
>  	gtt->offset = (u64)mem->start << PAGE_SHIFT;
> -	r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
> +	r = amdgpu_sysvm_bind(gtt->adev, gtt->offset, ttm->num_pages,
>  		ttm->pages, gtt->ttm.dma_address, flags);
>  
>  	if (r) {
> @@ -705,9 +705,9 @@ static int amdgpu_ttm_do_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
>  		goto error_gart_bind;
>  	}
>  
> -	list_add_tail(&gtt->list, &gtt->adev->gtt_list);
> +	list_add_tail(&gtt->list, &gtt->adev->sysvm_list);
>  error_gart_bind:
> -	spin_unlock(&gtt->adev->gtt_list_lock);
> +	spin_unlock(&gtt->adev->sysvm_list_lock);
>  	return r;
>  
>  }
> @@ -774,20 +774,20 @@ int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
>  	int r;
>  
>  	bo_mem.mem_type = TTM_PL_TT;
> -	spin_lock(&adev->gtt_list_lock);
> -	list_for_each_entry_safe(gtt, tmp, &adev->gtt_list, list) {
> +	spin_lock(&adev->sysvm_list_lock);
> +	list_for_each_entry_safe(gtt, tmp, &adev->sysvm_list, list) {
>  		flags = amdgpu_ttm_tt_pte_flags(gtt->adev, &gtt->ttm.ttm, &bo_mem);
> -		r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
> +		r = amdgpu_sysvm_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
>  				     gtt->ttm.ttm.pages, gtt->ttm.dma_address,
>  				     flags);
>  		if (r) {
> -			spin_unlock(&adev->gtt_list_lock);
> +			spin_unlock(&adev->sysvm_list_lock);
>  			DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
>  				  gtt->ttm.ttm.num_pages, gtt->offset);
>  			return r;
>  		}
>  	}
> -	spin_unlock(&adev->gtt_list_lock);
> +	spin_unlock(&adev->sysvm_list_lock);
>  	return 0;
>  }
>  
> @@ -803,8 +803,8 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
>  		return 0;
>  
>  	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
> -	spin_lock(&gtt->adev->gtt_list_lock);
> -	r = amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages);
> +	spin_lock(&gtt->adev->sysvm_list_lock);
> +	r = amdgpu_sysvm_unbind(gtt->adev, gtt->offset, ttm->num_pages);
>  	if (r) {
>  		DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
>  			  gtt->ttm.ttm.num_pages, gtt->offset);
> @@ -812,7 +812,7 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
>  	}
>  	list_del_init(&gtt->list);
>  error_unbind:
> -	spin_unlock(&gtt->adev->gtt_list_lock);
> +	spin_unlock(&gtt->adev->sysvm_list_lock);
>  	return r;
>  }
>  
> @@ -1037,7 +1037,7 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
>  			flags |= AMDGPU_PTE_SNOOPED;
>  	}
>  
> -	flags |= adev->gart.gart_pte_flags;
> +	flags |= adev->sysvm.sysvm_pte_flags;
>  	flags |= AMDGPU_PTE_READABLE;
>  
>  	if (!amdgpu_ttm_tt_is_readonly(ttm))
> @@ -1144,13 +1144,13 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
>  	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
>  		 (unsigned) (adev->mc.real_vram_size / (1024 * 1024)));
>  	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT,
> -				adev->mc.gtt_size >> PAGE_SHIFT);
> +				adev->mc.sysvm_size >> PAGE_SHIFT);
>  	if (r) {
>  		DRM_ERROR("Failed initializing GTT heap.\n");
>  		return r;
>  	}
>  	DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
> -		 (unsigned)(adev->mc.gtt_size / (1024 * 1024)));
> +		 (unsigned)(adev->mc.sysvm_size / (1024 * 1024)));
>  
>  	adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
>  	adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
> @@ -1223,7 +1223,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
>  	if (adev->gds.oa.total_size)
>  		ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
>  	ttm_bo_device_release(&adev->mman.bdev);
> -	amdgpu_gart_fini(adev);
> +	amdgpu_sysvm_fini(adev);
>  	amdgpu_ttm_global_fini(adev);
>  	adev->mman.initialized = false;
>  	DRM_INFO("amdgpu: ttm finalized\n");
> @@ -1279,7 +1279,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
>  	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
>  	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
>  
> -	*addr = adev->mc.gtt_start;
> +	*addr = adev->mc.sysvm_start;
>  	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
>  		AMDGPU_GPU_PAGE_SIZE;
>  
> @@ -1296,7 +1296,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
>  	src_addr = num_dw * 4;
>  	src_addr += job->ibs[0].gpu_addr;
>  
> -	dst_addr = adev->gart.table_addr;
> +	dst_addr = adev->sysvm.table_addr;
>  	dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
>  	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
>  				dst_addr, num_bytes);
> @@ -1306,7 +1306,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
>  
>  	dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
>  	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
> -	r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
> +	r = amdgpu_sysvm_map(adev, 0, num_pages, dma_address, flags,
>  			    &job->ibs[0].ptr[num_dw]);
>  	if (r)
>  		goto error_free;
> @@ -1522,7 +1522,7 @@ static int ttm_pl_tt = TTM_PL_TT;
>  
>  static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
>  	{"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram},
> -	{"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
> +	{"amdgpu_sysvm_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
>  	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
>  #ifdef CONFIG_SWIOTLB
>  	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
> @@ -1574,9 +1574,9 @@ static const struct file_operations amdgpu_ttm_vram_fops = {
>  	.llseek = default_llseek
>  };
>  
> -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
> +#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
>  
> -static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
> +static ssize_t amdgpu_ttm_sysvm_read(struct file *f, char __user *buf,
>  				   size_t size, loff_t *pos)
>  {
>  	struct amdgpu_device *adev = file_inode(f)->i_private;
> @@ -1590,16 +1590,16 @@ static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
>  		struct page *page;
>  		void *ptr;
>  
> -		if (p >= adev->gart.num_cpu_pages)
> +		if (p >= adev->sysvm.num_cpu_pages)
>  			return result;
>  
> -		page = adev->gart.pages[p];
> +		page = adev->sysvm.pages[p];
>  		if (page) {
>  			ptr = kmap(page);
>  			ptr += off;
>  
>  			r = copy_to_user(buf, ptr, cur_size);
> -			kunmap(adev->gart.pages[p]);
> +			kunmap(adev->sysvm.pages[p]);
>  		} else
>  			r = clear_user(buf, cur_size);
>  
> @@ -1615,9 +1615,9 @@ static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
>  	return result;
>  }
>  
> -static const struct file_operations amdgpu_ttm_gtt_fops = {
> +static const struct file_operations amdgpu_ttm_sysvm_fops = {
>  	.owner = THIS_MODULE,
> -	.read = amdgpu_ttm_gtt_read,
> +	.read = amdgpu_ttm_sysvm_read,
>  	.llseek = default_llseek
>  };
>  
> @@ -1640,12 +1640,12 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
>  	i_size_write(ent->d_inode, adev->mc.mc_vram_size);
>  	adev->mman.vram = ent;
>  
> -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
> -	ent = debugfs_create_file("amdgpu_gtt", S_IFREG | S_IRUGO, root,
> -				  adev, &amdgpu_ttm_gtt_fops);
> +#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
> +	ent = debugfs_create_file("amdgpu_sysvm", S_IFREG | S_IRUGO, root,
> +				  adev, &amdgpu_ttm_sysvm_fops);
>  	if (IS_ERR(ent))
>  		return PTR_ERR(ent);
> -	i_size_write(ent->d_inode, adev->mc.gtt_size);
> +	i_size_write(ent->d_inode, adev->mc.sysvm_size);
>  	adev->mman.gtt = ent;
>  
>  #endif
> @@ -1670,7 +1670,7 @@ static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
>  	debugfs_remove(adev->mman.vram);
>  	adev->mman.vram = NULL;
>  
> -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
> +#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
>  	debugfs_remove(adev->mman.gtt);
>  	adev->mman.gtt = NULL;
>  #endif
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
> index 4f5c1da..1443038 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
> @@ -53,7 +53,7 @@ struct amdgpu_mman {
>  	const struct amdgpu_buffer_funcs	*buffer_funcs;
>  	struct amdgpu_ring			*buffer_funcs_ring;
>  
> -	struct mutex				gtt_window_lock;
> +	struct mutex				sysvm_window_lock;
>  	/* Scheduler entity for buffer moves */
>  	struct amd_sched_entity			entity;
>  };
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> index 1d1810d..8dbacec 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> @@ -982,14 +982,14 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
>  		value = params->pages_addr ?
>  			amdgpu_vm_map_gart(params->pages_addr, addr) :
>  			addr;
> -		amdgpu_gart_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
> +		amdgpu_sysvm_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
>  					i, value, flags);
>  		addr += incr;
>  	}
>  
>  	/* Flush HDP */
>  	mb();
> -	amdgpu_gart_flush_gpu_tlb(params->adev, 0);
> +	amdgpu_sysvm_flush_gpu_tlb(params->adev, 0);
>  }
>  
>  static int amdgpu_vm_bo_wait(struct amdgpu_device *adev, struct amdgpu_bo *bo)
> @@ -1101,7 +1101,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
>  		}
>  
>  		pt = amdgpu_bo_gpu_offset(bo);
> -		pt = amdgpu_gart_get_vm_pde(adev, pt);
> +		pt = amdgpu_sysvm_get_vm_pde(adev, pt);
>  		if (parent->entries[pt_idx].addr == pt)
>  			continue;
>  
> @@ -1564,7 +1564,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
>   *
>   * @adev: amdgpu_device pointer
>   * @exclusive: fence we need to sync to
> - * @gtt_flags: flags as they are used for GTT
> + * @sysvm_flags: flags as they are used in the SYSVM
>   * @pages_addr: DMA addresses to use for mapping
>   * @vm: requested vm
>   * @mapping: mapped range and flags to use for the update
> @@ -1578,7 +1578,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
>   */
>  static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
>  				      struct dma_fence *exclusive,
> -				      uint64_t gtt_flags,
> +				      uint64_t sysvm_flags,
>  				      dma_addr_t *pages_addr,
>  				      struct amdgpu_vm *vm,
>  				      struct amdgpu_bo_va_mapping *mapping,
> @@ -1633,8 +1633,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
>  		}
>  
>  		if (pages_addr) {
> -			if (flags == gtt_flags)
> -				src = adev->gart.table_addr +
> +			if (flags == sysvm_flags)
> +				src = adev->sysvm.table_addr +
>  					(addr >> AMDGPU_GPU_PAGE_SHIFT) * 8;
>  			else
>  				max_entries = min(max_entries, 16ull * 1024ull);
> @@ -1681,7 +1681,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
>  	struct amdgpu_vm *vm = bo_va->vm;
>  	struct amdgpu_bo_va_mapping *mapping;
>  	dma_addr_t *pages_addr = NULL;
> -	uint64_t gtt_flags, flags;
> +	uint64_t sysvm_flags, flags;
>  	struct ttm_mem_reg *mem;
>  	struct drm_mm_node *nodes;
>  	struct dma_fence *exclusive;
> @@ -1706,12 +1706,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
>  
>  	if (bo_va->bo) {
>  		flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
> -		gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
> +		sysvm_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
>  			adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ?
>  			flags : 0;
>  	} else {
>  		flags = 0x0;
> -		gtt_flags = ~0x0;
> +		sysvm_flags = ~0x0;
>  	}
>  
>  	spin_lock(&vm->status_lock);
> @@ -1721,7 +1721,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
>  
>  	list_for_each_entry(mapping, &bo_va->invalids, list) {
>  		r = amdgpu_vm_bo_split_mapping(adev, exclusive,
> -					       gtt_flags, pages_addr, vm,
> +					       sysvm_flags, pages_addr, vm,
>  					       mapping, flags, nodes,
>  					       &bo_va->last_pt_update);
>  		if (r)
> @@ -1756,7 +1756,7 @@ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
>  
>  	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
>  	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
> -	adev->gart.gart_funcs->set_prt(adev, enable);
> +	adev->sysvm.sysvm_funcs->set_prt(adev, enable);
>  	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
>  }
>  
> @@ -1765,7 +1765,7 @@ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
>   */
>  static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
>  {
> -	if (!adev->gart.gart_funcs->set_prt)
> +	if (!adev->sysvm.sysvm_funcs->set_prt)
>  		return;
>  
>  	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
> @@ -1800,7 +1800,7 @@ static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
>  {
>  	struct amdgpu_prt_cb *cb;
>  
> -	if (!adev->gart.gart_funcs->set_prt)
> +	if (!adev->sysvm.sysvm_funcs->set_prt)
>  		return;
>  
>  	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
> @@ -2498,7 +2498,7 @@ static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level)
>  void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
>  {
>  	struct amdgpu_bo_va_mapping *mapping, *tmp;
> -	bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
> +	bool prt_fini_needed = !!adev->sysvm.sysvm_funcs->set_prt;
>  	int i;
>  
>  	amd_sched_entity_fini(vm->entity.sched, &vm->entity);
> diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> index 6986285..708fb84 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> @@ -3731,10 +3731,10 @@ static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
>  {
>  	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
>  	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
> -	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
> +	uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
>  	unsigned eng = ring->vm_inv_eng;
>  
> -	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
> +	pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
>  	pd_addr |= AMDGPU_PTE_VALID;
>  
>  	gfx_v9_0_write_data_to_reg(ring, usepfp, true,
> diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
> index a42f483..1290434 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
> @@ -40,8 +40,8 @@ static void gfxhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
>  {
>  	uint64_t value;
>  
> -	BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL));
> -	value = adev->gart.table_addr - adev->mc.vram_start
> +	BUG_ON(adev->sysvm.table_addr & (~0x0000FFFFFFFFF000ULL));
> +	value = adev->sysvm.table_addr - adev->mc.vram_start
>  		+ adev->vm_manager.vram_base_offset;
>  	value &= 0x0000FFFFFFFFF000ULL;
>  	value |= 0x1; /*valid bit*/
> @@ -58,14 +58,14 @@ static void gfxhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
>  	gfxhub_v1_0_init_gart_pt_regs(adev);
>  
>  	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
> -		     (u32)(adev->mc.gtt_start >> 12));
> +		     (u32)(adev->mc.sysvm_start >> 12));
>  	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
> -		     (u32)(adev->mc.gtt_start >> 44));
> +		     (u32)(adev->mc.sysvm_start >> 44));
>  
>  	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
> -		     (u32)(adev->mc.gtt_end >> 12));
> +		     (u32)(adev->mc.sysvm_end >> 12));
>  	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
> -		     (u32)(adev->mc.gtt_end >> 44));
> +		     (u32)(adev->mc.sysvm_end >> 44));
>  }
>  
>  static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
> @@ -228,7 +228,7 @@ static void gfxhub_v1_0_program_invalidation(struct amdgpu_device *adev)
>  	}
>  }
>  
> -int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
> +int gfxhub_v1_0_sysvm_enable(struct amdgpu_device *adev)
>  {
>  	if (amdgpu_sriov_vf(adev)) {
>  		/*
> @@ -256,7 +256,7 @@ int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
>  	return 0;
>  }
>  
> -void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev)
> +void gfxhub_v1_0_sysvm_disable(struct amdgpu_device *adev)
>  {
>  	u32 tmp;
>  	u32 i;
> diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
> index d2dbb08..d194b7e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
> +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
> @@ -24,8 +24,8 @@
>  #ifndef __GFXHUB_V1_0_H__
>  #define __GFXHUB_V1_0_H__
>  
> -int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev);
> -void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev);
> +int gfxhub_v1_0_sysvm_enable(struct amdgpu_device *adev);
> +void gfxhub_v1_0_sysvm_disable(struct amdgpu_device *adev);
>  void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
>  					  bool value);
>  void gfxhub_v1_0_init(struct amdgpu_device *adev);
> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
> index 5ed6788f..53c3b8a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
> @@ -36,7 +36,7 @@
>  #include "dce/dce_6_0_sh_mask.h"
>  #include "si_enums.h"
>  
> -static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev);
> +static void gmc_v6_0_set_sysvm_funcs(struct amdgpu_device *adev);
>  static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
>  static int gmc_v6_0_wait_for_idle(void *handle);
>  
> @@ -216,7 +216,7 @@ static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
>  	return 0;
>  }
>  
> -static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
> +static void gmc_v6_0_vram_sysvm_location(struct amdgpu_device *adev,
>  				       struct amdgpu_mc *mc)
>  {
>  	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
> @@ -228,8 +228,8 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
>  		mc->mc_vram_size = 0xFFC0000000ULL;
>  	}
>  	amdgpu_vram_location(adev, &adev->mc, base);
> -	adev->mc.gtt_base_align = 0;
> -	amdgpu_gtt_location(adev, mc);
> +	adev->mc.sysvm_base_align = 0;
> +	amdgpu_sysvm_location(adev, mc);
>  }
>  
>  static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
> @@ -321,8 +321,8 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
>  	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
>  	adev->mc.visible_vram_size = adev->mc.aper_size;
>  
> -	amdgpu_gart_set_defaults(adev);
> -	gmc_v6_0_vram_gtt_location(adev, &adev->mc);
> +	amdgpu_sysvm_set_defaults(adev);
> +	gmc_v6_0_vram_sysvm_location(adev, &adev->mc);
>  
>  	return 0;
>  }
> @@ -447,15 +447,15 @@ static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
>  	}
>  }
>  
> -static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
> +static int gmc_v6_0_sysvm_enable(struct amdgpu_device *adev)
>  {
>  	int r, i;
>  
> -	if (adev->gart.robj == NULL) {
> +	if (adev->sysvm.robj == NULL) {
>  		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
>  		return -EINVAL;
>  	}
> -	r = amdgpu_gart_table_vram_pin(adev);
> +	r = amdgpu_sysvm_table_vram_pin(adev);
>  	if (r)
>  		return r;
>  	/* Setup TLB control */
> @@ -482,9 +482,9 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
>  	       (4UL << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
>  	       (4UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
>  	/* setup context0 */
> -	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
> -	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
> -	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
> +	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.sysvm_start >> 12);
> +	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.sysvm_end >> 12);
> +	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->sysvm.table_addr >> 12);
>  	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
>  			(u32)(adev->dummy_page.addr >> 12));
>  	WREG32(mmVM_CONTEXT0_CNTL2, 0);
> @@ -508,10 +508,10 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
>  	for (i = 1; i < 16; i++) {
>  		if (i < 8)
>  			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
> -			       adev->gart.table_addr >> 12);
> +			       adev->sysvm.table_addr >> 12);
>  		else
>  			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
> -			       adev->gart.table_addr >> 12);
> +			       adev->sysvm.table_addr >> 12);
>  	}
>  
>  	/* enable context1-15 */
> @@ -530,9 +530,9 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
>  
>  	gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
>  	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
> -		 (unsigned)(adev->mc.gtt_size >> 20),
> -		 (unsigned long long)adev->gart.table_addr);
> -	adev->gart.ready = true;
> +		 (unsigned)(adev->mc.sysvm_size >> 20),
> +		 (unsigned long long)adev->sysvm.table_addr);
> +	adev->sysvm.ready = true;
>  	return 0;
>  }
>  
> @@ -540,19 +540,19 @@ static int gmc_v6_0_gart_init(struct amdgpu_device *adev)
>  {
>  	int r;
>  
> -	if (adev->gart.robj) {
> +	if (adev->sysvm.robj) {
>  		dev_warn(adev->dev, "gmc_v6_0 PCIE GART already initialized\n");
>  		return 0;
>  	}
> -	r = amdgpu_gart_init(adev);
> +	r = amdgpu_sysvm_init(adev);
>  	if (r)
>  		return r;
> -	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
> -	adev->gart.gart_pte_flags = 0;
> -	return amdgpu_gart_table_vram_alloc(adev);
> +	adev->sysvm.table_size = adev->sysvm.num_gpu_pages * 8;
> +	adev->sysvm.sysvm_pte_flags = 0;
> +	return amdgpu_sysvm_table_vram_alloc(adev);
>  }
>  
> -static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
> +static void gmc_v6_0_sysvm_disable(struct amdgpu_device *adev)
>  {
>  	/*unsigned i;
>  
> @@ -582,13 +582,13 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
>  	WREG32(mmVM_L2_CNTL3,
>  	       VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
>  	       (0UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
> -	amdgpu_gart_table_vram_unpin(adev);
> +	amdgpu_sysvm_table_vram_unpin(adev);
>  }
>  
>  static void gmc_v6_0_gart_fini(struct amdgpu_device *adev)
>  {
> -	amdgpu_gart_table_vram_free(adev);
> -	amdgpu_gart_fini(adev);
> +	amdgpu_sysvm_table_vram_free(adev);
> +	amdgpu_sysvm_fini(adev);
>  }
>  
>  static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
> @@ -762,7 +762,7 @@ static int gmc_v6_0_early_init(void *handle)
>  {
>  	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>  
> -	gmc_v6_0_set_gart_funcs(adev);
> +	gmc_v6_0_set_sysvm_funcs(adev);
>  	gmc_v6_0_set_irq_funcs(adev);
>  
>  	return 0;
> @@ -889,7 +889,7 @@ static int gmc_v6_0_hw_init(void *handle)
>  		}
>  	}
>  
> -	r = gmc_v6_0_gart_enable(adev);
> +	r = gmc_v6_0_sysvm_enable(adev);
>  	if (r)
>  		return r;
>  
> @@ -901,7 +901,7 @@ static int gmc_v6_0_hw_fini(void *handle)
>  	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>  
>  	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
> -	gmc_v6_0_gart_disable(adev);
> +	gmc_v6_0_sysvm_disable(adev);
>  
>  	return 0;
>  }
> @@ -1095,7 +1095,7 @@ static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
>  	.set_powergating_state = gmc_v6_0_set_powergating_state,
>  };
>  
> -static const struct amdgpu_gart_funcs gmc_v6_0_gart_funcs = {
> +static const struct amdgpu_sysvm_funcs gmc_v6_0_sysvm_funcs = {
>  	.flush_gpu_tlb = gmc_v6_0_gart_flush_gpu_tlb,
>  	.set_pte_pde = gmc_v6_0_gart_set_pte_pde,
>  	.set_prt = gmc_v6_0_set_prt,
> @@ -1108,10 +1108,10 @@ static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
>  	.process = gmc_v6_0_process_interrupt,
>  };
>  
> -static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev)
> +static void gmc_v6_0_set_sysvm_funcs(struct amdgpu_device *adev)
>  {
> -	if (adev->gart.gart_funcs == NULL)
> -		adev->gart.gart_funcs = &gmc_v6_0_gart_funcs;
> +	if (adev->sysvm.sysvm_funcs == NULL)
> +		adev->sysvm.sysvm_funcs = &gmc_v6_0_sysvm_funcs;
>  }
>  
>  static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
> index 15f2c0f..2329bdb 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
> @@ -39,7 +39,7 @@
>  
>  #include "amdgpu_atombios.h"
>  
> -static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
> +static void gmc_v7_0_set_sysvm_funcs(struct amdgpu_device *adev);
>  static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
>  static int gmc_v7_0_wait_for_idle(void *handle);
>  
> @@ -231,7 +231,7 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
>  	return 0;
>  }
>  
> -static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
> +static void gmc_v7_0_vram_sysvm_location(struct amdgpu_device *adev,
>  				       struct amdgpu_mc *mc)
>  {
>  	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
> @@ -244,8 +244,8 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
>  		mc->mc_vram_size = 0xFFC0000000ULL;
>  	}
>  	amdgpu_vram_location(adev, &adev->mc, base);
> -	adev->mc.gtt_base_align = 0;
> -	amdgpu_gtt_location(adev, mc);
> +	adev->mc.sysvm_base_align = 0;
> +	amdgpu_sysvm_location(adev, mc);
>  }
>  
>  /**
> @@ -373,8 +373,8 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
>  	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
>  		adev->mc.visible_vram_size = adev->mc.real_vram_size;
>  
> -	amdgpu_gart_set_defaults(adev);
> -	gmc_v7_0_vram_gtt_location(adev, &adev->mc);
> +	amdgpu_sysvm_set_defaults(adev);
> +	gmc_v7_0_vram_sysvm_location(adev, &adev->mc);
>  
>  	return 0;
>  }
> @@ -536,7 +536,7 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
>  }
>  
>  /**
> - * gmc_v7_0_gart_enable - gart enable
> + * gmc_v7_0_sysvm_enable - gart enable
>   *
>   * @adev: amdgpu_device pointer
>   *
> @@ -546,16 +546,16 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
>   * and GPUVM for FSA64 clients (CIK).
>   * Returns 0 for success, errors for failure.
>   */
> -static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
> +static int gmc_v7_0_sysvm_enable(struct amdgpu_device *adev)
>  {
>  	int r, i;
>  	u32 tmp;
>  
> -	if (adev->gart.robj == NULL) {
> +	if (adev->sysvm.robj == NULL) {
>  		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
>  		return -EINVAL;
>  	}
> -	r = amdgpu_gart_table_vram_pin(adev);
> +	r = amdgpu_sysvm_table_vram_pin(adev);
>  	if (r)
>  		return r;
>  	/* Setup TLB control */
> @@ -585,9 +585,9 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
>  	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
>  	WREG32(mmVM_L2_CNTL3, tmp);
>  	/* setup context0 */
> -	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
> -	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
> -	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
> +	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.sysvm_start >> 12);
> +	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.sysvm_end >> 12);
> +	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->sysvm.table_addr >> 12);
>  	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
>  			(u32)(adev->dummy_page.addr >> 12));
>  	WREG32(mmVM_CONTEXT0_CNTL2, 0);
> @@ -611,10 +611,10 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
>  	for (i = 1; i < 16; i++) {
>  		if (i < 8)
>  			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
> -			       adev->gart.table_addr >> 12);
> +			       adev->sysvm.table_addr >> 12);
>  		else
>  			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
> -			       adev->gart.table_addr >> 12);
> +			       adev->sysvm.table_addr >> 12);
>  	}
>  
>  	/* enable context1-15 */
> @@ -640,9 +640,9 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
>  
>  	gmc_v7_0_gart_flush_gpu_tlb(adev, 0);
>  	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
> -		 (unsigned)(adev->mc.gtt_size >> 20),
> -		 (unsigned long long)adev->gart.table_addr);
> -	adev->gart.ready = true;
> +		 (unsigned)(adev->mc.sysvm_size >> 20),
> +		 (unsigned long long)adev->sysvm.table_addr);
> +	adev->sysvm.ready = true;
>  	return 0;
>  }
>  
> @@ -650,27 +650,27 @@ static int gmc_v7_0_gart_init(struct amdgpu_device *adev)
>  {
>  	int r;
>  
> -	if (adev->gart.robj) {
> +	if (adev->sysvm.robj) {
>  		WARN(1, "R600 PCIE GART already initialized\n");
>  		return 0;
>  	}
>  	/* Initialize common gart structure */
> -	r = amdgpu_gart_init(adev);
> +	r = amdgpu_sysvm_init(adev);
>  	if (r)
>  		return r;
> -	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
> -	adev->gart.gart_pte_flags = 0;
> -	return amdgpu_gart_table_vram_alloc(adev);
> +	adev->sysvm.table_size = adev->sysvm.num_gpu_pages * 8;
> +	adev->sysvm.sysvm_pte_flags = 0;
> +	return amdgpu_sysvm_table_vram_alloc(adev);
>  }
>  
>  /**
> - * gmc_v7_0_gart_disable - gart disable
> + * gmc_v7_0_sysvm_disable - gart disable
>   *
>   * @adev: amdgpu_device pointer
>   *
>   * This disables all VM page tables (CIK).
>   */
> -static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
> +static void gmc_v7_0_sysvm_disable(struct amdgpu_device *adev)
>  {
>  	u32 tmp;
>  
> @@ -688,7 +688,7 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
>  	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
>  	WREG32(mmVM_L2_CNTL, tmp);
>  	WREG32(mmVM_L2_CNTL2, 0);
> -	amdgpu_gart_table_vram_unpin(adev);
> +	amdgpu_sysvm_table_vram_unpin(adev);
>  }
>  
>  /**
> @@ -700,8 +700,8 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
>   */
>  static void gmc_v7_0_gart_fini(struct amdgpu_device *adev)
>  {
> -	amdgpu_gart_table_vram_free(adev);
> -	amdgpu_gart_fini(adev);
> +	amdgpu_sysvm_table_vram_free(adev);
> +	amdgpu_sysvm_fini(adev);
>  }
>  
>  /**
> @@ -885,7 +885,7 @@ static int gmc_v7_0_early_init(void *handle)
>  {
>  	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>  
> -	gmc_v7_0_set_gart_funcs(adev);
> +	gmc_v7_0_set_sysvm_funcs(adev);
>  	gmc_v7_0_set_irq_funcs(adev);
>  
>  	adev->mc.shared_aperture_start = 0x2000000000000000ULL;
> @@ -1036,7 +1036,7 @@ static int gmc_v7_0_hw_init(void *handle)
>  		}
>  	}
>  
> -	r = gmc_v7_0_gart_enable(adev);
> +	r = gmc_v7_0_sysvm_enable(adev);
>  	if (r)
>  		return r;
>  
> @@ -1048,7 +1048,7 @@ static int gmc_v7_0_hw_fini(void *handle)
>  	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>  
>  	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
> -	gmc_v7_0_gart_disable(adev);
> +	gmc_v7_0_sysvm_disable(adev);
>  
>  	return 0;
>  }
> @@ -1270,7 +1270,7 @@ static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
>  	.set_powergating_state = gmc_v7_0_set_powergating_state,
>  };
>  
> -static const struct amdgpu_gart_funcs gmc_v7_0_gart_funcs = {
> +static const struct amdgpu_sysvm_funcs gmc_v7_0_sysvm_funcs = {
>  	.flush_gpu_tlb = gmc_v7_0_gart_flush_gpu_tlb,
>  	.set_pte_pde = gmc_v7_0_gart_set_pte_pde,
>  	.set_prt = gmc_v7_0_set_prt,
> @@ -1283,10 +1283,10 @@ static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
>  	.process = gmc_v7_0_process_interrupt,
>  };
>  
> -static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev)
> +static void gmc_v7_0_set_sysvm_funcs(struct amdgpu_device *adev)
>  {
> -	if (adev->gart.gart_funcs == NULL)
> -		adev->gart.gart_funcs = &gmc_v7_0_gart_funcs;
> +	if (adev->sysvm.sysvm_funcs == NULL)
> +		adev->sysvm.sysvm_funcs = &gmc_v7_0_sysvm_funcs;
>  }
>  
>  static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
> index 213af65..cf8f8d2 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
> @@ -41,7 +41,7 @@
>  #include "amdgpu_atombios.h"
>  
>  
> -static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
> +static void gmc_v8_0_set_sysvm_funcs(struct amdgpu_device *adev);
>  static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
>  static int gmc_v8_0_wait_for_idle(void *handle);
>  
> @@ -393,7 +393,7 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
>  	return 0;
>  }
>  
> -static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
> +static void gmc_v8_0_vram_sysvm_location(struct amdgpu_device *adev,
>  				       struct amdgpu_mc *mc)
>  {
>  	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
> @@ -406,8 +406,8 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
>  		mc->mc_vram_size = 0xFFC0000000ULL;
>  	}
>  	amdgpu_vram_location(adev, &adev->mc, base);
> -	adev->mc.gtt_base_align = 0;
> -	amdgpu_gtt_location(adev, mc);
> +	adev->mc.sysvm_base_align = 0;
> +	amdgpu_sysvm_location(adev, mc);
>  }
>  
>  /**
> @@ -535,8 +535,8 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
>  	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
>  		adev->mc.visible_vram_size = adev->mc.real_vram_size;
>  
> -	amdgpu_gart_set_defaults(adev);
> -	gmc_v8_0_vram_gtt_location(adev, &adev->mc);
> +	amdgpu_sysvm_set_defaults(adev);
> +	gmc_v8_0_vram_sysvm_location(adev, &adev->mc);
>  
>  	return 0;
>  }
> @@ -722,7 +722,7 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
>  }
>  
>  /**
> - * gmc_v8_0_gart_enable - gart enable
> + * gmc_v8_0_sysvm_enable - gart enable
>   *
>   * @adev: amdgpu_device pointer
>   *
> @@ -732,16 +732,16 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
>   * and GPUVM for FSA64 clients (CIK).
>   * Returns 0 for success, errors for failure.
>   */
> -static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
> +static int gmc_v8_0_sysvm_enable(struct amdgpu_device *adev)
>  {
>  	int r, i;
>  	u32 tmp;
>  
> -	if (adev->gart.robj == NULL) {
> +	if (adev->sysvm.robj == NULL) {
>  		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
>  		return -EINVAL;
>  	}
> -	r = amdgpu_gart_table_vram_pin(adev);
> +	r = amdgpu_sysvm_table_vram_pin(adev);
>  	if (r)
>  		return r;
>  	/* Setup TLB control */
> @@ -787,9 +787,9 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
>  	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
>  	WREG32(mmVM_L2_CNTL4, tmp);
>  	/* setup context0 */
> -	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
> -	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
> -	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
> +	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.sysvm_start >> 12);
> +	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.sysvm_end >> 12);
> +	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->sysvm.table_addr >> 12);
>  	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
>  			(u32)(adev->dummy_page.addr >> 12));
>  	WREG32(mmVM_CONTEXT0_CNTL2, 0);
> @@ -813,10 +813,10 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
>  	for (i = 1; i < 16; i++) {
>  		if (i < 8)
>  			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
> -			       adev->gart.table_addr >> 12);
> +			       adev->sysvm.table_addr >> 12);
>  		else
>  			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
> -			       adev->gart.table_addr >> 12);
> +			       adev->sysvm.table_addr >> 12);
>  	}
>  
>  	/* enable context1-15 */
> @@ -843,9 +843,9 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
>  
>  	gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
>  	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
> -		 (unsigned)(adev->mc.gtt_size >> 20),
> -		 (unsigned long long)adev->gart.table_addr);
> -	adev->gart.ready = true;
> +		 (unsigned)(adev->mc.sysvm_size >> 20),
> +		 (unsigned long long)adev->sysvm.table_addr);
> +	adev->sysvm.ready = true;
>  	return 0;
>  }
>  
> @@ -853,27 +853,27 @@ static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
>  {
>  	int r;
>  
> -	if (adev->gart.robj) {
> +	if (adev->sysvm.robj) {
>  		WARN(1, "R600 PCIE GART already initialized\n");
>  		return 0;
>  	}
>  	/* Initialize common gart structure */
> -	r = amdgpu_gart_init(adev);
> +	r = amdgpu_sysvm_init(adev);
>  	if (r)
>  		return r;
> -	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
> -	adev->gart.gart_pte_flags = AMDGPU_PTE_EXECUTABLE;
> -	return amdgpu_gart_table_vram_alloc(adev);
> +	adev->sysvm.table_size = adev->sysvm.num_gpu_pages * 8;
> +	adev->sysvm.sysvm_pte_flags = AMDGPU_PTE_EXECUTABLE;
> +	return amdgpu_sysvm_table_vram_alloc(adev);
>  }
>  
>  /**
> - * gmc_v8_0_gart_disable - gart disable
> + * gmc_v8_0_sysvm_disable - gart disable
>   *
>   * @adev: amdgpu_device pointer
>   *
>   * This disables all VM page tables (CIK).
>   */
> -static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
> +static void gmc_v8_0_sysvm_disable(struct amdgpu_device *adev)
>  {
>  	u32 tmp;
>  
> @@ -891,7 +891,7 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
>  	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
>  	WREG32(mmVM_L2_CNTL, tmp);
>  	WREG32(mmVM_L2_CNTL2, 0);
> -	amdgpu_gart_table_vram_unpin(adev);
> +	amdgpu_sysvm_table_vram_unpin(adev);
>  }
>  
>  /**
> @@ -903,8 +903,8 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
>   */
>  static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
>  {
> -	amdgpu_gart_table_vram_free(adev);
> -	amdgpu_gart_fini(adev);
> +	amdgpu_sysvm_table_vram_free(adev);
> +	amdgpu_sysvm_fini(adev);
>  }
>  
>  /**
> @@ -962,7 +962,7 @@ static int gmc_v8_0_early_init(void *handle)
>  {
>  	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>  
> -	gmc_v8_0_set_gart_funcs(adev);
> +	gmc_v8_0_set_sysvm_funcs(adev);
>  	gmc_v8_0_set_irq_funcs(adev);
>  
>  	adev->mc.shared_aperture_start = 0x2000000000000000ULL;
> @@ -1128,7 +1128,7 @@ static int gmc_v8_0_hw_init(void *handle)
>  		}
>  	}
>  
> -	r = gmc_v8_0_gart_enable(adev);
> +	r = gmc_v8_0_sysvm_enable(adev);
>  	if (r)
>  		return r;
>  
> @@ -1140,7 +1140,7 @@ static int gmc_v8_0_hw_fini(void *handle)
>  	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>  
>  	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
> -	gmc_v8_0_gart_disable(adev);
> +	gmc_v8_0_sysvm_disable(adev);
>  
>  	return 0;
>  }
> @@ -1590,7 +1590,7 @@ static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
>  	.get_clockgating_state = gmc_v8_0_get_clockgating_state,
>  };
>  
> -static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = {
> +static const struct amdgpu_sysvm_funcs gmc_v8_0_sysvm_funcs = {
>  	.flush_gpu_tlb = gmc_v8_0_gart_flush_gpu_tlb,
>  	.set_pte_pde = gmc_v8_0_gart_set_pte_pde,
>  	.set_prt = gmc_v8_0_set_prt,
> @@ -1603,10 +1603,10 @@ static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
>  	.process = gmc_v8_0_process_interrupt,
>  };
>  
> -static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev)
> +static void gmc_v8_0_set_sysvm_funcs(struct amdgpu_device *adev)
>  {
> -	if (adev->gart.gart_funcs == NULL)
> -		adev->gart.gart_funcs = &gmc_v8_0_gart_funcs;
> +	if (adev->sysvm.sysvm_funcs == NULL)
> +		adev->sysvm.sysvm_funcs = &gmc_v8_0_sysvm_funcs;
>  }
>  
>  static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
> index dbb43d9..f067465 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
> @@ -365,7 +365,7 @@ static u64 gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, u64 addr)
>  	return addr;
>  }
>  
> -static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
> +static const struct amdgpu_sysvm_funcs gmc_v9_0_sysvm_funcs = {
>  	.flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
>  	.set_pte_pde = gmc_v9_0_gart_set_pte_pde,
>  	.get_invalidate_req = gmc_v9_0_get_invalidate_req,
> @@ -373,17 +373,17 @@ static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
>  	.get_vm_pde = gmc_v9_0_get_vm_pde
>  };
>  
> -static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
> +static void gmc_v9_0_set_sysvm_funcs(struct amdgpu_device *adev)
>  {
> -	if (adev->gart.gart_funcs == NULL)
> -		adev->gart.gart_funcs = &gmc_v9_0_gart_funcs;
> +	if (adev->sysvm.sysvm_funcs == NULL)
> +		adev->sysvm.sysvm_funcs = &gmc_v9_0_sysvm_funcs;
>  }
>  
>  static int gmc_v9_0_early_init(void *handle)
>  {
>  	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>  
> -	gmc_v9_0_set_gart_funcs(adev);
> +	gmc_v9_0_set_sysvm_funcs(adev);
>  	gmc_v9_0_set_irq_funcs(adev);
>  
>  	return 0;
> @@ -412,15 +412,15 @@ static int gmc_v9_0_late_init(void *handle)
>  	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
>  }
>  
> -static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
> +static void gmc_v9_0_vram_sysvm_location(struct amdgpu_device *adev,
>  					struct amdgpu_mc *mc)
>  {
>  	u64 base = 0;
>  	if (!amdgpu_sriov_vf(adev))
>  		base = mmhub_v1_0_get_fb_location(adev);
>  	amdgpu_vram_location(adev, &adev->mc, base);
> -	adev->mc.gtt_base_align = 0;
> -	amdgpu_gtt_location(adev, mc);
> +	adev->mc.sysvm_base_align = 0;
> +	amdgpu_sysvm_location(adev, mc);
>  	/* base offset of vram pages */
>  	if (adev->flags & AMD_IS_APU)
>  		adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
> @@ -494,8 +494,8 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
>  	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
>  		adev->mc.visible_vram_size = adev->mc.real_vram_size;
>  
> -	amdgpu_gart_set_defaults(adev);
> -	gmc_v9_0_vram_gtt_location(adev, &adev->mc);
> +	amdgpu_sysvm_set_defaults(adev);
> +	gmc_v9_0_vram_sysvm_location(adev, &adev->mc);
>  
>  	return 0;
>  }
> @@ -504,18 +504,18 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
>  {
>  	int r;
>  
> -	if (adev->gart.robj) {
> +	if (adev->sysvm.robj) {
>  		WARN(1, "VEGA10 PCIE GART already initialized\n");
>  		return 0;
>  	}
>  	/* Initialize common gart structure */
> -	r = amdgpu_gart_init(adev);
> +	r = amdgpu_sysvm_init(adev);
>  	if (r)
>  		return r;
> -	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
> -	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
> +	adev->sysvm.table_size = adev->sysvm.num_gpu_pages * 8;
> +	adev->sysvm.sysvm_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
>  				 AMDGPU_PTE_EXECUTABLE;
> -	return amdgpu_gart_table_vram_alloc(adev);
> +	return amdgpu_sysvm_table_vram_alloc(adev);
>  }
>  
>  static int gmc_v9_0_sw_init(void *handle)
> @@ -640,8 +640,8 @@ static int gmc_v9_0_sw_init(void *handle)
>   */
>  static void gmc_v9_0_gart_fini(struct amdgpu_device *adev)
>  {
> -	amdgpu_gart_table_vram_free(adev);
> -	amdgpu_gart_fini(adev);
> +	amdgpu_sysvm_table_vram_free(adev);
> +	amdgpu_sysvm_fini(adev);
>  }
>  
>  static int gmc_v9_0_sw_fini(void *handle)
> @@ -669,11 +669,11 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
>  }
>  
>  /**
> - * gmc_v9_0_gart_enable - gart enable
> + * gmc_v9_0_sysvm_enable - gart enable
>   *
>   * @adev: amdgpu_device pointer
>   */
> -static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
> +static int gmc_v9_0_sysvm_enable(struct amdgpu_device *adev)
>  {
>  	int r;
>  	bool value;
> @@ -683,11 +683,11 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
>  		golden_settings_vega10_hdp,
>  		(const u32)ARRAY_SIZE(golden_settings_vega10_hdp));
>  
> -	if (adev->gart.robj == NULL) {
> +	if (adev->sysvm.robj == NULL) {
>  		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
>  		return -EINVAL;
>  	}
> -	r = amdgpu_gart_table_vram_pin(adev);
> +	r = amdgpu_sysvm_table_vram_pin(adev);
>  	if (r)
>  		return r;
>  
> @@ -706,11 +706,11 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
>  		break;
>  	}
>  
> -	r = gfxhub_v1_0_gart_enable(adev);
> +	r = gfxhub_v1_0_sysvm_enable(adev);
>  	if (r)
>  		return r;
>  
> -	r = mmhub_v1_0_gart_enable(adev);
> +	r = mmhub_v1_0_sysvm_enable(adev);
>  	if (r)
>  		return r;
>  
> @@ -733,9 +733,9 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
>  	gmc_v9_0_gart_flush_gpu_tlb(adev, 0);
>  
>  	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
> -		 (unsigned)(adev->mc.gtt_size >> 20),
> -		 (unsigned long long)adev->gart.table_addr);
> -	adev->gart.ready = true;
> +		 (unsigned)(adev->mc.sysvm_size >> 20),
> +		 (unsigned long long)adev->sysvm.table_addr);
> +	adev->sysvm.ready = true;
>  	return 0;
>  }
>  
> @@ -747,23 +747,23 @@ static int gmc_v9_0_hw_init(void *handle)
>  	/* The sequence of these two function calls matters.*/
>  	gmc_v9_0_init_golden_registers(adev);
>  
> -	r = gmc_v9_0_gart_enable(adev);
> +	r = gmc_v9_0_sysvm_enable(adev);
>  
>  	return r;
>  }
>  
>  /**
> - * gmc_v9_0_gart_disable - gart disable
> + * gmc_v9_0_sysvm_disable - gart disable
>   *
>   * @adev: amdgpu_device pointer
>   *
>   * This disables all VM page table.
>   */
> -static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
> +static void gmc_v9_0_sysvm_disable(struct amdgpu_device *adev)
>  {
> -	gfxhub_v1_0_gart_disable(adev);
> -	mmhub_v1_0_gart_disable(adev);
> -	amdgpu_gart_table_vram_unpin(adev);
> +	gfxhub_v1_0_sysvm_disable(adev);
> +	mmhub_v1_0_sysvm_disable(adev);
> +	amdgpu_sysvm_table_vram_unpin(adev);
>  }
>  
>  static int gmc_v9_0_hw_fini(void *handle)
> @@ -777,7 +777,7 @@ static int gmc_v9_0_hw_fini(void *handle)
>  	}
>  
>  	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
> -	gmc_v9_0_gart_disable(adev);
> +	gmc_v9_0_sysvm_disable(adev);
>  
>  	return 0;
>  }
> diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
> index 9804318..fbc8f6e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
> @@ -51,8 +51,8 @@ static void mmhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
>  {
>  	uint64_t value;
>  
> -	BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL));
> -	value = adev->gart.table_addr - adev->mc.vram_start +
> +	BUG_ON(adev->sysvm.table_addr & (~0x0000FFFFFFFFF000ULL));
> +	value = adev->sysvm.table_addr - adev->mc.vram_start +
>  		adev->vm_manager.vram_base_offset;
>  	value &= 0x0000FFFFFFFFF000ULL;
>  	value |= 0x1; /* valid bit */
> @@ -69,14 +69,14 @@ static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
>  	mmhub_v1_0_init_gart_pt_regs(adev);
>  
>  	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
> -		     (u32)(adev->mc.gtt_start >> 12));
> +		     (u32)(adev->mc.sysvm_start >> 12));
>  	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
> -		     (u32)(adev->mc.gtt_start >> 44));
> +		     (u32)(adev->mc.sysvm_start >> 44));
>  
>  	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
> -		     (u32)(adev->mc.gtt_end >> 12));
> +		     (u32)(adev->mc.sysvm_end >> 12));
>  	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
> -		     (u32)(adev->mc.gtt_end >> 44));
> +		     (u32)(adev->mc.sysvm_end >> 44));
>  }
>  
>  static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
> @@ -462,7 +462,7 @@ void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
>  	}
>  }
>  
> -int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
> +int mmhub_v1_0_sysvm_enable(struct amdgpu_device *adev)
>  {
>  	if (amdgpu_sriov_vf(adev)) {
>  		/*
> @@ -490,7 +490,7 @@ int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
>  	return 0;
>  }
>  
> -void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
> +void mmhub_v1_0_sysvm_disable(struct amdgpu_device *adev)
>  {
>  	u32 tmp;
>  	u32 i;
> diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
> index 57bb940..23128e5 100644
> --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
> +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
> @@ -24,8 +24,8 @@
>  #define __MMHUB_V1_0_H__
>  
>  u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev);
> -int mmhub_v1_0_gart_enable(struct amdgpu_device *adev);
> -void mmhub_v1_0_gart_disable(struct amdgpu_device *adev);
> +int mmhub_v1_0_sysvm_enable(struct amdgpu_device *adev);
> +void mmhub_v1_0_sysvm_disable(struct amdgpu_device *adev);
>  void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
>  					 bool value);
>  void mmhub_v1_0_init(struct amdgpu_device *adev);
> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
> index 4a65697..056b169 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
> @@ -1121,10 +1121,10 @@ static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
>  					 unsigned vm_id, uint64_t pd_addr)
>  {
>  	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
> -	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
> +	uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
>  	unsigned eng = ring->vm_inv_eng;
>  
> -	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
> +	pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
>  	pd_addr |= AMDGPU_PTE_VALID;
>  
>  	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
> diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
> index 987b958..95913fd 100644
> --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
> @@ -1312,11 +1312,11 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
>  					unsigned vm_id, uint64_t pd_addr)
>  {
>  	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
> -	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
> +	uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
>  	uint32_t data0, data1, mask;
>  	unsigned eng = ring->vm_inv_eng;
>  
> -	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
> +	pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
>  	pd_addr |= AMDGPU_PTE_VALID;
>  
>  	data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
> @@ -1353,10 +1353,10 @@ static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
>  			 unsigned int vm_id, uint64_t pd_addr)
>  {
>  	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
> -	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
> +	uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
>  	unsigned eng = ring->vm_inv_eng;
>  
> -	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
> +	pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
>  	pd_addr |= AMDGPU_PTE_VALID;
>  
>  	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
> diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
> index 1ecd6bb..b869423 100644
> --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
> @@ -952,10 +952,10 @@ static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
>  			 unsigned int vm_id, uint64_t pd_addr)
>  {
>  	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
> -	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
> +	uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
>  	unsigned eng = ring->vm_inv_eng;
>  
> -	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
> +	pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
>  	pd_addr |= AMDGPU_PTE_VALID;
>  
>  	amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
> diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
> index 21e7b88..2ca49af 100644
> --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
> @@ -878,11 +878,11 @@ static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
>  					unsigned vm_id, uint64_t pd_addr)
>  {
>  	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
> -	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
> +	uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
>  	uint32_t data0, data1, mask;
>  	unsigned eng = ring->vm_inv_eng;
>  
> -	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
> +	pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
>  	pd_addr |= AMDGPU_PTE_VALID;
>  
>  	data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
> @@ -1010,10 +1010,10 @@ static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
>  			 unsigned int vm_id, uint64_t pd_addr)
>  {
>  	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
> -	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
> +	uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
>  	unsigned eng = ring->vm_inv_eng;
>  
> -	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
> +	pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
>  	pd_addr |= AMDGPU_PTE_VALID;
>  
>  	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
> -- 
> 2.7.4
> 
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 07/11] drm/amdgpu: rename GART to SYSVM
  2017-07-04  8:09       ` Huang Rui
@ 2017-07-04  8:46         ` Christian König
  0 siblings, 0 replies; 30+ messages in thread
From: Christian König @ 2017-07-04  8:46 UTC (permalink / raw)
  To: Huang Rui; +Cc: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

On 04.07.2017 at 10:09, Huang Rui wrote:
> On Mon, Jul 03, 2017 at 11:44:32AM +0200, Christian König wrote:
>> From: Christian König <christian.koenig@amd.com>
>>
>> Just mass rename all names related to the hardware GART/GTT functions to SYSVM.
>>
>> The names of symbols related to the TTM TT domain stay the same.
>>
>> This should improve the distinction between the two.
> Christian, this just renames the GART_DEBUGFS interface, right?
> You know, amdgpu_gtt_mm and amdgpu_gtt are very useful when we are
> debugging and checking the contents of GART memory. Is there any
> functional change in this patch set?

No, it's just the name. amdgpu_gtt_mm stays the same, but amdgpu_gtt is 
renamed to amdgpu_sysvm.

It's just debugfs, so that isn't a problem for upstreaming. But if it 
causes any issues with our tools, I'm fine with keeping the name unchanged.
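
To make the scope concrete, here is a minimal before/after sketch of how
the rename shows up in callers (all names are taken from the diff below;
this is purely illustrative, not an extra patch):

    /* before this series: hardware GART/GTT naming */
    amdgpu_gart_flush_gpu_tlb(adev, 0);
    r = amdgpu_gart_bind(adev, offset, pages, pagelist, dma_addr, flags);

    /* after this series: SYSVM naming; the TTM side, e.g.
     * AMDGPU_GEM_DOMAIN_GTT and the amdgpu_gtt_mm debugfs file,
     * keeps its names.
     */
    amdgpu_sysvm_flush_gpu_tlb(adev, 0);
    r = amdgpu_sysvm_bind(adev, offset, pages, pagelist, dma_addr, flags);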

Regards,
Christian.

>
> Thanks,
> Ray
>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
>> ---
>>   drivers/gpu/drm/amd/amdgpu/Kconfig         |   9 +-
>>   drivers/gpu/drm/amd/amdgpu/Makefile        |   2 +-
>>   drivers/gpu/drm/amd/amdgpu/amdgpu.h        |  58 ++--
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  48 ++--
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c   | 423 -----------------------------
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_job.c    |   2 +-
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c  | 423 +++++++++++++++++++++++++++++
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_test.c   |  84 +++---
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c    |  76 +++---
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h    |   2 +-
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c     |  30 +-
>>   drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c      |   4 +-
>>   drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c   |  16 +-
>>   drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h   |   4 +-
>>   drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c      |  66 ++---
>>   drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c      |  70 ++---
>>   drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c      |  70 ++---
>>   drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c      |  66 ++---
>>   drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c    |  16 +-
>>   drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h    |   4 +-
>>   drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c     |   4 +-
>>   drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c      |   8 +-
>>   drivers/gpu/drm/amd/amdgpu/vce_v4_0.c      |   4 +-
>>   drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c      |   8 +-
>>   24 files changed, 749 insertions(+), 748 deletions(-)
>>   delete mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
>>   create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
>> index e8af1f5..ebbac01 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/Kconfig
>> +++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
>> @@ -31,14 +31,15 @@ config DRM_AMDGPU_USERPTR
>>   	  This option selects CONFIG_MMU_NOTIFIER if it isn't already
>>   	  selected to enabled full userptr support.
>>   
>> -config DRM_AMDGPU_GART_DEBUGFS
>> -	bool "Allow GART access through debugfs"
>> +config DRM_AMDGPU_SYSVM_DEBUGFS
>> +	bool "Allow SYSVM access through debugfs"
>>   	depends on DRM_AMDGPU
>>   	depends on DEBUG_FS
>>   	default n
>>   	help
>> -	  Selecting this option creates a debugfs file to inspect the mapped
>> -	  pages. Uses more memory for housekeeping, enable only for debugging.
>> +	  Selecting this option creates a debugfs file to inspect the SYSVM
>> +	  mapped pages. Uses more memory for housekeeping, enable only for
>> +	  debugging.
>>   
>>   source "drivers/gpu/drm/amd/acp/Kconfig"
>>   source "drivers/gpu/drm/amd/display/Kconfig"
>> diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
>> index 3661110..d80d49f 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/Makefile
>> +++ b/drivers/gpu/drm/amd/amdgpu/Makefile
>> @@ -22,7 +22,7 @@ amdgpu-y := amdgpu_drv.o
>>   # add KMS driver
>>   amdgpu-y += amdgpu_device.o amdgpu_kms.o \
>>   	amdgpu_atombios.o atombios_crtc.o amdgpu_connectors.o \
>> -	atom.o amdgpu_fence.o amdgpu_ttm.o amdgpu_object.o amdgpu_gart.o \
>> +	atom.o amdgpu_fence.o amdgpu_ttm.o amdgpu_object.o amdgpu_sysvm.o \
>>   	amdgpu_encoders.o amdgpu_display.o amdgpu_i2c.o \
>>   	amdgpu_fb.o amdgpu_gem.o amdgpu_ring.o \
>>   	amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o amdgpu_test.o \
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> index 4a2b33d..abe191f 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> @@ -305,7 +305,7 @@ struct amdgpu_vm_pte_funcs {
>>   };
>>   
>>   /* provided by the gmc block */
>> -struct amdgpu_gart_funcs {
>> +struct amdgpu_sysvm_funcs {
>>   	/* flush the vm tlb via mmio */
>>   	void (*flush_gpu_tlb)(struct amdgpu_device *adev,
>>   			      uint32_t vmid);
>> @@ -543,39 +543,39 @@ struct amdgpu_mc;
>>   #define AMDGPU_GPU_PAGE_SHIFT 12
>>   #define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
>>   
>> -struct amdgpu_gart {
>> +struct amdgpu_sysvm {
>>   	dma_addr_t			table_addr;
>>   	struct amdgpu_bo		*robj;
>>   	void				*ptr;
>>   	unsigned			num_gpu_pages;
>>   	unsigned			num_cpu_pages;
>>   	unsigned			table_size;
>> -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
>> +#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
>>   	struct page			**pages;
>>   #endif
>>   	bool				ready;
>>   
>>   	/* Asic default pte flags */
>> -	uint64_t			gart_pte_flags;
>> +	uint64_t			sysvm_pte_flags;
>>   
>> -	const struct amdgpu_gart_funcs *gart_funcs;
>> +	const struct amdgpu_sysvm_funcs *sysvm_funcs;
>>   };
>>   
>> -void amdgpu_gart_set_defaults(struct amdgpu_device *adev);
>> -int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
>> -void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
>> -int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
>> -void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
>> -int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
>> -void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
>> -int amdgpu_gart_init(struct amdgpu_device *adev);
>> -void amdgpu_gart_fini(struct amdgpu_device *adev);
>> -int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
>> +void amdgpu_sysvm_set_defaults(struct amdgpu_device *adev);
>> +int amdgpu_sysvm_table_ram_alloc(struct amdgpu_device *adev);
>> +void amdgpu_sysvm_table_ram_free(struct amdgpu_device *adev);
>> +int amdgpu_sysvm_table_vram_alloc(struct amdgpu_device *adev);
>> +void amdgpu_sysvm_table_vram_free(struct amdgpu_device *adev);
>> +int amdgpu_sysvm_table_vram_pin(struct amdgpu_device *adev);
>> +void amdgpu_sysvm_table_vram_unpin(struct amdgpu_device *adev);
>> +int amdgpu_sysvm_init(struct amdgpu_device *adev);
>> +void amdgpu_sysvm_fini(struct amdgpu_device *adev);
>> +int amdgpu_sysvm_unbind(struct amdgpu_device *adev, uint64_t offset,
>>   			int pages);
>> -int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
>> +int amdgpu_sysvm_map(struct amdgpu_device *adev, uint64_t offset,
>>   		    int pages, dma_addr_t *dma_addr, uint64_t flags,
>>   		    void *dst);
>> -int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
>> +int amdgpu_sysvm_bind(struct amdgpu_device *adev, uint64_t offset,
>>   		     int pages, struct page **pagelist,
>>   		     dma_addr_t *dma_addr, uint64_t flags);
>>   int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
>> @@ -604,15 +604,15 @@ struct amdgpu_mc {
>>   	 * about vram size near mc fb location */
>>   	u64			mc_vram_size;
>>   	u64			visible_vram_size;
>> -	u64			gtt_size;
>> -	u64			gtt_start;
>> -	u64			gtt_end;
>> +	u64			sysvm_size;
>> +	u64			sysvm_start;
>> +	u64			sysvm_end;
>>   	u64			vram_start;
>>   	u64			vram_end;
>>   	unsigned		vram_width;
>>   	u64			real_vram_size;
>>   	int			vram_mtrr;
>> -	u64                     gtt_base_align;
>> +	u64                     sysvm_base_align;
>>   	u64                     mc_mask;
>>   	const struct firmware   *fw;	/* MC firmware */
>>   	uint32_t                fw_version;
>> @@ -1575,7 +1575,7 @@ struct amdgpu_device {
>>   
>>   	/* MC */
>>   	struct amdgpu_mc		mc;
>> -	struct amdgpu_gart		gart;
>> +	struct amdgpu_sysvm		sysvm;
>>   	struct amdgpu_dummy_page	dummy_page;
>>   	struct amdgpu_vm_manager	vm_manager;
>>   	struct amdgpu_vmhub             vmhub[AMDGPU_MAX_VMHUBS];
>> @@ -1686,8 +1686,8 @@ struct amdgpu_device {
>>   	struct list_head                shadow_list;
>>   	struct mutex                    shadow_list_lock;
>>   	/* link all gtt */
>> -	spinlock_t			gtt_list_lock;
>> -	struct list_head                gtt_list;
>> +	spinlock_t			sysvm_list_lock;
>> +	struct list_head                sysvm_list;
>>   	/* keep an lru list of rings by HW IP */
>>   	struct list_head		ring_lru_list;
>>   	spinlock_t			ring_lru_list_lock;
>> @@ -1835,13 +1835,13 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
>>   #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
>>   #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
>>   #define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
>> -#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
>> -#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
>> -#define amdgpu_gart_get_vm_pde(adev, addr) (adev)->gart.gart_funcs->get_vm_pde((adev), (addr))
>> +#define amdgpu_sysvm_flush_gpu_tlb(adev, vmid) (adev)->sysvm.sysvm_funcs->flush_gpu_tlb((adev), (vmid))
>> +#define amdgpu_sysvm_set_pte_pde(adev, pt, idx, addr, flags) (adev)->sysvm.sysvm_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
>> +#define amdgpu_sysvm_get_vm_pde(adev, addr) (adev)->sysvm.sysvm_funcs->get_vm_pde((adev), (addr))
>>   #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
>>   #define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
>>   #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
>> -#define amdgpu_vm_get_pte_flags(adev, flags) (adev)->gart.gart_funcs->get_vm_pte_flags((adev),(flags))
>> +#define amdgpu_vm_get_pte_flags(adev, flags) (adev)->sysvm.sysvm_funcs->get_vm_pte_flags((adev),(flags))
>>   #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
>>   #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
>>   #define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
>> @@ -1908,7 +1908,7 @@ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
>>   uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
>>   				 struct ttm_mem_reg *mem);
>>   void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
>> -void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
>> +void amdgpu_sysvm_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
>>   void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
>>   int amdgpu_ttm_init(struct amdgpu_device *adev);
>>   void amdgpu_ttm_fini(struct amdgpu_device *adev);
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> index 5b1220f..46a82d3 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> @@ -667,39 +667,39 @@ void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64
>>   }
>>   
>>   /**
>> - * amdgpu_gtt_location - try to find GTT location
>> + * amdgpu_sysvm_location - try to find SYSVM location
>>    * @adev: amdgpu device structure holding all necessary informations
>>    * @mc: memory controller structure holding memory informations
>>    *
>> - * Function will place try to place GTT before or after VRAM.
>> + * Function will try to place SYSVM before or after VRAM.
>>    *
>> - * If GTT size is bigger than space left then we ajust GTT size.
>> + * If SYSVM size is bigger than space left then we adjust SYSVM size.
>>    * Thus function will never fails.
>>    *
>> - * FIXME: when reducing GTT size align new size on power of 2.
>> + * FIXME: when reducing SYSVM size align new size on power of 2.
>>    */
>> -void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
>> +void amdgpu_sysvm_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
>>   {
>>   	u64 size_af, size_bf;
>>   
>> -	size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
>> -	size_bf = mc->vram_start & ~mc->gtt_base_align;
>> +	size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->sysvm_base_align) & ~mc->sysvm_base_align;
>> +	size_bf = mc->vram_start & ~mc->sysvm_base_align;
>>   	if (size_bf > size_af) {
>> -		if (mc->gtt_size > size_bf) {
>> -			dev_warn(adev->dev, "limiting GTT\n");
>> -			mc->gtt_size = size_bf;
>> +		if (mc->sysvm_size > size_bf) {
>> +			dev_warn(adev->dev, "limiting SYSVM\n");
>> +			mc->sysvm_size = size_bf;
>>   		}
>> -		mc->gtt_start = 0;
>> +		mc->sysvm_start = 0;
>>   	} else {
>> -		if (mc->gtt_size > size_af) {
>> -			dev_warn(adev->dev, "limiting GTT\n");
>> -			mc->gtt_size = size_af;
>> +		if (mc->sysvm_size > size_af) {
>> +			dev_warn(adev->dev, "limiting SYSVM\n");
>> +			mc->sysvm_size = size_af;
>>   		}
>> -		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
>> +		mc->sysvm_start = (mc->vram_end + 1 + mc->sysvm_base_align) & ~mc->sysvm_base_align;
>>   	}
>> -	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
>> -	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
>> -			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
>> +	mc->sysvm_end = mc->sysvm_start + mc->sysvm_size - 1;
>> +	dev_info(adev->dev, "SYSVM: %lluM 0x%016llX - 0x%016llX\n",
>> +			mc->sysvm_size >> 20, mc->sysvm_start, mc->sysvm_end);
>>   }
>>   
>>   /*
>> @@ -1663,12 +1663,12 @@ static int amdgpu_init(struct amdgpu_device *adev)
>>   
>>   static void amdgpu_fill_reset_magic(struct amdgpu_device *adev)
>>   {
>> -	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
>> +	memcpy(adev->reset_magic, adev->sysvm.ptr, AMDGPU_RESET_MAGIC_NUM);
>>   }
>>   
>>   static bool amdgpu_check_vram_lost(struct amdgpu_device *adev)
>>   {
>> -	return !!memcmp(adev->gart.ptr, adev->reset_magic,
>> +	return !!memcmp(adev->sysvm.ptr, adev->reset_magic,
>>   			AMDGPU_RESET_MAGIC_NUM);
>>   }
>>   
>> @@ -2052,14 +2052,14 @@ int amdgpu_device_init(struct amdgpu_device *adev,
>>   	adev->flags = flags;
>>   	adev->asic_type = flags & AMD_ASIC_MASK;
>>   	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
>> -	adev->mc.gtt_size = 512 * 1024 * 1024;
>> +	adev->mc.sysvm_size = 512 * 1024 * 1024;
>>   	adev->accel_working = false;
>>   	adev->num_rings = 0;
>>   	adev->mman.buffer_funcs = NULL;
>>   	adev->mman.buffer_funcs_ring = NULL;
>>   	adev->vm_manager.vm_pte_funcs = NULL;
>>   	adev->vm_manager.vm_pte_num_rings = 0;
>> -	adev->gart.gart_funcs = NULL;
>> +	adev->sysvm.sysvm_funcs = NULL;
>>   	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
>>   
>>   	adev->smc_rreg = &amdgpu_invalid_rreg;
>> @@ -2110,8 +2110,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
>>   	INIT_LIST_HEAD(&adev->shadow_list);
>>   	mutex_init(&adev->shadow_list_lock);
>>   
>> -	INIT_LIST_HEAD(&adev->gtt_list);
>> -	spin_lock_init(&adev->gtt_list_lock);
>> +	INIT_LIST_HEAD(&adev->sysvm_list);
>> +	spin_lock_init(&adev->sysvm_list_lock);
>>   
>>   	INIT_LIST_HEAD(&adev->ring_lru_list);
>>   	spin_lock_init(&adev->ring_lru_list_lock);
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
>> deleted file mode 100644
>> index c808388..0000000
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
>> +++ /dev/null
>> @@ -1,423 +0,0 @@
>> -/*
>> - * Copyright 2008 Advanced Micro Devices, Inc.
>> - * Copyright 2008 Red Hat Inc.
>> - * Copyright 2009 Jerome Glisse.
>> - *
>> - * Permission is hereby granted, free of charge, to any person obtaining a
>> - * copy of this software and associated documentation files (the "Software"),
>> - * to deal in the Software without restriction, including without limitation
>> - * the rights to use, copy, modify, merge, publish, distribute, sublicense,
>> - * and/or sell copies of the Software, and to permit persons to whom the
>> - * Software is furnished to do so, subject to the following conditions:
>> - *
>> - * The above copyright notice and this permission notice shall be included in
>> - * all copies or substantial portions of the Software.
>> - *
>> - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
>> - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
>> - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
>> - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
>> - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
>> - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
>> - * OTHER DEALINGS IN THE SOFTWARE.
>> - *
>> - * Authors: Dave Airlie
>> - *          Alex Deucher
>> - *          Jerome Glisse
>> - */
>> -#include <drm/drmP.h>
>> -#include <drm/amdgpu_drm.h>
>> -#include "amdgpu.h"
>> -
>> -/*
>> - * GART
>> - * The GART (Graphics Aperture Remapping Table) is an aperture
>> - * in the GPU's address space.  System pages can be mapped into
>> - * the aperture and look like contiguous pages from the GPU's
>> - * perspective.  A page table maps the pages in the aperture
>> - * to the actual backing pages in system memory.
>> - *
>> - * Radeon GPUs support both an internal GART, as described above,
>> - * and AGP.  AGP works similarly, but the GART table is configured
>> - * and maintained by the northbridge rather than the driver.
>> - * Radeon hw has a separate AGP aperture that is programmed to
>> - * point to the AGP aperture provided by the northbridge and the
>> - * requests are passed through to the northbridge aperture.
>> - * Both AGP and internal GART can be used at the same time, however
>> - * that is not currently supported by the driver.
>> - *
>> - * This file handles the common internal GART management.
>> - */
>> -
>> -/*
>> - * Common GART table functions.
>> - */
>> -
>> -/**
>> - * amdgpu_gart_set_defaults - set the default gtt_size
>> - *
>> - * @adev: amdgpu_device pointer
>> - *
>> - * Set the default gtt_size based on parameters and available VRAM.
>> - */
>> -void amdgpu_gart_set_defaults(struct amdgpu_device *adev)
>> -{
>> -	/* unless the user had overridden it, set the gart
>> -	 * size equal to the 1024 or vram, whichever is larger.
>> -	 */
>> -	if (amdgpu_gart_size == -1)
>> -		adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
>> -					adev->mc.mc_vram_size);
>> -	else
>> -		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
>> -}
>> -
>> -/**
>> - * amdgpu_gart_table_ram_alloc - allocate system ram for gart page table
>> - *
>> - * @adev: amdgpu_device pointer
>> - *
>> - * Allocate system memory for GART page table
>> - * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
>> - * gart table to be in system memory.
>> - * Returns 0 for success, -ENOMEM for failure.
>> - */
>> -int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev)
>> -{
>> -	void *ptr;
>> -
>> -	ptr = pci_alloc_consistent(adev->pdev, adev->gart.table_size,
>> -				   &adev->gart.table_addr);
>> -	if (ptr == NULL) {
>> -		return -ENOMEM;
>> -	}
>> -#ifdef CONFIG_X86
>> -	if (0) {
>> -		set_memory_uc((unsigned long)ptr,
>> -			      adev->gart.table_size >> PAGE_SHIFT);
>> -	}
>> -#endif
>> -	adev->gart.ptr = ptr;
>> -	memset((void *)adev->gart.ptr, 0, adev->gart.table_size);
>> -	return 0;
>> -}
>> -
>> -/**
>> - * amdgpu_gart_table_ram_free - free system ram for gart page table
>> - *
>> - * @adev: amdgpu_device pointer
>> - *
>> - * Free system memory for GART page table
>> - * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
>> - * gart table to be in system memory.
>> - */
>> -void amdgpu_gart_table_ram_free(struct amdgpu_device *adev)
>> -{
>> -	if (adev->gart.ptr == NULL) {
>> -		return;
>> -	}
>> -#ifdef CONFIG_X86
>> -	if (0) {
>> -		set_memory_wb((unsigned long)adev->gart.ptr,
>> -			      adev->gart.table_size >> PAGE_SHIFT);
>> -	}
>> -#endif
>> -	pci_free_consistent(adev->pdev, adev->gart.table_size,
>> -			    (void *)adev->gart.ptr,
>> -			    adev->gart.table_addr);
>> -	adev->gart.ptr = NULL;
>> -	adev->gart.table_addr = 0;
>> -}
>> -
>> -/**
>> - * amdgpu_gart_table_vram_alloc - allocate vram for gart page table
>> - *
>> - * @adev: amdgpu_device pointer
>> - *
>> - * Allocate video memory for GART page table
>> - * (pcie r4xx, r5xx+).  These asics require the
>> - * gart table to be in video memory.
>> - * Returns 0 for success, error for failure.
>> - */
>> -int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
>> -{
>> -	int r;
>> -
>> -	if (adev->gart.robj == NULL) {
>> -		r = amdgpu_bo_create(adev, adev->gart.table_size,
>> -				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
>> -				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
>> -				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
>> -				     NULL, NULL, &adev->gart.robj);
>> -		if (r) {
>> -			return r;
>> -		}
>> -	}
>> -	return 0;
>> -}
>> -
>> -/**
>> - * amdgpu_gart_table_vram_pin - pin gart page table in vram
>> - *
>> - * @adev: amdgpu_device pointer
>> - *
>> - * Pin the GART page table in vram so it will not be moved
>> - * by the memory manager (pcie r4xx, r5xx+).  These asics require the
>> - * gart table to be in video memory.
>> - * Returns 0 for success, error for failure.
>> - */
>> -int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
>> -{
>> -	uint64_t gpu_addr;
>> -	int r;
>> -
>> -	r = amdgpu_bo_reserve(adev->gart.robj, false);
>> -	if (unlikely(r != 0))
>> -		return r;
>> -	r = amdgpu_bo_pin(adev->gart.robj,
>> -				AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
>> -	if (r) {
>> -		amdgpu_bo_unreserve(adev->gart.robj);
>> -		return r;
>> -	}
>> -	r = amdgpu_bo_kmap(adev->gart.robj, &adev->gart.ptr);
>> -	if (r)
>> -		amdgpu_bo_unpin(adev->gart.robj);
>> -	amdgpu_bo_unreserve(adev->gart.robj);
>> -	adev->gart.table_addr = gpu_addr;
>> -	return r;
>> -}
>> -
>> -/**
>> - * amdgpu_gart_table_vram_unpin - unpin gart page table in vram
>> - *
>> - * @adev: amdgpu_device pointer
>> - *
>> - * Unpin the GART page table in vram (pcie r4xx, r5xx+).
>> - * These asics require the gart table to be in video memory.
>> - */
>> -void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
>> -{
>> -	int r;
>> -
>> -	if (adev->gart.robj == NULL) {
>> -		return;
>> -	}
>> -	r = amdgpu_bo_reserve(adev->gart.robj, true);
>> -	if (likely(r == 0)) {
>> -		amdgpu_bo_kunmap(adev->gart.robj);
>> -		amdgpu_bo_unpin(adev->gart.robj);
>> -		amdgpu_bo_unreserve(adev->gart.robj);
>> -		adev->gart.ptr = NULL;
>> -	}
>> -}
>> -
>> -/**
>> - * amdgpu_gart_table_vram_free - free gart page table vram
>> - *
>> - * @adev: amdgpu_device pointer
>> - *
>> - * Free the video memory used for the GART page table
>> - * (pcie r4xx, r5xx+).  These asics require the gart table to
>> - * be in video memory.
>> - */
>> -void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
>> -{
>> -	if (adev->gart.robj == NULL) {
>> -		return;
>> -	}
>> -	amdgpu_bo_unref(&adev->gart.robj);
>> -}
>> -
>> -/*
>> - * Common gart functions.
>> - */
>> -/**
>> - * amdgpu_gart_unbind - unbind pages from the gart page table
>> - *
>> - * @adev: amdgpu_device pointer
>> - * @offset: offset into the GPU's gart aperture
>> - * @pages: number of pages to unbind
>> - *
>> - * Unbinds the requested pages from the gart page table and
>> - * replaces them with the dummy page (all asics).
>> - * Returns 0 for success, -EINVAL for failure.
>> - */
>> -int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
>> -			int pages)
>> -{
>> -	unsigned t;
>> -	unsigned p;
>> -	int i, j;
>> -	u64 page_base;
>> -	/* Starting from VEGA10, system bit must be 0 to mean invalid. */
>> -	uint64_t flags = 0;
>> -
>> -	if (!adev->gart.ready) {
>> -		WARN(1, "trying to unbind memory from uninitialized GART !\n");
>> -		return -EINVAL;
>> -	}
>> -
>> -	t = offset / AMDGPU_GPU_PAGE_SIZE;
>> -	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
>> -	for (i = 0; i < pages; i++, p++) {
>> -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
>> -		adev->gart.pages[p] = NULL;
>> -#endif
>> -		page_base = adev->dummy_page.addr;
>> -		if (!adev->gart.ptr)
>> -			continue;
>> -
>> -		for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
>> -			amdgpu_gart_set_pte_pde(adev, adev->gart.ptr,
>> -						t, page_base, flags);
>> -			page_base += AMDGPU_GPU_PAGE_SIZE;
>> -		}
>> -	}
>> -	mb();
>> -	amdgpu_gart_flush_gpu_tlb(adev, 0);
>> -	return 0;
>> -}
>> -
>> -/**
>> - * amdgpu_gart_map - map dma_addresses into GART entries
>> - *
>> - * @adev: amdgpu_device pointer
>> - * @offset: offset into the GPU's gart aperture
>> - * @pages: number of pages to bind
>> - * @dma_addr: DMA addresses of pages
>> - *
>> - * Map the dma_addresses into GART entries (all asics).
>> - * Returns 0 for success, -EINVAL for failure.
>> - */
>> -int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
>> -		    int pages, dma_addr_t *dma_addr, uint64_t flags,
>> -		    void *dst)
>> -{
>> -	uint64_t page_base;
>> -	unsigned i, j, t;
>> -
>> -	if (!adev->gart.ready) {
>> -		WARN(1, "trying to bind memory to uninitialized GART !\n");
>> -		return -EINVAL;
>> -	}
>> -
>> -	t = offset / AMDGPU_GPU_PAGE_SIZE;
>> -
>> -	for (i = 0; i < pages; i++) {
>> -		page_base = dma_addr[i];
>> -		for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
>> -			amdgpu_gart_set_pte_pde(adev, dst, t, page_base, flags);
>> -			page_base += AMDGPU_GPU_PAGE_SIZE;
>> -		}
>> -	}
>> -	return 0;
>> -}
>> -
>> -/**
>> - * amdgpu_gart_bind - bind pages into the gart page table
>> - *
>> - * @adev: amdgpu_device pointer
>> - * @offset: offset into the GPU's gart aperture
>> - * @pages: number of pages to bind
>> - * @pagelist: pages to bind
>> - * @dma_addr: DMA addresses of pages
>> - *
>> - * Binds the requested pages to the gart page table
>> - * (all asics).
>> - * Returns 0 for success, -EINVAL for failure.
>> - */
>> -int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
>> -		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
>> -		     uint64_t flags)
>> -{
>> -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
>> -	unsigned i,t,p;
>> -#endif
>> -	int r;
>> -
>> -	if (!adev->gart.ready) {
>> -		WARN(1, "trying to bind memory to uninitialized GART !\n");
>> -		return -EINVAL;
>> -	}
>> -
>> -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
>> -	t = offset / AMDGPU_GPU_PAGE_SIZE;
>> -	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
>> -	for (i = 0; i < pages; i++, p++)
>> -		adev->gart.pages[p] = pagelist[i];
>> -#endif
>> -
>> -	if (adev->gart.ptr) {
>> -		r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
>> -			    adev->gart.ptr);
>> -		if (r)
>> -			return r;
>> -	}
>> -
>> -	mb();
>> -	amdgpu_gart_flush_gpu_tlb(adev, 0);
>> -	return 0;
>> -}
>> -
>> -/**
>> - * amdgpu_gart_init - init the driver info for managing the gart
>> - *
>> - * @adev: amdgpu_device pointer
>> - *
>> - * Allocate the dummy page and init the gart driver info (all asics).
>> - * Returns 0 for success, error for failure.
>> - */
>> -int amdgpu_gart_init(struct amdgpu_device *adev)
>> -{
>> -	int r;
>> -
>> -	if (adev->dummy_page.page)
>> -		return 0;
>> -
>> -	/* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
>> -	if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) {
>> -		DRM_ERROR("Page size is smaller than GPU page size!\n");
>> -		return -EINVAL;
>> -	}
>> -	r = amdgpu_dummy_page_init(adev);
>> -	if (r)
>> -		return r;
>> -	/* Compute table size */
>> -	adev->gart.num_cpu_pages = adev->mc.gtt_size / PAGE_SIZE;
>> -	adev->gart.num_gpu_pages = adev->mc.gtt_size / AMDGPU_GPU_PAGE_SIZE;
>> -	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
>> -		 adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
>> -
>> -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
>> -	/* Allocate pages table */
>> -	adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages);
>> -	if (adev->gart.pages == NULL) {
>> -		amdgpu_gart_fini(adev);
>> -		return -ENOMEM;
>> -	}
>> -#endif
>> -
>> -	return 0;
>> -}
>> -
>> -/**
>> - * amdgpu_gart_fini - tear down the driver info for managing the gart
>> - *
>> - * @adev: amdgpu_device pointer
>> - *
>> - * Tear down the gart driver info and free the dummy page (all asics).
>> - */
>> -void amdgpu_gart_fini(struct amdgpu_device *adev)
>> -{
>> -	if (adev->gart.ready) {
>> -		/* unbind pages */
>> -		amdgpu_gart_unbind(adev, 0, adev->gart.num_cpu_pages);
>> -	}
>> -	adev->gart.ready = false;
>> -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
>> -	vfree(adev->gart.pages);
>> -	adev->gart.pages = NULL;
>> -#endif
>> -	amdgpu_dummy_page_fini(adev);
>> -}
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>> index 4510627..73a1c64 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>> @@ -82,7 +82,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
>>   	if (r)
>>   		kfree(*job);
>>   	else
>> -		(*job)->vm_pd_addr = adev->gart.table_addr;
>> +		(*job)->vm_pd_addr = adev->sysvm.table_addr;
>>   
>>   	return r;
>>   }
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c
>> new file mode 100644
>> index 0000000..50fc8d7
>> --- /dev/null
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c
>> @@ -0,0 +1,423 @@
>> +/*
>> + * Copyright 2008 Advanced Micro Devices, Inc.
>> + * Copyright 2008 Red Hat Inc.
>> + * Copyright 2009 Jerome Glisse.
>> + *
>> + * Permission is hereby granted, free of charge, to any person obtaining a
>> + * copy of this software and associated documentation files (the "Software"),
>> + * to deal in the Software without restriction, including without limitation
>> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
>> + * and/or sell copies of the Software, and to permit persons to whom the
>> + * Software is furnished to do so, subject to the following conditions:
>> + *
>> + * The above copyright notice and this permission notice shall be included in
>> + * all copies or substantial portions of the Software.
>> + *
>> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
>> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
>> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
>> + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
>> + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
>> + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
>> + * OTHER DEALINGS IN THE SOFTWARE.
>> + *
>> + * Authors: Dave Airlie
>> + *          Alex Deucher
>> + *          Jerome Glisse
>> + */
>> +#include <drm/drmP.h>
>> +#include <drm/amdgpu_drm.h>
>> +#include "amdgpu.h"
>> +
>> +/*
>> + * SYSVM
>> + * The system VM (previously called GART) is an aperture
>> + * in the GPU's address space.  System pages can be mapped into
>> + * the aperture and look like contiguous pages from the GPU's
>> + * perspective.  A page table maps the pages in the aperture
>> + * to the actual backing pages in system memory.
>> + *
>> + * Radeon GPUs support both an internal SYSVM based GART, as described above,
>> + * and AGP.  AGP works similarly, but the GART table is configured
>> + * and maintained by the northbridge rather than the driver.
>> + * Radeon hw has a separate AGP aperture that is programmed to
>> + * point to the AGP aperture provided by the northbridge and the
>> + * requests are passed through to the northbridge aperture.
>> + * Both AGP and internal GART can be used at the same time, however
>> + * that is not currently supported by the driver.
>> + *
>> + * This file handles the common internal SYSVM management.
>> + */
>> +
>> +/*
>> + * Common SYSVM table functions.
>> + */
>> +
>> +/**
>> + * amdgpu_sysvm_set_defaults - set the default sysvm_size
>> + *
>> + * @adev: amdgpu_device pointer
>> + *
>> + * Set the default sysvm_size based on parameters and available VRAM.
>> + */
>> +void amdgpu_sysvm_set_defaults(struct amdgpu_device *adev)
>> +{
>> +	/* unless the user had overridden it, set the gart
>> +	 * size equal to the 1024 or vram, whichever is larger.
>> +	 */
>> +	if (amdgpu_gart_size == -1)
>> +		adev->mc.sysvm_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
>> +					adev->mc.mc_vram_size);
>> +	else
>> +		adev->mc.sysvm_size = (uint64_t)amdgpu_gart_size << 20;
>> +}
>> +
>> +/**
>> + * amdgpu_sysvm_table_ram_alloc - allocate system ram for gart page table
>> + *
>> + * @adev: amdgpu_device pointer
>> + *
>> + * Allocate system memory for SYSVM page table
>> + * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
>> + * gart table to be in system memory.
>> + * Returns 0 for success, -ENOMEM for failure.
>> + */
>> +int amdgpu_sysvm_table_ram_alloc(struct amdgpu_device *adev)
>> +{
>> +	void *ptr;
>> +
>> +	ptr = pci_alloc_consistent(adev->pdev, adev->sysvm.table_size,
>> +				   &adev->sysvm.table_addr);
>> +	if (ptr == NULL) {
>> +		return -ENOMEM;
>> +	}
>> +#ifdef CONFIG_X86
>> +	if (0) {
>> +		set_memory_uc((unsigned long)ptr,
>> +			      adev->sysvm.table_size >> PAGE_SHIFT);
>> +	}
>> +#endif
>> +	adev->sysvm.ptr = ptr;
>> +	memset((void *)adev->sysvm.ptr, 0, adev->sysvm.table_size);
>> +	return 0;
>> +}
>> +
>> +/**
>> + * amdgpu_sysvm_table_ram_free - free system ram for gart page table
>> + *
>> + * @adev: amdgpu_device pointer
>> + *
>> + * Free system memory for SYSVM page table
>> + * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
>> + * gart table to be in system memory.
>> + */
>> +void amdgpu_sysvm_table_ram_free(struct amdgpu_device *adev)
>> +{
>> +	if (adev->sysvm.ptr == NULL) {
>> +		return;
>> +	}
>> +#ifdef CONFIG_X86
>> +	if (0) {
>> +		set_memory_wb((unsigned long)adev->sysvm.ptr,
>> +			      adev->sysvm.table_size >> PAGE_SHIFT);
>> +	}
>> +#endif
>> +	pci_free_consistent(adev->pdev, adev->sysvm.table_size,
>> +			    (void *)adev->sysvm.ptr,
>> +			    adev->sysvm.table_addr);
>> +	adev->sysvm.ptr = NULL;
>> +	adev->sysvm.table_addr = 0;
>> +}
>> +
>> +/**
>> + * amdgpu_sysvm_table_vram_alloc - allocate vram for gart page table
>> + *
>> + * @adev: amdgpu_device pointer
>> + *
>> + * Allocate video memory for SYSVM page table
>> + * (pcie r4xx, r5xx+).  These asics require the
>> + * gart table to be in video memory.
>> + * Returns 0 for success, error for failure.
>> + */
>> +int amdgpu_sysvm_table_vram_alloc(struct amdgpu_device *adev)
>> +{
>> +	int r;
>> +
>> +	if (adev->sysvm.robj == NULL) {
>> +		r = amdgpu_bo_create(adev, adev->sysvm.table_size,
>> +				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
>> +				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
>> +				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
>> +				     NULL, NULL, &adev->sysvm.robj);
>> +		if (r) {
>> +			return r;
>> +		}
>> +	}
>> +	return 0;
>> +}
>> +
>> +/**
>> + * amdgpu_sysvm_table_vram_pin - pin gart page table in vram
>> + *
>> + * @adev: amdgpu_device pointer
>> + *
>> + * Pin the SYSVM page table in vram so it will not be moved
>> + * by the memory manager (pcie r4xx, r5xx+).  These asics require the
>> + * gart table to be in video memory.
>> + * Returns 0 for success, error for failure.
>> + */
>> +int amdgpu_sysvm_table_vram_pin(struct amdgpu_device *adev)
>> +{
>> +	uint64_t gpu_addr;
>> +	int r;
>> +
>> +	r = amdgpu_bo_reserve(adev->sysvm.robj, false);
>> +	if (unlikely(r != 0))
>> +		return r;
>> +	r = amdgpu_bo_pin(adev->sysvm.robj,
>> +				AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
>> +	if (r) {
>> +		amdgpu_bo_unreserve(adev->sysvm.robj);
>> +		return r;
>> +	}
>> +	r = amdgpu_bo_kmap(adev->sysvm.robj, &adev->sysvm.ptr);
>> +	if (r)
>> +		amdgpu_bo_unpin(adev->sysvm.robj);
>> +	amdgpu_bo_unreserve(adev->sysvm.robj);
>> +	adev->sysvm.table_addr = gpu_addr;
>> +	return r;
>> +}
>> +
>> +/**
>> + * amdgpu_sysvm_table_vram_unpin - unpin gart page table in vram
>> + *
>> + * @adev: amdgpu_device pointer
>> + *
>> + * Unpin the GART page table in vram (pcie r4xx, r5xx+).
>> + * These asics require the gart table to be in video memory.
>> + */
>> +void amdgpu_sysvm_table_vram_unpin(struct amdgpu_device *adev)
>> +{
>> +	int r;
>> +
>> +	if (adev->sysvm.robj == NULL) {
>> +		return;
>> +	}
>> +	r = amdgpu_bo_reserve(adev->sysvm.robj, true);
>> +	if (likely(r == 0)) {
>> +		amdgpu_bo_kunmap(adev->sysvm.robj);
>> +		amdgpu_bo_unpin(adev->sysvm.robj);
>> +		amdgpu_bo_unreserve(adev->sysvm.robj);
>> +		adev->sysvm.ptr = NULL;
>> +	}
>> +}
>> +
>> +/**
>> + * amdgpu_sysvm_table_vram_free - free gart page table vram
>> + *
>> + * @adev: amdgpu_device pointer
>> + *
>> + * Free the video memory used for the GART page table
>> + * (pcie r4xx, r5xx+).  These asics require the gart table to
>> + * be in video memory.
>> + */
>> +void amdgpu_sysvm_table_vram_free(struct amdgpu_device *adev)
>> +{
>> +	if (adev->sysvm.robj == NULL) {
>> +		return;
>> +	}
>> +	amdgpu_bo_unref(&adev->sysvm.robj);
>> +}
>> +
>> +/*
>> + * Common gart functions.
>> + */
>> +/**
>> + * amdgpu_sysvm_unbind - unbind pages from the gart page table
>> + *
>> + * @adev: amdgpu_device pointer
>> + * @offset: offset into the GPU's gart aperture
>> + * @pages: number of pages to unbind
>> + *
>> + * Unbinds the requested pages from the gart page table and
>> + * replaces them with the dummy page (all asics).
>> + * Returns 0 for success, -EINVAL for failure.
>> + */
>> +int amdgpu_sysvm_unbind(struct amdgpu_device *adev, uint64_t offset,
>> +			int pages)
>> +{
>> +	unsigned t;
>> +	unsigned p;
>> +	int i, j;
>> +	u64 page_base;
>> +	/* Starting from VEGA10, system bit must be 0 to mean invalid. */
>> +	uint64_t flags = 0;
>> +
>> +	if (!adev->sysvm.ready) {
>> +		WARN(1, "trying to unbind memory from uninitialized GART !\n");
>> +		return -EINVAL;
>> +	}
>> +
>> +	t = offset / AMDGPU_GPU_PAGE_SIZE;
>> +	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
>> +	for (i = 0; i < pages; i++, p++) {
>> +#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
>> +		adev->sysvm.pages[p] = NULL;
>> +#endif
>> +		page_base = adev->dummy_page.addr;
>> +		if (!adev->sysvm.ptr)
>> +			continue;
>> +
>> +		for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
>> +			amdgpu_sysvm_set_pte_pde(adev, adev->sysvm.ptr,
>> +						t, page_base, flags);
>> +			page_base += AMDGPU_GPU_PAGE_SIZE;
>> +		}
>> +	}
>> +	mb();
>> +	amdgpu_sysvm_flush_gpu_tlb(adev, 0);
>> +	return 0;
>> +}
>> +
>> +/**
>> + * amdgpu_sysvm_map - map dma_addresses into GART entries
>> + *
>> + * @adev: amdgpu_device pointer
>> + * @offset: offset into the GPU's gart aperture
>> + * @pages: number of pages to bind
>> + * @dma_addr: DMA addresses of pages
>> + *
>> + * Map the dma_addresses into GART entries (all asics).
>> + * Returns 0 for success, -EINVAL for failure.
>> + */
>> +int amdgpu_sysvm_map(struct amdgpu_device *adev, uint64_t offset,
>> +		    int pages, dma_addr_t *dma_addr, uint64_t flags,
>> +		    void *dst)
>> +{
>> +	uint64_t page_base;
>> +	unsigned i, j, t;
>> +
>> +	if (!adev->sysvm.ready) {
>> +		WARN(1, "trying to bind memory to uninitialized GART !\n");
>> +		return -EINVAL;
>> +	}
>> +
>> +	t = offset / AMDGPU_GPU_PAGE_SIZE;
>> +
>> +	for (i = 0; i < pages; i++) {
>> +		page_base = dma_addr[i];
>> +		for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
>> +			amdgpu_sysvm_set_pte_pde(adev, dst, t, page_base, flags);
>> +			page_base += AMDGPU_GPU_PAGE_SIZE;
>> +		}
>> +	}
>> +	return 0;
>> +}
>> +
>> +/**
>> + * amdgpu_sysvm_bind - bind pages into the gart page table
>> + *
>> + * @adev: amdgpu_device pointer
>> + * @offset: offset into the GPU's gart aperture
>> + * @pages: number of pages to bind
>> + * @pagelist: pages to bind
>> + * @dma_addr: DMA addresses of pages
>> + *
>> + * Binds the requested pages to the gart page table
>> + * (all asics).
>> + * Returns 0 for success, -EINVAL for failure.
>> + */
>> +int amdgpu_sysvm_bind(struct amdgpu_device *adev, uint64_t offset,
>> +		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
>> +		     uint64_t flags)
>> +{
>> +#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
>> +	unsigned i,t,p;
>> +#endif
>> +	int r;
>> +
>> +	if (!adev->sysvm.ready) {
>> +		WARN(1, "trying to bind memory to uninitialized GART !\n");
>> +		return -EINVAL;
>> +	}
>> +
>> +#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
>> +	t = offset / AMDGPU_GPU_PAGE_SIZE;
>> +	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
>> +	for (i = 0; i < pages; i++, p++)
>> +		adev->sysvm.pages[p] = pagelist[i];
>> +#endif
>> +
>> +	if (adev->sysvm.ptr) {
>> +		r = amdgpu_sysvm_map(adev, offset, pages, dma_addr, flags,
>> +			    adev->sysvm.ptr);
>> +		if (r)
>> +			return r;
>> +	}
>> +
>> +	mb();
>> +	amdgpu_sysvm_flush_gpu_tlb(adev, 0);
>> +	return 0;
>> +}
>> +
>> +/**
>> + * amdgpu_sysvm_init - init the driver info for managing the gart
>> + *
>> + * @adev: amdgpu_device pointer
>> + *
>> + * Allocate the dummy page and init the gart driver info (all asics).
>> + * Returns 0 for success, error for failure.
>> + */
>> +int amdgpu_sysvm_init(struct amdgpu_device *adev)
>> +{
>> +	int r;
>> +
>> +	if (adev->dummy_page.page)
>> +		return 0;
>> +
>> +	/* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
>> +	if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) {
>> +		DRM_ERROR("Page size is smaller than GPU page size!\n");
>> +		return -EINVAL;
>> +	}
>> +	r = amdgpu_dummy_page_init(adev);
>> +	if (r)
>> +		return r;
>> +	/* Compute table size */
>> +	adev->sysvm.num_cpu_pages = adev->mc.sysvm_size / PAGE_SIZE;
>> +	adev->sysvm.num_gpu_pages = adev->mc.sysvm_size / AMDGPU_GPU_PAGE_SIZE;
>> +	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
>> +		 adev->sysvm.num_cpu_pages, adev->sysvm.num_gpu_pages);
>> +
>> +#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
>> +	/* Allocate pages table */
>> +	adev->sysvm.pages = vzalloc(sizeof(void *) * adev->sysvm.num_cpu_pages);
>> +	if (adev->sysvm.pages == NULL) {
>> +		amdgpu_sysvm_fini(adev);
>> +		return -ENOMEM;
>> +	}
>> +#endif
>> +
>> +	return 0;
>> +}
>> +
>> +/**
>> + * amdgpu_sysvm_fini - tear down the driver info for managing the gart
>> + *
>> + * @adev: amdgpu_device pointer
>> + *
>> + * Tear down the gart driver info and free the dummy page (all asics).
>> + */
>> +void amdgpu_sysvm_fini(struct amdgpu_device *adev)
>> +{
>> +	if (adev->sysvm.ready) {
>> +		/* unbind pages */
>> +		amdgpu_sysvm_unbind(adev, 0, adev->sysvm.num_cpu_pages);
>> +	}
>> +	adev->sysvm.ready = false;
>> +#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
>> +	vfree(adev->sysvm.pages);
>> +	adev->sysvm.pages = NULL;
>> +#endif
>> +	amdgpu_dummy_page_fini(adev);
>> +}
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
>> index d02e611..651712e 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
>> @@ -32,8 +32,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
>>   {
>>   	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
>>   	struct amdgpu_bo *vram_obj = NULL;
>> -	struct amdgpu_bo **gtt_obj = NULL;
>> -	uint64_t gtt_addr, vram_addr;
>> +	struct amdgpu_bo **sysvm_obj = NULL;
>> +	uint64_t sysvm_addr, vram_addr;
>>   	unsigned n, size;
>>   	int i, r;
>>   
>> @@ -42,7 +42,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
>>   	/* Number of tests =
>>   	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
>>   	 */
>> -	n = adev->mc.gtt_size - AMDGPU_IB_POOL_SIZE*64*1024;
>> +	n = adev->mc.sysvm_size - AMDGPU_IB_POOL_SIZE*64*1024;
>>   	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
>>   		if (adev->rings[i])
>>   			n -= adev->rings[i]->ring_size;
>> @@ -52,8 +52,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
>>   		n -= adev->irq.ih.ring_size;
>>   	n /= size;
>>   
>> -	gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
>> -	if (!gtt_obj) {
>> +	sysvm_obj = kzalloc(n * sizeof(*sysvm_obj), GFP_KERNEL);
>> +	if (!sysvm_obj) {
>>   		DRM_ERROR("Failed to allocate %d pointers\n", n);
>>   		r = 1;
>>   		goto out_cleanup;
>> @@ -75,42 +75,42 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
>>   		goto out_unres;
>>   	}
>>   	for (i = 0; i < n; i++) {
>> -		void *gtt_map, *vram_map;
>> -		void **gtt_start, **gtt_end;
>> +		void *sysvm_map, *vram_map;
>> +		void **sysvm_start, **sysvm_end;
>>   		void **vram_start, **vram_end;
>>   		struct dma_fence *fence = NULL;
>>   
>>   		r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
>>   				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
>> -				     NULL, gtt_obj + i);
>> +				     NULL, sysvm_obj + i);
>>   		if (r) {
>>   			DRM_ERROR("Failed to create GTT object %d\n", i);
>>   			goto out_lclean;
>>   		}
>>   
>> -		r = amdgpu_bo_reserve(gtt_obj[i], false);
>> +		r = amdgpu_bo_reserve(sysvm_obj[i], false);
>>   		if (unlikely(r != 0))
>>   			goto out_lclean_unref;
>> -		r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, &gtt_addr);
>> +		r = amdgpu_bo_pin(sysvm_obj[i], AMDGPU_GEM_DOMAIN_GTT, &sysvm_addr);
>>   		if (r) {
>>   			DRM_ERROR("Failed to pin GTT object %d\n", i);
>>   			goto out_lclean_unres;
>>   		}
>>   
>> -		r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
>> +		r = amdgpu_bo_kmap(sysvm_obj[i], &sysvm_map);
>>   		if (r) {
>>   			DRM_ERROR("Failed to map GTT object %d\n", i);
>>   			goto out_lclean_unpin;
>>   		}
>>   
>> -		for (gtt_start = gtt_map, gtt_end = gtt_map + size;
>> -		     gtt_start < gtt_end;
>> -		     gtt_start++)
>> -			*gtt_start = gtt_start;
>> +		for (sysvm_start = sysvm_map, sysvm_end = sysvm_map + size;
>> +		     sysvm_start < sysvm_end;
>> +		     sysvm_start++)
>> +			*sysvm_start = sysvm_start;
>>   
>> -		amdgpu_bo_kunmap(gtt_obj[i]);
>> +		amdgpu_bo_kunmap(sysvm_obj[i]);
>>   
>> -		r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
>> +		r = amdgpu_copy_buffer(ring, sysvm_addr, vram_addr,
>>   				       size, NULL, &fence, false, false);
>>   
>>   		if (r) {
>> @@ -132,21 +132,21 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
>>   			goto out_lclean_unpin;
>>   		}
>>   
>> -		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
>> +		for (sysvm_start = sysvm_map, sysvm_end = sysvm_map + size,
>>   		     vram_start = vram_map, vram_end = vram_map + size;
>>   		     vram_start < vram_end;
>> -		     gtt_start++, vram_start++) {
>> -			if (*vram_start != gtt_start) {
>> +		     sysvm_start++, vram_start++) {
>> +			if (*vram_start != sysvm_start) {
>>   				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
>>   					  "expected 0x%p (GTT/VRAM offset "
>>   					  "0x%16llx/0x%16llx)\n",
>> -					  i, *vram_start, gtt_start,
>> +					  i, *vram_start, sysvm_start,
>>   					  (unsigned long long)
>> -					  (gtt_addr - adev->mc.gtt_start +
>> -					   (void*)gtt_start - gtt_map),
>> +					  (sysvm_addr - adev->mc.sysvm_start +
>> +					   (void*)sysvm_start - sysvm_map),
>>   					  (unsigned long long)
>>   					  (vram_addr - adev->mc.vram_start +
>> -					   (void*)gtt_start - gtt_map));
>> +					   (void*)sysvm_start - sysvm_map));
>>   				amdgpu_bo_kunmap(vram_obj);
>>   				goto out_lclean_unpin;
>>   			}
>> @@ -155,7 +155,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
>>   
>>   		amdgpu_bo_kunmap(vram_obj);
>>   
>> -		r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr,
>> +		r = amdgpu_copy_buffer(ring, vram_addr, sysvm_addr,
>>   				       size, NULL, &fence, false, false);
>>   
>>   		if (r) {
>> @@ -171,49 +171,49 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
>>   
>>   		dma_fence_put(fence);
>>   
>> -		r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
>> +		r = amdgpu_bo_kmap(sysvm_obj[i], &sysvm_map);
>>   		if (r) {
>>   			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
>>   			goto out_lclean_unpin;
>>   		}
>>   
>> -		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
>> +		for (sysvm_start = sysvm_map, sysvm_end = sysvm_map + size,
>>   		     vram_start = vram_map, vram_end = vram_map + size;
>> -		     gtt_start < gtt_end;
>> -		     gtt_start++, vram_start++) {
>> -			if (*gtt_start != vram_start) {
>> +		     sysvm_start < sysvm_end;
>> +		     sysvm_start++, vram_start++) {
>> +			if (*sysvm_start != vram_start) {
>>   				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
>>   					  "expected 0x%p (VRAM/GTT offset "
>>   					  "0x%16llx/0x%16llx)\n",
>> -					  i, *gtt_start, vram_start,
>> +					  i, *sysvm_start, vram_start,
>>   					  (unsigned long long)
>>   					  (vram_addr - adev->mc.vram_start +
>>   					   (void*)vram_start - vram_map),
>>   					  (unsigned long long)
>> -					  (gtt_addr - adev->mc.gtt_start +
>> +					  (sysvm_addr - adev->mc.sysvm_start +
>>   					   (void*)vram_start - vram_map));
>> -				amdgpu_bo_kunmap(gtt_obj[i]);
>> +				amdgpu_bo_kunmap(sysvm_obj[i]);
>>   				goto out_lclean_unpin;
>>   			}
>>   		}
>>   
>> -		amdgpu_bo_kunmap(gtt_obj[i]);
>> +		amdgpu_bo_kunmap(sysvm_obj[i]);
>>   
>>   		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
>> -			 gtt_addr - adev->mc.gtt_start);
>> +			 sysvm_addr - adev->mc.sysvm_start);
>>   		continue;
>>   
>>   out_lclean_unpin:
>> -		amdgpu_bo_unpin(gtt_obj[i]);
>> +		amdgpu_bo_unpin(sysvm_obj[i]);
>>   out_lclean_unres:
>> -		amdgpu_bo_unreserve(gtt_obj[i]);
>> +		amdgpu_bo_unreserve(sysvm_obj[i]);
>>   out_lclean_unref:
>> -		amdgpu_bo_unref(&gtt_obj[i]);
>> +		amdgpu_bo_unref(&sysvm_obj[i]);
>>   out_lclean:
>>   		for (--i; i >= 0; --i) {
>> -			amdgpu_bo_unpin(gtt_obj[i]);
>> -			amdgpu_bo_unreserve(gtt_obj[i]);
>> -			amdgpu_bo_unref(&gtt_obj[i]);
>> +			amdgpu_bo_unpin(sysvm_obj[i]);
>> +			amdgpu_bo_unreserve(sysvm_obj[i]);
>> +			amdgpu_bo_unref(&sysvm_obj[i]);
>>   		}
>>   		if (fence)
>>   			dma_fence_put(fence);
>> @@ -226,7 +226,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
>>   out_unref:
>>   	amdgpu_bo_unref(&vram_obj);
>>   out_cleanup:
>> -	kfree(gtt_obj);
>> +	kfree(sysvm_obj);
>>   	if (r) {
>>   		pr_warn("Error while testing BO move\n");
>>   	}
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>> index 5c7a6c5..9240357 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>> @@ -102,7 +102,7 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
>>   		goto error_bo;
>>   	}
>>   
>> -	mutex_init(&adev->mman.gtt_window_lock);
>> +	mutex_init(&adev->mman.sysvm_window_lock);
>>   
>>   	ring = adev->mman.buffer_funcs_ring;
>>   	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
>> @@ -130,7 +130,7 @@ static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
>>   	if (adev->mman.mem_global_referenced) {
>>   		amd_sched_entity_fini(adev->mman.entity.sched,
>>   				      &adev->mman.entity);
>> -		mutex_destroy(&adev->mman.gtt_window_lock);
>> +		mutex_destroy(&adev->mman.sysvm_window_lock);
>>   		drm_global_item_unref(&adev->mman.bo_global_ref.ref);
>>   		drm_global_item_unref(&adev->mman.mem_global_ref);
>>   		adev->mman.mem_global_referenced = false;
>> @@ -158,7 +158,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
>>   		break;
>>   	case TTM_PL_TT:
>>   		man->func = &amdgpu_gtt_mgr_func;
>> -		man->gpu_offset = adev->mc.gtt_start;
>> +		man->gpu_offset = adev->mc.sysvm_start;
>>   		man->available_caching = TTM_PL_MASK_CACHING;
>>   		man->default_caching = TTM_PL_FLAG_CACHED;
>>   		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
>> @@ -289,7 +289,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
>>   	new_start = amdgpu_mm_node_addr(bo, new_mm, new_mem);
>>   
>>   	num_pages = new_mem->num_pages;
>> -	mutex_lock(&adev->mman.gtt_window_lock);
>> +	mutex_lock(&adev->mman.sysvm_window_lock);
>>   	while (num_pages) {
>>   		unsigned long cur_pages = min(min(old_size, new_size),
>>   					      (u64)AMDGPU_GTT_MAX_TRANSFER_SIZE);
>> @@ -341,14 +341,14 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
>>   			new_start += cur_pages * PAGE_SIZE;
>>   		}
>>   	}
>> -	mutex_unlock(&adev->mman.gtt_window_lock);
>> +	mutex_unlock(&adev->mman.sysvm_window_lock);
>>   
>>   	r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
>>   	dma_fence_put(fence);
>>   	return r;
>>   
>>   error:
>> -	mutex_unlock(&adev->mman.gtt_window_lock);
>> +	mutex_unlock(&adev->mman.sysvm_window_lock);
>>   
>>   	if (fence)
>>   		dma_fence_wait(fence, false);
>> @@ -693,10 +693,10 @@ static int amdgpu_ttm_do_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
>>   	uint64_t flags;
>>   	int r;
>>   
>> -	spin_lock(&gtt->adev->gtt_list_lock);
>> +	spin_lock(&gtt->adev->sysvm_list_lock);
>>   	flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, mem);
>>   	gtt->offset = (u64)mem->start << PAGE_SHIFT;
>> -	r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
>> +	r = amdgpu_sysvm_bind(gtt->adev, gtt->offset, ttm->num_pages,
>>   		ttm->pages, gtt->ttm.dma_address, flags);
>>   
>>   	if (r) {
>> @@ -705,9 +705,9 @@ static int amdgpu_ttm_do_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
>>   		goto error_gart_bind;
>>   	}
>>   
>> -	list_add_tail(&gtt->list, &gtt->adev->gtt_list);
>> +	list_add_tail(&gtt->list, &gtt->adev->sysvm_list);
>>   error_gart_bind:
>> -	spin_unlock(&gtt->adev->gtt_list_lock);
>> +	spin_unlock(&gtt->adev->sysvm_list_lock);
>>   	return r;
>>   
>>   }
>> @@ -774,20 +774,20 @@ int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
>>   	int r;
>>   
>>   	bo_mem.mem_type = TTM_PL_TT;
>> -	spin_lock(&adev->gtt_list_lock);
>> -	list_for_each_entry_safe(gtt, tmp, &adev->gtt_list, list) {
>> +	spin_lock(&adev->sysvm_list_lock);
>> +	list_for_each_entry_safe(gtt, tmp, &adev->sysvm_list, list) {
>>   		flags = amdgpu_ttm_tt_pte_flags(gtt->adev, &gtt->ttm.ttm, &bo_mem);
>> -		r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
>> +		r = amdgpu_sysvm_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
>>   				     gtt->ttm.ttm.pages, gtt->ttm.dma_address,
>>   				     flags);
>>   		if (r) {
>> -			spin_unlock(&adev->gtt_list_lock);
>> +			spin_unlock(&adev->sysvm_list_lock);
>>   			DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
>>   				  gtt->ttm.ttm.num_pages, gtt->offset);
>>   			return r;
>>   		}
>>   	}
>> -	spin_unlock(&adev->gtt_list_lock);
>> +	spin_unlock(&adev->sysvm_list_lock);
>>   	return 0;
>>   }
>>   
>> @@ -803,8 +803,8 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
>>   		return 0;
>>   
>>   	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
>> -	spin_lock(&gtt->adev->gtt_list_lock);
>> -	r = amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages);
>> +	spin_lock(&gtt->adev->sysvm_list_lock);
>> +	r = amdgpu_sysvm_unbind(gtt->adev, gtt->offset, ttm->num_pages);
>>   	if (r) {
>>   		DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
>>   			  gtt->ttm.ttm.num_pages, gtt->offset);
>> @@ -812,7 +812,7 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
>>   	}
>>   	list_del_init(&gtt->list);
>>   error_unbind:
>> -	spin_unlock(&gtt->adev->gtt_list_lock);
>> +	spin_unlock(&gtt->adev->sysvm_list_lock);
>>   	return r;
>>   }
>>   
>> @@ -1037,7 +1037,7 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
>>   			flags |= AMDGPU_PTE_SNOOPED;
>>   	}
>>   
>> -	flags |= adev->gart.gart_pte_flags;
>> +	flags |= adev->sysvm.sysvm_pte_flags;
>>   	flags |= AMDGPU_PTE_READABLE;
>>   
>>   	if (!amdgpu_ttm_tt_is_readonly(ttm))
>> @@ -1144,13 +1144,13 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
>>   	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
>>   		 (unsigned) (adev->mc.real_vram_size / (1024 * 1024)));
>>   	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT,
>> -				adev->mc.gtt_size >> PAGE_SHIFT);
>> +				adev->mc.sysvm_size >> PAGE_SHIFT);
>>   	if (r) {
>>   		DRM_ERROR("Failed initializing GTT heap.\n");
>>   		return r;
>>   	}
>>   	DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
>> -		 (unsigned)(adev->mc.gtt_size / (1024 * 1024)));
>> +		 (unsigned)(adev->mc.sysvm_size / (1024 * 1024)));
>>   
>>   	adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
>>   	adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
>> @@ -1223,7 +1223,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
>>   	if (adev->gds.oa.total_size)
>>   		ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
>>   	ttm_bo_device_release(&adev->mman.bdev);
>> -	amdgpu_gart_fini(adev);
>> +	amdgpu_sysvm_fini(adev);
>>   	amdgpu_ttm_global_fini(adev);
>>   	adev->mman.initialized = false;
>>   	DRM_INFO("amdgpu: ttm finalized\n");
>> @@ -1279,7 +1279,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
>>   	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
>>   	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
>>   
>> -	*addr = adev->mc.gtt_start;
>> +	*addr = adev->mc.sysvm_start;
>>   	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
>>   		AMDGPU_GPU_PAGE_SIZE;
>>   
>> @@ -1296,7 +1296,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
>>   	src_addr = num_dw * 4;
>>   	src_addr += job->ibs[0].gpu_addr;
>>   
>> -	dst_addr = adev->gart.table_addr;
>> +	dst_addr = adev->sysvm.table_addr;
>>   	dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
>>   	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
>>   				dst_addr, num_bytes);
>> @@ -1306,7 +1306,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
>>   
>>   	dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
>>   	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
>> -	r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
>> +	r = amdgpu_sysvm_map(adev, 0, num_pages, dma_address, flags,
>>   			    &job->ibs[0].ptr[num_dw]);
>>   	if (r)
>>   		goto error_free;
>> @@ -1522,7 +1522,7 @@ static int ttm_pl_tt = TTM_PL_TT;
>>   
>>   static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
>>   	{"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram},
>> -	{"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
>> +	{"amdgpu_sysvm_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
>>   	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
>>   #ifdef CONFIG_SWIOTLB
>>   	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
>> @@ -1574,9 +1574,9 @@ static const struct file_operations amdgpu_ttm_vram_fops = {
>>   	.llseek = default_llseek
>>   };
>>   
>> -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
>> +#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
>>   
>> -static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
>> +static ssize_t amdgpu_ttm_sysvm_read(struct file *f, char __user *buf,
>>   				   size_t size, loff_t *pos)
>>   {
>>   	struct amdgpu_device *adev = file_inode(f)->i_private;
>> @@ -1590,16 +1590,16 @@ static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
>>   		struct page *page;
>>   		void *ptr;
>>   
>> -		if (p >= adev->gart.num_cpu_pages)
>> +		if (p >= adev->sysvm.num_cpu_pages)
>>   			return result;
>>   
>> -		page = adev->gart.pages[p];
>> +		page = adev->sysvm.pages[p];
>>   		if (page) {
>>   			ptr = kmap(page);
>>   			ptr += off;
>>   
>>   			r = copy_to_user(buf, ptr, cur_size);
>> -			kunmap(adev->gart.pages[p]);
>> +			kunmap(adev->sysvm.pages[p]);
>>   		} else
>>   			r = clear_user(buf, cur_size);
>>   
>> @@ -1615,9 +1615,9 @@ static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
>>   	return result;
>>   }
>>   
>> -static const struct file_operations amdgpu_ttm_gtt_fops = {
>> +static const struct file_operations amdgpu_ttm_sysvm_fops = {
>>   	.owner = THIS_MODULE,
>> -	.read = amdgpu_ttm_gtt_read,
>> +	.read = amdgpu_ttm_sysvm_read,
>>   	.llseek = default_llseek
>>   };
>>   
>> @@ -1640,12 +1640,12 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
>>   	i_size_write(ent->d_inode, adev->mc.mc_vram_size);
>>   	adev->mman.vram = ent;
>>   
>> -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
>> -	ent = debugfs_create_file("amdgpu_gtt", S_IFREG | S_IRUGO, root,
>> -				  adev, &amdgpu_ttm_gtt_fops);
>> +#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
>> +	ent = debugfs_create_file("amdgpu_sysvm", S_IFREG | S_IRUGO, root,
>> +				  adev, &amdgpu_ttm_sysvm_fops);
>>   	if (IS_ERR(ent))
>>   		return PTR_ERR(ent);
>> -	i_size_write(ent->d_inode, adev->mc.gtt_size);
>> +	i_size_write(ent->d_inode, adev->mc.sysvm_size);
>>   	adev->mman.gtt = ent;
>>   
>>   #endif
>> @@ -1670,7 +1670,7 @@ static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
>>   	debugfs_remove(adev->mman.vram);
>>   	adev->mman.vram = NULL;
>>   
>> -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
>> +#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
>>   	debugfs_remove(adev->mman.gtt);
>>   	adev->mman.gtt = NULL;
>>   #endif
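
The amdgpu_map_buffer() hunk above is where the reserved transfer windows
come into play: the window index picks both a GPU address inside the remap
range at the start of the sysvm aperture and the matching PTE slot in the
sysvm table that the IB then fills in. A sketch of just the address math,
with a made-up helper name for illustration (constants and fields as used
in that hunk):

	/* illustration only: GPU VA and PTE slot for one transfer window */
	static void sysvm_window_addrs(struct amdgpu_device *adev,
				       unsigned window,
				       u64 *gpu_addr, u64 *pte_addr)
	{
		*gpu_addr = adev->mc.sysvm_start +
			(u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
			AMDGPU_GPU_PAGE_SIZE;
		*pte_addr = adev->sysvm.table_addr +
			(u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
	}
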
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
>> index 4f5c1da..1443038 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
>> @@ -53,7 +53,7 @@ struct amdgpu_mman {
>>   	const struct amdgpu_buffer_funcs	*buffer_funcs;
>>   	struct amdgpu_ring			*buffer_funcs_ring;
>>   
>> -	struct mutex				gtt_window_lock;
>> +	struct mutex				sysvm_window_lock;
>>   	/* Scheduler entity for buffer moves */
>>   	struct amd_sched_entity			entity;
>>   };
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> index 1d1810d..8dbacec 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> @@ -982,14 +982,14 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
>>   		value = params->pages_addr ?
>>   			amdgpu_vm_map_gart(params->pages_addr, addr) :
>>   			addr;
>> -		amdgpu_gart_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
>> +		amdgpu_sysvm_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
>>   					i, value, flags);
>>   		addr += incr;
>>   	}
>>   
>>   	/* Flush HDP */
>>   	mb();
>> -	amdgpu_gart_flush_gpu_tlb(params->adev, 0);
>> +	amdgpu_sysvm_flush_gpu_tlb(params->adev, 0);
>>   }
>>   
>>   static int amdgpu_vm_bo_wait(struct amdgpu_device *adev, struct amdgpu_bo *bo)
>> @@ -1101,7 +1101,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
>>   		}
>>   
>>   		pt = amdgpu_bo_gpu_offset(bo);
>> -		pt = amdgpu_gart_get_vm_pde(adev, pt);
>> +		pt = amdgpu_sysvm_get_vm_pde(adev, pt);
>>   		if (parent->entries[pt_idx].addr == pt)
>>   			continue;
>>   
>> @@ -1564,7 +1564,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
>>    *
>>    * @adev: amdgpu_device pointer
>>    * @exclusive: fence we need to sync to
>> - * @gtt_flags: flags as they are used for GTT
>> + * @sysvm_flags: flags as they are used in the SYSVM
>>    * @pages_addr: DMA addresses to use for mapping
>>    * @vm: requested vm
>>    * @mapping: mapped range and flags to use for the update
>> @@ -1578,7 +1578,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
>>    */
>>   static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
>>   				      struct dma_fence *exclusive,
>> -				      uint64_t gtt_flags,
>> +				      uint64_t sysvm_flags,
>>   				      dma_addr_t *pages_addr,
>>   				      struct amdgpu_vm *vm,
>>   				      struct amdgpu_bo_va_mapping *mapping,
>> @@ -1633,8 +1633,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
>>   		}
>>   
>>   		if (pages_addr) {
>> -			if (flags == gtt_flags)
>> -				src = adev->gart.table_addr +
>> +			if (flags == sysvm_flags)
>> +				src = adev->sysvm.table_addr +
>>   					(addr >> AMDGPU_GPU_PAGE_SHIFT) * 8;
>>   			else
>>   				max_entries = min(max_entries, 16ull * 1024ull);
>> @@ -1681,7 +1681,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
>>   	struct amdgpu_vm *vm = bo_va->vm;
>>   	struct amdgpu_bo_va_mapping *mapping;
>>   	dma_addr_t *pages_addr = NULL;
>> -	uint64_t gtt_flags, flags;
>> +	uint64_t sysvm_flags, flags;
>>   	struct ttm_mem_reg *mem;
>>   	struct drm_mm_node *nodes;
>>   	struct dma_fence *exclusive;
>> @@ -1706,12 +1706,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
>>   
>>   	if (bo_va->bo) {
>>   		flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
>> -		gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
>> +		sysvm_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
>>   			adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ?
>>   			flags : 0;
>>   	} else {
>>   		flags = 0x0;
>> -		gtt_flags = ~0x0;
>> +		sysvm_flags = ~0x0;
>>   	}
>>   
>>   	spin_lock(&vm->status_lock);
>> @@ -1721,7 +1721,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
>>   
>>   	list_for_each_entry(mapping, &bo_va->invalids, list) {
>>   		r = amdgpu_vm_bo_split_mapping(adev, exclusive,
>> -					       gtt_flags, pages_addr, vm,
>> +					       sysvm_flags, pages_addr, vm,
>>   					       mapping, flags, nodes,
>>   					       &bo_va->last_pt_update);
>>   		if (r)
>> @@ -1756,7 +1756,7 @@ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
>>   
>>   	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
>>   	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
>> -	adev->gart.gart_funcs->set_prt(adev, enable);
>> +	adev->sysvm.sysvm_funcs->set_prt(adev, enable);
>>   	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
>>   }
>>   
>> @@ -1765,7 +1765,7 @@ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
>>    */
>>   static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
>>   {
>> -	if (!adev->gart.gart_funcs->set_prt)
>> +	if (!adev->sysvm.sysvm_funcs->set_prt)
>>   		return;
>>   
>>   	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
>> @@ -1800,7 +1800,7 @@ static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
>>   {
>>   	struct amdgpu_prt_cb *cb;
>>   
>> -	if (!adev->gart.gart_funcs->set_prt)
>> +	if (!adev->sysvm.sysvm_funcs->set_prt)
>>   		return;
>>   
>>   	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
>> @@ -2498,7 +2498,7 @@ static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level)
>>   void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
>>   {
>>   	struct amdgpu_bo_va_mapping *mapping, *tmp;
>> -	bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
>> +	bool prt_fini_needed = !!adev->sysvm.sysvm_funcs->set_prt;
>>   	int i;
>>   
>>   	amd_sched_entity_fini(vm->entity.sched, &vm->entity);
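
A detail worth keeping in mind when reading the amdgpu_vm.c hunks above:
sysvm_flags is the old gtt_flags under its new name, and it still gates
the shortcut where PTEs are copied straight out of the sysvm table rather
than rebuilt from pages_addr. A commented restatement of that branch (same
logic as the hunk, comments added for clarity only):

	if (pages_addr) {
		if (flags == sysvm_flags)
			/* BO is already bound through the sysvm with the
			 * same flags, so reuse its PTEs from the table */
			src = adev->sysvm.table_addr +
				(addr >> AMDGPU_GPU_PAGE_SHIFT) * 8;
		else
			/* otherwise walk pages_addr in smaller chunks */
			max_entries = min(max_entries, 16ull * 1024ull);
	}
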
>> diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
>> index 6986285..708fb84 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
>> @@ -3731,10 +3731,10 @@ static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
>>   {
>>   	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
>>   	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
>> -	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
>> +	uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
>>   	unsigned eng = ring->vm_inv_eng;
>>   
>> -	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
>> +	pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
>>   	pd_addr |= AMDGPU_PTE_VALID;
>>   
>>   	gfx_v9_0_write_data_to_reg(ring, usepfp, true,
>> diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
>> index a42f483..1290434 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
>> @@ -40,8 +40,8 @@ static void gfxhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
>>   {
>>   	uint64_t value;
>>   
>> -	BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL));
>> -	value = adev->gart.table_addr - adev->mc.vram_start
>> +	BUG_ON(adev->sysvm.table_addr & (~0x0000FFFFFFFFF000ULL));
>> +	value = adev->sysvm.table_addr - adev->mc.vram_start
>>   		+ adev->vm_manager.vram_base_offset;
>>   	value &= 0x0000FFFFFFFFF000ULL;
>>   	value |= 0x1; /*valid bit*/
>> @@ -58,14 +58,14 @@ static void gfxhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
>>   	gfxhub_v1_0_init_gart_pt_regs(adev);
>>   
>>   	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
>> -		     (u32)(adev->mc.gtt_start >> 12));
>> +		     (u32)(adev->mc.sysvm_start >> 12));
>>   	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
>> -		     (u32)(adev->mc.gtt_start >> 44));
>> +		     (u32)(adev->mc.sysvm_start >> 44));
>>   
>>   	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
>> -		     (u32)(adev->mc.gtt_end >> 12));
>> +		     (u32)(adev->mc.sysvm_end >> 12));
>>   	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
>> -		     (u32)(adev->mc.gtt_end >> 44));
>> +		     (u32)(adev->mc.sysvm_end >> 44));
>>   }
>>   
>>   static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
>> @@ -228,7 +228,7 @@ static void gfxhub_v1_0_program_invalidation(struct amdgpu_device *adev)
>>   	}
>>   }
>>   
>> -int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
>> +int gfxhub_v1_0_sysvm_enable(struct amdgpu_device *adev)
>>   {
>>   	if (amdgpu_sriov_vf(adev)) {
>>   		/*
>> @@ -256,7 +256,7 @@ int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
>>   	return 0;
>>   }
>>   
>> -void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev)
>> +void gfxhub_v1_0_sysvm_disable(struct amdgpu_device *adev)
>>   {
>>   	u32 tmp;
>>   	u32 i;
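
In the gfxhub aperture hunk above, sysvm_start and sysvm_end are byte
addresses that get split across the LO32/HI32 register pairs. As a quick
illustration of the shifts used there (local variable names are mine):

	/* bits 12..43 of the byte address land in the LO32 register,
	 * bits 44 and up in the HI32 register */
	u32 start_lo = (u32)(adev->mc.sysvm_start >> 12);
	u32 start_hi = (u32)(adev->mc.sysvm_start >> 44);
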
>> diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
>> index d2dbb08..d194b7e 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
>> @@ -24,8 +24,8 @@
>>   #ifndef __GFXHUB_V1_0_H__
>>   #define __GFXHUB_V1_0_H__
>>   
>> -int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev);
>> -void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev);
>> +int gfxhub_v1_0_sysvm_enable(struct amdgpu_device *adev);
>> +void gfxhub_v1_0_sysvm_disable(struct amdgpu_device *adev);
>>   void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
>>   					  bool value);
>>   void gfxhub_v1_0_init(struct amdgpu_device *adev);
>> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
>> index 5ed6788f..53c3b8a 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
>> @@ -36,7 +36,7 @@
>>   #include "dce/dce_6_0_sh_mask.h"
>>   #include "si_enums.h"
>>   
>> -static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev);
>> +static void gmc_v6_0_set_sysvm_funcs(struct amdgpu_device *adev);
>>   static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
>>   static int gmc_v6_0_wait_for_idle(void *handle);
>>   
>> @@ -216,7 +216,7 @@ static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
>>   	return 0;
>>   }
>>   
>> -static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
>> +static void gmc_v6_0_vram_sysvm_location(struct amdgpu_device *adev,
>>   				       struct amdgpu_mc *mc)
>>   {
>>   	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
>> @@ -228,8 +228,8 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
>>   		mc->mc_vram_size = 0xFFC0000000ULL;
>>   	}
>>   	amdgpu_vram_location(adev, &adev->mc, base);
>> -	adev->mc.gtt_base_align = 0;
>> -	amdgpu_gtt_location(adev, mc);
>> +	adev->mc.sysvm_base_align = 0;
>> +	amdgpu_sysvm_location(adev, mc);
>>   }
>>   
>>   static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
>> @@ -321,8 +321,8 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
>>   	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
>>   	adev->mc.visible_vram_size = adev->mc.aper_size;
>>   
>> -	amdgpu_gart_set_defaults(adev);
>> -	gmc_v6_0_vram_gtt_location(adev, &adev->mc);
>> +	amdgpu_sysvm_set_defaults(adev);
>> +	gmc_v6_0_vram_sysvm_location(adev, &adev->mc);
>>   
>>   	return 0;
>>   }
>> @@ -447,15 +447,15 @@ static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
>>   	}
>>   }
>>   
>> -static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
>> +static int gmc_v6_0_sysvm_enable(struct amdgpu_device *adev)
>>   {
>>   	int r, i;
>>   
>> -	if (adev->gart.robj == NULL) {
>> +	if (adev->sysvm.robj == NULL) {
>>   		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
>>   		return -EINVAL;
>>   	}
>> -	r = amdgpu_gart_table_vram_pin(adev);
>> +	r = amdgpu_sysvm_table_vram_pin(adev);
>>   	if (r)
>>   		return r;
>>   	/* Setup TLB control */
>> @@ -482,9 +482,9 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
>>   	       (4UL << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
>>   	       (4UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
>>   	/* setup context0 */
>> -	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
>> -	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
>> -	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
>> +	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.sysvm_start >> 12);
>> +	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.sysvm_end >> 12);
>> +	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->sysvm.table_addr >> 12);
>>   	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
>>   			(u32)(adev->dummy_page.addr >> 12));
>>   	WREG32(mmVM_CONTEXT0_CNTL2, 0);
>> @@ -508,10 +508,10 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
>>   	for (i = 1; i < 16; i++) {
>>   		if (i < 8)
>>   			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
>> -			       adev->gart.table_addr >> 12);
>> +			       adev->sysvm.table_addr >> 12);
>>   		else
>>   			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
>> -			       adev->gart.table_addr >> 12);
>> +			       adev->sysvm.table_addr >> 12);
>>   	}
>>   
>>   	/* enable context1-15 */
>> @@ -530,9 +530,9 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
>>   
>>   	gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
>>   	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
>> -		 (unsigned)(adev->mc.gtt_size >> 20),
>> -		 (unsigned long long)adev->gart.table_addr);
>> -	adev->gart.ready = true;
>> +		 (unsigned)(adev->mc.sysvm_size >> 20),
>> +		 (unsigned long long)adev->sysvm.table_addr);
>> +	adev->sysvm.ready = true;
>>   	return 0;
>>   }
>>   
>> @@ -540,19 +540,19 @@ static int gmc_v6_0_gart_init(struct amdgpu_device *adev)
>>   {
>>   	int r;
>>   
>> -	if (adev->gart.robj) {
>> +	if (adev->sysvm.robj) {
>>   		dev_warn(adev->dev, "gmc_v6_0 PCIE GART already initialized\n");
>>   		return 0;
>>   	}
>> -	r = amdgpu_gart_init(adev);
>> +	r = amdgpu_sysvm_init(adev);
>>   	if (r)
>>   		return r;
>> -	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
>> -	adev->gart.gart_pte_flags = 0;
>> -	return amdgpu_gart_table_vram_alloc(adev);
>> +	adev->sysvm.table_size = adev->sysvm.num_gpu_pages * 8;
>> +	adev->sysvm.sysvm_pte_flags = 0;
>> +	return amdgpu_sysvm_table_vram_alloc(adev);
>>   }
>>   
>> -static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
>> +static void gmc_v6_0_sysvm_disable(struct amdgpu_device *adev)
>>   {
>>   	/*unsigned i;
>>   
>> @@ -582,13 +582,13 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
>>   	WREG32(mmVM_L2_CNTL3,
>>   	       VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
>>   	       (0UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
>> -	amdgpu_gart_table_vram_unpin(adev);
>> +	amdgpu_sysvm_table_vram_unpin(adev);
>>   }
>>   
>>   static void gmc_v6_0_gart_fini(struct amdgpu_device *adev)
>>   {
>> -	amdgpu_gart_table_vram_free(adev);
>> -	amdgpu_gart_fini(adev);
>> +	amdgpu_sysvm_table_vram_free(adev);
>> +	amdgpu_sysvm_fini(adev);
>>   }
>>   
>>   static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
>> @@ -762,7 +762,7 @@ static int gmc_v6_0_early_init(void *handle)
>>   {
>>   	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>>   
>> -	gmc_v6_0_set_gart_funcs(adev);
>> +	gmc_v6_0_set_sysvm_funcs(adev);
>>   	gmc_v6_0_set_irq_funcs(adev);
>>   
>>   	return 0;
>> @@ -889,7 +889,7 @@ static int gmc_v6_0_hw_init(void *handle)
>>   		}
>>   	}
>>   
>> -	r = gmc_v6_0_gart_enable(adev);
>> +	r = gmc_v6_0_sysvm_enable(adev);
>>   	if (r)
>>   		return r;
>>   
>> @@ -901,7 +901,7 @@ static int gmc_v6_0_hw_fini(void *handle)
>>   	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>>   
>>   	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
>> -	gmc_v6_0_gart_disable(adev);
>> +	gmc_v6_0_sysvm_disable(adev);
>>   
>>   	return 0;
>>   }
>> @@ -1095,7 +1095,7 @@ static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
>>   	.set_powergating_state = gmc_v6_0_set_powergating_state,
>>   };
>>   
>> -static const struct amdgpu_gart_funcs gmc_v6_0_gart_funcs = {
>> +static const struct amdgpu_sysvm_funcs gmc_v6_0_sysvm_funcs = {
>>   	.flush_gpu_tlb = gmc_v6_0_gart_flush_gpu_tlb,
>>   	.set_pte_pde = gmc_v6_0_gart_set_pte_pde,
>>   	.set_prt = gmc_v6_0_set_prt,
>> @@ -1108,10 +1108,10 @@ static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
>>   	.process = gmc_v6_0_process_interrupt,
>>   };
>>   
>> -static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev)
>> +static void gmc_v6_0_set_sysvm_funcs(struct amdgpu_device *adev)
>>   {
>> -	if (adev->gart.gart_funcs == NULL)
>> -		adev->gart.gart_funcs = &gmc_v6_0_gart_funcs;
>> +	if (adev->sysvm.sysvm_funcs == NULL)
>> +		adev->sysvm.sysvm_funcs = &gmc_v6_0_sysvm_funcs;
>>   }
>>   
>>   static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
>> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
>> index 15f2c0f..2329bdb 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
>> @@ -39,7 +39,7 @@
>>   
>>   #include "amdgpu_atombios.h"
>>   
>> -static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
>> +static void gmc_v7_0_set_sysvm_funcs(struct amdgpu_device *adev);
>>   static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
>>   static int gmc_v7_0_wait_for_idle(void *handle);
>>   
>> @@ -231,7 +231,7 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
>>   	return 0;
>>   }
>>   
>> -static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
>> +static void gmc_v7_0_vram_sysvm_location(struct amdgpu_device *adev,
>>   				       struct amdgpu_mc *mc)
>>   {
>>   	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
>> @@ -244,8 +244,8 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
>>   		mc->mc_vram_size = 0xFFC0000000ULL;
>>   	}
>>   	amdgpu_vram_location(adev, &adev->mc, base);
>> -	adev->mc.gtt_base_align = 0;
>> -	amdgpu_gtt_location(adev, mc);
>> +	adev->mc.sysvm_base_align = 0;
>> +	amdgpu_sysvm_location(adev, mc);
>>   }
>>   
>>   /**
>> @@ -373,8 +373,8 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
>>   	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
>>   		adev->mc.visible_vram_size = adev->mc.real_vram_size;
>>   
>> -	amdgpu_gart_set_defaults(adev);
>> -	gmc_v7_0_vram_gtt_location(adev, &adev->mc);
>> +	amdgpu_sysvm_set_defaults(adev);
>> +	gmc_v7_0_vram_sysvm_location(adev, &adev->mc);
>>   
>>   	return 0;
>>   }
>> @@ -536,7 +536,7 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
>>   }
>>   
>>   /**
>> - * gmc_v7_0_gart_enable - gart enable
>> + * gmc_v7_0_sysvm_enable - gart enable
>>    *
>>    * @adev: amdgpu_device pointer
>>    *
>> @@ -546,16 +546,16 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
>>    * and GPUVM for FSA64 clients (CIK).
>>    * Returns 0 for success, errors for failure.
>>    */
>> -static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
>> +static int gmc_v7_0_sysvm_enable(struct amdgpu_device *adev)
>>   {
>>   	int r, i;
>>   	u32 tmp;
>>   
>> -	if (adev->gart.robj == NULL) {
>> +	if (adev->sysvm.robj == NULL) {
>>   		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
>>   		return -EINVAL;
>>   	}
>> -	r = amdgpu_gart_table_vram_pin(adev);
>> +	r = amdgpu_sysvm_table_vram_pin(adev);
>>   	if (r)
>>   		return r;
>>   	/* Setup TLB control */
>> @@ -585,9 +585,9 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
>>   	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
>>   	WREG32(mmVM_L2_CNTL3, tmp);
>>   	/* setup context0 */
>> -	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
>> -	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
>> -	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
>> +	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.sysvm_start >> 12);
>> +	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.sysvm_end >> 12);
>> +	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->sysvm.table_addr >> 12);
>>   	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
>>   			(u32)(adev->dummy_page.addr >> 12));
>>   	WREG32(mmVM_CONTEXT0_CNTL2, 0);
>> @@ -611,10 +611,10 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
>>   	for (i = 1; i < 16; i++) {
>>   		if (i < 8)
>>   			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
>> -			       adev->gart.table_addr >> 12);
>> +			       adev->sysvm.table_addr >> 12);
>>   		else
>>   			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
>> -			       adev->gart.table_addr >> 12);
>> +			       adev->sysvm.table_addr >> 12);
>>   	}
>>   
>>   	/* enable context1-15 */
>> @@ -640,9 +640,9 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
>>   
>>   	gmc_v7_0_gart_flush_gpu_tlb(adev, 0);
>>   	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
>> -		 (unsigned)(adev->mc.gtt_size >> 20),
>> -		 (unsigned long long)adev->gart.table_addr);
>> -	adev->gart.ready = true;
>> +		 (unsigned)(adev->mc.sysvm_size >> 20),
>> +		 (unsigned long long)adev->sysvm.table_addr);
>> +	adev->sysvm.ready = true;
>>   	return 0;
>>   }
>>   
>> @@ -650,27 +650,27 @@ static int gmc_v7_0_gart_init(struct amdgpu_device *adev)
>>   {
>>   	int r;
>>   
>> -	if (adev->gart.robj) {
>> +	if (adev->sysvm.robj) {
>>   		WARN(1, "R600 PCIE GART already initialized\n");
>>   		return 0;
>>   	}
>>   	/* Initialize common gart structure */
>> -	r = amdgpu_gart_init(adev);
>> +	r = amdgpu_sysvm_init(adev);
>>   	if (r)
>>   		return r;
>> -	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
>> -	adev->gart.gart_pte_flags = 0;
>> -	return amdgpu_gart_table_vram_alloc(adev);
>> +	adev->sysvm.table_size = adev->sysvm.num_gpu_pages * 8;
>> +	adev->sysvm.sysvm_pte_flags = 0;
>> +	return amdgpu_sysvm_table_vram_alloc(adev);
>>   }
>>   
>>   /**
>> - * gmc_v7_0_gart_disable - gart disable
>> + * gmc_v7_0_sysvm_disable - gart disable
>>    *
>>    * @adev: amdgpu_device pointer
>>    *
>>    * This disables all VM page table (CIK).
>>    */
>> -static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
>> +static void gmc_v7_0_sysvm_disable(struct amdgpu_device *adev)
>>   {
>>   	u32 tmp;
>>   
>> @@ -688,7 +688,7 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
>>   	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
>>   	WREG32(mmVM_L2_CNTL, tmp);
>>   	WREG32(mmVM_L2_CNTL2, 0);
>> -	amdgpu_gart_table_vram_unpin(adev);
>> +	amdgpu_sysvm_table_vram_unpin(adev);
>>   }
>>   
>>   /**
>> @@ -700,8 +700,8 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
>>    */
>>   static void gmc_v7_0_gart_fini(struct amdgpu_device *adev)
>>   {
>> -	amdgpu_gart_table_vram_free(adev);
>> -	amdgpu_gart_fini(adev);
>> +	amdgpu_sysvm_table_vram_free(adev);
>> +	amdgpu_sysvm_fini(adev);
>>   }
>>   
>>   /**
>> @@ -885,7 +885,7 @@ static int gmc_v7_0_early_init(void *handle)
>>   {
>>   	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>>   
>> -	gmc_v7_0_set_gart_funcs(adev);
>> +	gmc_v7_0_set_sysvm_funcs(adev);
>>   	gmc_v7_0_set_irq_funcs(adev);
>>   
>>   	adev->mc.shared_aperture_start = 0x2000000000000000ULL;
>> @@ -1036,7 +1036,7 @@ static int gmc_v7_0_hw_init(void *handle)
>>   		}
>>   	}
>>   
>> -	r = gmc_v7_0_gart_enable(adev);
>> +	r = gmc_v7_0_sysvm_enable(adev);
>>   	if (r)
>>   		return r;
>>   
>> @@ -1048,7 +1048,7 @@ static int gmc_v7_0_hw_fini(void *handle)
>>   	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>>   
>>   	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
>> -	gmc_v7_0_gart_disable(adev);
>> +	gmc_v7_0_sysvm_disable(adev);
>>   
>>   	return 0;
>>   }
>> @@ -1270,7 +1270,7 @@ static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
>>   	.set_powergating_state = gmc_v7_0_set_powergating_state,
>>   };
>>   
>> -static const struct amdgpu_gart_funcs gmc_v7_0_gart_funcs = {
>> +static const struct amdgpu_sysvm_funcs gmc_v7_0_sysvm_funcs = {
>>   	.flush_gpu_tlb = gmc_v7_0_gart_flush_gpu_tlb,
>>   	.set_pte_pde = gmc_v7_0_gart_set_pte_pde,
>>   	.set_prt = gmc_v7_0_set_prt,
>> @@ -1283,10 +1283,10 @@ static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
>>   	.process = gmc_v7_0_process_interrupt,
>>   };
>>   
>> -static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev)
>> +static void gmc_v7_0_set_sysvm_funcs(struct amdgpu_device *adev)
>>   {
>> -	if (adev->gart.gart_funcs == NULL)
>> -		adev->gart.gart_funcs = &gmc_v7_0_gart_funcs;
>> +	if (adev->sysvm.sysvm_funcs == NULL)
>> +		adev->sysvm.sysvm_funcs = &gmc_v7_0_sysvm_funcs;
>>   }
>>   
>>   static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
>> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
>> index 213af65..cf8f8d2 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
>> @@ -41,7 +41,7 @@
>>   #include "amdgpu_atombios.h"
>>   
>>   
>> -static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
>> +static void gmc_v8_0_set_sysvm_funcs(struct amdgpu_device *adev);
>>   static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
>>   static int gmc_v8_0_wait_for_idle(void *handle);
>>   
>> @@ -393,7 +393,7 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
>>   	return 0;
>>   }
>>   
>> -static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
>> +static void gmc_v8_0_vram_sysvm_location(struct amdgpu_device *adev,
>>   				       struct amdgpu_mc *mc)
>>   {
>>   	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
>> @@ -406,8 +406,8 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
>>   		mc->mc_vram_size = 0xFFC0000000ULL;
>>   	}
>>   	amdgpu_vram_location(adev, &adev->mc, base);
>> -	adev->mc.gtt_base_align = 0;
>> -	amdgpu_gtt_location(adev, mc);
>> +	adev->mc.sysvm_base_align = 0;
>> +	amdgpu_sysvm_location(adev, mc);
>>   }
>>   
>>   /**
>> @@ -535,8 +535,8 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
>>   	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
>>   		adev->mc.visible_vram_size = adev->mc.real_vram_size;
>>   
>> -	amdgpu_gart_set_defaults(adev);
>> -	gmc_v8_0_vram_gtt_location(adev, &adev->mc);
>> +	amdgpu_sysvm_set_defaults(adev);
>> +	gmc_v8_0_vram_sysvm_location(adev, &adev->mc);
>>   
>>   	return 0;
>>   }
>> @@ -722,7 +722,7 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
>>   }
>>   
>>   /**
>> - * gmc_v8_0_gart_enable - gart enable
>> + * gmc_v8_0_sysvm_enable - gart enable
>>    *
>>    * @adev: amdgpu_device pointer
>>    *
>> @@ -732,16 +732,16 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
>>    * and GPUVM for FSA64 clients (CIK).
>>    * Returns 0 for success, errors for failure.
>>    */
>> -static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
>> +static int gmc_v8_0_sysvm_enable(struct amdgpu_device *adev)
>>   {
>>   	int r, i;
>>   	u32 tmp;
>>   
>> -	if (adev->gart.robj == NULL) {
>> +	if (adev->sysvm.robj == NULL) {
>>   		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
>>   		return -EINVAL;
>>   	}
>> -	r = amdgpu_gart_table_vram_pin(adev);
>> +	r = amdgpu_sysvm_table_vram_pin(adev);
>>   	if (r)
>>   		return r;
>>   	/* Setup TLB control */
>> @@ -787,9 +787,9 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
>>   	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
>>   	WREG32(mmVM_L2_CNTL4, tmp);
>>   	/* setup context0 */
>> -	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
>> -	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
>> -	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
>> +	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.sysvm_start >> 12);
>> +	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.sysvm_end >> 12);
>> +	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->sysvm.table_addr >> 12);
>>   	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
>>   			(u32)(adev->dummy_page.addr >> 12));
>>   	WREG32(mmVM_CONTEXT0_CNTL2, 0);
>> @@ -813,10 +813,10 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
>>   	for (i = 1; i < 16; i++) {
>>   		if (i < 8)
>>   			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
>> -			       adev->gart.table_addr >> 12);
>> +			       adev->sysvm.table_addr >> 12);
>>   		else
>>   			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
>> -			       adev->gart.table_addr >> 12);
>> +			       adev->sysvm.table_addr >> 12);
>>   	}
>>   
>>   	/* enable context1-15 */
>> @@ -843,9 +843,9 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
>>   
>>   	gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
>>   	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
>> -		 (unsigned)(adev->mc.gtt_size >> 20),
>> -		 (unsigned long long)adev->gart.table_addr);
>> -	adev->gart.ready = true;
>> +		 (unsigned)(adev->mc.sysvm_size >> 20),
>> +		 (unsigned long long)adev->sysvm.table_addr);
>> +	adev->sysvm.ready = true;
>>   	return 0;
>>   }
>>   
>> @@ -853,27 +853,27 @@ static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
>>   {
>>   	int r;
>>   
>> -	if (adev->gart.robj) {
>> +	if (adev->sysvm.robj) {
>>   		WARN(1, "R600 PCIE GART already initialized\n");
>>   		return 0;
>>   	}
>>   	/* Initialize common gart structure */
>> -	r = amdgpu_gart_init(adev);
>> +	r = amdgpu_sysvm_init(adev);
>>   	if (r)
>>   		return r;
>> -	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
>> -	adev->gart.gart_pte_flags = AMDGPU_PTE_EXECUTABLE;
>> -	return amdgpu_gart_table_vram_alloc(adev);
>> +	adev->sysvm.table_size = adev->sysvm.num_gpu_pages * 8;
>> +	adev->sysvm.sysvm_pte_flags = AMDGPU_PTE_EXECUTABLE;
>> +	return amdgpu_sysvm_table_vram_alloc(adev);
>>   }
>>   
>>   /**
>> - * gmc_v8_0_gart_disable - gart disable
>> + * gmc_v8_0_sysvm_disable - gart disable
>>    *
>>    * @adev: amdgpu_device pointer
>>    *
>>    * This disables all VM page table (CIK).
>>    */
>> -static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
>> +static void gmc_v8_0_sysvm_disable(struct amdgpu_device *adev)
>>   {
>>   	u32 tmp;
>>   
>> @@ -891,7 +891,7 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
>>   	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
>>   	WREG32(mmVM_L2_CNTL, tmp);
>>   	WREG32(mmVM_L2_CNTL2, 0);
>> -	amdgpu_gart_table_vram_unpin(adev);
>> +	amdgpu_sysvm_table_vram_unpin(adev);
>>   }
>>   
>>   /**
>> @@ -903,8 +903,8 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
>>    */
>>   static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
>>   {
>> -	amdgpu_gart_table_vram_free(adev);
>> -	amdgpu_gart_fini(adev);
>> +	amdgpu_sysvm_table_vram_free(adev);
>> +	amdgpu_sysvm_fini(adev);
>>   }
>>   
>>   /**
>> @@ -962,7 +962,7 @@ static int gmc_v8_0_early_init(void *handle)
>>   {
>>   	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>>   
>> -	gmc_v8_0_set_gart_funcs(adev);
>> +	gmc_v8_0_set_sysvm_funcs(adev);
>>   	gmc_v8_0_set_irq_funcs(adev);
>>   
>>   	adev->mc.shared_aperture_start = 0x2000000000000000ULL;
>> @@ -1128,7 +1128,7 @@ static int gmc_v8_0_hw_init(void *handle)
>>   		}
>>   	}
>>   
>> -	r = gmc_v8_0_gart_enable(adev);
>> +	r = gmc_v8_0_sysvm_enable(adev);
>>   	if (r)
>>   		return r;
>>   
>> @@ -1140,7 +1140,7 @@ static int gmc_v8_0_hw_fini(void *handle)
>>   	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>>   
>>   	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
>> -	gmc_v8_0_gart_disable(adev);
>> +	gmc_v8_0_sysvm_disable(adev);
>>   
>>   	return 0;
>>   }
>> @@ -1590,7 +1590,7 @@ static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
>>   	.get_clockgating_state = gmc_v8_0_get_clockgating_state,
>>   };
>>   
>> -static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = {
>> +static const struct amdgpu_sysvm_funcs gmc_v8_0_sysvm_funcs = {
>>   	.flush_gpu_tlb = gmc_v8_0_gart_flush_gpu_tlb,
>>   	.set_pte_pde = gmc_v8_0_gart_set_pte_pde,
>>   	.set_prt = gmc_v8_0_set_prt,
>> @@ -1603,10 +1603,10 @@ static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
>>   	.process = gmc_v8_0_process_interrupt,
>>   };
>>   
>> -static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev)
>> +static void gmc_v8_0_set_sysvm_funcs(struct amdgpu_device *adev)
>>   {
>> -	if (adev->gart.gart_funcs == NULL)
>> -		adev->gart.gart_funcs = &gmc_v8_0_gart_funcs;
>> +	if (adev->sysvm.sysvm_funcs == NULL)
>> +		adev->sysvm.sysvm_funcs = &gmc_v8_0_sysvm_funcs;
>>   }
>>   
>>   static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
>> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>> index dbb43d9..f067465 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>> @@ -365,7 +365,7 @@ static u64 gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, u64 addr)
>>   	return addr;
>>   }
>>   
>> -static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
>> +static const struct amdgpu_sysvm_funcs gmc_v9_0_sysvm_funcs = {
>>   	.flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
>>   	.set_pte_pde = gmc_v9_0_gart_set_pte_pde,
>>   	.get_invalidate_req = gmc_v9_0_get_invalidate_req,
>> @@ -373,17 +373,17 @@ static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
>>   	.get_vm_pde = gmc_v9_0_get_vm_pde
>>   };
>>   
>> -static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
>> +static void gmc_v9_0_set_sysvm_funcs(struct amdgpu_device *adev)
>>   {
>> -	if (adev->gart.gart_funcs == NULL)
>> -		adev->gart.gart_funcs = &gmc_v9_0_gart_funcs;
>> +	if (adev->sysvm.sysvm_funcs == NULL)
>> +		adev->sysvm.sysvm_funcs = &gmc_v9_0_sysvm_funcs;
>>   }
>>   
>>   static int gmc_v9_0_early_init(void *handle)
>>   {
>>   	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>>   
>> -	gmc_v9_0_set_gart_funcs(adev);
>> +	gmc_v9_0_set_sysvm_funcs(adev);
>>   	gmc_v9_0_set_irq_funcs(adev);
>>   
>>   	return 0;
>> @@ -412,15 +412,15 @@ static int gmc_v9_0_late_init(void *handle)
>>   	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
>>   }
>>   
>> -static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
>> +static void gmc_v9_0_vram_sysvm_location(struct amdgpu_device *adev,
>>   					struct amdgpu_mc *mc)
>>   {
>>   	u64 base = 0;
>>   	if (!amdgpu_sriov_vf(adev))
>>   		base = mmhub_v1_0_get_fb_location(adev);
>>   	amdgpu_vram_location(adev, &adev->mc, base);
>> -	adev->mc.gtt_base_align = 0;
>> -	amdgpu_gtt_location(adev, mc);
>> +	adev->mc.sysvm_base_align = 0;
>> +	amdgpu_sysvm_location(adev, mc);
>>   	/* base offset of vram pages */
>>   	if (adev->flags & AMD_IS_APU)
>>   		adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
>> @@ -494,8 +494,8 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
>>   	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
>>   		adev->mc.visible_vram_size = adev->mc.real_vram_size;
>>   
>> -	amdgpu_gart_set_defaults(adev);
>> -	gmc_v9_0_vram_gtt_location(adev, &adev->mc);
>> +	amdgpu_sysvm_set_defaults(adev);
>> +	gmc_v9_0_vram_sysvm_location(adev, &adev->mc);
>>   
>>   	return 0;
>>   }
>> @@ -504,18 +504,18 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
>>   {
>>   	int r;
>>   
>> -	if (adev->gart.robj) {
>> +	if (adev->sysvm.robj) {
>>   		WARN(1, "VEGA10 PCIE GART already initialized\n");
>>   		return 0;
>>   	}
>>   	/* Initialize common gart structure */
>> -	r = amdgpu_gart_init(adev);
>> +	r = amdgpu_sysvm_init(adev);
>>   	if (r)
>>   		return r;
>> -	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
>> -	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
>> +	adev->sysvm.table_size = adev->sysvm.num_gpu_pages * 8;
>> +	adev->sysvm.sysvm_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
>>   				 AMDGPU_PTE_EXECUTABLE;
>> -	return amdgpu_gart_table_vram_alloc(adev);
>> +	return amdgpu_sysvm_table_vram_alloc(adev);
>>   }
>>   
>>   static int gmc_v9_0_sw_init(void *handle)
>> @@ -640,8 +640,8 @@ static int gmc_v9_0_sw_init(void *handle)
>>    */
>>   static void gmc_v9_0_gart_fini(struct amdgpu_device *adev)
>>   {
>> -	amdgpu_gart_table_vram_free(adev);
>> -	amdgpu_gart_fini(adev);
>> +	amdgpu_sysvm_table_vram_free(adev);
>> +	amdgpu_sysvm_fini(adev);
>>   }
>>   
>>   static int gmc_v9_0_sw_fini(void *handle)
>> @@ -669,11 +669,11 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
>>   }
>>   
>>   /**
>> - * gmc_v9_0_gart_enable - gart enable
>> + * gmc_v9_0_sysvm_enable - gart enable
>>    *
>>    * @adev: amdgpu_device pointer
>>    */
>> -static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
>> +static int gmc_v9_0_sysvm_enable(struct amdgpu_device *adev)
>>   {
>>   	int r;
>>   	bool value;
>> @@ -683,11 +683,11 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
>>   		golden_settings_vega10_hdp,
>>   		(const u32)ARRAY_SIZE(golden_settings_vega10_hdp));
>>   
>> -	if (adev->gart.robj == NULL) {
>> +	if (adev->sysvm.robj == NULL) {
>>   		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
>>   		return -EINVAL;
>>   	}
>> -	r = amdgpu_gart_table_vram_pin(adev);
>> +	r = amdgpu_sysvm_table_vram_pin(adev);
>>   	if (r)
>>   		return r;
>>   
>> @@ -706,11 +706,11 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
>>   		break;
>>   	}
>>   
>> -	r = gfxhub_v1_0_gart_enable(adev);
>> +	r = gfxhub_v1_0_sysvm_enable(adev);
>>   	if (r)
>>   		return r;
>>   
>> -	r = mmhub_v1_0_gart_enable(adev);
>> +	r = mmhub_v1_0_sysvm_enable(adev);
>>   	if (r)
>>   		return r;
>>   
>> @@ -733,9 +733,9 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
>>   	gmc_v9_0_gart_flush_gpu_tlb(adev, 0);
>>   
>>   	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
>> -		 (unsigned)(adev->mc.gtt_size >> 20),
>> -		 (unsigned long long)adev->gart.table_addr);
>> -	adev->gart.ready = true;
>> +		 (unsigned)(adev->mc.sysvm_size >> 20),
>> +		 (unsigned long long)adev->sysvm.table_addr);
>> +	adev->sysvm.ready = true;
>>   	return 0;
>>   }
>>   
>> @@ -747,23 +747,23 @@ static int gmc_v9_0_hw_init(void *handle)
>>   	/* The sequence of these two function calls matters.*/
>>   	gmc_v9_0_init_golden_registers(adev);
>>   
>> -	r = gmc_v9_0_gart_enable(adev);
>> +	r = gmc_v9_0_sysvm_enable(adev);
>>   
>>   	return r;
>>   }
>>   
>>   /**
>> - * gmc_v9_0_gart_disable - gart disable
>> + * gmc_v9_0_sysvm_disable - gart disable
>>    *
>>    * @adev: amdgpu_device pointer
>>    *
>>    * This disables all VM page table.
>>    */
>> -static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
>> +static void gmc_v9_0_sysvm_disable(struct amdgpu_device *adev)
>>   {
>> -	gfxhub_v1_0_gart_disable(adev);
>> -	mmhub_v1_0_gart_disable(adev);
>> -	amdgpu_gart_table_vram_unpin(adev);
>> +	gfxhub_v1_0_sysvm_disable(adev);
>> +	mmhub_v1_0_sysvm_disable(adev);
>> +	amdgpu_sysvm_table_vram_unpin(adev);
>>   }
>>   
>>   static int gmc_v9_0_hw_fini(void *handle)
>> @@ -777,7 +777,7 @@ static int gmc_v9_0_hw_fini(void *handle)
>>   	}
>>   
>>   	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
>> -	gmc_v9_0_gart_disable(adev);
>> +	gmc_v9_0_sysvm_disable(adev);
>>   
>>   	return 0;
>>   }
>> diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
>> index 9804318..fbc8f6e 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
>> @@ -51,8 +51,8 @@ static void mmhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
>>   {
>>   	uint64_t value;
>>   
>> -	BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL));
>> -	value = adev->gart.table_addr - adev->mc.vram_start +
>> +	BUG_ON(adev->sysvm.table_addr & (~0x0000FFFFFFFFF000ULL));
>> +	value = adev->sysvm.table_addr - adev->mc.vram_start +
>>   		adev->vm_manager.vram_base_offset;
>>   	value &= 0x0000FFFFFFFFF000ULL;
>>   	value |= 0x1; /* valid bit */
>> @@ -69,14 +69,14 @@ static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
>>   	mmhub_v1_0_init_gart_pt_regs(adev);
>>   
>>   	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
>> -		     (u32)(adev->mc.gtt_start >> 12));
>> +		     (u32)(adev->mc.sysvm_start >> 12));
>>   	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
>> -		     (u32)(adev->mc.gtt_start >> 44));
>> +		     (u32)(adev->mc.sysvm_start >> 44));
>>   
>>   	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
>> -		     (u32)(adev->mc.gtt_end >> 12));
>> +		     (u32)(adev->mc.sysvm_end >> 12));
>>   	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
>> -		     (u32)(adev->mc.gtt_end >> 44));
>> +		     (u32)(adev->mc.sysvm_end >> 44));
>>   }
>>   
>>   static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
>> @@ -462,7 +462,7 @@ void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
>>   	}
>>   }
>>   
>> -int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
>> +int mmhub_v1_0_sysvm_enable(struct amdgpu_device *adev)
>>   {
>>   	if (amdgpu_sriov_vf(adev)) {
>>   		/*
>> @@ -490,7 +490,7 @@ int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
>>   	return 0;
>>   }
>>   
>> -void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
>> +void mmhub_v1_0_sysvm_disable(struct amdgpu_device *adev)
>>   {
>>   	u32 tmp;
>>   	u32 i;
>> diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
>> index 57bb940..23128e5 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
>> @@ -24,8 +24,8 @@
>>   #define __MMHUB_V1_0_H__
>>   
>>   u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev);
>> -int mmhub_v1_0_gart_enable(struct amdgpu_device *adev);
>> -void mmhub_v1_0_gart_disable(struct amdgpu_device *adev);
>> +int mmhub_v1_0_sysvm_enable(struct amdgpu_device *adev);
>> +void mmhub_v1_0_sysvm_disable(struct amdgpu_device *adev);
>>   void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
>>   					 bool value);
>>   void mmhub_v1_0_init(struct amdgpu_device *adev);
>> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
>> index 4a65697..056b169 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
>> @@ -1121,10 +1121,10 @@ static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
>>   					 unsigned vm_id, uint64_t pd_addr)
>>   {
>>   	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
>> -	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
>> +	uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
>>   	unsigned eng = ring->vm_inv_eng;
>>   
>> -	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
>> +	pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
>>   	pd_addr |= AMDGPU_PTE_VALID;
>>   
>>   	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
>> diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
>> index 987b958..95913fd 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
>> @@ -1312,11 +1312,11 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
>>   					unsigned vm_id, uint64_t pd_addr)
>>   {
>>   	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
>> -	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
>> +	uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
>>   	uint32_t data0, data1, mask;
>>   	unsigned eng = ring->vm_inv_eng;
>>   
>> -	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
>> +	pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
>>   	pd_addr |= AMDGPU_PTE_VALID;
>>   
>>   	data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
>> @@ -1353,10 +1353,10 @@ static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
>>   			 unsigned int vm_id, uint64_t pd_addr)
>>   {
>>   	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
>> -	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
>> +	uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
>>   	unsigned eng = ring->vm_inv_eng;
>>   
>> -	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
>> +	pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
>>   	pd_addr |= AMDGPU_PTE_VALID;
>>   
>>   	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
>> diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
>> index 1ecd6bb..b869423 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
>> @@ -952,10 +952,10 @@ static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
>>   			 unsigned int vm_id, uint64_t pd_addr)
>>   {
>>   	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
>> -	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
>> +	uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
>>   	unsigned eng = ring->vm_inv_eng;
>>   
>> -	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
>> +	pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
>>   	pd_addr |= AMDGPU_PTE_VALID;
>>   
>>   	amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
>> diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
>> index 21e7b88..2ca49af 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
>> @@ -878,11 +878,11 @@ static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
>>   					unsigned vm_id, uint64_t pd_addr)
>>   {
>>   	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
>> -	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
>> +	uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
>>   	uint32_t data0, data1, mask;
>>   	unsigned eng = ring->vm_inv_eng;
>>   
>> -	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
>> +	pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
>>   	pd_addr |= AMDGPU_PTE_VALID;
>>   
>>   	data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
>> @@ -1010,10 +1010,10 @@ static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
>>   			 unsigned int vm_id, uint64_t pd_addr)
>>   {
>>   	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
>> -	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
>> +	uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
>>   	unsigned eng = ring->vm_inv_eng;
>>   
>> -	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
>> +	pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
>>   	pd_addr |= AMDGPU_PTE_VALID;
>>   
>>   	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
>> -- 
>> 2.7.4
>>


_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 07/11] drm/amdgpu: rename GART to SYSVM
       [not found]     ` <1499075076-1851-7-git-send-email-deathsimple-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
  2017-07-04  3:13       ` Zhou, David(ChunMing)
  2017-07-04  8:09       ` Huang Rui
@ 2017-07-04 21:11       ` Felix Kuehling
       [not found]         ` <61109920-9d05-cb27-67b3-51a1b46b15bc-5C7GfCeVMHo@public.gmane.org>
  2 siblings, 1 reply; 30+ messages in thread
From: Felix Kuehling @ 2017-07-04 21:11 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW, Christian König

I'm afraid this will lead to more confusion when talking to different
teams in AMD. At least to me, "GART" was always understood to be the
system-wide address translation table (VMID-0), a remnant from the
pre-GPUVM days. You're now calling that SYSVM, while all GPU-accessible
system memory is still called GTT.

I think it would make more sense to call general GPU-mapped system
memory "SYSVM" or "GPUVM", and only call the global VMID-0 page table
"GART".

But of course that would require changes in TTM and all TTM drivers, so
it's not a realistic option.
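
To make the distinction concrete, here is a minimal sketch (illustrative
only; the struct and field names below are simplified assumptions, not
the actual amdgpu structures) of the two things the naming conflates:

	/* assumes <linux/types.h> and <linux/mm_types.h> */

	/* The device-global VMID-0 translation table (the classic "GART"):
	 * a single page table shared by the whole GPU. */
	struct vmid0_table_sketch {
		dma_addr_t	table_addr;	/* bus address of the table itself */
		unsigned	num_gpu_pages;	/* one PTE per GPU page of the aperture */
	};

	/* A buffer object placed in GPU-accessible system memory (TTM's
	 * "GTT" / TTM_PL_TT domain): its backing pages are what the
	 * VMID-0 table maps into the aperture. */
	struct gtt_bo_sketch {
		struct page	**pages;	/* backing system pages */
		u64		gpu_offset;	/* offset inside the VMID-0 aperture */
	};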

Regards,
  Felix


On 17-07-03 05:44 AM, Christian König wrote:
> From: Christian König <christian.koenig@amd.com>
>
> Just mass rename all names related to the hardware GART/GTT functions to SYSVM.
>
> The name of symbols related to the TTM TT domain stay the same.
>
> This should improve the distinction between the two.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/gpu/drm/amd/amdgpu/Kconfig         |   9 +-
>  drivers/gpu/drm/amd/amdgpu/Makefile        |   2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu.h        |  58 ++--
>  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  48 ++--
>  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c   | 423 -----------------------------
>  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c    |   2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c  | 423 +++++++++++++++++++++++++++++
>  drivers/gpu/drm/amd/amdgpu/amdgpu_test.c   |  84 +++---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c    |  76 +++---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h    |   2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c     |  30 +-
>  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c      |   4 +-
>  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c   |  16 +-
>  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h   |   4 +-
>  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c      |  66 ++---
>  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c      |  70 ++---
>  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c      |  70 ++---
>  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c      |  66 ++---
>  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c    |  16 +-
>  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h    |   4 +-
>  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c     |   4 +-
>  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c      |   8 +-
>  drivers/gpu/drm/amd/amdgpu/vce_v4_0.c      |   4 +-
>  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c      |   8 +-
>  24 files changed, 749 insertions(+), 748 deletions(-)
>  delete mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
>  create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
> index e8af1f5..ebbac01 100644
> --- a/drivers/gpu/drm/amd/amdgpu/Kconfig
> +++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
> @@ -31,14 +31,15 @@ config DRM_AMDGPU_USERPTR
>  	  This option selects CONFIG_MMU_NOTIFIER if it isn't already
>  	  selected to enabled full userptr support.
>  
> -config DRM_AMDGPU_GART_DEBUGFS
> -	bool "Allow GART access through debugfs"
> +config DRM_AMDGPU_SYSVM_DEBUGFS
> +	bool "Allow SYSVM access through debugfs"
>  	depends on DRM_AMDGPU
>  	depends on DEBUG_FS
>  	default n
>  	help
> -	  Selecting this option creates a debugfs file to inspect the mapped
> -	  pages. Uses more memory for housekeeping, enable only for debugging.
> +	  Selecting this option creates a debugfs file to inspect the SYSVM
> +	  mapped pages. Uses more memory for housekeeping, enable only for
> +	  debugging.
>  
>  source "drivers/gpu/drm/amd/acp/Kconfig"
>  source "drivers/gpu/drm/amd/display/Kconfig"
> diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
> index 3661110..d80d49f 100644
> --- a/drivers/gpu/drm/amd/amdgpu/Makefile
> +++ b/drivers/gpu/drm/amd/amdgpu/Makefile
> @@ -22,7 +22,7 @@ amdgpu-y := amdgpu_drv.o
>  # add KMS driver
>  amdgpu-y += amdgpu_device.o amdgpu_kms.o \
>  	amdgpu_atombios.o atombios_crtc.o amdgpu_connectors.o \
> -	atom.o amdgpu_fence.o amdgpu_ttm.o amdgpu_object.o amdgpu_gart.o \
> +	atom.o amdgpu_fence.o amdgpu_ttm.o amdgpu_object.o amdgpu_sysvm.o \
>  	amdgpu_encoders.o amdgpu_display.o amdgpu_i2c.o \
>  	amdgpu_fb.o amdgpu_gem.o amdgpu_ring.o \
>  	amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o amdgpu_test.o \
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index 4a2b33d..abe191f 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -305,7 +305,7 @@ struct amdgpu_vm_pte_funcs {
>  };
>  
>  /* provided by the gmc block */
> -struct amdgpu_gart_funcs {
> +struct amdgpu_sysvm_funcs {
>  	/* flush the vm tlb via mmio */
>  	void (*flush_gpu_tlb)(struct amdgpu_device *adev,
>  			      uint32_t vmid);
> @@ -543,39 +543,39 @@ struct amdgpu_mc;
>  #define AMDGPU_GPU_PAGE_SHIFT 12
>  #define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
>  
> -struct amdgpu_gart {
> +struct amdgpu_sysvm {
>  	dma_addr_t			table_addr;
>  	struct amdgpu_bo		*robj;
>  	void				*ptr;
>  	unsigned			num_gpu_pages;
>  	unsigned			num_cpu_pages;
>  	unsigned			table_size;
> -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
> +#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
>  	struct page			**pages;
>  #endif
>  	bool				ready;
>  
>  	/* Asic default pte flags */
> -	uint64_t			gart_pte_flags;
> +	uint64_t			sysvm_pte_flags;
>  
> -	const struct amdgpu_gart_funcs *gart_funcs;
> +	const struct amdgpu_sysvm_funcs *sysvm_funcs;
>  };
>  
> -void amdgpu_gart_set_defaults(struct amdgpu_device *adev);
> -int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
> -void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
> -int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
> -void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
> -int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
> -void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
> -int amdgpu_gart_init(struct amdgpu_device *adev);
> -void amdgpu_gart_fini(struct amdgpu_device *adev);
> -int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
> +void amdgpu_sysvm_set_defaults(struct amdgpu_device *adev);
> +int amdgpu_sysvm_table_ram_alloc(struct amdgpu_device *adev);
> +void amdgpu_sysvm_table_ram_free(struct amdgpu_device *adev);
> +int amdgpu_sysvm_table_vram_alloc(struct amdgpu_device *adev);
> +void amdgpu_sysvm_table_vram_free(struct amdgpu_device *adev);
> +int amdgpu_sysvm_table_vram_pin(struct amdgpu_device *adev);
> +void amdgpu_sysvm_table_vram_unpin(struct amdgpu_device *adev);
> +int amdgpu_sysvm_init(struct amdgpu_device *adev);
> +void amdgpu_sysvm_fini(struct amdgpu_device *adev);
> +int amdgpu_sysvm_unbind(struct amdgpu_device *adev, uint64_t offset,
>  			int pages);
> -int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
> +int amdgpu_sysvm_map(struct amdgpu_device *adev, uint64_t offset,
>  		    int pages, dma_addr_t *dma_addr, uint64_t flags,
>  		    void *dst);
> -int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
> +int amdgpu_sysvm_bind(struct amdgpu_device *adev, uint64_t offset,
>  		     int pages, struct page **pagelist,
>  		     dma_addr_t *dma_addr, uint64_t flags);
>  int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
> @@ -604,15 +604,15 @@ struct amdgpu_mc {
>  	 * about vram size near mc fb location */
>  	u64			mc_vram_size;
>  	u64			visible_vram_size;
> -	u64			gtt_size;
> -	u64			gtt_start;
> -	u64			gtt_end;
> +	u64			sysvm_size;
> +	u64			sysvm_start;
> +	u64			sysvm_end;
>  	u64			vram_start;
>  	u64			vram_end;
>  	unsigned		vram_width;
>  	u64			real_vram_size;
>  	int			vram_mtrr;
> -	u64                     gtt_base_align;
> +	u64                     sysvm_base_align;
>  	u64                     mc_mask;
>  	const struct firmware   *fw;	/* MC firmware */
>  	uint32_t                fw_version;
> @@ -1575,7 +1575,7 @@ struct amdgpu_device {
>  
>  	/* MC */
>  	struct amdgpu_mc		mc;
> -	struct amdgpu_gart		gart;
> +	struct amdgpu_sysvm		sysvm;
>  	struct amdgpu_dummy_page	dummy_page;
>  	struct amdgpu_vm_manager	vm_manager;
>  	struct amdgpu_vmhub             vmhub[AMDGPU_MAX_VMHUBS];
> @@ -1686,8 +1686,8 @@ struct amdgpu_device {
>  	struct list_head                shadow_list;
>  	struct mutex                    shadow_list_lock;
>  	/* link all gtt */
> -	spinlock_t			gtt_list_lock;
> -	struct list_head                gtt_list;
> +	spinlock_t			sysvm_list_lock;
> +	struct list_head                sysvm_list;
>  	/* keep an lru list of rings by HW IP */
>  	struct list_head		ring_lru_list;
>  	spinlock_t			ring_lru_list_lock;
> @@ -1835,13 +1835,13 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
>  #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
>  #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
>  #define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
> -#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
> -#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
> -#define amdgpu_gart_get_vm_pde(adev, addr) (adev)->gart.gart_funcs->get_vm_pde((adev), (addr))
> +#define amdgpu_sysvm_flush_gpu_tlb(adev, vmid) (adev)->sysvm.sysvm_funcs->flush_gpu_tlb((adev), (vmid))
> +#define amdgpu_sysvm_set_pte_pde(adev, pt, idx, addr, flags) (adev)->sysvm.sysvm_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
> +#define amdgpu_sysvm_get_vm_pde(adev, addr) (adev)->sysvm.sysvm_funcs->get_vm_pde((adev), (addr))
>  #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
>  #define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
>  #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
> -#define amdgpu_vm_get_pte_flags(adev, flags) (adev)->gart.gart_funcs->get_vm_pte_flags((adev),(flags))
> +#define amdgpu_vm_get_pte_flags(adev, flags) (adev)->sysvm.sysvm_funcs->get_vm_pte_flags((adev),(flags))
>  #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
>  #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
>  #define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
> @@ -1908,7 +1908,7 @@ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
>  uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
>  				 struct ttm_mem_reg *mem);
>  void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
> -void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
> +void amdgpu_sysvm_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
>  void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
>  int amdgpu_ttm_init(struct amdgpu_device *adev);
>  void amdgpu_ttm_fini(struct amdgpu_device *adev);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index 5b1220f..46a82d3 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -667,39 +667,39 @@ void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64
>  }
>  
>  /**
> - * amdgpu_gtt_location - try to find GTT location
> + * amdgpu_sysvm_location - try to find SYSVM location
>   * @adev: amdgpu device structure holding all necessary informations
>   * @mc: memory controller structure holding memory informations
>   *
> - * Function will place try to place GTT before or after VRAM.
> + * Function will try to place SYSVM before or after VRAM.
>   *
> - * If GTT size is bigger than space left then we ajust GTT size.
> + * If SYSVM size is bigger than space left then we adjust SYSVM size.
>   * Thus function will never fails.
>   *
> - * FIXME: when reducing GTT size align new size on power of 2.
> + * FIXME: when reducing SYSVM size align new size on power of 2.
>   */
> -void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
> +void amdgpu_sysvm_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
>  {
>  	u64 size_af, size_bf;
>  
> -	size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
> -	size_bf = mc->vram_start & ~mc->gtt_base_align;
> +	size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->sysvm_base_align) & ~mc->sysvm_base_align;
> +	size_bf = mc->vram_start & ~mc->sysvm_base_align;
>  	if (size_bf > size_af) {
> -		if (mc->gtt_size > size_bf) {
> -			dev_warn(adev->dev, "limiting GTT\n");
> -			mc->gtt_size = size_bf;
> +		if (mc->sysvm_size > size_bf) {
> +			dev_warn(adev->dev, "limiting SYSVM\n");
> +			mc->sysvm_size = size_bf;
>  		}
> -		mc->gtt_start = 0;
> +		mc->sysvm_start = 0;
>  	} else {
> -		if (mc->gtt_size > size_af) {
> -			dev_warn(adev->dev, "limiting GTT\n");
> -			mc->gtt_size = size_af;
> +		if (mc->sysvm_size > size_af) {
> +			dev_warn(adev->dev, "limiting SYSVM\n");
> +			mc->sysvm_size = size_af;
>  		}
> -		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
> +		mc->sysvm_start = (mc->vram_end + 1 + mc->sysvm_base_align) & ~mc->sysvm_base_align;
>  	}
> -	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
> -	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
> -			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
> +	mc->sysvm_end = mc->sysvm_start + mc->sysvm_size - 1;
> +	dev_info(adev->dev, "SYSVM: %lluM 0x%016llX - 0x%016llX\n",
> +			mc->sysvm_size >> 20, mc->sysvm_start, mc->sysvm_end);
>  }
>  
>  /*
> @@ -1663,12 +1663,12 @@ static int amdgpu_init(struct amdgpu_device *adev)
>  
>  static void amdgpu_fill_reset_magic(struct amdgpu_device *adev)
>  {
> -	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
> +	memcpy(adev->reset_magic, adev->sysvm.ptr, AMDGPU_RESET_MAGIC_NUM);
>  }
>  
>  static bool amdgpu_check_vram_lost(struct amdgpu_device *adev)
>  {
> -	return !!memcmp(adev->gart.ptr, adev->reset_magic,
> +	return !!memcmp(adev->sysvm.ptr, adev->reset_magic,
>  			AMDGPU_RESET_MAGIC_NUM);
>  }
>  
> @@ -2052,14 +2052,14 @@ int amdgpu_device_init(struct amdgpu_device *adev,
>  	adev->flags = flags;
>  	adev->asic_type = flags & AMD_ASIC_MASK;
>  	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
> -	adev->mc.gtt_size = 512 * 1024 * 1024;
> +	adev->mc.sysvm_size = 512 * 1024 * 1024;
>  	adev->accel_working = false;
>  	adev->num_rings = 0;
>  	adev->mman.buffer_funcs = NULL;
>  	adev->mman.buffer_funcs_ring = NULL;
>  	adev->vm_manager.vm_pte_funcs = NULL;
>  	adev->vm_manager.vm_pte_num_rings = 0;
> -	adev->gart.gart_funcs = NULL;
> +	adev->sysvm.sysvm_funcs = NULL;
>  	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
>  
>  	adev->smc_rreg = &amdgpu_invalid_rreg;
> @@ -2110,8 +2110,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
>  	INIT_LIST_HEAD(&adev->shadow_list);
>  	mutex_init(&adev->shadow_list_lock);
>  
> -	INIT_LIST_HEAD(&adev->gtt_list);
> -	spin_lock_init(&adev->gtt_list_lock);
> +	INIT_LIST_HEAD(&adev->sysvm_list);
> +	spin_lock_init(&adev->sysvm_list_lock);
>  
>  	INIT_LIST_HEAD(&adev->ring_lru_list);
>  	spin_lock_init(&adev->ring_lru_list_lock);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
> deleted file mode 100644
> index c808388..0000000
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
> +++ /dev/null
> @@ -1,423 +0,0 @@
> -/*
> - * Copyright 2008 Advanced Micro Devices, Inc.
> - * Copyright 2008 Red Hat Inc.
> - * Copyright 2009 Jerome Glisse.
> - *
> - * Permission is hereby granted, free of charge, to any person obtaining a
> - * copy of this software and associated documentation files (the "Software"),
> - * to deal in the Software without restriction, including without limitation
> - * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> - * and/or sell copies of the Software, and to permit persons to whom the
> - * Software is furnished to do so, subject to the following conditions:
> - *
> - * The above copyright notice and this permission notice shall be included in
> - * all copies or substantial portions of the Software.
> - *
> - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
> - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
> - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
> - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
> - * OTHER DEALINGS IN THE SOFTWARE.
> - *
> - * Authors: Dave Airlie
> - *          Alex Deucher
> - *          Jerome Glisse
> - */
> -#include <drm/drmP.h>
> -#include <drm/amdgpu_drm.h>
> -#include "amdgpu.h"
> -
> -/*
> - * GART
> - * The GART (Graphics Aperture Remapping Table) is an aperture
> - * in the GPU's address space.  System pages can be mapped into
> - * the aperture and look like contiguous pages from the GPU's
> - * perspective.  A page table maps the pages in the aperture
> - * to the actual backing pages in system memory.
> - *
> - * Radeon GPUs support both an internal GART, as described above,
> - * and AGP.  AGP works similarly, but the GART table is configured
> - * and maintained by the northbridge rather than the driver.
> - * Radeon hw has a separate AGP aperture that is programmed to
> - * point to the AGP aperture provided by the northbridge and the
> - * requests are passed through to the northbridge aperture.
> - * Both AGP and internal GART can be used at the same time, however
> - * that is not currently supported by the driver.
> - *
> - * This file handles the common internal GART management.
> - */
> -
> -/*
> - * Common GART table functions.
> - */
> -
> -/**
> - * amdgpu_gart_set_defaults - set the default gtt_size
> - *
> - * @adev: amdgpu_device pointer
> - *
> - * Set the default gtt_size based on parameters and available VRAM.
> - */
> -void amdgpu_gart_set_defaults(struct amdgpu_device *adev)
> -{
> -	/* unless the user had overridden it, set the gart
> -	 * size equal to the 1024 or vram, whichever is larger.
> -	 */
> -	if (amdgpu_gart_size == -1)
> -		adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
> -					adev->mc.mc_vram_size);
> -	else
> -		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
> -}
> -
> -/**
> - * amdgpu_gart_table_ram_alloc - allocate system ram for gart page table
> - *
> - * @adev: amdgpu_device pointer
> - *
> - * Allocate system memory for GART page table
> - * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
> - * gart table to be in system memory.
> - * Returns 0 for success, -ENOMEM for failure.
> - */
> -int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev)
> -{
> -	void *ptr;
> -
> -	ptr = pci_alloc_consistent(adev->pdev, adev->gart.table_size,
> -				   &adev->gart.table_addr);
> -	if (ptr == NULL) {
> -		return -ENOMEM;
> -	}
> -#ifdef CONFIG_X86
> -	if (0) {
> -		set_memory_uc((unsigned long)ptr,
> -			      adev->gart.table_size >> PAGE_SHIFT);
> -	}
> -#endif
> -	adev->gart.ptr = ptr;
> -	memset((void *)adev->gart.ptr, 0, adev->gart.table_size);
> -	return 0;
> -}
> -
> -/**
> - * amdgpu_gart_table_ram_free - free system ram for gart page table
> - *
> - * @adev: amdgpu_device pointer
> - *
> - * Free system memory for GART page table
> - * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
> - * gart table to be in system memory.
> - */
> -void amdgpu_gart_table_ram_free(struct amdgpu_device *adev)
> -{
> -	if (adev->gart.ptr == NULL) {
> -		return;
> -	}
> -#ifdef CONFIG_X86
> -	if (0) {
> -		set_memory_wb((unsigned long)adev->gart.ptr,
> -			      adev->gart.table_size >> PAGE_SHIFT);
> -	}
> -#endif
> -	pci_free_consistent(adev->pdev, adev->gart.table_size,
> -			    (void *)adev->gart.ptr,
> -			    adev->gart.table_addr);
> -	adev->gart.ptr = NULL;
> -	adev->gart.table_addr = 0;
> -}
> -
> -/**
> - * amdgpu_gart_table_vram_alloc - allocate vram for gart page table
> - *
> - * @adev: amdgpu_device pointer
> - *
> - * Allocate video memory for GART page table
> - * (pcie r4xx, r5xx+).  These asics require the
> - * gart table to be in video memory.
> - * Returns 0 for success, error for failure.
> - */
> -int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
> -{
> -	int r;
> -
> -	if (adev->gart.robj == NULL) {
> -		r = amdgpu_bo_create(adev, adev->gart.table_size,
> -				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
> -				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
> -				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
> -				     NULL, NULL, &adev->gart.robj);
> -		if (r) {
> -			return r;
> -		}
> -	}
> -	return 0;
> -}
> -
> -/**
> - * amdgpu_gart_table_vram_pin - pin gart page table in vram
> - *
> - * @adev: amdgpu_device pointer
> - *
> - * Pin the GART page table in vram so it will not be moved
> - * by the memory manager (pcie r4xx, r5xx+).  These asics require the
> - * gart table to be in video memory.
> - * Returns 0 for success, error for failure.
> - */
> -int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
> -{
> -	uint64_t gpu_addr;
> -	int r;
> -
> -	r = amdgpu_bo_reserve(adev->gart.robj, false);
> -	if (unlikely(r != 0))
> -		return r;
> -	r = amdgpu_bo_pin(adev->gart.robj,
> -				AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
> -	if (r) {
> -		amdgpu_bo_unreserve(adev->gart.robj);
> -		return r;
> -	}
> -	r = amdgpu_bo_kmap(adev->gart.robj, &adev->gart.ptr);
> -	if (r)
> -		amdgpu_bo_unpin(adev->gart.robj);
> -	amdgpu_bo_unreserve(adev->gart.robj);
> -	adev->gart.table_addr = gpu_addr;
> -	return r;
> -}
> -
> -/**
> - * amdgpu_gart_table_vram_unpin - unpin gart page table in vram
> - *
> - * @adev: amdgpu_device pointer
> - *
> - * Unpin the GART page table in vram (pcie r4xx, r5xx+).
> - * These asics require the gart table to be in video memory.
> - */
> -void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
> -{
> -	int r;
> -
> -	if (adev->gart.robj == NULL) {
> -		return;
> -	}
> -	r = amdgpu_bo_reserve(adev->gart.robj, true);
> -	if (likely(r == 0)) {
> -		amdgpu_bo_kunmap(adev->gart.robj);
> -		amdgpu_bo_unpin(adev->gart.robj);
> -		amdgpu_bo_unreserve(adev->gart.robj);
> -		adev->gart.ptr = NULL;
> -	}
> -}
> -
> -/**
> - * amdgpu_gart_table_vram_free - free gart page table vram
> - *
> - * @adev: amdgpu_device pointer
> - *
> - * Free the video memory used for the GART page table
> - * (pcie r4xx, r5xx+).  These asics require the gart table to
> - * be in video memory.
> - */
> -void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
> -{
> -	if (adev->gart.robj == NULL) {
> -		return;
> -	}
> -	amdgpu_bo_unref(&adev->gart.robj);
> -}
> -
> -/*
> - * Common gart functions.
> - */
> -/**
> - * amdgpu_gart_unbind - unbind pages from the gart page table
> - *
> - * @adev: amdgpu_device pointer
> - * @offset: offset into the GPU's gart aperture
> - * @pages: number of pages to unbind
> - *
> - * Unbinds the requested pages from the gart page table and
> - * replaces them with the dummy page (all asics).
> - * Returns 0 for success, -EINVAL for failure.
> - */
> -int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
> -			int pages)
> -{
> -	unsigned t;
> -	unsigned p;
> -	int i, j;
> -	u64 page_base;
> -	/* Starting from VEGA10, system bit must be 0 to mean invalid. */
> -	uint64_t flags = 0;
> -
> -	if (!adev->gart.ready) {
> -		WARN(1, "trying to unbind memory from uninitialized GART !\n");
> -		return -EINVAL;
> -	}
> -
> -	t = offset / AMDGPU_GPU_PAGE_SIZE;
> -	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
> -	for (i = 0; i < pages; i++, p++) {
> -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
> -		adev->gart.pages[p] = NULL;
> -#endif
> -		page_base = adev->dummy_page.addr;
> -		if (!adev->gart.ptr)
> -			continue;
> -
> -		for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
> -			amdgpu_gart_set_pte_pde(adev, adev->gart.ptr,
> -						t, page_base, flags);
> -			page_base += AMDGPU_GPU_PAGE_SIZE;
> -		}
> -	}
> -	mb();
> -	amdgpu_gart_flush_gpu_tlb(adev, 0);
> -	return 0;
> -}
> -
> -/**
> - * amdgpu_gart_map - map dma_addresses into GART entries
> - *
> - * @adev: amdgpu_device pointer
> - * @offset: offset into the GPU's gart aperture
> - * @pages: number of pages to bind
> - * @dma_addr: DMA addresses of pages
> - *
> - * Map the dma_addresses into GART entries (all asics).
> - * Returns 0 for success, -EINVAL for failure.
> - */
> -int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
> -		    int pages, dma_addr_t *dma_addr, uint64_t flags,
> -		    void *dst)
> -{
> -	uint64_t page_base;
> -	unsigned i, j, t;
> -
> -	if (!adev->gart.ready) {
> -		WARN(1, "trying to bind memory to uninitialized GART !\n");
> -		return -EINVAL;
> -	}
> -
> -	t = offset / AMDGPU_GPU_PAGE_SIZE;
> -
> -	for (i = 0; i < pages; i++) {
> -		page_base = dma_addr[i];
> -		for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
> -			amdgpu_gart_set_pte_pde(adev, dst, t, page_base, flags);
> -			page_base += AMDGPU_GPU_PAGE_SIZE;
> -		}
> -	}
> -	return 0;
> -}
> -
> -/**
> - * amdgpu_gart_bind - bind pages into the gart page table
> - *
> - * @adev: amdgpu_device pointer
> - * @offset: offset into the GPU's gart aperture
> - * @pages: number of pages to bind
> - * @pagelist: pages to bind
> - * @dma_addr: DMA addresses of pages
> - *
> - * Binds the requested pages to the gart page table
> - * (all asics).
> - * Returns 0 for success, -EINVAL for failure.
> - */
> -int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
> -		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
> -		     uint64_t flags)
> -{
> -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
> -	unsigned i,t,p;
> -#endif
> -	int r;
> -
> -	if (!adev->gart.ready) {
> -		WARN(1, "trying to bind memory to uninitialized GART !\n");
> -		return -EINVAL;
> -	}
> -
> -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
> -	t = offset / AMDGPU_GPU_PAGE_SIZE;
> -	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
> -	for (i = 0; i < pages; i++, p++)
> -		adev->gart.pages[p] = pagelist[i];
> -#endif
> -
> -	if (adev->gart.ptr) {
> -		r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
> -			    adev->gart.ptr);
> -		if (r)
> -			return r;
> -	}
> -
> -	mb();
> -	amdgpu_gart_flush_gpu_tlb(adev, 0);
> -	return 0;
> -}
> -
> -/**
> - * amdgpu_gart_init - init the driver info for managing the gart
> - *
> - * @adev: amdgpu_device pointer
> - *
> - * Allocate the dummy page and init the gart driver info (all asics).
> - * Returns 0 for success, error for failure.
> - */
> -int amdgpu_gart_init(struct amdgpu_device *adev)
> -{
> -	int r;
> -
> -	if (adev->dummy_page.page)
> -		return 0;
> -
> -	/* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
> -	if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) {
> -		DRM_ERROR("Page size is smaller than GPU page size!\n");
> -		return -EINVAL;
> -	}
> -	r = amdgpu_dummy_page_init(adev);
> -	if (r)
> -		return r;
> -	/* Compute table size */
> -	adev->gart.num_cpu_pages = adev->mc.gtt_size / PAGE_SIZE;
> -	adev->gart.num_gpu_pages = adev->mc.gtt_size / AMDGPU_GPU_PAGE_SIZE;
> -	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
> -		 adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
> -
> -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
> -	/* Allocate pages table */
> -	adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages);
> -	if (adev->gart.pages == NULL) {
> -		amdgpu_gart_fini(adev);
> -		return -ENOMEM;
> -	}
> -#endif
> -
> -	return 0;
> -}
> -
> -/**
> - * amdgpu_gart_fini - tear down the driver info for managing the gart
> - *
> - * @adev: amdgpu_device pointer
> - *
> - * Tear down the gart driver info and free the dummy page (all asics).
> - */
> -void amdgpu_gart_fini(struct amdgpu_device *adev)
> -{
> -	if (adev->gart.ready) {
> -		/* unbind pages */
> -		amdgpu_gart_unbind(adev, 0, adev->gart.num_cpu_pages);
> -	}
> -	adev->gart.ready = false;
> -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
> -	vfree(adev->gart.pages);
> -	adev->gart.pages = NULL;
> -#endif
> -	amdgpu_dummy_page_fini(adev);
> -}
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> index 4510627..73a1c64 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> @@ -82,7 +82,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
>  	if (r)
>  		kfree(*job);
>  	else
> -		(*job)->vm_pd_addr = adev->gart.table_addr;
> +		(*job)->vm_pd_addr = adev->sysvm.table_addr;
>  
>  	return r;
>  }
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c
> new file mode 100644
> index 0000000..50fc8d7
> --- /dev/null
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c
> @@ -0,0 +1,423 @@
> +/*
> + * Copyright 2008 Advanced Micro Devices, Inc.
> + * Copyright 2008 Red Hat Inc.
> + * Copyright 2009 Jerome Glisse.
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the "Software"),
> + * to deal in the Software without restriction, including without limitation
> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> + * and/or sell copies of the Software, and to permit persons to whom the
> + * Software is furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice shall be included in
> + * all copies or substantial portions of the Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
> + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
> + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
> + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
> + * OTHER DEALINGS IN THE SOFTWARE.
> + *
> + * Authors: Dave Airlie
> + *          Alex Deucher
> + *          Jerome Glisse
> + */
> +#include <drm/drmP.h>
> +#include <drm/amdgpu_drm.h>
> +#include "amdgpu.h"
> +
> +/*
> + * SYSVM
> + * The system VM (previously called GART) is an aperture
> + * in the GPU's address space.  System pages can be mapped into
> + * the aperture and look like contiguous pages from the GPU's
> + * perspective.  A page table maps the pages in the aperture
> + * to the actual backing pages in system memory.
> + *
> + * Radeon GPUs support both an internal SYSVM based GART, as described above,
> + * and AGP.  AGP works similarly, but the GART table is configured
> + * and maintained by the northbridge rather than the driver.
> + * Radeon hw has a separate AGP aperture that is programmed to
> + * point to the AGP aperture provided by the northbridge and the
> + * requests are passed through to the northbridge aperture.
> + * Both AGP and internal GART can be used at the same time, however
> + * that is not currently supported by the driver.
> + *
> + * This file handles the common internal SYSVM management.
> + */
> +
> +/*
> + * Common SYSVM table functions.
> + */
> +
> +/**
> + * amdgpu_sysvm_set_defaults - set the default sysvm_size
> + *
> + * @adev: amdgpu_device pointer
> + *
> + * Set the default sysvm_size based on parameters and available VRAM.
> + */
> +void amdgpu_sysvm_set_defaults(struct amdgpu_device *adev)
> +{
> +	/* unless the user has overridden it, set the sysvm
> +	 * size equal to 1024MB or the VRAM size, whichever is larger.
> +	 */
> +	if (amdgpu_gart_size == -1)
> +		adev->mc.sysvm_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
> +					adev->mc.mc_vram_size);
> +	else
> +		adev->mc.sysvm_size = (uint64_t)amdgpu_gart_size << 20;
> +}
> +
> +/**
> + * amdgpu_sysvm_table_ram_alloc - allocate system ram for gart page table
> + *
> + * @adev: amdgpu_device pointer
> + *
> + * Allocate system memory for SYSVM page table
> + * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
> + * gart table to be in system memory.
> + * Returns 0 for success, -ENOMEM for failure.
> + */
> +int amdgpu_sysvm_table_ram_alloc(struct amdgpu_device *adev)
> +{
> +	void *ptr;
> +
> +	ptr = pci_alloc_consistent(adev->pdev, adev->sysvm.table_size,
> +				   &adev->sysvm.table_addr);
> +	if (ptr == NULL) {
> +		return -ENOMEM;
> +	}
> +#ifdef CONFIG_X86
> +	if (0) {
> +		set_memory_uc((unsigned long)ptr,
> +			      adev->sysvm.table_size >> PAGE_SHIFT);
> +	}
> +#endif
> +	adev->sysvm.ptr = ptr;
> +	memset((void *)adev->sysvm.ptr, 0, adev->sysvm.table_size);
> +	return 0;
> +}
> +
> +/**
> + * amdgpu_sysvm_table_ram_free - free system ram for gart page table
> + *
> + * @adev: amdgpu_device pointer
> + *
> + * Free system memory for SYSVM page table
> + * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
> + * gart table to be in system memory.
> + */
> +void amdgpu_sysvm_table_ram_free(struct amdgpu_device *adev)
> +{
> +	if (adev->sysvm.ptr == NULL) {
> +		return;
> +	}
> +#ifdef CONFIG_X86
> +	if (0) {
> +		set_memory_wb((unsigned long)adev->sysvm.ptr,
> +			      adev->sysvm.table_size >> PAGE_SHIFT);
> +	}
> +#endif
> +	pci_free_consistent(adev->pdev, adev->sysvm.table_size,
> +			    (void *)adev->sysvm.ptr,
> +			    adev->sysvm.table_addr);
> +	adev->sysvm.ptr = NULL;
> +	adev->sysvm.table_addr = 0;
> +}
> +
> +/**
> + * amdgpu_sysvm_table_vram_alloc - allocate vram for gart page table
> + *
> + * @adev: amdgpu_device pointer
> + *
> + * Allocate video memory for SYSVM page table
> + * (pcie r4xx, r5xx+).  These asics require the
> + * gart table to be in video memory.
> + * Returns 0 for success, error for failure.
> + */
> +int amdgpu_sysvm_table_vram_alloc(struct amdgpu_device *adev)
> +{
> +	int r;
> +
> +	if (adev->sysvm.robj == NULL) {
> +		r = amdgpu_bo_create(adev, adev->sysvm.table_size,
> +				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
> +				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
> +				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
> +				     NULL, NULL, &adev->sysvm.robj);
> +		if (r) {
> +			return r;
> +		}
> +	}
> +	return 0;
> +}
> +
> +/**
> + * amdgpu_sysvm_table_vram_pin - pin gart page table in vram
> + *
> + * @adev: amdgpu_device pointer
> + *
> + * Pin the SYSVM page table in vram so it will not be moved
> + * by the memory manager (pcie r4xx, r5xx+).  These asics require the
> + * gart table to be in video memory.
> + * Returns 0 for success, error for failure.
> + */
> +int amdgpu_sysvm_table_vram_pin(struct amdgpu_device *adev)
> +{
> +	uint64_t gpu_addr;
> +	int r;
> +
> +	r = amdgpu_bo_reserve(adev->sysvm.robj, false);
> +	if (unlikely(r != 0))
> +		return r;
> +	r = amdgpu_bo_pin(adev->sysvm.robj,
> +				AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
> +	if (r) {
> +		amdgpu_bo_unreserve(adev->sysvm.robj);
> +		return r;
> +	}
> +	r = amdgpu_bo_kmap(adev->sysvm.robj, &adev->sysvm.ptr);
> +	if (r)
> +		amdgpu_bo_unpin(adev->sysvm.robj);
> +	amdgpu_bo_unreserve(adev->sysvm.robj);
> +	adev->sysvm.table_addr = gpu_addr;
> +	return r;
> +}
> +
> +/**
> + * amdgpu_sysvm_table_vram_unpin - unpin gart page table in vram
> + *
> + * @adev: amdgpu_device pointer
> + *
> + * Unpin the GART page table in vram (pcie r4xx, r5xx+).
> + * These asics require the gart table to be in video memory.
> + */
> +void amdgpu_sysvm_table_vram_unpin(struct amdgpu_device *adev)
> +{
> +	int r;
> +
> +	if (adev->sysvm.robj == NULL) {
> +		return;
> +	}
> +	r = amdgpu_bo_reserve(adev->sysvm.robj, true);
> +	if (likely(r == 0)) {
> +		amdgpu_bo_kunmap(adev->sysvm.robj);
> +		amdgpu_bo_unpin(adev->sysvm.robj);
> +		amdgpu_bo_unreserve(adev->sysvm.robj);
> +		adev->sysvm.ptr = NULL;
> +	}
> +}
> +
> +/**
> + * amdgpu_sysvm_table_vram_free - free gart page table vram
> + *
> + * @adev: amdgpu_device pointer
> + *
> + * Free the video memory used for the GART page table
> + * (pcie r4xx, r5xx+).  These asics require the gart table to
> + * be in video memory.
> + */
> +void amdgpu_sysvm_table_vram_free(struct amdgpu_device *adev)
> +{
> +	if (adev->sysvm.robj == NULL) {
> +		return;
> +	}
> +	amdgpu_bo_unref(&adev->sysvm.robj);
> +}
> +
> +/*
> + * Common gart functions.
> + */
> +/**
> + * amdgpu_sysvm_unbind - unbind pages from the gart page table
> + *
> + * @adev: amdgpu_device pointer
> + * @offset: offset into the GPU's gart aperture
> + * @pages: number of pages to unbind
> + *
> + * Unbinds the requested pages from the gart page table and
> + * replaces them with the dummy page (all asics).
> + * Returns 0 for success, -EINVAL for failure.
> + */
> +int amdgpu_sysvm_unbind(struct amdgpu_device *adev, uint64_t offset,
> +			int pages)
> +{
> +	unsigned t;
> +	unsigned p;
> +	int i, j;
> +	u64 page_base;
> +	/* Starting from VEGA10, system bit must be 0 to mean invalid. */
> +	uint64_t flags = 0;
> +
> +	if (!adev->sysvm.ready) {
> +		WARN(1, "trying to unbind memory from uninitialized GART !\n");
> +		return -EINVAL;
> +	}
> +
> +	t = offset / AMDGPU_GPU_PAGE_SIZE;
> +	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
> +	for (i = 0; i < pages; i++, p++) {
> +#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
> +		adev->sysvm.pages[p] = NULL;
> +#endif
> +		page_base = adev->dummy_page.addr;
> +		if (!adev->sysvm.ptr)
> +			continue;
> +
> +		for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
> +			amdgpu_sysvm_set_pte_pde(adev, adev->sysvm.ptr,
> +						t, page_base, flags);
> +			page_base += AMDGPU_GPU_PAGE_SIZE;
> +		}
> +	}
> +	mb();
> +	amdgpu_sysvm_flush_gpu_tlb(adev, 0);
> +	return 0;
> +}
> +
> +/**
> + * amdgpu_sysvm_map - map dma_addresses into GART entries
> + *
> + * @adev: amdgpu_device pointer
> + * @offset: offset into the GPU's gart aperture
> + * @pages: number of pages to bind
> + * @dma_addr: DMA addresses of pages
> + *
> + * Map the dma_addresses into GART entries (all asics).
> + * Returns 0 for success, -EINVAL for failure.
> + */
> +int amdgpu_sysvm_map(struct amdgpu_device *adev, uint64_t offset,
> +		    int pages, dma_addr_t *dma_addr, uint64_t flags,
> +		    void *dst)
> +{
> +	uint64_t page_base;
> +	unsigned i, j, t;
> +
> +	if (!adev->sysvm.ready) {
> +		WARN(1, "trying to bind memory to uninitialized GART !\n");
> +		return -EINVAL;
> +	}
> +
> +	t = offset / AMDGPU_GPU_PAGE_SIZE;
> +
> +	for (i = 0; i < pages; i++) {
> +		page_base = dma_addr[i];
> +		for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
> +			amdgpu_sysvm_set_pte_pde(adev, dst, t, page_base, flags);
> +			page_base += AMDGPU_GPU_PAGE_SIZE;
> +		}
> +	}
> +	return 0;
> +}
> +
> +/**
> + * amdgpu_sysvm_bind - bind pages into the gart page table
> + *
> + * @adev: amdgpu_device pointer
> + * @offset: offset into the GPU's gart aperture
> + * @pages: number of pages to bind
> + * @pagelist: pages to bind
> + * @dma_addr: DMA addresses of pages
> + *
> + * Binds the requested pages to the gart page table
> + * (all asics).
> + * Returns 0 for success, -EINVAL for failure.
> + */
> +int amdgpu_sysvm_bind(struct amdgpu_device *adev, uint64_t offset,
> +		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
> +		     uint64_t flags)
> +{
> +#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
> +	unsigned i,t,p;
> +#endif
> +	int r;
> +
> +	if (!adev->sysvm.ready) {
> +		WARN(1, "trying to bind memory to uninitialized GART !\n");
> +		return -EINVAL;
> +	}
> +
> +#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
> +	t = offset / AMDGPU_GPU_PAGE_SIZE;
> +	p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
> +	for (i = 0; i < pages; i++, p++)
> +		adev->sysvm.pages[p] = pagelist[i];
> +#endif
> +
> +	if (adev->sysvm.ptr) {
> +		r = amdgpu_sysvm_map(adev, offset, pages, dma_addr, flags,
> +			    adev->sysvm.ptr);
> +		if (r)
> +			return r;
> +	}
> +
> +	mb();
> +	amdgpu_sysvm_flush_gpu_tlb(adev, 0);
> +	return 0;
> +}
> +
> +/**
> + * amdgpu_sysvm_init - init the driver info for managing the gart
> + *
> + * @adev: amdgpu_device pointer
> + *
> + * Allocate the dummy page and init the gart driver info (all asics).
> + * Returns 0 for success, error for failure.
> + */
> +int amdgpu_sysvm_init(struct amdgpu_device *adev)
> +{
> +	int r;
> +
> +	if (adev->dummy_page.page)
> +		return 0;
> +
> +	/* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
> +	if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) {
> +		DRM_ERROR("Page size is smaller than GPU page size!\n");
> +		return -EINVAL;
> +	}
> +	r = amdgpu_dummy_page_init(adev);
> +	if (r)
> +		return r;
> +	/* Compute table size */
> +	adev->sysvm.num_cpu_pages = adev->mc.sysvm_size / PAGE_SIZE;
> +	adev->sysvm.num_gpu_pages = adev->mc.sysvm_size / AMDGPU_GPU_PAGE_SIZE;
> +	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
> +		 adev->sysvm.num_cpu_pages, adev->sysvm.num_gpu_pages);
> +
> +#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
> +	/* Allocate pages table */
> +	adev->sysvm.pages = vzalloc(sizeof(void *) * adev->sysvm.num_cpu_pages);
> +	if (adev->sysvm.pages == NULL) {
> +		amdgpu_sysvm_fini(adev);
> +		return -ENOMEM;
> +	}
> +#endif
> +
> +	return 0;
> +}
> +
> +/**
> + * amdgpu_sysvm_fini - tear down the driver info for managing the gart
> + *
> + * @adev: amdgpu_device pointer
> + *
> + * Tear down the gart driver info and free the dummy page (all asics).
> + */
> +void amdgpu_sysvm_fini(struct amdgpu_device *adev)
> +{
> +	if (adev->sysvm.ready) {
> +		/* unbind pages */
> +		amdgpu_sysvm_unbind(adev, 0, adev->sysvm.num_cpu_pages);
> +	}
> +	adev->sysvm.ready = false;
> +#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
> +	vfree(adev->sysvm.pages);
> +	adev->sysvm.pages = NULL;
> +#endif
> +	amdgpu_dummy_page_fini(adev);
> +}
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
> index d02e611..651712e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
> @@ -32,8 +32,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
>  {
>  	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
>  	struct amdgpu_bo *vram_obj = NULL;
> -	struct amdgpu_bo **gtt_obj = NULL;
> -	uint64_t gtt_addr, vram_addr;
> +	struct amdgpu_bo **sysvm_obj = NULL;
> +	uint64_t sysvm_addr, vram_addr;
>  	unsigned n, size;
>  	int i, r;
>  
> @@ -42,7 +42,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
>  	/* Number of tests =
>  	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
>  	 */
> -	n = adev->mc.gtt_size - AMDGPU_IB_POOL_SIZE*64*1024;
> +	n = adev->mc.sysvm_size - AMDGPU_IB_POOL_SIZE*64*1024;
>  	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
>  		if (adev->rings[i])
>  			n -= adev->rings[i]->ring_size;
> @@ -52,8 +52,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
>  		n -= adev->irq.ih.ring_size;
>  	n /= size;
>  
> -	gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
> -	if (!gtt_obj) {
> +	sysvm_obj = kzalloc(n * sizeof(*sysvm_obj), GFP_KERNEL);
> +	if (!sysvm_obj) {
>  		DRM_ERROR("Failed to allocate %d pointers\n", n);
>  		r = 1;
>  		goto out_cleanup;
> @@ -75,42 +75,42 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
>  		goto out_unres;
>  	}
>  	for (i = 0; i < n; i++) {
> -		void *gtt_map, *vram_map;
> -		void **gtt_start, **gtt_end;
> +		void *sysvm_map, *vram_map;
> +		void **sysvm_start, **sysvm_end;
>  		void **vram_start, **vram_end;
>  		struct dma_fence *fence = NULL;
>  
>  		r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
>  				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
> -				     NULL, gtt_obj + i);
> +				     NULL, sysvm_obj + i);
>  		if (r) {
>  			DRM_ERROR("Failed to create GTT object %d\n", i);
>  			goto out_lclean;
>  		}
>  
> -		r = amdgpu_bo_reserve(gtt_obj[i], false);
> +		r = amdgpu_bo_reserve(sysvm_obj[i], false);
>  		if (unlikely(r != 0))
>  			goto out_lclean_unref;
> -		r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, &gtt_addr);
> +		r = amdgpu_bo_pin(sysvm_obj[i], AMDGPU_GEM_DOMAIN_GTT, &sysvm_addr);
>  		if (r) {
>  			DRM_ERROR("Failed to pin GTT object %d\n", i);
>  			goto out_lclean_unres;
>  		}
>  
> -		r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
> +		r = amdgpu_bo_kmap(sysvm_obj[i], &sysvm_map);
>  		if (r) {
>  			DRM_ERROR("Failed to map GTT object %d\n", i);
>  			goto out_lclean_unpin;
>  		}
>  
> -		for (gtt_start = gtt_map, gtt_end = gtt_map + size;
> -		     gtt_start < gtt_end;
> -		     gtt_start++)
> -			*gtt_start = gtt_start;
> +		for (sysvm_start = sysvm_map, sysvm_end = sysvm_map + size;
> +		     sysvm_start < sysvm_end;
> +		     sysvm_start++)
> +			*sysvm_start = sysvm_start;
>  
> -		amdgpu_bo_kunmap(gtt_obj[i]);
> +		amdgpu_bo_kunmap(sysvm_obj[i]);
>  
> -		r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
> +		r = amdgpu_copy_buffer(ring, sysvm_addr, vram_addr,
>  				       size, NULL, &fence, false, false);
>  
>  		if (r) {
> @@ -132,21 +132,21 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
>  			goto out_lclean_unpin;
>  		}
>  
> -		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
> +		for (sysvm_start = sysvm_map, sysvm_end = sysvm_map + size,
>  		     vram_start = vram_map, vram_end = vram_map + size;
>  		     vram_start < vram_end;
> -		     gtt_start++, vram_start++) {
> -			if (*vram_start != gtt_start) {
> +		     sysvm_start++, vram_start++) {
> +			if (*vram_start != sysvm_start) {
>  				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
>  					  "expected 0x%p (GTT/VRAM offset "
>  					  "0x%16llx/0x%16llx)\n",
> -					  i, *vram_start, gtt_start,
> +					  i, *vram_start, sysvm_start,
>  					  (unsigned long long)
> -					  (gtt_addr - adev->mc.gtt_start +
> -					   (void*)gtt_start - gtt_map),
> +					  (sysvm_addr - adev->mc.sysvm_start +
> +					   (void*)sysvm_start - sysvm_map),
>  					  (unsigned long long)
>  					  (vram_addr - adev->mc.vram_start +
> -					   (void*)gtt_start - gtt_map));
> +					   (void*)sysvm_start - sysvm_map));
>  				amdgpu_bo_kunmap(vram_obj);
>  				goto out_lclean_unpin;
>  			}
> @@ -155,7 +155,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
>  
>  		amdgpu_bo_kunmap(vram_obj);
>  
> -		r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr,
> +		r = amdgpu_copy_buffer(ring, vram_addr, sysvm_addr,
>  				       size, NULL, &fence, false, false);
>  
>  		if (r) {
> @@ -171,49 +171,49 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
>  
>  		dma_fence_put(fence);
>  
> -		r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
> +		r = amdgpu_bo_kmap(sysvm_obj[i], &sysvm_map);
>  		if (r) {
>  			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
>  			goto out_lclean_unpin;
>  		}
>  
> -		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
> +		for (sysvm_start = sysvm_map, sysvm_end = sysvm_map + size,
>  		     vram_start = vram_map, vram_end = vram_map + size;
> -		     gtt_start < gtt_end;
> -		     gtt_start++, vram_start++) {
> -			if (*gtt_start != vram_start) {
> +		     sysvm_start < sysvm_end;
> +		     sysvm_start++, vram_start++) {
> +			if (*sysvm_start != vram_start) {
>  				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
>  					  "expected 0x%p (VRAM/GTT offset "
>  					  "0x%16llx/0x%16llx)\n",
> -					  i, *gtt_start, vram_start,
> +					  i, *sysvm_start, vram_start,
>  					  (unsigned long long)
>  					  (vram_addr - adev->mc.vram_start +
>  					   (void*)vram_start - vram_map),
>  					  (unsigned long long)
> -					  (gtt_addr - adev->mc.gtt_start +
> +					  (sysvm_addr - adev->mc.sysvm_start +
>  					   (void*)vram_start - vram_map));
> -				amdgpu_bo_kunmap(gtt_obj[i]);
> +				amdgpu_bo_kunmap(sysvm_obj[i]);
>  				goto out_lclean_unpin;
>  			}
>  		}
>  
> -		amdgpu_bo_kunmap(gtt_obj[i]);
> +		amdgpu_bo_kunmap(sysvm_obj[i]);
>  
>  		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
> -			 gtt_addr - adev->mc.gtt_start);
> +			 sysvm_addr - adev->mc.sysvm_start);
>  		continue;
>  
>  out_lclean_unpin:
> -		amdgpu_bo_unpin(gtt_obj[i]);
> +		amdgpu_bo_unpin(sysvm_obj[i]);
>  out_lclean_unres:
> -		amdgpu_bo_unreserve(gtt_obj[i]);
> +		amdgpu_bo_unreserve(sysvm_obj[i]);
>  out_lclean_unref:
> -		amdgpu_bo_unref(&gtt_obj[i]);
> +		amdgpu_bo_unref(&sysvm_obj[i]);
>  out_lclean:
>  		for (--i; i >= 0; --i) {
> -			amdgpu_bo_unpin(gtt_obj[i]);
> -			amdgpu_bo_unreserve(gtt_obj[i]);
> -			amdgpu_bo_unref(&gtt_obj[i]);
> +			amdgpu_bo_unpin(sysvm_obj[i]);
> +			amdgpu_bo_unreserve(sysvm_obj[i]);
> +			amdgpu_bo_unref(&sysvm_obj[i]);
>  		}
>  		if (fence)
>  			dma_fence_put(fence);
> @@ -226,7 +226,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
>  out_unref:
>  	amdgpu_bo_unref(&vram_obj);
>  out_cleanup:
> -	kfree(gtt_obj);
> +	kfree(sysvm_obj);
>  	if (r) {
>  		pr_warn("Error while testing BO move\n");
>  	}
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> index 5c7a6c5..9240357 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> @@ -102,7 +102,7 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
>  		goto error_bo;
>  	}
>  
> -	mutex_init(&adev->mman.gtt_window_lock);
> +	mutex_init(&adev->mman.sysvm_window_lock);
>  
>  	ring = adev->mman.buffer_funcs_ring;
>  	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
> @@ -130,7 +130,7 @@ static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
>  	if (adev->mman.mem_global_referenced) {
>  		amd_sched_entity_fini(adev->mman.entity.sched,
>  				      &adev->mman.entity);
> -		mutex_destroy(&adev->mman.gtt_window_lock);
> +		mutex_destroy(&adev->mman.sysvm_window_lock);
>  		drm_global_item_unref(&adev->mman.bo_global_ref.ref);
>  		drm_global_item_unref(&adev->mman.mem_global_ref);
>  		adev->mman.mem_global_referenced = false;
> @@ -158,7 +158,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
>  		break;
>  	case TTM_PL_TT:
>  		man->func = &amdgpu_gtt_mgr_func;
> -		man->gpu_offset = adev->mc.gtt_start;
> +		man->gpu_offset = adev->mc.sysvm_start;
>  		man->available_caching = TTM_PL_MASK_CACHING;
>  		man->default_caching = TTM_PL_FLAG_CACHED;
>  		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
> @@ -289,7 +289,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
>  	new_start = amdgpu_mm_node_addr(bo, new_mm, new_mem);
>  
>  	num_pages = new_mem->num_pages;
> -	mutex_lock(&adev->mman.gtt_window_lock);
> +	mutex_lock(&adev->mman.sysvm_window_lock);
>  	while (num_pages) {
>  		unsigned long cur_pages = min(min(old_size, new_size),
>  					      (u64)AMDGPU_GTT_MAX_TRANSFER_SIZE);
> @@ -341,14 +341,14 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
>  			new_start += cur_pages * PAGE_SIZE;
>  		}
>  	}
> -	mutex_unlock(&adev->mman.gtt_window_lock);
> +	mutex_unlock(&adev->mman.sysvm_window_lock);
>  
>  	r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
>  	dma_fence_put(fence);
>  	return r;
>  
>  error:
> -	mutex_unlock(&adev->mman.gtt_window_lock);
> +	mutex_unlock(&adev->mman.sysvm_window_lock);
>  
>  	if (fence)
>  		dma_fence_wait(fence, false);
> @@ -693,10 +693,10 @@ static int amdgpu_ttm_do_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
>  	uint64_t flags;
>  	int r;
>  
> -	spin_lock(&gtt->adev->gtt_list_lock);
> +	spin_lock(&gtt->adev->sysvm_list_lock);
>  	flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, mem);
>  	gtt->offset = (u64)mem->start << PAGE_SHIFT;
> -	r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
> +	r = amdgpu_sysvm_bind(gtt->adev, gtt->offset, ttm->num_pages,
>  		ttm->pages, gtt->ttm.dma_address, flags);
>  
>  	if (r) {
> @@ -705,9 +705,9 @@ static int amdgpu_ttm_do_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
>  		goto error_gart_bind;
>  	}
>  
> -	list_add_tail(&gtt->list, &gtt->adev->gtt_list);
> +	list_add_tail(&gtt->list, &gtt->adev->sysvm_list);
>  error_gart_bind:
> -	spin_unlock(&gtt->adev->gtt_list_lock);
> +	spin_unlock(&gtt->adev->sysvm_list_lock);
>  	return r;
>  
>  }
> @@ -774,20 +774,20 @@ int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
>  	int r;
>  
>  	bo_mem.mem_type = TTM_PL_TT;
> -	spin_lock(&adev->gtt_list_lock);
> -	list_for_each_entry_safe(gtt, tmp, &adev->gtt_list, list) {
> +	spin_lock(&adev->sysvm_list_lock);
> +	list_for_each_entry_safe(gtt, tmp, &adev->sysvm_list, list) {
>  		flags = amdgpu_ttm_tt_pte_flags(gtt->adev, &gtt->ttm.ttm, &bo_mem);
> -		r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
> +		r = amdgpu_sysvm_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
>  				     gtt->ttm.ttm.pages, gtt->ttm.dma_address,
>  				     flags);
>  		if (r) {
> -			spin_unlock(&adev->gtt_list_lock);
> +			spin_unlock(&adev->sysvm_list_lock);
>  			DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
>  				  gtt->ttm.ttm.num_pages, gtt->offset);
>  			return r;
>  		}
>  	}
> -	spin_unlock(&adev->gtt_list_lock);
> +	spin_unlock(&adev->sysvm_list_lock);
>  	return 0;
>  }
>  
> @@ -803,8 +803,8 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
>  		return 0;
>  
>  	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
> -	spin_lock(&gtt->adev->gtt_list_lock);
> -	r = amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages);
> +	spin_lock(&gtt->adev->sysvm_list_lock);
> +	r = amdgpu_sysvm_unbind(gtt->adev, gtt->offset, ttm->num_pages);
>  	if (r) {
>  		DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
>  			  gtt->ttm.ttm.num_pages, gtt->offset);
> @@ -812,7 +812,7 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
>  	}
>  	list_del_init(&gtt->list);
>  error_unbind:
> -	spin_unlock(&gtt->adev->gtt_list_lock);
> +	spin_unlock(&gtt->adev->sysvm_list_lock);
>  	return r;
>  }
>  
> @@ -1037,7 +1037,7 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
>  			flags |= AMDGPU_PTE_SNOOPED;
>  	}
>  
> -	flags |= adev->gart.gart_pte_flags;
> +	flags |= adev->sysvm.sysvm_pte_flags;
>  	flags |= AMDGPU_PTE_READABLE;
>  
>  	if (!amdgpu_ttm_tt_is_readonly(ttm))
> @@ -1144,13 +1144,13 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
>  	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
>  		 (unsigned) (adev->mc.real_vram_size / (1024 * 1024)));
>  	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT,
> -				adev->mc.gtt_size >> PAGE_SHIFT);
> +				adev->mc.sysvm_size >> PAGE_SHIFT);
>  	if (r) {
>  		DRM_ERROR("Failed initializing GTT heap.\n");
>  		return r;
>  	}
>  	DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
> -		 (unsigned)(adev->mc.gtt_size / (1024 * 1024)));
> +		 (unsigned)(adev->mc.sysvm_size / (1024 * 1024)));
>  
>  	adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
>  	adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
> @@ -1223,7 +1223,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
>  	if (adev->gds.oa.total_size)
>  		ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
>  	ttm_bo_device_release(&adev->mman.bdev);
> -	amdgpu_gart_fini(adev);
> +	amdgpu_sysvm_fini(adev);
>  	amdgpu_ttm_global_fini(adev);
>  	adev->mman.initialized = false;
>  	DRM_INFO("amdgpu: ttm finalized\n");
> @@ -1279,7 +1279,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
>  	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
>  	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
>  
> -	*addr = adev->mc.gtt_start;
> +	*addr = adev->mc.sysvm_start;
>  	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
>  		AMDGPU_GPU_PAGE_SIZE;
>  
> @@ -1296,7 +1296,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
>  	src_addr = num_dw * 4;
>  	src_addr += job->ibs[0].gpu_addr;
>  
> -	dst_addr = adev->gart.table_addr;
> +	dst_addr = adev->sysvm.table_addr;
>  	dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
>  	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
>  				dst_addr, num_bytes);
> @@ -1306,7 +1306,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
>  
>  	dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
>  	flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
> -	r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
> +	r = amdgpu_sysvm_map(adev, 0, num_pages, dma_address, flags,
>  			    &job->ibs[0].ptr[num_dw]);
>  	if (r)
>  		goto error_free;
> @@ -1522,7 +1522,7 @@ static int ttm_pl_tt = TTM_PL_TT;
>  
>  static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
>  	{"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram},
> -	{"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
> +	{"amdgpu_sysvm_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
>  	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
>  #ifdef CONFIG_SWIOTLB
>  	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
> @@ -1574,9 +1574,9 @@ static const struct file_operations amdgpu_ttm_vram_fops = {
>  	.llseek = default_llseek
>  };
>  
> -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
> +#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
>  
> -static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
> +static ssize_t amdgpu_ttm_sysvm_read(struct file *f, char __user *buf,
>  				   size_t size, loff_t *pos)
>  {
>  	struct amdgpu_device *adev = file_inode(f)->i_private;
> @@ -1590,16 +1590,16 @@ static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
>  		struct page *page;
>  		void *ptr;
>  
> -		if (p >= adev->gart.num_cpu_pages)
> +		if (p >= adev->sysvm.num_cpu_pages)
>  			return result;
>  
> -		page = adev->gart.pages[p];
> +		page = adev->sysvm.pages[p];
>  		if (page) {
>  			ptr = kmap(page);
>  			ptr += off;
>  
>  			r = copy_to_user(buf, ptr, cur_size);
> -			kunmap(adev->gart.pages[p]);
> +			kunmap(adev->sysvm.pages[p]);
>  		} else
>  			r = clear_user(buf, cur_size);
>  
> @@ -1615,9 +1615,9 @@ static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
>  	return result;
>  }
>  
> -static const struct file_operations amdgpu_ttm_gtt_fops = {
> +static const struct file_operations amdgpu_ttm_sysvm_fops = {
>  	.owner = THIS_MODULE,
> -	.read = amdgpu_ttm_gtt_read,
> +	.read = amdgpu_ttm_sysvm_read,
>  	.llseek = default_llseek
>  };
>  
> @@ -1640,12 +1640,12 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
>  	i_size_write(ent->d_inode, adev->mc.mc_vram_size);
>  	adev->mman.vram = ent;
>  
> -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
> -	ent = debugfs_create_file("amdgpu_gtt", S_IFREG | S_IRUGO, root,
> -				  adev, &amdgpu_ttm_gtt_fops);
> +#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
> +	ent = debugfs_create_file("amdgpu_sysvm", S_IFREG | S_IRUGO, root,
> +				  adev, &amdgpu_ttm_sysvm_fops);
>  	if (IS_ERR(ent))
>  		return PTR_ERR(ent);
> -	i_size_write(ent->d_inode, adev->mc.gtt_size);
> +	i_size_write(ent->d_inode, adev->mc.sysvm_size);
>  	adev->mman.gtt = ent;
>  
>  #endif
> @@ -1670,7 +1670,7 @@ static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
>  	debugfs_remove(adev->mman.vram);
>  	adev->mman.vram = NULL;
>  
> -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
> +#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
>  	debugfs_remove(adev->mman.gtt);
>  	adev->mman.gtt = NULL;
>  #endif
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
> index 4f5c1da..1443038 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
> @@ -53,7 +53,7 @@ struct amdgpu_mman {
>  	const struct amdgpu_buffer_funcs	*buffer_funcs;
>  	struct amdgpu_ring			*buffer_funcs_ring;
>  
> -	struct mutex				gtt_window_lock;
> +	struct mutex				sysvm_window_lock;
>  	/* Scheduler entity for buffer moves */
>  	struct amd_sched_entity			entity;
>  };
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> index 1d1810d..8dbacec 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> @@ -982,14 +982,14 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
>  		value = params->pages_addr ?
>  			amdgpu_vm_map_gart(params->pages_addr, addr) :
>  			addr;
> -		amdgpu_gart_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
> +		amdgpu_sysvm_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
>  					i, value, flags);
>  		addr += incr;
>  	}
>  
>  	/* Flush HDP */
>  	mb();
> -	amdgpu_gart_flush_gpu_tlb(params->adev, 0);
> +	amdgpu_sysvm_flush_gpu_tlb(params->adev, 0);
>  }
>  
>  static int amdgpu_vm_bo_wait(struct amdgpu_device *adev, struct amdgpu_bo *bo)
> @@ -1101,7 +1101,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
>  		}
>  
>  		pt = amdgpu_bo_gpu_offset(bo);
> -		pt = amdgpu_gart_get_vm_pde(adev, pt);
> +		pt = amdgpu_sysvm_get_vm_pde(adev, pt);
>  		if (parent->entries[pt_idx].addr == pt)
>  			continue;
>  
> @@ -1564,7 +1564,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
>   *
>   * @adev: amdgpu_device pointer
>   * @exclusive: fence we need to sync to
> - * @gtt_flags: flags as they are used for GTT
> + * @sysvm_flags: flags as they are used in the SYSVM
>   * @pages_addr: DMA addresses to use for mapping
>   * @vm: requested vm
>   * @mapping: mapped range and flags to use for the update
> @@ -1578,7 +1578,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
>   */
>  static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
>  				      struct dma_fence *exclusive,
> -				      uint64_t gtt_flags,
> +				      uint64_t sysvm_flags,
>  				      dma_addr_t *pages_addr,
>  				      struct amdgpu_vm *vm,
>  				      struct amdgpu_bo_va_mapping *mapping,
> @@ -1633,8 +1633,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
>  		}
>  
>  		if (pages_addr) {
> -			if (flags == gtt_flags)
> -				src = adev->gart.table_addr +
> +			if (flags == sysvm_flags)
> +				src = adev->sysvm.table_addr +
>  					(addr >> AMDGPU_GPU_PAGE_SHIFT) * 8;
>  			else
>  				max_entries = min(max_entries, 16ull * 1024ull);
> @@ -1681,7 +1681,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
>  	struct amdgpu_vm *vm = bo_va->vm;
>  	struct amdgpu_bo_va_mapping *mapping;
>  	dma_addr_t *pages_addr = NULL;
> -	uint64_t gtt_flags, flags;
> +	uint64_t sysvm_flags, flags;
>  	struct ttm_mem_reg *mem;
>  	struct drm_mm_node *nodes;
>  	struct dma_fence *exclusive;
> @@ -1706,12 +1706,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
>  
>  	if (bo_va->bo) {
>  		flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
> -		gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
> +		sysvm_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
>  			adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ?
>  			flags : 0;
>  	} else {
>  		flags = 0x0;
> -		gtt_flags = ~0x0;
> +		sysvm_flags = ~0x0;
>  	}
>  
>  	spin_lock(&vm->status_lock);
> @@ -1721,7 +1721,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
>  
>  	list_for_each_entry(mapping, &bo_va->invalids, list) {
>  		r = amdgpu_vm_bo_split_mapping(adev, exclusive,
> -					       gtt_flags, pages_addr, vm,
> +					       sysvm_flags, pages_addr, vm,
>  					       mapping, flags, nodes,
>  					       &bo_va->last_pt_update);
>  		if (r)
> @@ -1756,7 +1756,7 @@ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
>  
>  	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
>  	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
> -	adev->gart.gart_funcs->set_prt(adev, enable);
> +	adev->sysvm.sysvm_funcs->set_prt(adev, enable);
>  	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
>  }
>  
> @@ -1765,7 +1765,7 @@ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
>   */
>  static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
>  {
> -	if (!adev->gart.gart_funcs->set_prt)
> +	if (!adev->sysvm.sysvm_funcs->set_prt)
>  		return;
>  
>  	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
> @@ -1800,7 +1800,7 @@ static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
>  {
>  	struct amdgpu_prt_cb *cb;
>  
> -	if (!adev->gart.gart_funcs->set_prt)
> +	if (!adev->sysvm.sysvm_funcs->set_prt)
>  		return;
>  
>  	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
> @@ -2498,7 +2498,7 @@ static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level)
>  void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
>  {
>  	struct amdgpu_bo_va_mapping *mapping, *tmp;
> -	bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
> +	bool prt_fini_needed = !!adev->sysvm.sysvm_funcs->set_prt;
>  	int i;
>  
>  	amd_sched_entity_fini(vm->entity.sched, &vm->entity);
> diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> index 6986285..708fb84 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> @@ -3731,10 +3731,10 @@ static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
>  {
>  	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
>  	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
> -	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
> +	uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
>  	unsigned eng = ring->vm_inv_eng;
>  
> -	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
> +	pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
>  	pd_addr |= AMDGPU_PTE_VALID;
>  
>  	gfx_v9_0_write_data_to_reg(ring, usepfp, true,
> diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
> index a42f483..1290434 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
> @@ -40,8 +40,8 @@ static void gfxhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
>  {
>  	uint64_t value;
>  
> -	BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL));
> -	value = adev->gart.table_addr - adev->mc.vram_start
> +	BUG_ON(adev->sysvm.table_addr & (~0x0000FFFFFFFFF000ULL));
> +	value = adev->sysvm.table_addr - adev->mc.vram_start
>  		+ adev->vm_manager.vram_base_offset;
>  	value &= 0x0000FFFFFFFFF000ULL;
>  	value |= 0x1; /*valid bit*/
> @@ -58,14 +58,14 @@ static void gfxhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
>  	gfxhub_v1_0_init_gart_pt_regs(adev);
>  
>  	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
> -		     (u32)(adev->mc.gtt_start >> 12));
> +		     (u32)(adev->mc.sysvm_start >> 12));
>  	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
> -		     (u32)(adev->mc.gtt_start >> 44));
> +		     (u32)(adev->mc.sysvm_start >> 44));
>  
>  	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
> -		     (u32)(adev->mc.gtt_end >> 12));
> +		     (u32)(adev->mc.sysvm_end >> 12));
>  	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
> -		     (u32)(adev->mc.gtt_end >> 44));
> +		     (u32)(adev->mc.sysvm_end >> 44));
>  }
>  
>  static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
> @@ -228,7 +228,7 @@ static void gfxhub_v1_0_program_invalidation(struct amdgpu_device *adev)
>  	}
>  }
>  
> -int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
> +int gfxhub_v1_0_sysvm_enable(struct amdgpu_device *adev)
>  {
>  	if (amdgpu_sriov_vf(adev)) {
>  		/*
> @@ -256,7 +256,7 @@ int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
>  	return 0;
>  }
>  
> -void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev)
> +void gfxhub_v1_0_sysvm_disable(struct amdgpu_device *adev)
>  {
>  	u32 tmp;
>  	u32 i;
> diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
> index d2dbb08..d194b7e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
> +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
> @@ -24,8 +24,8 @@
>  #ifndef __GFXHUB_V1_0_H__
>  #define __GFXHUB_V1_0_H__
>  
> -int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev);
> -void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev);
> +int gfxhub_v1_0_sysvm_enable(struct amdgpu_device *adev);
> +void gfxhub_v1_0_sysvm_disable(struct amdgpu_device *adev);
>  void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
>  					  bool value);
>  void gfxhub_v1_0_init(struct amdgpu_device *adev);
> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
> index 5ed6788f..53c3b8a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
> @@ -36,7 +36,7 @@
>  #include "dce/dce_6_0_sh_mask.h"
>  #include "si_enums.h"
>  
> -static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev);
> +static void gmc_v6_0_set_sysvm_funcs(struct amdgpu_device *adev);
>  static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
>  static int gmc_v6_0_wait_for_idle(void *handle);
>  
> @@ -216,7 +216,7 @@ static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
>  	return 0;
>  }
>  
> -static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
> +static void gmc_v6_0_vram_sysvm_location(struct amdgpu_device *adev,
>  				       struct amdgpu_mc *mc)
>  {
>  	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
> @@ -228,8 +228,8 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
>  		mc->mc_vram_size = 0xFFC0000000ULL;
>  	}
>  	amdgpu_vram_location(adev, &adev->mc, base);
> -	adev->mc.gtt_base_align = 0;
> -	amdgpu_gtt_location(adev, mc);
> +	adev->mc.sysvm_base_align = 0;
> +	amdgpu_sysvm_location(adev, mc);
>  }
>  
>  static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
> @@ -321,8 +321,8 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
>  	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
>  	adev->mc.visible_vram_size = adev->mc.aper_size;
>  
> -	amdgpu_gart_set_defaults(adev);
> -	gmc_v6_0_vram_gtt_location(adev, &adev->mc);
> +	amdgpu_sysvm_set_defaults(adev);
> +	gmc_v6_0_vram_sysvm_location(adev, &adev->mc);
>  
>  	return 0;
>  }
> @@ -447,15 +447,15 @@ static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
>  	}
>  }
>  
> -static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
> +static int gmc_v6_0_sysvm_enable(struct amdgpu_device *adev)
>  {
>  	int r, i;
>  
> -	if (adev->gart.robj == NULL) {
> +	if (adev->sysvm.robj == NULL) {
>  		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
>  		return -EINVAL;
>  	}
> -	r = amdgpu_gart_table_vram_pin(adev);
> +	r = amdgpu_sysvm_table_vram_pin(adev);
>  	if (r)
>  		return r;
>  	/* Setup TLB control */
> @@ -482,9 +482,9 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
>  	       (4UL << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
>  	       (4UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
>  	/* setup context0 */
> -	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
> -	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
> -	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
> +	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.sysvm_start >> 12);
> +	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.sysvm_end >> 12);
> +	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->sysvm.table_addr >> 12);
>  	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
>  			(u32)(adev->dummy_page.addr >> 12));
>  	WREG32(mmVM_CONTEXT0_CNTL2, 0);
> @@ -508,10 +508,10 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
>  	for (i = 1; i < 16; i++) {
>  		if (i < 8)
>  			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
> -			       adev->gart.table_addr >> 12);
> +			       adev->sysvm.table_addr >> 12);
>  		else
>  			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
> -			       adev->gart.table_addr >> 12);
> +			       adev->sysvm.table_addr >> 12);
>  	}
>  
>  	/* enable context1-15 */
> @@ -530,9 +530,9 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
>  
>  	gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
>  	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
> -		 (unsigned)(adev->mc.gtt_size >> 20),
> -		 (unsigned long long)adev->gart.table_addr);
> -	adev->gart.ready = true;
> +		 (unsigned)(adev->mc.sysvm_size >> 20),
> +		 (unsigned long long)adev->sysvm.table_addr);
> +	adev->sysvm.ready = true;
>  	return 0;
>  }
>  
> @@ -540,19 +540,19 @@ static int gmc_v6_0_gart_init(struct amdgpu_device *adev)
>  {
>  	int r;
>  
> -	if (adev->gart.robj) {
> +	if (adev->sysvm.robj) {
>  		dev_warn(adev->dev, "gmc_v6_0 PCIE GART already initialized\n");
>  		return 0;
>  	}
> -	r = amdgpu_gart_init(adev);
> +	r = amdgpu_sysvm_init(adev);
>  	if (r)
>  		return r;
> -	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
> -	adev->gart.gart_pte_flags = 0;
> -	return amdgpu_gart_table_vram_alloc(adev);
> +	adev->sysvm.table_size = adev->sysvm.num_gpu_pages * 8;
> +	adev->sysvm.sysvm_pte_flags = 0;
> +	return amdgpu_sysvm_table_vram_alloc(adev);
>  }
>  
> -static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
> +static void gmc_v6_0_sysvm_disable(struct amdgpu_device *adev)
>  {
>  	/*unsigned i;
>  
> @@ -582,13 +582,13 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
>  	WREG32(mmVM_L2_CNTL3,
>  	       VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
>  	       (0UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
> -	amdgpu_gart_table_vram_unpin(adev);
> +	amdgpu_sysvm_table_vram_unpin(adev);
>  }
>  
>  static void gmc_v6_0_gart_fini(struct amdgpu_device *adev)
>  {
> -	amdgpu_gart_table_vram_free(adev);
> -	amdgpu_gart_fini(adev);
> +	amdgpu_sysvm_table_vram_free(adev);
> +	amdgpu_sysvm_fini(adev);
>  }
>  
>  static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
> @@ -762,7 +762,7 @@ static int gmc_v6_0_early_init(void *handle)
>  {
>  	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>  
> -	gmc_v6_0_set_gart_funcs(adev);
> +	gmc_v6_0_set_sysvm_funcs(adev);
>  	gmc_v6_0_set_irq_funcs(adev);
>  
>  	return 0;
> @@ -889,7 +889,7 @@ static int gmc_v6_0_hw_init(void *handle)
>  		}
>  	}
>  
> -	r = gmc_v6_0_gart_enable(adev);
> +	r = gmc_v6_0_sysvm_enable(adev);
>  	if (r)
>  		return r;
>  
> @@ -901,7 +901,7 @@ static int gmc_v6_0_hw_fini(void *handle)
>  	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>  
>  	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
> -	gmc_v6_0_gart_disable(adev);
> +	gmc_v6_0_sysvm_disable(adev);
>  
>  	return 0;
>  }
> @@ -1095,7 +1095,7 @@ static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
>  	.set_powergating_state = gmc_v6_0_set_powergating_state,
>  };
>  
> -static const struct amdgpu_gart_funcs gmc_v6_0_gart_funcs = {
> +static const struct amdgpu_sysvm_funcs gmc_v6_0_sysvm_funcs = {
>  	.flush_gpu_tlb = gmc_v6_0_gart_flush_gpu_tlb,
>  	.set_pte_pde = gmc_v6_0_gart_set_pte_pde,
>  	.set_prt = gmc_v6_0_set_prt,
> @@ -1108,10 +1108,10 @@ static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
>  	.process = gmc_v6_0_process_interrupt,
>  };
>  
> -static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev)
> +static void gmc_v6_0_set_sysvm_funcs(struct amdgpu_device *adev)
>  {
> -	if (adev->gart.gart_funcs == NULL)
> -		adev->gart.gart_funcs = &gmc_v6_0_gart_funcs;
> +	if (adev->sysvm.sysvm_funcs == NULL)
> +		adev->sysvm.sysvm_funcs = &gmc_v6_0_sysvm_funcs;
>  }
>  
>  static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
> index 15f2c0f..2329bdb 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
> @@ -39,7 +39,7 @@
>  
>  #include "amdgpu_atombios.h"
>  
> -static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
> +static void gmc_v7_0_set_sysvm_funcs(struct amdgpu_device *adev);
>  static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
>  static int gmc_v7_0_wait_for_idle(void *handle);
>  
> @@ -231,7 +231,7 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
>  	return 0;
>  }
>  
> -static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
> +static void gmc_v7_0_vram_sysvm_location(struct amdgpu_device *adev,
>  				       struct amdgpu_mc *mc)
>  {
>  	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
> @@ -244,8 +244,8 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
>  		mc->mc_vram_size = 0xFFC0000000ULL;
>  	}
>  	amdgpu_vram_location(adev, &adev->mc, base);
> -	adev->mc.gtt_base_align = 0;
> -	amdgpu_gtt_location(adev, mc);
> +	adev->mc.sysvm_base_align = 0;
> +	amdgpu_sysvm_location(adev, mc);
>  }
>  
>  /**
> @@ -373,8 +373,8 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
>  	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
>  		adev->mc.visible_vram_size = adev->mc.real_vram_size;
>  
> -	amdgpu_gart_set_defaults(adev);
> -	gmc_v7_0_vram_gtt_location(adev, &adev->mc);
> +	amdgpu_sysvm_set_defaults(adev);
> +	gmc_v7_0_vram_sysvm_location(adev, &adev->mc);
>  
>  	return 0;
>  }
> @@ -536,7 +536,7 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
>  }
>  
>  /**
> - * gmc_v7_0_gart_enable - gart enable
> + * gmc_v7_0_sysvm_enable - gart enable
>   *
>   * @adev: amdgpu_device pointer
>   *
> @@ -546,16 +546,16 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
>   * and GPUVM for FSA64 clients (CIK).
>   * Returns 0 for success, errors for failure.
>   */
> -static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
> +static int gmc_v7_0_sysvm_enable(struct amdgpu_device *adev)
>  {
>  	int r, i;
>  	u32 tmp;
>  
> -	if (adev->gart.robj == NULL) {
> +	if (adev->sysvm.robj == NULL) {
>  		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
>  		return -EINVAL;
>  	}
> -	r = amdgpu_gart_table_vram_pin(adev);
> +	r = amdgpu_sysvm_table_vram_pin(adev);
>  	if (r)
>  		return r;
>  	/* Setup TLB control */
> @@ -585,9 +585,9 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
>  	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
>  	WREG32(mmVM_L2_CNTL3, tmp);
>  	/* setup context0 */
> -	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
> -	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
> -	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
> +	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.sysvm_start >> 12);
> +	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.sysvm_end >> 12);
> +	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->sysvm.table_addr >> 12);
>  	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
>  			(u32)(adev->dummy_page.addr >> 12));
>  	WREG32(mmVM_CONTEXT0_CNTL2, 0);
> @@ -611,10 +611,10 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
>  	for (i = 1; i < 16; i++) {
>  		if (i < 8)
>  			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
> -			       adev->gart.table_addr >> 12);
> +			       adev->sysvm.table_addr >> 12);
>  		else
>  			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
> -			       adev->gart.table_addr >> 12);
> +			       adev->sysvm.table_addr >> 12);
>  	}
>  
>  	/* enable context1-15 */
> @@ -640,9 +640,9 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
>  
>  	gmc_v7_0_gart_flush_gpu_tlb(adev, 0);
>  	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
> -		 (unsigned)(adev->mc.gtt_size >> 20),
> -		 (unsigned long long)adev->gart.table_addr);
> -	adev->gart.ready = true;
> +		 (unsigned)(adev->mc.sysvm_size >> 20),
> +		 (unsigned long long)adev->sysvm.table_addr);
> +	adev->sysvm.ready = true;
>  	return 0;
>  }
>  
> @@ -650,27 +650,27 @@ static int gmc_v7_0_gart_init(struct amdgpu_device *adev)
>  {
>  	int r;
>  
> -	if (adev->gart.robj) {
> +	if (adev->sysvm.robj) {
>  		WARN(1, "R600 PCIE GART already initialized\n");
>  		return 0;
>  	}
>  	/* Initialize common gart structure */
> -	r = amdgpu_gart_init(adev);
> +	r = amdgpu_sysvm_init(adev);
>  	if (r)
>  		return r;
> -	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
> -	adev->gart.gart_pte_flags = 0;
> -	return amdgpu_gart_table_vram_alloc(adev);
> +	adev->sysvm.table_size = adev->sysvm.num_gpu_pages * 8;
> +	adev->sysvm.sysvm_pte_flags = 0;
> +	return amdgpu_sysvm_table_vram_alloc(adev);
>  }
>  
>  /**
> - * gmc_v7_0_gart_disable - gart disable
> + * gmc_v7_0_sysvm_disable - gart disable
>   *
>   * @adev: amdgpu_device pointer
>   *
>   * This disables all VM page table (CIK).
>   */
> -static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
> +static void gmc_v7_0_sysvm_disable(struct amdgpu_device *adev)
>  {
>  	u32 tmp;
>  
> @@ -688,7 +688,7 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
>  	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
>  	WREG32(mmVM_L2_CNTL, tmp);
>  	WREG32(mmVM_L2_CNTL2, 0);
> -	amdgpu_gart_table_vram_unpin(adev);
> +	amdgpu_sysvm_table_vram_unpin(adev);
>  }
>  
>  /**
> @@ -700,8 +700,8 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
>   */
>  static void gmc_v7_0_gart_fini(struct amdgpu_device *adev)
>  {
> -	amdgpu_gart_table_vram_free(adev);
> -	amdgpu_gart_fini(adev);
> +	amdgpu_sysvm_table_vram_free(adev);
> +	amdgpu_sysvm_fini(adev);
>  }
>  
>  /**
> @@ -885,7 +885,7 @@ static int gmc_v7_0_early_init(void *handle)
>  {
>  	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>  
> -	gmc_v7_0_set_gart_funcs(adev);
> +	gmc_v7_0_set_sysvm_funcs(adev);
>  	gmc_v7_0_set_irq_funcs(adev);
>  
>  	adev->mc.shared_aperture_start = 0x2000000000000000ULL;
> @@ -1036,7 +1036,7 @@ static int gmc_v7_0_hw_init(void *handle)
>  		}
>  	}
>  
> -	r = gmc_v7_0_gart_enable(adev);
> +	r = gmc_v7_0_sysvm_enable(adev);
>  	if (r)
>  		return r;
>  
> @@ -1048,7 +1048,7 @@ static int gmc_v7_0_hw_fini(void *handle)
>  	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>  
>  	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
> -	gmc_v7_0_gart_disable(adev);
> +	gmc_v7_0_sysvm_disable(adev);
>  
>  	return 0;
>  }
> @@ -1270,7 +1270,7 @@ static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
>  	.set_powergating_state = gmc_v7_0_set_powergating_state,
>  };
>  
> -static const struct amdgpu_gart_funcs gmc_v7_0_gart_funcs = {
> +static const struct amdgpu_sysvm_funcs gmc_v7_0_sysvm_funcs = {
>  	.flush_gpu_tlb = gmc_v7_0_gart_flush_gpu_tlb,
>  	.set_pte_pde = gmc_v7_0_gart_set_pte_pde,
>  	.set_prt = gmc_v7_0_set_prt,
> @@ -1283,10 +1283,10 @@ static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
>  	.process = gmc_v7_0_process_interrupt,
>  };
>  
> -static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev)
> +static void gmc_v7_0_set_sysvm_funcs(struct amdgpu_device *adev)
>  {
> -	if (adev->gart.gart_funcs == NULL)
> -		adev->gart.gart_funcs = &gmc_v7_0_gart_funcs;
> +	if (adev->sysvm.sysvm_funcs == NULL)
> +		adev->sysvm.sysvm_funcs = &gmc_v7_0_sysvm_funcs;
>  }
>  
>  static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
> index 213af65..cf8f8d2 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
> @@ -41,7 +41,7 @@
>  #include "amdgpu_atombios.h"
>  
>  
> -static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
> +static void gmc_v8_0_set_sysvm_funcs(struct amdgpu_device *adev);
>  static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
>  static int gmc_v8_0_wait_for_idle(void *handle);
>  
> @@ -393,7 +393,7 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
>  	return 0;
>  }
>  
> -static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
> +static void gmc_v8_0_vram_sysvm_location(struct amdgpu_device *adev,
>  				       struct amdgpu_mc *mc)
>  {
>  	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
> @@ -406,8 +406,8 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
>  		mc->mc_vram_size = 0xFFC0000000ULL;
>  	}
>  	amdgpu_vram_location(adev, &adev->mc, base);
> -	adev->mc.gtt_base_align = 0;
> -	amdgpu_gtt_location(adev, mc);
> +	adev->mc.sysvm_base_align = 0;
> +	amdgpu_sysvm_location(adev, mc);
>  }
>  
>  /**
> @@ -535,8 +535,8 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
>  	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
>  		adev->mc.visible_vram_size = adev->mc.real_vram_size;
>  
> -	amdgpu_gart_set_defaults(adev);
> -	gmc_v8_0_vram_gtt_location(adev, &adev->mc);
> +	amdgpu_sysvm_set_defaults(adev);
> +	gmc_v8_0_vram_sysvm_location(adev, &adev->mc);
>  
>  	return 0;
>  }
> @@ -722,7 +722,7 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
>  }
>  
>  /**
> - * gmc_v8_0_gart_enable - gart enable
> + * gmc_v8_0_sysvm_enable - gart enable
>   *
>   * @adev: amdgpu_device pointer
>   *
> @@ -732,16 +732,16 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
>   * and GPUVM for FSA64 clients (CIK).
>   * Returns 0 for success, errors for failure.
>   */
> -static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
> +static int gmc_v8_0_sysvm_enable(struct amdgpu_device *adev)
>  {
>  	int r, i;
>  	u32 tmp;
>  
> -	if (adev->gart.robj == NULL) {
> +	if (adev->sysvm.robj == NULL) {
>  		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
>  		return -EINVAL;
>  	}
> -	r = amdgpu_gart_table_vram_pin(adev);
> +	r = amdgpu_sysvm_table_vram_pin(adev);
>  	if (r)
>  		return r;
>  	/* Setup TLB control */
> @@ -787,9 +787,9 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
>  	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
>  	WREG32(mmVM_L2_CNTL4, tmp);
>  	/* setup context0 */
> -	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
> -	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
> -	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
> +	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.sysvm_start >> 12);
> +	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.sysvm_end >> 12);
> +	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->sysvm.table_addr >> 12);
>  	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
>  			(u32)(adev->dummy_page.addr >> 12));
>  	WREG32(mmVM_CONTEXT0_CNTL2, 0);
> @@ -813,10 +813,10 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
>  	for (i = 1; i < 16; i++) {
>  		if (i < 8)
>  			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
> -			       adev->gart.table_addr >> 12);
> +			       adev->sysvm.table_addr >> 12);
>  		else
>  			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
> -			       adev->gart.table_addr >> 12);
> +			       adev->sysvm.table_addr >> 12);
>  	}
>  
>  	/* enable context1-15 */
> @@ -843,9 +843,9 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
>  
>  	gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
>  	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
> -		 (unsigned)(adev->mc.gtt_size >> 20),
> -		 (unsigned long long)adev->gart.table_addr);
> -	adev->gart.ready = true;
> +		 (unsigned)(adev->mc.sysvm_size >> 20),
> +		 (unsigned long long)adev->sysvm.table_addr);
> +	adev->sysvm.ready = true;
>  	return 0;
>  }
>  
> @@ -853,27 +853,27 @@ static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
>  {
>  	int r;
>  
> -	if (adev->gart.robj) {
> +	if (adev->sysvm.robj) {
>  		WARN(1, "R600 PCIE GART already initialized\n");
>  		return 0;
>  	}
>  	/* Initialize common gart structure */
> -	r = amdgpu_gart_init(adev);
> +	r = amdgpu_sysvm_init(adev);
>  	if (r)
>  		return r;
> -	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
> -	adev->gart.gart_pte_flags = AMDGPU_PTE_EXECUTABLE;
> -	return amdgpu_gart_table_vram_alloc(adev);
> +	adev->sysvm.table_size = adev->sysvm.num_gpu_pages * 8;
> +	adev->sysvm.sysvm_pte_flags = AMDGPU_PTE_EXECUTABLE;
> +	return amdgpu_sysvm_table_vram_alloc(adev);
>  }
>  
>  /**
> - * gmc_v8_0_gart_disable - gart disable
> + * gmc_v8_0_sysvm_disable - gart disable
>   *
>   * @adev: amdgpu_device pointer
>   *
>   * This disables all VM page table (CIK).
>   */
> -static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
> +static void gmc_v8_0_sysvm_disable(struct amdgpu_device *adev)
>  {
>  	u32 tmp;
>  
> @@ -891,7 +891,7 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
>  	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
>  	WREG32(mmVM_L2_CNTL, tmp);
>  	WREG32(mmVM_L2_CNTL2, 0);
> -	amdgpu_gart_table_vram_unpin(adev);
> +	amdgpu_sysvm_table_vram_unpin(adev);
>  }
>  
>  /**
> @@ -903,8 +903,8 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
>   */
>  static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
>  {
> -	amdgpu_gart_table_vram_free(adev);
> -	amdgpu_gart_fini(adev);
> +	amdgpu_sysvm_table_vram_free(adev);
> +	amdgpu_sysvm_fini(adev);
>  }
>  
>  /**
> @@ -962,7 +962,7 @@ static int gmc_v8_0_early_init(void *handle)
>  {
>  	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>  
> -	gmc_v8_0_set_gart_funcs(adev);
> +	gmc_v8_0_set_sysvm_funcs(adev);
>  	gmc_v8_0_set_irq_funcs(adev);
>  
>  	adev->mc.shared_aperture_start = 0x2000000000000000ULL;
> @@ -1128,7 +1128,7 @@ static int gmc_v8_0_hw_init(void *handle)
>  		}
>  	}
>  
> -	r = gmc_v8_0_gart_enable(adev);
> +	r = gmc_v8_0_sysvm_enable(adev);
>  	if (r)
>  		return r;
>  
> @@ -1140,7 +1140,7 @@ static int gmc_v8_0_hw_fini(void *handle)
>  	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>  
>  	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
> -	gmc_v8_0_gart_disable(adev);
> +	gmc_v8_0_sysvm_disable(adev);
>  
>  	return 0;
>  }
> @@ -1590,7 +1590,7 @@ static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
>  	.get_clockgating_state = gmc_v8_0_get_clockgating_state,
>  };
>  
> -static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = {
> +static const struct amdgpu_sysvm_funcs gmc_v8_0_sysvm_funcs = {
>  	.flush_gpu_tlb = gmc_v8_0_gart_flush_gpu_tlb,
>  	.set_pte_pde = gmc_v8_0_gart_set_pte_pde,
>  	.set_prt = gmc_v8_0_set_prt,
> @@ -1603,10 +1603,10 @@ static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
>  	.process = gmc_v8_0_process_interrupt,
>  };
>  
> -static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev)
> +static void gmc_v8_0_set_sysvm_funcs(struct amdgpu_device *adev)
>  {
> -	if (adev->gart.gart_funcs == NULL)
> -		adev->gart.gart_funcs = &gmc_v8_0_gart_funcs;
> +	if (adev->sysvm.sysvm_funcs == NULL)
> +		adev->sysvm.sysvm_funcs = &gmc_v8_0_sysvm_funcs;
>  }
>  
>  static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
> index dbb43d9..f067465 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
> @@ -365,7 +365,7 @@ static u64 gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, u64 addr)
>  	return addr;
>  }
>  
> -static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
> +static const struct amdgpu_sysvm_funcs gmc_v9_0_sysvm_funcs = {
>  	.flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
>  	.set_pte_pde = gmc_v9_0_gart_set_pte_pde,
>  	.get_invalidate_req = gmc_v9_0_get_invalidate_req,
> @@ -373,17 +373,17 @@ static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
>  	.get_vm_pde = gmc_v9_0_get_vm_pde
>  };
>  
> -static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
> +static void gmc_v9_0_set_sysvm_funcs(struct amdgpu_device *adev)
>  {
> -	if (adev->gart.gart_funcs == NULL)
> -		adev->gart.gart_funcs = &gmc_v9_0_gart_funcs;
> +	if (adev->sysvm.sysvm_funcs == NULL)
> +		adev->sysvm.sysvm_funcs = &gmc_v9_0_sysvm_funcs;
>  }
>  
>  static int gmc_v9_0_early_init(void *handle)
>  {
>  	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>  
> -	gmc_v9_0_set_gart_funcs(adev);
> +	gmc_v9_0_set_sysvm_funcs(adev);
>  	gmc_v9_0_set_irq_funcs(adev);
>  
>  	return 0;
> @@ -412,15 +412,15 @@ static int gmc_v9_0_late_init(void *handle)
>  	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
>  }
>  
> -static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
> +static void gmc_v9_0_vram_sysvm_location(struct amdgpu_device *adev,
>  					struct amdgpu_mc *mc)
>  {
>  	u64 base = 0;
>  	if (!amdgpu_sriov_vf(adev))
>  		base = mmhub_v1_0_get_fb_location(adev);
>  	amdgpu_vram_location(adev, &adev->mc, base);
> -	adev->mc.gtt_base_align = 0;
> -	amdgpu_gtt_location(adev, mc);
> +	adev->mc.sysvm_base_align = 0;
> +	amdgpu_sysvm_location(adev, mc);
>  	/* base offset of vram pages */
>  	if (adev->flags & AMD_IS_APU)
>  		adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
> @@ -494,8 +494,8 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
>  	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
>  		adev->mc.visible_vram_size = adev->mc.real_vram_size;
>  
> -	amdgpu_gart_set_defaults(adev);
> -	gmc_v9_0_vram_gtt_location(adev, &adev->mc);
> +	amdgpu_sysvm_set_defaults(adev);
> +	gmc_v9_0_vram_sysvm_location(adev, &adev->mc);
>  
>  	return 0;
>  }
> @@ -504,18 +504,18 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
>  {
>  	int r;
>  
> -	if (adev->gart.robj) {
> +	if (adev->sysvm.robj) {
>  		WARN(1, "VEGA10 PCIE GART already initialized\n");
>  		return 0;
>  	}
>  	/* Initialize common gart structure */
> -	r = amdgpu_gart_init(adev);
> +	r = amdgpu_sysvm_init(adev);
>  	if (r)
>  		return r;
> -	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
> -	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
> +	adev->sysvm.table_size = adev->sysvm.num_gpu_pages * 8;
> +	adev->sysvm.sysvm_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
>  				 AMDGPU_PTE_EXECUTABLE;
> -	return amdgpu_gart_table_vram_alloc(adev);
> +	return amdgpu_sysvm_table_vram_alloc(adev);
>  }
>  
>  static int gmc_v9_0_sw_init(void *handle)
> @@ -640,8 +640,8 @@ static int gmc_v9_0_sw_init(void *handle)
>   */
>  static void gmc_v9_0_gart_fini(struct amdgpu_device *adev)
>  {
> -	amdgpu_gart_table_vram_free(adev);
> -	amdgpu_gart_fini(adev);
> +	amdgpu_sysvm_table_vram_free(adev);
> +	amdgpu_sysvm_fini(adev);
>  }
>  
>  static int gmc_v9_0_sw_fini(void *handle)
> @@ -669,11 +669,11 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
>  }
>  
>  /**
> - * gmc_v9_0_gart_enable - gart enable
> + * gmc_v9_0_sysvm_enable - gart enable
>   *
>   * @adev: amdgpu_device pointer
>   */
> -static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
> +static int gmc_v9_0_sysvm_enable(struct amdgpu_device *adev)
>  {
>  	int r;
>  	bool value;
> @@ -683,11 +683,11 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
>  		golden_settings_vega10_hdp,
>  		(const u32)ARRAY_SIZE(golden_settings_vega10_hdp));
>  
> -	if (adev->gart.robj == NULL) {
> +	if (adev->sysvm.robj == NULL) {
>  		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
>  		return -EINVAL;
>  	}
> -	r = amdgpu_gart_table_vram_pin(adev);
> +	r = amdgpu_sysvm_table_vram_pin(adev);
>  	if (r)
>  		return r;
>  
> @@ -706,11 +706,11 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
>  		break;
>  	}
>  
> -	r = gfxhub_v1_0_gart_enable(adev);
> +	r = gfxhub_v1_0_sysvm_enable(adev);
>  	if (r)
>  		return r;
>  
> -	r = mmhub_v1_0_gart_enable(adev);
> +	r = mmhub_v1_0_sysvm_enable(adev);
>  	if (r)
>  		return r;
>  
> @@ -733,9 +733,9 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
>  	gmc_v9_0_gart_flush_gpu_tlb(adev, 0);
>  
>  	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
> -		 (unsigned)(adev->mc.gtt_size >> 20),
> -		 (unsigned long long)adev->gart.table_addr);
> -	adev->gart.ready = true;
> +		 (unsigned)(adev->mc.sysvm_size >> 20),
> +		 (unsigned long long)adev->sysvm.table_addr);
> +	adev->sysvm.ready = true;
>  	return 0;
>  }
>  
> @@ -747,23 +747,23 @@ static int gmc_v9_0_hw_init(void *handle)
>  	/* The sequence of these two function calls matters.*/
>  	gmc_v9_0_init_golden_registers(adev);
>  
> -	r = gmc_v9_0_gart_enable(adev);
> +	r = gmc_v9_0_sysvm_enable(adev);
>  
>  	return r;
>  }
>  
>  /**
> - * gmc_v9_0_gart_disable - gart disable
> + * gmc_v9_0_sysvm_disable - gart disable
>   *
>   * @adev: amdgpu_device pointer
>   *
>   * This disables all VM page table.
>   */
> -static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
> +static void gmc_v9_0_sysvm_disable(struct amdgpu_device *adev)
>  {
> -	gfxhub_v1_0_gart_disable(adev);
> -	mmhub_v1_0_gart_disable(adev);
> -	amdgpu_gart_table_vram_unpin(adev);
> +	gfxhub_v1_0_sysvm_disable(adev);
> +	mmhub_v1_0_sysvm_disable(adev);
> +	amdgpu_sysvm_table_vram_unpin(adev);
>  }
>  
>  static int gmc_v9_0_hw_fini(void *handle)
> @@ -777,7 +777,7 @@ static int gmc_v9_0_hw_fini(void *handle)
>  	}
>  
>  	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
> -	gmc_v9_0_gart_disable(adev);
> +	gmc_v9_0_sysvm_disable(adev);
>  
>  	return 0;
>  }
> diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
> index 9804318..fbc8f6e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
> @@ -51,8 +51,8 @@ static void mmhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
>  {
>  	uint64_t value;
>  
> -	BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL));
> -	value = adev->gart.table_addr - adev->mc.vram_start +
> +	BUG_ON(adev->sysvm.table_addr & (~0x0000FFFFFFFFF000ULL));
> +	value = adev->sysvm.table_addr - adev->mc.vram_start +
>  		adev->vm_manager.vram_base_offset;
>  	value &= 0x0000FFFFFFFFF000ULL;
>  	value |= 0x1; /* valid bit */
> @@ -69,14 +69,14 @@ static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
>  	mmhub_v1_0_init_gart_pt_regs(adev);
>  
>  	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
> -		     (u32)(adev->mc.gtt_start >> 12));
> +		     (u32)(adev->mc.sysvm_start >> 12));
>  	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
> -		     (u32)(adev->mc.gtt_start >> 44));
> +		     (u32)(adev->mc.sysvm_start >> 44));
>  
>  	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
> -		     (u32)(adev->mc.gtt_end >> 12));
> +		     (u32)(adev->mc.sysvm_end >> 12));
>  	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
> -		     (u32)(adev->mc.gtt_end >> 44));
> +		     (u32)(adev->mc.sysvm_end >> 44));
>  }
>  
>  static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
> @@ -462,7 +462,7 @@ void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
>  	}
>  }
>  
> -int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
> +int mmhub_v1_0_sysvm_enable(struct amdgpu_device *adev)
>  {
>  	if (amdgpu_sriov_vf(adev)) {
>  		/*
> @@ -490,7 +490,7 @@ int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
>  	return 0;
>  }
>  
> -void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
> +void mmhub_v1_0_sysvm_disable(struct amdgpu_device *adev)
>  {
>  	u32 tmp;
>  	u32 i;
> diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
> index 57bb940..23128e5 100644
> --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
> +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
> @@ -24,8 +24,8 @@
>  #define __MMHUB_V1_0_H__
>  
>  u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev);
> -int mmhub_v1_0_gart_enable(struct amdgpu_device *adev);
> -void mmhub_v1_0_gart_disable(struct amdgpu_device *adev);
> +int mmhub_v1_0_sysvm_enable(struct amdgpu_device *adev);
> +void mmhub_v1_0_sysvm_disable(struct amdgpu_device *adev);
>  void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
>  					 bool value);
>  void mmhub_v1_0_init(struct amdgpu_device *adev);
> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
> index 4a65697..056b169 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
> @@ -1121,10 +1121,10 @@ static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
>  					 unsigned vm_id, uint64_t pd_addr)
>  {
>  	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
> -	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
> +	uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
>  	unsigned eng = ring->vm_inv_eng;
>  
> -	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
> +	pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
>  	pd_addr |= AMDGPU_PTE_VALID;
>  
>  	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
> diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
> index 987b958..95913fd 100644
> --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
> @@ -1312,11 +1312,11 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
>  					unsigned vm_id, uint64_t pd_addr)
>  {
>  	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
> -	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
> +	uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
>  	uint32_t data0, data1, mask;
>  	unsigned eng = ring->vm_inv_eng;
>  
> -	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
> +	pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
>  	pd_addr |= AMDGPU_PTE_VALID;
>  
>  	data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
> @@ -1353,10 +1353,10 @@ static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
>  			 unsigned int vm_id, uint64_t pd_addr)
>  {
>  	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
> -	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
> +	uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
>  	unsigned eng = ring->vm_inv_eng;
>  
> -	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
> +	pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
>  	pd_addr |= AMDGPU_PTE_VALID;
>  
>  	amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
> diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
> index 1ecd6bb..b869423 100644
> --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
> @@ -952,10 +952,10 @@ static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
>  			 unsigned int vm_id, uint64_t pd_addr)
>  {
>  	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
> -	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
> +	uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
>  	unsigned eng = ring->vm_inv_eng;
>  
> -	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
> +	pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
>  	pd_addr |= AMDGPU_PTE_VALID;
>  
>  	amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
> diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
> index 21e7b88..2ca49af 100644
> --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
> @@ -878,11 +878,11 @@ static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
>  					unsigned vm_id, uint64_t pd_addr)
>  {
>  	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
> -	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
> +	uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
>  	uint32_t data0, data1, mask;
>  	unsigned eng = ring->vm_inv_eng;
>  
> -	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
> +	pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
>  	pd_addr |= AMDGPU_PTE_VALID;
>  
>  	data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
> @@ -1010,10 +1010,10 @@ static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
>  			 unsigned int vm_id, uint64_t pd_addr)
>  {
>  	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
> -	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
> +	uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
>  	unsigned eng = ring->vm_inv_eng;
>  
> -	pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
> +	pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
>  	pd_addr |= AMDGPU_PTE_VALID;
>  
>  	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 07/11] drm/amdgpu: rename GART to SYSVM
       [not found]         ` <61109920-9d05-cb27-67b3-51a1b46b15bc-5C7GfCeVMHo@public.gmane.org>
@ 2017-07-05  0:57           ` Michel Dänzer
       [not found]             ` <06b47744-60a4-f79e-e120-60d7bcff8526-otUistvHUpPR7s880joybQ@public.gmane.org>
  2017-07-06 16:22           ` Alex Deucher
  1 sibling, 1 reply; 30+ messages in thread
From: Michel Dänzer @ 2017-07-05  0:57 UTC (permalink / raw)
  To: Felix Kuehling, Christian König
  Cc: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

On 05/07/17 06:11 AM, Felix Kuehling wrote:
> I'm afraid this will lead to more confusion when talking to different
> teams in AMD. At least to me "GART" was always understood to be the
> system-wide address translation table (VMID-0). The remnant from the
> pre-GPUVM days. You're now calling that SYSVM, while all GPU-accessible
> system memory is still called GTT.
> 
> I think it would make more sense to call general GPU-mapped system
> memory "SYSVM" or "GPUVM", and only call the global VMID-0 page table
> "GART".
> 
> But of course that would require changes in TTM and all TTM drivers, so
> it's not a realistic option.

FWIW, the string "GART" doesn't appear anywhere in TTM, so I think your
idea is feasible.


-- 
Earthling Michel Dänzer               |               http://www.amd.com
Libre software enthusiast             |             Mesa and X developer
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 07/11] drm/amdgpu: rename GART to SYSVM
       [not found]             ` <06b47744-60a4-f79e-e120-60d7bcff8526-otUistvHUpPR7s880joybQ@public.gmane.org>
@ 2017-07-05  7:22               ` Christian König
  0 siblings, 0 replies; 30+ messages in thread
From: Christian König @ 2017-07-05  7:22 UTC (permalink / raw)
  To: Michel Dänzer, Felix Kuehling, Christian König
  Cc: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Am 05.07.2017 um 02:57 schrieb Michel Dänzer:
> On 05/07/17 06:11 AM, Felix Kuehling wrote:
>> I'm afraid this will lead to more confusion when talking to different
>> teams in AMD. At least to me "GART" was always understood to be the
>> system-wide address translation table (VMID-0). The remnant from the
>> pre-GPUVM days. You're now calling that SYSVM, while all GPU-accessible
>> system memory is still called GTT.
>>
>> I think it would make more sense to call general GPU-mapped system
>> memory "SYSVM" or "GPUVM", and only call the global VMID-0 page table
>> "GART".
>>
>> But of course that would require changes in TTM and all TTM drivers, so
>> it's not a realistic option.
> FWIW, the string "GART" doesn't appear anywhere in TTM, so I think your
> idea is feasible.
Yeah, TTM calls the domain TT (for translation table, I think). That is
a rather good name, because it doesn't make any assumption about what
kind of translation table it is (AGP, GART, GPUVM etc.).

But using SYSVM or GPUVM as the name for the TTM domain wouldn't work,
for the simple reason that this naming is completely AMD-specific.

I've considered vmid0 as well, but that didn't look good to me.
"system VM" is at least what our hardware developers use in the VM 
documentation, but I'm not totally happy with using the name either.

So feel free to throw in ideas.

Regards,
Christian.
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 01/11] drm/amdgpu: reserve the first 2x512 of GART
       [not found] ` <1499075076-1851-1-git-send-email-deathsimple-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
                     ` (9 preceding siblings ...)
  2017-07-03  9:44   ` [PATCH 11/11] drm/amdgpu: add sysvm_size Christian König
@ 2017-07-06 16:15   ` Alex Deucher
  10 siblings, 0 replies; 30+ messages in thread
From: Alex Deucher @ 2017-07-06 16:15 UTC (permalink / raw)
  To: Christian König; +Cc: amd-gfx list

On Mon, Jul 3, 2017 at 5:44 AM, Christian König <deathsimple@vodafone.de> wrote:
> From: Christian König <christian.koenig@amd.com>
>
> We want to use them as remap address space.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>

2x512 pages.  With that updated:
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
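
For reference, the reservation works out to AMDGPU_GTT_NUM_TRANSFER_WINDOWS *
AMDGPU_GTT_MAX_TRANSFER_SIZE = 1024 GPU pages carved out at the bottom of the
GTT domain before drm_mm ever sees them.  A standalone sketch of that
arithmetic (constant names mirror amdgpu_ttm.h; the 4 KiB GPU page size and
the 1M-page domain are just example assumptions, not values queried from the
hardware):

/* reserve_sketch.c - illustrates the offset math from this hunk. */
#include <stdio.h>
#include <stdint.h>

#define AMDGPU_GTT_MAX_TRANSFER_SIZE    512  /* pages per copy window */
#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS   2  /* one for src, one for dst */
#define GPU_PAGE_SIZE                  4096  /* assumed AMDGPU_GPU_PAGE_SIZE */

int main(void)
{
        uint64_t p_size = (uint64_t)1 << 20;  /* example: 1M-page GTT domain */
        uint64_t start = AMDGPU_GTT_MAX_TRANSFER_SIZE *
                         AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
        uint64_t size = p_size - start;

        /* drm_mm_init(&mgr->mm, start, size) then skips the first 1024
         * pages (4 MiB); they stay free for the transfer windows. */
        printf("reserved %llu pages (%llu MiB), drm_mm manages %llu pages from offset %llu\n",
               (unsigned long long)start,
               (unsigned long long)((start * GPU_PAGE_SIZE) >> 20),
               (unsigned long long)size,
               (unsigned long long)start);
        return 0;
}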

> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 5 ++++-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h     | 3 +++
>  2 files changed, 7 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
> index 1ef6255..f46a97d 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
> @@ -43,12 +43,15 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
>                                unsigned long p_size)
>  {
>         struct amdgpu_gtt_mgr *mgr;
> +       uint64_t start, size;
>
>         mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
>         if (!mgr)
>                 return -ENOMEM;
>
> -       drm_mm_init(&mgr->mm, 0, p_size);
> +       start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
> +       size = p_size - start;
> +       drm_mm_init(&mgr->mm, start, size);
>         spin_lock_init(&mgr->lock);
>         mgr->available = p_size;
>         man->priv = mgr;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
> index 776a20a..c8059f0 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
> @@ -34,6 +34,9 @@
>  #define AMDGPU_PL_FLAG_GWS             (TTM_PL_FLAG_PRIV << 1)
>  #define AMDGPU_PL_FLAG_OA              (TTM_PL_FLAG_PRIV << 2)
>
> +#define AMDGPU_GTT_MAX_TRANSFER_SIZE   512
> +#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS        2
> +
>  struct amdgpu_mman {
>         struct ttm_bo_global_ref        bo_global_ref;
>         struct drm_global_reference     mem_global_ref;
> --
> 2.7.4
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 02/11] drm/amdgpu: add amdgpu_gart_map function v2
       [not found]     ` <1499075076-1851-2-git-send-email-deathsimple-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
@ 2017-07-06 16:16       ` Alex Deucher
  0 siblings, 0 replies; 30+ messages in thread
From: Alex Deucher @ 2017-07-06 16:16 UTC (permalink / raw)
  To: Christian König; +Cc: amd-gfx list

On Mon, Jul 3, 2017 at 5:44 AM, Christian König <deathsimple@vodafone.de> wrote:
> From: Christian König <christian.koenig@amd.com>
>
> This allows us to write the mapped PTEs into
> an IB instead of the table directly.
>
> v2: fix build with debugfs enabled, remove unused assignment
>
> Signed-off-by: Christian König <christian.koenig@amd.com>

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
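
To make the inner loop easier to follow: each CPU page expands into
PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE consecutive GART entries, and the same loop
can now target either the live table or an IB through the dst pointer.  A
standalone sketch of just that expansion (the 16 KiB CPU page is assumed
purely so the inner loop runs more than once, the DMA addresses are made up,
and flags handling is omitted):

/* pte_expand_sketch.c - mirrors the double loop in amdgpu_gart_map(). */
#include <stdio.h>
#include <stdint.h>

#define CPU_PAGE_SIZE 16384  /* assumption for illustration only */
#define GPU_PAGE_SIZE  4096  /* assumed AMDGPU_GPU_PAGE_SIZE */

int main(void)
{
        uint64_t dma_addr[2] = { 0x100000, 0x200000 };  /* fake DMA addresses */
        uint64_t ptes[8];                   /* stands in for table or IB space */
        unsigned pages = 2, i, j, t = 0;

        for (i = 0; i < pages; i++) {
                uint64_t page_base = dma_addr[i];
                for (j = 0; j < CPU_PAGE_SIZE / GPU_PAGE_SIZE; j++, t++) {
                        ptes[t] = page_base;   /* the driver also ORs in flags */
                        page_base += GPU_PAGE_SIZE;
                }
        }
        for (i = 0; i < t; i++)
                printf("pte[%u] = 0x%llx\n", i, (unsigned long long)ptes[i]);
        return 0;
}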

> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu.h      |  3 ++
>  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 62 ++++++++++++++++++++++++--------
>  2 files changed, 51 insertions(+), 14 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index 810796a..4a2b33d 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -572,6 +572,9 @@ int amdgpu_gart_init(struct amdgpu_device *adev);
>  void amdgpu_gart_fini(struct amdgpu_device *adev);
>  int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
>                         int pages);
> +int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
> +                   int pages, dma_addr_t *dma_addr, uint64_t flags,
> +                   void *dst);
>  int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
>                      int pages, struct page **pagelist,
>                      dma_addr_t *dma_addr, uint64_t flags);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
> index 8877015..c808388 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
> @@ -280,6 +280,41 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
>  }
>
>  /**
> + * amdgpu_gart_map - map dma_addresses into GART entries
> + *
> + * @adev: amdgpu_device pointer
> + * @offset: offset into the GPU's gart aperture
> + * @pages: number of pages to bind
> + * @dma_addr: DMA addresses of pages
> + *
> + * Map the dma_addresses into GART entries (all asics).
> + * Returns 0 for success, -EINVAL for failure.
> + */
> +int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
> +                   int pages, dma_addr_t *dma_addr, uint64_t flags,
> +                   void *dst)
> +{
> +       uint64_t page_base;
> +       unsigned i, j, t;
> +
> +       if (!adev->gart.ready) {
> +               WARN(1, "trying to bind memory to uninitialized GART !\n");
> +               return -EINVAL;
> +       }
> +
> +       t = offset / AMDGPU_GPU_PAGE_SIZE;
> +
> +       for (i = 0; i < pages; i++) {
> +               page_base = dma_addr[i];
> +               for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
> +                       amdgpu_gart_set_pte_pde(adev, dst, t, page_base, flags);
> +                       page_base += AMDGPU_GPU_PAGE_SIZE;
> +               }
> +       }
> +       return 0;
> +}
> +
> +/**
>   * amdgpu_gart_bind - bind pages into the gart page table
>   *
>   * @adev: amdgpu_device pointer
> @@ -296,31 +331,30 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
>                      int pages, struct page **pagelist, dma_addr_t *dma_addr,
>                      uint64_t flags)
>  {
> -       unsigned t;
> -       unsigned p;
> -       uint64_t page_base;
> -       int i, j;
> +#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
> +       unsigned i,t,p;
> +#endif
> +       int r;
>
>         if (!adev->gart.ready) {
>                 WARN(1, "trying to bind memory to uninitialized GART !\n");
>                 return -EINVAL;
>         }
>
> +#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
>         t = offset / AMDGPU_GPU_PAGE_SIZE;
>         p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
> -
> -       for (i = 0; i < pages; i++, p++) {
> -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
> +       for (i = 0; i < pages; i++, p++)
>                 adev->gart.pages[p] = pagelist[i];
>  #endif
> -               if (adev->gart.ptr) {
> -                       page_base = dma_addr[i];
> -                       for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
> -                               amdgpu_gart_set_pte_pde(adev, adev->gart.ptr, t, page_base, flags);
> -                               page_base += AMDGPU_GPU_PAGE_SIZE;
> -                       }
> -               }
> +
> +       if (adev->gart.ptr) {
> +               r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
> +                           adev->gart.ptr);
> +               if (r)
> +                       return r;
>         }
> +
>         mb();
>         amdgpu_gart_flush_gpu_tlb(adev, 0);
>         return 0;
> --
> 2.7.4
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 03/11] drm/amdgpu: use the GTT windows for BO moves v2
       [not found]     ` <1499075076-1851-3-git-send-email-deathsimple-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
@ 2017-07-06 16:17       ` Alex Deucher
  0 siblings, 0 replies; 30+ messages in thread
From: Alex Deucher @ 2017-07-06 16:17 UTC (permalink / raw)
  To: Christian König; +Cc: amd-gfx list

On Mon, Jul 3, 2017 at 5:44 AM, Christian König <deathsimple@vodafone.de> wrote:
> From: Christian König <christian.koenig@amd.com>
>
> This way we don't need to map the full BO at a time any more.
>
> v2: use fixed windows for src/dst
>
> Signed-off-by: Christian König <christian.koenig@amd.com>

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
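
The window addressing is the interesting bit here: window n of the transfer
aperture starts at gtt_start + n * AMDGPU_GTT_MAX_TRANSFER_SIZE *
AMDGPU_GPU_PAGE_SIZE, while its PTEs sit at table_addr + n *
AMDGPU_GTT_MAX_TRANSFER_SIZE * 8, which is where the IB copies the freshly
built entries before the actual buffer copy runs.  A standalone sketch of
that math (gtt_start and table_addr are made-up example values, not anything
read from a device):

/* window_addr_sketch.c - per-window address math from amdgpu_map_buffer(). */
#include <stdio.h>
#include <stdint.h>

#define AMDGPU_GTT_MAX_TRANSFER_SIZE 512   /* pages per window */
#define GPU_PAGE_SIZE               4096   /* assumed AMDGPU_GPU_PAGE_SIZE */
#define PTE_SIZE                       8   /* bytes per GART entry */

int main(void)
{
        uint64_t gtt_start  = 0x0000008000000000ull;  /* example aperture base */
        uint64_t table_addr = 0x0000000000f00000ull;  /* example table address */
        unsigned window;

        for (window = 0; window < 2; window++) {
                /* GPU virtual address the copy engine goes through */
                uint64_t addr = gtt_start + (uint64_t)window *
                        AMDGPU_GTT_MAX_TRANSFER_SIZE * GPU_PAGE_SIZE;
                /* location of this window's PTEs inside the GART table */
                uint64_t dst = table_addr + (uint64_t)window *
                        AMDGPU_GTT_MAX_TRANSFER_SIZE * PTE_SIZE;
                printf("window %u: aperture 0x%llx, PTEs at 0x%llx\n",
                       window, (unsigned long long)addr,
                       (unsigned long long)dst);
        }
        return 0;
}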

> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 125 +++++++++++++++++++++++++++-----
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h |   2 +
>  2 files changed, 108 insertions(+), 19 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> index 15148f1..1fc9866 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> @@ -47,10 +47,15 @@
>
>  #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
>
> +static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
> +                            struct ttm_mem_reg *mem, unsigned num_pages,
> +                            uint64_t offset, unsigned window,
> +                            struct amdgpu_ring *ring,
> +                            uint64_t *addr);
> +
>  static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
>  static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
>
> -
>  /*
>   * Global memory.
>   */
> @@ -97,6 +102,8 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
>                 goto error_bo;
>         }
>
> +       mutex_init(&adev->mman.gtt_window_lock);
> +
>         ring = adev->mman.buffer_funcs_ring;
>         rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
>         r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,
> @@ -123,6 +130,7 @@ static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
>         if (adev->mman.mem_global_referenced) {
>                 amd_sched_entity_fini(adev->mman.entity.sched,
>                                       &adev->mman.entity);
> +               mutex_destroy(&adev->mman.gtt_window_lock);
>                 drm_global_item_unref(&adev->mman.bo_global_ref.ref);
>                 drm_global_item_unref(&adev->mman.mem_global_ref);
>                 adev->mman.mem_global_referenced = false;
> @@ -256,10 +264,13 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
>                                     struct drm_mm_node *mm_node,
>                                     struct ttm_mem_reg *mem)
>  {
> -       uint64_t addr;
> +       uint64_t addr = 0;
>
> -       addr = mm_node->start << PAGE_SHIFT;
> -       addr += bo->bdev->man[mem->mem_type].gpu_offset;
> +       if (mem->mem_type != TTM_PL_TT ||
> +           amdgpu_gtt_mgr_is_allocated(mem)) {
> +               addr = mm_node->start << PAGE_SHIFT;
> +               addr += bo->bdev->man[mem->mem_type].gpu_offset;
> +       }
>         return addr;
>  }
>
> @@ -284,34 +295,41 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
>                 return -EINVAL;
>         }
>
> -       if (old_mem->mem_type == TTM_PL_TT) {
> -               r = amdgpu_ttm_bind(bo, old_mem);
> -               if (r)
> -                       return r;
> -       }
> -
>         old_mm = old_mem->mm_node;
>         old_size = old_mm->size;
>         old_start = amdgpu_mm_node_addr(bo, old_mm, old_mem);
>
> -       if (new_mem->mem_type == TTM_PL_TT) {
> -               r = amdgpu_ttm_bind(bo, new_mem);
> -               if (r)
> -                       return r;
> -       }
> -
>         new_mm = new_mem->mm_node;
>         new_size = new_mm->size;
>         new_start = amdgpu_mm_node_addr(bo, new_mm, new_mem);
>
>         num_pages = new_mem->num_pages;
> +       mutex_lock(&adev->mman.gtt_window_lock);
>         while (num_pages) {
> -               unsigned long cur_pages = min(old_size, new_size);
> +               unsigned long cur_pages = min(min(old_size, new_size),
> +                                             (u64)AMDGPU_GTT_MAX_TRANSFER_SIZE);
> +               uint64_t from = old_start, to = new_start;
>                 struct dma_fence *next;
>
> -               r = amdgpu_copy_buffer(ring, old_start, new_start,
> +               if (old_mem->mem_type == TTM_PL_TT &&
> +                   !amdgpu_gtt_mgr_is_allocated(old_mem)) {
> +                       r = amdgpu_map_buffer(bo, old_mem, cur_pages,
> +                                             old_start, 0, ring, &from);
> +                       if (r)
> +                               goto error;
> +               }
> +
> +               if (new_mem->mem_type == TTM_PL_TT &&
> +                   !amdgpu_gtt_mgr_is_allocated(new_mem)) {
> +                       r = amdgpu_map_buffer(bo, new_mem, cur_pages,
> +                                             new_start, 1, ring, &to);
> +                       if (r)
> +                               goto error;
> +               }
> +
> +               r = amdgpu_copy_buffer(ring, from, to,
>                                        cur_pages * PAGE_SIZE,
> -                                      bo->resv, &next, false, false);
> +                                      bo->resv, &next, false, true);
>                 if (r)
>                         goto error;
>
> @@ -338,12 +356,15 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
>                         new_start += cur_pages * PAGE_SIZE;
>                 }
>         }
> +       mutex_unlock(&adev->mman.gtt_window_lock);
>
>         r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
>         dma_fence_put(fence);
>         return r;
>
>  error:
> +       mutex_unlock(&adev->mman.gtt_window_lock);
> +
>         if (fence)
>                 dma_fence_wait(fence, false);
>         dma_fence_put(fence);
> @@ -1253,6 +1274,72 @@ int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
>         return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
>  }
>
> +static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
> +                            struct ttm_mem_reg *mem, unsigned num_pages,
> +                            uint64_t offset, unsigned window,
> +                            struct amdgpu_ring *ring,
> +                            uint64_t *addr)
> +{
> +       struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
> +       struct amdgpu_device *adev = ring->adev;
> +       struct ttm_tt *ttm = bo->ttm;
> +       struct amdgpu_job *job;
> +       unsigned num_dw, num_bytes;
> +       dma_addr_t *dma_address;
> +       struct dma_fence *fence;
> +       uint64_t src_addr, dst_addr;
> +       uint64_t flags;
> +       int r;
> +
> +       BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
> +              AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
> +
> +       *addr = adev->mc.gtt_start;
> +       *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
> +               AMDGPU_GPU_PAGE_SIZE;
> +
> +       num_dw = adev->mman.buffer_funcs->copy_num_dw;
> +       while (num_dw & 0x7)
> +               num_dw++;
> +
> +       num_bytes = num_pages * 8;
> +
> +       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
> +       if (r)
> +               return r;
> +
> +       src_addr = num_dw * 4;
> +       src_addr += job->ibs[0].gpu_addr;
> +
> +       dst_addr = adev->gart.table_addr;
> +       dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
> +       amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
> +                               dst_addr, num_bytes);
> +
> +       amdgpu_ring_pad_ib(ring, &job->ibs[0]);
> +       WARN_ON(job->ibs[0].length_dw > num_dw);
> +
> +       dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
> +       flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
> +       r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
> +                           &job->ibs[0].ptr[num_dw]);
> +       if (r)
> +               goto error_free;
> +
> +       r = amdgpu_job_submit(job, ring, &adev->mman.entity,
> +                             AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
> +       if (r)
> +               goto error_free;
> +
> +       dma_fence_put(fence);
> +
> +       return r;
> +
> +error_free:
> +       amdgpu_job_free(job);
> +       return r;
> +}
> +
>  int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
>                        uint64_t dst_offset, uint32_t byte_count,
>                        struct reservation_object *resv,
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
> index c8059f0..4f5c1da 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
> @@ -52,6 +52,8 @@ struct amdgpu_mman {
>         /* buffer handling */
>         const struct amdgpu_buffer_funcs        *buffer_funcs;
>         struct amdgpu_ring                      *buffer_funcs_ring;
> +
> +       struct mutex                            gtt_window_lock;
>         /* Scheduler entity for buffer moves */
>         struct amd_sched_entity                 entity;
>  };
> --
> 2.7.4
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 04/11] drm/amdgpu: stop mapping BOs to GTT
       [not found]     ` <1499075076-1851-4-git-send-email-deathsimple-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
@ 2017-07-06 16:18       ` Alex Deucher
  0 siblings, 0 replies; 30+ messages in thread
From: Alex Deucher @ 2017-07-06 16:18 UTC (permalink / raw)
  To: Christian König; +Cc: amd-gfx list

On Mon, Jul 3, 2017 at 5:44 AM, Christian König <deathsimple@vodafone.de> wrote:
> From: Christian König <christian.koenig@amd.com>
>
> No need to map BOs to GTT on eviction and intermediate transfers any more.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>

> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 19 ++-----------------
>  1 file changed, 2 insertions(+), 17 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> index 1fc9866..5c7a6c5 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> @@ -199,7 +199,6 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
>                 .lpfn = 0,
>                 .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
>         };
> -       unsigned i;
>
>         if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
>                 placement->placement = &placements;
> @@ -217,20 +216,6 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
>                         amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
>                 } else {
>                         amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
> -                       for (i = 0; i < abo->placement.num_placement; ++i) {
> -                               if (!(abo->placements[i].flags &
> -                                     TTM_PL_FLAG_TT))
> -                                       continue;
> -
> -                               if (abo->placements[i].lpfn)
> -                                       continue;
> -
> -                               /* set an upper limit to force directly
> -                                * allocating address space for the BO.
> -                                */
> -                               abo->placements[i].lpfn =
> -                                       adev->mc.gtt_size >> PAGE_SHIFT;
> -                       }
>                 }
>                 break;
>         case TTM_PL_TT:
> @@ -391,7 +376,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
>         placement.num_busy_placement = 1;
>         placement.busy_placement = &placements;
>         placements.fpfn = 0;
> -       placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT;
> +       placements.lpfn = 0;
>         placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
>         r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
>                              interruptible, no_wait_gpu);
> @@ -438,7 +423,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
>         placement.num_busy_placement = 1;
>         placement.busy_placement = &placements;
>         placements.fpfn = 0;
> -       placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT;
> +       placements.lpfn = 0;
>         placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
>         r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
>                              interruptible, no_wait_gpu);
> --
> 2.7.4
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 05/11] drm/amdgpu: remove maximum BO size limitation v2
       [not found]     ` <1499075076-1851-5-git-send-email-deathsimple-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
@ 2017-07-06 16:18       ` Alex Deucher
  0 siblings, 0 replies; 30+ messages in thread
From: Alex Deucher @ 2017-07-06 16:18 UTC (permalink / raw)
  To: Christian König; +Cc: amd-gfx list

On Mon, Jul 3, 2017 at 5:44 AM, Christian König <deathsimple@vodafone.de> wrote:
> From: Christian König <christian.koenig@amd.com>
>
> We can finally remove this now.
>
> v2: remove now unused max_size variable as well.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>

> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 12 ------------
>  1 file changed, 12 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> index 96c4493..917ac5e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> @@ -49,7 +49,6 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
>                                 struct drm_gem_object **obj)
>  {
>         struct amdgpu_bo *robj;
> -       unsigned long max_size;
>         int r;
>
>         *obj = NULL;
> @@ -58,17 +57,6 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
>                 alignment = PAGE_SIZE;
>         }
>
> -       if (!(initial_domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
> -               /* Maximum bo size is the unpinned gtt size since we use the gtt to
> -                * handle vram to system pool migrations.
> -                */
> -               max_size = adev->mc.gtt_size - adev->gart_pin_size;
> -               if (size > max_size) {
> -                       DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
> -                                 size >> 20, max_size >> 20);
> -                       return -ENOMEM;
> -               }
> -       }
>  retry:
>         r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
>                              flags, NULL, NULL, &robj);
> --
> 2.7.4
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 06/11] drm/amdgpu: use TTM values instead of MC values for the info queries
       [not found]     ` <1499075076-1851-6-git-send-email-deathsimple-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
@ 2017-07-06 16:19       ` Alex Deucher
  0 siblings, 0 replies; 30+ messages in thread
From: Alex Deucher @ 2017-07-06 16:19 UTC (permalink / raw)
  To: Christian König; +Cc: amd-gfx list

On Mon, Jul 3, 2017 at 5:44 AM, Christian König <deathsimple@vodafone.de> wrote:
> From: Christian König <christian.koenig@amd.com>
>
> Use the TTM values instead of the hardware config here.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
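
Minor note on the units: the TT manager size is in pages, so it gets scaled
by PAGE_SIZE before the pinned amount is subtracted.  A standalone sketch
with made-up numbers (assuming 4 KiB pages):

/* gtt_info_sketch.c - how the reported GTT sizes are derived here. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096  /* assumed CPU page size */

int main(void)
{
        uint64_t tt_manager_pages = 3ull << 20;   /* e.g. 3M pages = 12 GiB */
        uint64_t gart_pin_size    = 64ull << 20;  /* e.g. 64 MiB pinned */

        uint64_t total  = tt_manager_pages * PAGE_SIZE;
        uint64_t usable = total - gart_pin_size;

        printf("gtt total %llu MiB, usable %llu MiB, max allocation %llu MiB\n",
               (unsigned long long)(total >> 20),
               (unsigned long long)(usable >> 20),
               (unsigned long long)((usable * 3 / 4) >> 20));
        return 0;
}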

> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 10 ++++++----
>  1 file changed, 6 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> index 00ef2fc..7a8da32 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> @@ -484,7 +484,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
>                 vram_gtt.vram_size -= adev->vram_pin_size;
>                 vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size;
>                 vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
> -               vram_gtt.gtt_size  = adev->mc.gtt_size;
> +               vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
> +               vram_gtt.gtt_size *= PAGE_SIZE;
>                 vram_gtt.gtt_size -= adev->gart_pin_size;
>                 return copy_to_user(out, &vram_gtt,
>                                     min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
> @@ -509,9 +510,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
>                 mem.cpu_accessible_vram.max_allocation =
>                         mem.cpu_accessible_vram.usable_heap_size * 3 / 4;
>
> -               mem.gtt.total_heap_size = adev->mc.gtt_size;
> -               mem.gtt.usable_heap_size =
> -                       adev->mc.gtt_size - adev->gart_pin_size;
> +               mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
> +               mem.gtt.total_heap_size *= PAGE_SIZE;
> +               mem.gtt.usable_heap_size = mem.gtt.total_heap_size
> +                       - adev->gart_pin_size;
>                 mem.gtt.heap_usage = atomic64_read(&adev->gtt_usage);
>                 mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
>
> --
> 2.7.4
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 07/11] drm/amdgpu: rename GART to SYSVM
       [not found]         ` <61109920-9d05-cb27-67b3-51a1b46b15bc-5C7GfCeVMHo@public.gmane.org>
  2017-07-05  0:57           ` Michel Dänzer
@ 2017-07-06 16:22           ` Alex Deucher
  1 sibling, 0 replies; 30+ messages in thread
From: Alex Deucher @ 2017-07-06 16:22 UTC (permalink / raw)
  To: Felix Kuehling; +Cc: Christian König, amd-gfx list

On Tue, Jul 4, 2017 at 5:11 PM, Felix Kuehling <felix.kuehling@amd.com> wrote:
> I'm afraid this will lead to more confusion when talking to different
> teams in AMD. At least to me "GART" was always understood to be the
> system-wide address translation table (VMID-0). The remnant from the
> pre-GPUVM days. You're now calling that SYSVM, while all GPU-accessible
> system memory is still called GTT.

I agree.  gart makes more sense to me.  I think we should use gtt for
the ttm pool and gart for the sysvm.

Alex

>
> I think it would make more sense to call general GPU-mapped system
> memory "SYSVM" or "GPUVM", and only call the global VMID-0 page table
> "GART".
>
> But of course that would require changes in TTM and all TTM drivers, so
> it's not a realistic option.
>
> Regards,
>   Felix
>
>
> On 17-07-03 05:44 AM, Christian König wrote:
>> From: Christian König <christian.koenig@amd.com>
>>
>> Just mass rename all names related to the hardware GART/GTT functions to SYSVM.
>>
>> The name of symbols related to the TTM TT domain stay the same.
>>
>> This should improve the distinction between the two.
>>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
>> ---
>>  drivers/gpu/drm/amd/amdgpu/Kconfig         |   9 +-
>>  drivers/gpu/drm/amd/amdgpu/Makefile        |   2 +-
>>  drivers/gpu/drm/amd/amdgpu/amdgpu.h        |  58 ++--
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  48 ++--
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c   | 423 -----------------------------
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c    |   2 +-
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c  | 423 +++++++++++++++++++++++++++++
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_test.c   |  84 +++---
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c    |  76 +++---
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h    |   2 +-
>>  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c     |  30 +-
>>  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c      |   4 +-
>>  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c   |  16 +-
>>  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h   |   4 +-
>>  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c      |  66 ++---
>>  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c      |  70 ++---
>>  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c      |  70 ++---
>>  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c      |  66 ++---
>>  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c    |  16 +-
>>  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h    |   4 +-
>>  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c     |   4 +-
>>  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c      |   8 +-
>>  drivers/gpu/drm/amd/amdgpu/vce_v4_0.c      |   4 +-
>>  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c      |   8 +-
>>  24 files changed, 749 insertions(+), 748 deletions(-)
>>  delete mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
>>  create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
>> index e8af1f5..ebbac01 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/Kconfig
>> +++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
>> @@ -31,14 +31,15 @@ config DRM_AMDGPU_USERPTR
>>         This option selects CONFIG_MMU_NOTIFIER if it isn't already
>>         selected to enabled full userptr support.
>>
>> -config DRM_AMDGPU_GART_DEBUGFS
>> -     bool "Allow GART access through debugfs"
>> +config DRM_AMDGPU_SYSVM_DEBUGFS
>> +     bool "Allow SYSVM access through debugfs"
>>       depends on DRM_AMDGPU
>>       depends on DEBUG_FS
>>       default n
>>       help
>> -       Selecting this option creates a debugfs file to inspect the mapped
>> -       pages. Uses more memory for housekeeping, enable only for debugging.
>> +       Selecting this option creates a debugfs file to inspect the SYSVM
>> +       mapped pages. Uses more memory for housekeeping, enable only for
>> +       debugging.
>>
>>  source "drivers/gpu/drm/amd/acp/Kconfig"
>>  source "drivers/gpu/drm/amd/display/Kconfig"
>> diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
>> index 3661110..d80d49f 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/Makefile
>> +++ b/drivers/gpu/drm/amd/amdgpu/Makefile
>> @@ -22,7 +22,7 @@ amdgpu-y := amdgpu_drv.o
>>  # add KMS driver
>>  amdgpu-y += amdgpu_device.o amdgpu_kms.o \
>>       amdgpu_atombios.o atombios_crtc.o amdgpu_connectors.o \
>> -     atom.o amdgpu_fence.o amdgpu_ttm.o amdgpu_object.o amdgpu_gart.o \
>> +     atom.o amdgpu_fence.o amdgpu_ttm.o amdgpu_object.o amdgpu_sysvm.o \
>>       amdgpu_encoders.o amdgpu_display.o amdgpu_i2c.o \
>>       amdgpu_fb.o amdgpu_gem.o amdgpu_ring.o \
>>       amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o amdgpu_test.o \
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> index 4a2b33d..abe191f 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
>> @@ -305,7 +305,7 @@ struct amdgpu_vm_pte_funcs {
>>  };
>>
>>  /* provided by the gmc block */
>> -struct amdgpu_gart_funcs {
>> +struct amdgpu_sysvm_funcs {
>>       /* flush the vm tlb via mmio */
>>       void (*flush_gpu_tlb)(struct amdgpu_device *adev,
>>                             uint32_t vmid);
>> @@ -543,39 +543,39 @@ struct amdgpu_mc;
>>  #define AMDGPU_GPU_PAGE_SHIFT 12
>>  #define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
>>
>> -struct amdgpu_gart {
>> +struct amdgpu_sysvm {
>>       dma_addr_t                      table_addr;
>>       struct amdgpu_bo                *robj;
>>       void                            *ptr;
>>       unsigned                        num_gpu_pages;
>>       unsigned                        num_cpu_pages;
>>       unsigned                        table_size;
>> -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
>> +#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
>>       struct page                     **pages;
>>  #endif
>>       bool                            ready;
>>
>>       /* Asic default pte flags */
>> -     uint64_t                        gart_pte_flags;
>> +     uint64_t                        sysvm_pte_flags;
>>
>> -     const struct amdgpu_gart_funcs *gart_funcs;
>> +     const struct amdgpu_sysvm_funcs *sysvm_funcs;
>>  };
>>
>> -void amdgpu_gart_set_defaults(struct amdgpu_device *adev);
>> -int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
>> -void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
>> -int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
>> -void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
>> -int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
>> -void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
>> -int amdgpu_gart_init(struct amdgpu_device *adev);
>> -void amdgpu_gart_fini(struct amdgpu_device *adev);
>> -int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
>> +void amdgpu_sysvm_set_defaults(struct amdgpu_device *adev);
>> +int amdgpu_sysvm_table_ram_alloc(struct amdgpu_device *adev);
>> +void amdgpu_sysvm_table_ram_free(struct amdgpu_device *adev);
>> +int amdgpu_sysvm_table_vram_alloc(struct amdgpu_device *adev);
>> +void amdgpu_sysvm_table_vram_free(struct amdgpu_device *adev);
>> +int amdgpu_sysvm_table_vram_pin(struct amdgpu_device *adev);
>> +void amdgpu_sysvm_table_vram_unpin(struct amdgpu_device *adev);
>> +int amdgpu_sysvm_init(struct amdgpu_device *adev);
>> +void amdgpu_sysvm_fini(struct amdgpu_device *adev);
>> +int amdgpu_sysvm_unbind(struct amdgpu_device *adev, uint64_t offset,
>>                       int pages);
>> -int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
>> +int amdgpu_sysvm_map(struct amdgpu_device *adev, uint64_t offset,
>>                   int pages, dma_addr_t *dma_addr, uint64_t flags,
>>                   void *dst);
>> -int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
>> +int amdgpu_sysvm_bind(struct amdgpu_device *adev, uint64_t offset,
>>                    int pages, struct page **pagelist,
>>                    dma_addr_t *dma_addr, uint64_t flags);
>>  int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
>> @@ -604,15 +604,15 @@ struct amdgpu_mc {
>>        * about vram size near mc fb location */
>>       u64                     mc_vram_size;
>>       u64                     visible_vram_size;
>> -     u64                     gtt_size;
>> -     u64                     gtt_start;
>> -     u64                     gtt_end;
>> +     u64                     sysvm_size;
>> +     u64                     sysvm_start;
>> +     u64                     sysvm_end;
>>       u64                     vram_start;
>>       u64                     vram_end;
>>       unsigned                vram_width;
>>       u64                     real_vram_size;
>>       int                     vram_mtrr;
>> -     u64                     gtt_base_align;
>> +     u64                     sysvm_base_align;
>>       u64                     mc_mask;
>>       const struct firmware   *fw;    /* MC firmware */
>>       uint32_t                fw_version;
>> @@ -1575,7 +1575,7 @@ struct amdgpu_device {
>>
>>       /* MC */
>>       struct amdgpu_mc                mc;
>> -     struct amdgpu_gart              gart;
>> +     struct amdgpu_sysvm             sysvm;
>>       struct amdgpu_dummy_page        dummy_page;
>>       struct amdgpu_vm_manager        vm_manager;
>>       struct amdgpu_vmhub             vmhub[AMDGPU_MAX_VMHUBS];
>> @@ -1686,8 +1686,8 @@ struct amdgpu_device {
>>       struct list_head                shadow_list;
>>       struct mutex                    shadow_list_lock;
>>       /* link all gtt */
>> -     spinlock_t                      gtt_list_lock;
>> -     struct list_head                gtt_list;
>> +     spinlock_t                      sysvm_list_lock;
>> +     struct list_head                sysvm_list;
>>       /* keep an lru list of rings by HW IP */
>>       struct list_head                ring_lru_list;
>>       spinlock_t                      ring_lru_list_lock;
>> @@ -1835,13 +1835,13 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
>>  #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
>>  #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
>>  #define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
>> -#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
>> -#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
>> -#define amdgpu_gart_get_vm_pde(adev, addr) (adev)->gart.gart_funcs->get_vm_pde((adev), (addr))
>> +#define amdgpu_sysvm_flush_gpu_tlb(adev, vmid) (adev)->sysvm.sysvm_funcs->flush_gpu_tlb((adev), (vmid))
>> +#define amdgpu_sysvm_set_pte_pde(adev, pt, idx, addr, flags) (adev)->sysvm.sysvm_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
>> +#define amdgpu_sysvm_get_vm_pde(adev, addr) (adev)->sysvm.sysvm_funcs->get_vm_pde((adev), (addr))
>>  #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
>>  #define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
>>  #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
>> -#define amdgpu_vm_get_pte_flags(adev, flags) (adev)->gart.gart_funcs->get_vm_pte_flags((adev),(flags))
>> +#define amdgpu_vm_get_pte_flags(adev, flags) (adev)->sysvm.sysvm_funcs->get_vm_pte_flags((adev),(flags))
>>  #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
>>  #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
>>  #define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
>> @@ -1908,7 +1908,7 @@ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
>>  uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
>>                                struct ttm_mem_reg *mem);
>>  void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
>> -void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
>> +void amdgpu_sysvm_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
>>  void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
>>  int amdgpu_ttm_init(struct amdgpu_device *adev);
>>  void amdgpu_ttm_fini(struct amdgpu_device *adev);
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> index 5b1220f..46a82d3 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> @@ -667,39 +667,39 @@ void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64
>>  }
>>
>>  /**
>> - * amdgpu_gtt_location - try to find GTT location
>> + * amdgpu_sysvm_location - try to find SYSVM location
>>   * @adev: amdgpu device structure holding all necessary informations
>>   * @mc: memory controller structure holding memory informations
>>   *
>> - * Function will place try to place GTT before or after VRAM.
>> + * Function will place try to place SYSVM before or after VRAM.
>>   *
>> - * If GTT size is bigger than space left then we ajust GTT size.
>> + * If SYSVM size is bigger than space left then we ajust SYSVM size.
>>   * Thus function will never fails.
>>   *
>> - * FIXME: when reducing GTT size align new size on power of 2.
>> + * FIXME: when reducing SYSVM size align new size on power of 2.
>>   */
>> -void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
>> +void amdgpu_sysvm_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
>>  {
>>       u64 size_af, size_bf;
>>
>> -     size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
>> -     size_bf = mc->vram_start & ~mc->gtt_base_align;
>> +     size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->sysvm_base_align) & ~mc->sysvm_base_align;
>> +     size_bf = mc->vram_start & ~mc->sysvm_base_align;
>>       if (size_bf > size_af) {
>> -             if (mc->gtt_size > size_bf) {
>> -                     dev_warn(adev->dev, "limiting GTT\n");
>> -                     mc->gtt_size = size_bf;
>> +             if (mc->sysvm_size > size_bf) {
>> +                     dev_warn(adev->dev, "limiting SYSVM\n");
>> +                     mc->sysvm_size = size_bf;
>>               }
>> -             mc->gtt_start = 0;
>> +             mc->sysvm_start = 0;
>>       } else {
>> -             if (mc->gtt_size > size_af) {
>> -                     dev_warn(adev->dev, "limiting GTT\n");
>> -                     mc->gtt_size = size_af;
>> +             if (mc->sysvm_size > size_af) {
>> +                     dev_warn(adev->dev, "limiting SYSVM\n");
>> +                     mc->sysvm_size = size_af;
>>               }
>> -             mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
>> +             mc->sysvm_start = (mc->vram_end + 1 + mc->sysvm_base_align) & ~mc->sysvm_base_align;
>>       }
>> -     mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
>> -     dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
>> -                     mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
>> +     mc->sysvm_end = mc->sysvm_start + mc->sysvm_size - 1;
>> +     dev_info(adev->dev, "SYSVM: %lluM 0x%016llX - 0x%016llX\n",
>> +                     mc->sysvm_size >> 20, mc->sysvm_start, mc->sysvm_end);
>>  }
>>
>>  /*
>> @@ -1663,12 +1663,12 @@ static int amdgpu_init(struct amdgpu_device *adev)
>>
>>  static void amdgpu_fill_reset_magic(struct amdgpu_device *adev)
>>  {
>> -     memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
>> +     memcpy(adev->reset_magic, adev->sysvm.ptr, AMDGPU_RESET_MAGIC_NUM);
>>  }
>>
>>  static bool amdgpu_check_vram_lost(struct amdgpu_device *adev)
>>  {
>> -     return !!memcmp(adev->gart.ptr, adev->reset_magic,
>> +     return !!memcmp(adev->sysvm.ptr, adev->reset_magic,
>>                       AMDGPU_RESET_MAGIC_NUM);
>>  }
>>
>> @@ -2052,14 +2052,14 @@ int amdgpu_device_init(struct amdgpu_device *adev,
>>       adev->flags = flags;
>>       adev->asic_type = flags & AMD_ASIC_MASK;
>>       adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
>> -     adev->mc.gtt_size = 512 * 1024 * 1024;
>> +     adev->mc.sysvm_size = 512 * 1024 * 1024;
>>       adev->accel_working = false;
>>       adev->num_rings = 0;
>>       adev->mman.buffer_funcs = NULL;
>>       adev->mman.buffer_funcs_ring = NULL;
>>       adev->vm_manager.vm_pte_funcs = NULL;
>>       adev->vm_manager.vm_pte_num_rings = 0;
>> -     adev->gart.gart_funcs = NULL;
>> +     adev->sysvm.sysvm_funcs = NULL;
>>       adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
>>
>>       adev->smc_rreg = &amdgpu_invalid_rreg;
>> @@ -2110,8 +2110,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
>>       INIT_LIST_HEAD(&adev->shadow_list);
>>       mutex_init(&adev->shadow_list_lock);
>>
>> -     INIT_LIST_HEAD(&adev->gtt_list);
>> -     spin_lock_init(&adev->gtt_list_lock);
>> +     INIT_LIST_HEAD(&adev->sysvm_list);
>> +     spin_lock_init(&adev->sysvm_list_lock);
>>
>>       INIT_LIST_HEAD(&adev->ring_lru_list);
>>       spin_lock_init(&adev->ring_lru_list_lock);
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
>> deleted file mode 100644
>> index c808388..0000000
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
>> +++ /dev/null
>> @@ -1,423 +0,0 @@
>> -/*
>> - * Copyright 2008 Advanced Micro Devices, Inc.
>> - * Copyright 2008 Red Hat Inc.
>> - * Copyright 2009 Jerome Glisse.
>> - *
>> - * Permission is hereby granted, free of charge, to any person obtaining a
>> - * copy of this software and associated documentation files (the "Software"),
>> - * to deal in the Software without restriction, including without limitation
>> - * the rights to use, copy, modify, merge, publish, distribute, sublicense,
>> - * and/or sell copies of the Software, and to permit persons to whom the
>> - * Software is furnished to do so, subject to the following conditions:
>> - *
>> - * The above copyright notice and this permission notice shall be included in
>> - * all copies or substantial portions of the Software.
>> - *
>> - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
>> - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
>> - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
>> - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
>> - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
>> - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
>> - * OTHER DEALINGS IN THE SOFTWARE.
>> - *
>> - * Authors: Dave Airlie
>> - *          Alex Deucher
>> - *          Jerome Glisse
>> - */
>> -#include <drm/drmP.h>
>> -#include <drm/amdgpu_drm.h>
>> -#include "amdgpu.h"
>> -
>> -/*
>> - * GART
>> - * The GART (Graphics Aperture Remapping Table) is an aperture
>> - * in the GPU's address space.  System pages can be mapped into
>> - * the aperture and look like contiguous pages from the GPU's
>> - * perspective.  A page table maps the pages in the aperture
>> - * to the actual backing pages in system memory.
>> - *
>> - * Radeon GPUs support both an internal GART, as described above,
>> - * and AGP.  AGP works similarly, but the GART table is configured
>> - * and maintained by the northbridge rather than the driver.
>> - * Radeon hw has a separate AGP aperture that is programmed to
>> - * point to the AGP aperture provided by the northbridge and the
>> - * requests are passed through to the northbridge aperture.
>> - * Both AGP and internal GART can be used at the same time, however
>> - * that is not currently supported by the driver.
>> - *
>> - * This file handles the common internal GART management.
>> - */
>> -
>> -/*
>> - * Common GART table functions.
>> - */
>> -
>> -/**
>> - * amdgpu_gart_set_defaults - set the default gtt_size
>> - *
>> - * @adev: amdgpu_device pointer
>> - *
>> - * Set the default gtt_size based on parameters and available VRAM.
>> - */
>> -void amdgpu_gart_set_defaults(struct amdgpu_device *adev)
>> -{
>> -     /* unless the user had overridden it, set the gart
>> -      * size equal to the 1024 or vram, whichever is larger.
>> -      */
>> -     if (amdgpu_gart_size == -1)
>> -             adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
>> -                                     adev->mc.mc_vram_size);
>> -     else
>> -             adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
>> -}
>> -
>> -/**
>> - * amdgpu_gart_table_ram_alloc - allocate system ram for gart page table
>> - *
>> - * @adev: amdgpu_device pointer
>> - *
>> - * Allocate system memory for GART page table
>> - * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
>> - * gart table to be in system memory.
>> - * Returns 0 for success, -ENOMEM for failure.
>> - */
>> -int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev)
>> -{
>> -     void *ptr;
>> -
>> -     ptr = pci_alloc_consistent(adev->pdev, adev->gart.table_size,
>> -                                &adev->gart.table_addr);
>> -     if (ptr == NULL) {
>> -             return -ENOMEM;
>> -     }
>> -#ifdef CONFIG_X86
>> -     if (0) {
>> -             set_memory_uc((unsigned long)ptr,
>> -                           adev->gart.table_size >> PAGE_SHIFT);
>> -     }
>> -#endif
>> -     adev->gart.ptr = ptr;
>> -     memset((void *)adev->gart.ptr, 0, adev->gart.table_size);
>> -     return 0;
>> -}
>> -
>> -/**
>> - * amdgpu_gart_table_ram_free - free system ram for gart page table
>> - *
>> - * @adev: amdgpu_device pointer
>> - *
>> - * Free system memory for GART page table
>> - * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
>> - * gart table to be in system memory.
>> - */
>> -void amdgpu_gart_table_ram_free(struct amdgpu_device *adev)
>> -{
>> -     if (adev->gart.ptr == NULL) {
>> -             return;
>> -     }
>> -#ifdef CONFIG_X86
>> -     if (0) {
>> -             set_memory_wb((unsigned long)adev->gart.ptr,
>> -                           adev->gart.table_size >> PAGE_SHIFT);
>> -     }
>> -#endif
>> -     pci_free_consistent(adev->pdev, adev->gart.table_size,
>> -                         (void *)adev->gart.ptr,
>> -                         adev->gart.table_addr);
>> -     adev->gart.ptr = NULL;
>> -     adev->gart.table_addr = 0;
>> -}
>> -
>> -/**
>> - * amdgpu_gart_table_vram_alloc - allocate vram for gart page table
>> - *
>> - * @adev: amdgpu_device pointer
>> - *
>> - * Allocate video memory for GART page table
>> - * (pcie r4xx, r5xx+).  These asics require the
>> - * gart table to be in video memory.
>> - * Returns 0 for success, error for failure.
>> - */
>> -int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
>> -{
>> -     int r;
>> -
>> -     if (adev->gart.robj == NULL) {
>> -             r = amdgpu_bo_create(adev, adev->gart.table_size,
>> -                                  PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
>> -                                  AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
>> -                                  AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
>> -                                  NULL, NULL, &adev->gart.robj);
>> -             if (r) {
>> -                     return r;
>> -             }
>> -     }
>> -     return 0;
>> -}
>> -
>> -/**
>> - * amdgpu_gart_table_vram_pin - pin gart page table in vram
>> - *
>> - * @adev: amdgpu_device pointer
>> - *
>> - * Pin the GART page table in vram so it will not be moved
>> - * by the memory manager (pcie r4xx, r5xx+).  These asics require the
>> - * gart table to be in video memory.
>> - * Returns 0 for success, error for failure.
>> - */
>> -int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
>> -{
>> -     uint64_t gpu_addr;
>> -     int r;
>> -
>> -     r = amdgpu_bo_reserve(adev->gart.robj, false);
>> -     if (unlikely(r != 0))
>> -             return r;
>> -     r = amdgpu_bo_pin(adev->gart.robj,
>> -                             AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
>> -     if (r) {
>> -             amdgpu_bo_unreserve(adev->gart.robj);
>> -             return r;
>> -     }
>> -     r = amdgpu_bo_kmap(adev->gart.robj, &adev->gart.ptr);
>> -     if (r)
>> -             amdgpu_bo_unpin(adev->gart.robj);
>> -     amdgpu_bo_unreserve(adev->gart.robj);
>> -     adev->gart.table_addr = gpu_addr;
>> -     return r;
>> -}
>> -
>> -/**
>> - * amdgpu_gart_table_vram_unpin - unpin gart page table in vram
>> - *
>> - * @adev: amdgpu_device pointer
>> - *
>> - * Unpin the GART page table in vram (pcie r4xx, r5xx+).
>> - * These asics require the gart table to be in video memory.
>> - */
>> -void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
>> -{
>> -     int r;
>> -
>> -     if (adev->gart.robj == NULL) {
>> -             return;
>> -     }
>> -     r = amdgpu_bo_reserve(adev->gart.robj, true);
>> -     if (likely(r == 0)) {
>> -             amdgpu_bo_kunmap(adev->gart.robj);
>> -             amdgpu_bo_unpin(adev->gart.robj);
>> -             amdgpu_bo_unreserve(adev->gart.robj);
>> -             adev->gart.ptr = NULL;
>> -     }
>> -}
>> -
>> -/**
>> - * amdgpu_gart_table_vram_free - free gart page table vram
>> - *
>> - * @adev: amdgpu_device pointer
>> - *
>> - * Free the video memory used for the GART page table
>> - * (pcie r4xx, r5xx+).  These asics require the gart table to
>> - * be in video memory.
>> - */
>> -void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
>> -{
>> -     if (adev->gart.robj == NULL) {
>> -             return;
>> -     }
>> -     amdgpu_bo_unref(&adev->gart.robj);
>> -}
>> -
>> -/*
>> - * Common gart functions.
>> - */
>> -/**
>> - * amdgpu_gart_unbind - unbind pages from the gart page table
>> - *
>> - * @adev: amdgpu_device pointer
>> - * @offset: offset into the GPU's gart aperture
>> - * @pages: number of pages to unbind
>> - *
>> - * Unbinds the requested pages from the gart page table and
>> - * replaces them with the dummy page (all asics).
>> - * Returns 0 for success, -EINVAL for failure.
>> - */
>> -int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
>> -                     int pages)
>> -{
>> -     unsigned t;
>> -     unsigned p;
>> -     int i, j;
>> -     u64 page_base;
>> -     /* Starting from VEGA10, system bit must be 0 to mean invalid. */
>> -     uint64_t flags = 0;
>> -
>> -     if (!adev->gart.ready) {
>> -             WARN(1, "trying to unbind memory from uninitialized GART !\n");
>> -             return -EINVAL;
>> -     }
>> -
>> -     t = offset / AMDGPU_GPU_PAGE_SIZE;
>> -     p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
>> -     for (i = 0; i < pages; i++, p++) {
>> -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
>> -             adev->gart.pages[p] = NULL;
>> -#endif
>> -             page_base = adev->dummy_page.addr;
>> -             if (!adev->gart.ptr)
>> -                     continue;
>> -
>> -             for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
>> -                     amdgpu_gart_set_pte_pde(adev, adev->gart.ptr,
>> -                                             t, page_base, flags);
>> -                     page_base += AMDGPU_GPU_PAGE_SIZE;
>> -             }
>> -     }
>> -     mb();
>> -     amdgpu_gart_flush_gpu_tlb(adev, 0);
>> -     return 0;
>> -}
>> -
>> -/**
>> - * amdgpu_gart_map - map dma_addresses into GART entries
>> - *
>> - * @adev: amdgpu_device pointer
>> - * @offset: offset into the GPU's gart aperture
>> - * @pages: number of pages to bind
>> - * @dma_addr: DMA addresses of pages
>> - *
>> - * Map the dma_addresses into GART entries (all asics).
>> - * Returns 0 for success, -EINVAL for failure.
>> - */
>> -int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
>> -                 int pages, dma_addr_t *dma_addr, uint64_t flags,
>> -                 void *dst)
>> -{
>> -     uint64_t page_base;
>> -     unsigned i, j, t;
>> -
>> -     if (!adev->gart.ready) {
>> -             WARN(1, "trying to bind memory to uninitialized GART !\n");
>> -             return -EINVAL;
>> -     }
>> -
>> -     t = offset / AMDGPU_GPU_PAGE_SIZE;
>> -
>> -     for (i = 0; i < pages; i++) {
>> -             page_base = dma_addr[i];
>> -             for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
>> -                     amdgpu_gart_set_pte_pde(adev, dst, t, page_base, flags);
>> -                     page_base += AMDGPU_GPU_PAGE_SIZE;
>> -             }
>> -     }
>> -     return 0;
>> -}
>> -
>> -/**
>> - * amdgpu_gart_bind - bind pages into the gart page table
>> - *
>> - * @adev: amdgpu_device pointer
>> - * @offset: offset into the GPU's gart aperture
>> - * @pages: number of pages to bind
>> - * @pagelist: pages to bind
>> - * @dma_addr: DMA addresses of pages
>> - *
>> - * Binds the requested pages to the gart page table
>> - * (all asics).
>> - * Returns 0 for success, -EINVAL for failure.
>> - */
>> -int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
>> -                  int pages, struct page **pagelist, dma_addr_t *dma_addr,
>> -                  uint64_t flags)
>> -{
>> -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
>> -     unsigned i,t,p;
>> -#endif
>> -     int r;
>> -
>> -     if (!adev->gart.ready) {
>> -             WARN(1, "trying to bind memory to uninitialized GART !\n");
>> -             return -EINVAL;
>> -     }
>> -
>> -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
>> -     t = offset / AMDGPU_GPU_PAGE_SIZE;
>> -     p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
>> -     for (i = 0; i < pages; i++, p++)
>> -             adev->gart.pages[p] = pagelist[i];
>> -#endif
>> -
>> -     if (adev->gart.ptr) {
>> -             r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
>> -                         adev->gart.ptr);
>> -             if (r)
>> -                     return r;
>> -     }
>> -
>> -     mb();
>> -     amdgpu_gart_flush_gpu_tlb(adev, 0);
>> -     return 0;
>> -}
>> -
>> -/**
>> - * amdgpu_gart_init - init the driver info for managing the gart
>> - *
>> - * @adev: amdgpu_device pointer
>> - *
>> - * Allocate the dummy page and init the gart driver info (all asics).
>> - * Returns 0 for success, error for failure.
>> - */
>> -int amdgpu_gart_init(struct amdgpu_device *adev)
>> -{
>> -     int r;
>> -
>> -     if (adev->dummy_page.page)
>> -             return 0;
>> -
>> -     /* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
>> -     if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) {
>> -             DRM_ERROR("Page size is smaller than GPU page size!\n");
>> -             return -EINVAL;
>> -     }
>> -     r = amdgpu_dummy_page_init(adev);
>> -     if (r)
>> -             return r;
>> -     /* Compute table size */
>> -     adev->gart.num_cpu_pages = adev->mc.gtt_size / PAGE_SIZE;
>> -     adev->gart.num_gpu_pages = adev->mc.gtt_size / AMDGPU_GPU_PAGE_SIZE;
>> -     DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
>> -              adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
>> -
>> -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
>> -     /* Allocate pages table */
>> -     adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages);
>> -     if (adev->gart.pages == NULL) {
>> -             amdgpu_gart_fini(adev);
>> -             return -ENOMEM;
>> -     }
>> -#endif
>> -
>> -     return 0;
>> -}
>> -
>> -/**
>> - * amdgpu_gart_fini - tear down the driver info for managing the gart
>> - *
>> - * @adev: amdgpu_device pointer
>> - *
>> - * Tear down the gart driver info and free the dummy page (all asics).
>> - */
>> -void amdgpu_gart_fini(struct amdgpu_device *adev)
>> -{
>> -     if (adev->gart.ready) {
>> -             /* unbind pages */
>> -             amdgpu_gart_unbind(adev, 0, adev->gart.num_cpu_pages);
>> -     }
>> -     adev->gart.ready = false;
>> -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
>> -     vfree(adev->gart.pages);
>> -     adev->gart.pages = NULL;
>> -#endif
>> -     amdgpu_dummy_page_fini(adev);
>> -}
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>> index 4510627..73a1c64 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
>> @@ -82,7 +82,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
>>       if (r)
>>               kfree(*job);
>>       else
>> -             (*job)->vm_pd_addr = adev->gart.table_addr;
>> +             (*job)->vm_pd_addr = adev->sysvm.table_addr;
>>
>>       return r;
>>  }
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c
>> new file mode 100644
>> index 0000000..50fc8d7
>> --- /dev/null
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c
>> @@ -0,0 +1,423 @@
>> +/*
>> + * Copyright 2008 Advanced Micro Devices, Inc.
>> + * Copyright 2008 Red Hat Inc.
>> + * Copyright 2009 Jerome Glisse.
>> + *
>> + * Permission is hereby granted, free of charge, to any person obtaining a
>> + * copy of this software and associated documentation files (the "Software"),
>> + * to deal in the Software without restriction, including without limitation
>> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
>> + * and/or sell copies of the Software, and to permit persons to whom the
>> + * Software is furnished to do so, subject to the following conditions:
>> + *
>> + * The above copyright notice and this permission notice shall be included in
>> + * all copies or substantial portions of the Software.
>> + *
>> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
>> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
>> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
>> + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
>> + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
>> + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
>> + * OTHER DEALINGS IN THE SOFTWARE.
>> + *
>> + * Authors: Dave Airlie
>> + *          Alex Deucher
>> + *          Jerome Glisse
>> + */
>> +#include <drm/drmP.h>
>> +#include <drm/amdgpu_drm.h>
>> +#include "amdgpu.h"
>> +
>> +/*
>> + * SYSVM
>> + * The system VM (previously called GART) is an aperture
>> + * in the GPU's address space.  System pages can be mapped into
>> + * the aperture and look like contiguous pages from the GPU's
>> + * perspective.  A page table maps the pages in the aperture
>> + * to the actual backing pages in system memory.
>> + *
>> + * Radeon GPUs support both an internal SYSVM based GART, as described above,
>> + * and AGP.  AGP works similarly, but the GART table is configured
>> + * and maintained by the northbridge rather than the driver.
>> + * Radeon hw has a separate AGP aperture that is programmed to
>> + * point to the AGP aperture provided by the northbridge and the
>> + * requests are passed through to the northbridge aperture.
>> + * Both AGP and internal GART can be used at the same time, however
>> + * that is not currently supported by the driver.
>> + *
>> + * This file handles the common internal SYSVM management.
>> + */
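
Side note, not part of the patch: the helpers below all use the same index
math, where an offset into the aperture picks the first page table entry and
each CPU page covers PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE entries. A tiny
standalone sketch, with 4 KiB pages on both sides assumed purely for
illustration:

#include <stdio.h>
#include <stdint.h>

#define AMDGPU_GPU_PAGE_SIZE 4096u   /* assumed GPU page size, example only */
#define CPU_PAGE_SIZE        4096u   /* assumed kernel PAGE_SIZE (x86), example only */

int main(void)
{
	uint64_t offset = 2ull << 20;                  /* 2 MiB into the aperture */
	unsigned first_pte = offset / AMDGPU_GPU_PAGE_SIZE;                /* 512 */
	unsigned ptes_per_cpu_page = CPU_PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE; /* 1 */

	printf("first PTE index %u, PTEs per CPU page %u\n",
	       first_pte, ptes_per_cpu_page);
	return 0;
}
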
>> +
>> +/*
>> + * Common SYSVM table functions.
>> + */
>> +
>> +/**
>> + * amdgpu_sysvm_set_defaults - set the default sysvm_size
>> + *
>> + * @adev: amdgpu_device pointer
>> + *
>> + * Set the default sysvm_size based on parameters and available VRAM.
>> + */
>> +void amdgpu_sysvm_set_defaults(struct amdgpu_device *adev)
>> +{
>> +     /* unless the user has overridden it, set the sysvm size
>> +      * to the default GTT size or the VRAM size, whichever is larger.
>> +      */
>> +     if (amdgpu_gart_size == -1)
>> +             adev->mc.sysvm_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
>> +                                     adev->mc.mc_vram_size);
>> +     else
>> +             adev->mc.sysvm_size = (uint64_t)amdgpu_gart_size << 20;
>> +}
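
Just to make the sizing rule above concrete (all numbers are made up; the
default below is only a placeholder, not necessarily the driver's real
AMDGPU_DEFAULT_GTT_SIZE_MB):

#include <stdio.h>
#include <stdint.h>

#define DEFAULT_GTT_SIZE_MB 3072		/* assumed value, example only */
static int64_t gart_size_param = -1;		/* amdgpu.gart_size, -1 = auto */

int main(void)
{
	uint64_t vram = 8ull << 30;		/* pretend 8 GiB of VRAM */
	uint64_t dflt = (uint64_t)DEFAULT_GTT_SIZE_MB << 20;
	uint64_t sysvm;

	if (gart_size_param == -1)
		sysvm = dflt > vram ? dflt : vram;	/* max(default, VRAM) */
	else
		sysvm = (uint64_t)gart_size_param << 20; /* explicit size in MiB */

	printf("sysvm_size = %llu MiB\n", (unsigned long long)(sysvm >> 20));
	return 0;
}
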
>> +
>> +/**
>> + * amdgpu_sysvm_table_ram_alloc - allocate system ram for gart page table
>> + *
>> + * @adev: amdgpu_device pointer
>> + *
>> + * Allocate system memory for SYSVM page table
>> + * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
>> + * gart table to be in system memory.
>> + * Returns 0 for success, -ENOMEM for failure.
>> + */
>> +int amdgpu_sysvm_table_ram_alloc(struct amdgpu_device *adev)
>> +{
>> +     void *ptr;
>> +
>> +     ptr = pci_alloc_consistent(adev->pdev, adev->sysvm.table_size,
>> +                                &adev->sysvm.table_addr);
>> +     if (ptr == NULL) {
>> +             return -ENOMEM;
>> +     }
>> +#ifdef CONFIG_X86
>> +     if (0) {
>> +             set_memory_uc((unsigned long)ptr,
>> +                           adev->sysvm.table_size >> PAGE_SHIFT);
>> +     }
>> +#endif
>> +     adev->sysvm.ptr = ptr;
>> +     memset((void *)adev->sysvm.ptr, 0, adev->sysvm.table_size);
>> +     return 0;
>> +}
>> +
>> +/**
>> + * amdgpu_sysvm_table_ram_free - free system ram for gart page table
>> + *
>> + * @adev: amdgpu_device pointer
>> + *
>> + * Free system memory for SYSVM page table
>> + * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
>> + * gart table to be in system memory.
>> + */
>> +void amdgpu_sysvm_table_ram_free(struct amdgpu_device *adev)
>> +{
>> +     if (adev->sysvm.ptr == NULL) {
>> +             return;
>> +     }
>> +#ifdef CONFIG_X86
>> +     if (0) {
>> +             set_memory_wb((unsigned long)adev->sysvm.ptr,
>> +                           adev->sysvm.table_size >> PAGE_SHIFT);
>> +     }
>> +#endif
>> +     pci_free_consistent(adev->pdev, adev->sysvm.table_size,
>> +                         (void *)adev->sysvm.ptr,
>> +                         adev->sysvm.table_addr);
>> +     adev->sysvm.ptr = NULL;
>> +     adev->sysvm.table_addr = 0;
>> +}
>> +
>> +/**
>> + * amdgpu_sysvm_table_vram_alloc - allocate vram for gart page table
>> + *
>> + * @adev: amdgpu_device pointer
>> + *
>> + * Allocate video memory for SYSVM page table
>> + * (pcie r4xx, r5xx+).  These asics require the
>> + * gart table to be in video memory.
>> + * Returns 0 for success, error for failure.
>> + */
>> +int amdgpu_sysvm_table_vram_alloc(struct amdgpu_device *adev)
>> +{
>> +     int r;
>> +
>> +     if (adev->sysvm.robj == NULL) {
>> +             r = amdgpu_bo_create(adev, adev->sysvm.table_size,
>> +                                  PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
>> +                                  AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
>> +                                  AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
>> +                                  NULL, NULL, &adev->sysvm.robj);
>> +             if (r) {
>> +                     return r;
>> +             }
>> +     }
>> +     return 0;
>> +}
>> +
>> +/**
>> + * amdgpu_sysvm_table_vram_pin - pin gart page table in vram
>> + *
>> + * @adev: amdgpu_device pointer
>> + *
>> + * Pin the SYSVM page table in vram so it will not be moved
>> + * by the memory manager (pcie r4xx, r5xx+).  These asics require the
>> + * gart table to be in video memory.
>> + * Returns 0 for success, error for failure.
>> + */
>> +int amdgpu_sysvm_table_vram_pin(struct amdgpu_device *adev)
>> +{
>> +     uint64_t gpu_addr;
>> +     int r;
>> +
>> +     r = amdgpu_bo_reserve(adev->sysvm.robj, false);
>> +     if (unlikely(r != 0))
>> +             return r;
>> +     r = amdgpu_bo_pin(adev->sysvm.robj,
>> +                             AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
>> +     if (r) {
>> +             amdgpu_bo_unreserve(adev->sysvm.robj);
>> +             return r;
>> +     }
>> +     r = amdgpu_bo_kmap(adev->sysvm.robj, &adev->sysvm.ptr);
>> +     if (r)
>> +             amdgpu_bo_unpin(adev->sysvm.robj);
>> +     amdgpu_bo_unreserve(adev->sysvm.robj);
>> +     adev->sysvm.table_addr = gpu_addr;
>> +     return r;
>> +}
>> +
>> +/**
>> + * amdgpu_sysvm_table_vram_unpin - unpin gart page table in vram
>> + *
>> + * @adev: amdgpu_device pointer
>> + *
>> + * Unpin the GART page table in vram (pcie r4xx, r5xx+).
>> + * These asics require the gart table to be in video memory.
>> + */
>> +void amdgpu_sysvm_table_vram_unpin(struct amdgpu_device *adev)
>> +{
>> +     int r;
>> +
>> +     if (adev->sysvm.robj == NULL) {
>> +             return;
>> +     }
>> +     r = amdgpu_bo_reserve(adev->sysvm.robj, true);
>> +     if (likely(r == 0)) {
>> +             amdgpu_bo_kunmap(adev->sysvm.robj);
>> +             amdgpu_bo_unpin(adev->sysvm.robj);
>> +             amdgpu_bo_unreserve(adev->sysvm.robj);
>> +             adev->sysvm.ptr = NULL;
>> +     }
>> +}
>> +
>> +/**
>> + * amdgpu_sysvm_table_vram_free - free gart page table vram
>> + *
>> + * @adev: amdgpu_device pointer
>> + *
>> + * Free the video memory used for the GART page table
>> + * (pcie r4xx, r5xx+).  These asics require the gart table to
>> + * be in video memory.
>> + */
>> +void amdgpu_sysvm_table_vram_free(struct amdgpu_device *adev)
>> +{
>> +     if (adev->sysvm.robj == NULL) {
>> +             return;
>> +     }
>> +     amdgpu_bo_unref(&adev->sysvm.robj);
>> +}
>> +
>> +/*
>> + * Common gart functions.
>> + */
>> +/**
>> + * amdgpu_sysvm_unbind - unbind pages from the gart page table
>> + *
>> + * @adev: amdgpu_device pointer
>> + * @offset: offset into the GPU's gart aperture
>> + * @pages: number of pages to unbind
>> + *
>> + * Unbinds the requested pages from the gart page table and
>> + * replaces them with the dummy page (all asics).
>> + * Returns 0 for success, -EINVAL for failure.
>> + */
>> +int amdgpu_sysvm_unbind(struct amdgpu_device *adev, uint64_t offset,
>> +                     int pages)
>> +{
>> +     unsigned t;
>> +     unsigned p;
>> +     int i, j;
>> +     u64 page_base;
>> +     /* Starting from VEGA10, system bit must be 0 to mean invalid. */
>> +     uint64_t flags = 0;
>> +
>> +     if (!adev->sysvm.ready) {
>> +             WARN(1, "trying to unbind memory from uninitialized GART !\n");
>> +             return -EINVAL;
>> +     }
>> +
>> +     t = offset / AMDGPU_GPU_PAGE_SIZE;
>> +     p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
>> +     for (i = 0; i < pages; i++, p++) {
>> +#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
>> +             adev->sysvm.pages[p] = NULL;
>> +#endif
>> +             page_base = adev->dummy_page.addr;
>> +             if (!adev->sysvm.ptr)
>> +                     continue;
>> +
>> +             for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
>> +                     amdgpu_sysvm_set_pte_pde(adev, adev->sysvm.ptr,
>> +                                             t, page_base, flags);
>> +                     page_base += AMDGPU_GPU_PAGE_SIZE;
>> +             }
>> +     }
>> +     mb();
>> +     amdgpu_sysvm_flush_gpu_tlb(adev, 0);
>> +     return 0;
>> +}
>> +
>> +/**
>> + * amdgpu_sysvm_map - map dma_addresses into GART entries
>> + *
>> + * @adev: amdgpu_device pointer
>> + * @offset: offset into the GPU's gart aperture
>> + * @pages: number of pages to bind
>> + * @dma_addr: DMA addresses of pages
>> + *
>> + * Map the dma_addresses into GART entries (all asics).
>> + * Returns 0 for success, -EINVAL for failure.
>> + */
>> +int amdgpu_sysvm_map(struct amdgpu_device *adev, uint64_t offset,
>> +                 int pages, dma_addr_t *dma_addr, uint64_t flags,
>> +                 void *dst)
>> +{
>> +     uint64_t page_base;
>> +     unsigned i, j, t;
>> +
>> +     if (!adev->sysvm.ready) {
>> +             WARN(1, "trying to bind memory to uninitialized GART !\n");
>> +             return -EINVAL;
>> +     }
>> +
>> +     t = offset / AMDGPU_GPU_PAGE_SIZE;
>> +
>> +     for (i = 0; i < pages; i++) {
>> +             page_base = dma_addr[i];
>> +             for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
>> +                     amdgpu_sysvm_set_pte_pde(adev, dst, t, page_base, flags);
>> +                     page_base += AMDGPU_GPU_PAGE_SIZE;
>> +             }
>> +     }
>> +     return 0;
>> +}
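
What makes amdgpu_sysvm_map() above interesting is that dst is caller
supplied, so the PTEs can be generated into scratch memory (for example space
set aside at the end of a command buffer, as amdgpu_ttm.c does further down)
rather than the live table. A rough userspace sketch of that pattern, with
fake types and a fake flag value, illustration only:

#include <stdio.h>
#include <stdint.h>

/* Mimics the loop in amdgpu_sysvm_map(): one entry per GPU page, written
 * wherever the caller points dst (here assuming GPU page == CPU page, so
 * there is no inner loop). */
static void map_into(uint64_t *dst, unsigned first_pte, const uint64_t *dma_addr,
		     int pages, uint64_t flags)
{
	unsigned t = first_pte;

	for (int i = 0; i < pages; i++)
		dst[t++] = dma_addr[i] | flags;	/* stand-in for set_pte_pde() */
}

int main(void)
{
	uint64_t staging[4] = { 0 };		/* e.g. tail of an indirect buffer */
	uint64_t dma[2] = { 0x100000, 0x200000 };

	map_into(staging, 0, dma, 2, 0x1 /* pretend "valid" flag */);
	printf("pte[0]=0x%llx pte[1]=0x%llx\n",
	       (unsigned long long)staging[0], (unsigned long long)staging[1]);
	return 0;
}
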
>> +
>> +/**
>> + * amdgpu_sysvm_bind - bind pages into the gart page table
>> + *
>> + * @adev: amdgpu_device pointer
>> + * @offset: offset into the GPU's gart aperture
>> + * @pages: number of pages to bind
>> + * @pagelist: pages to bind
>> + * @dma_addr: DMA addresses of pages
>> + *
>> + * Binds the requested pages to the gart page table
>> + * (all asics).
>> + * Returns 0 for success, -EINVAL for failure.
>> + */
>> +int amdgpu_sysvm_bind(struct amdgpu_device *adev, uint64_t offset,
>> +                  int pages, struct page **pagelist, dma_addr_t *dma_addr,
>> +                  uint64_t flags)
>> +{
>> +#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
>> +     unsigned i,t,p;
>> +#endif
>> +     int r;
>> +
>> +     if (!adev->sysvm.ready) {
>> +             WARN(1, "trying to bind memory to uninitialized GART !\n");
>> +             return -EINVAL;
>> +     }
>> +
>> +#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
>> +     t = offset / AMDGPU_GPU_PAGE_SIZE;
>> +     p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
>> +     for (i = 0; i < pages; i++, p++)
>> +             adev->sysvm.pages[p] = pagelist[i];
>> +#endif
>> +
>> +     if (adev->sysvm.ptr) {
>> +             r = amdgpu_sysvm_map(adev, offset, pages, dma_addr, flags,
>> +                         adev->sysvm.ptr);
>> +             if (r)
>> +                     return r;
>> +     }
>> +
>> +     mb();
>> +     amdgpu_sysvm_flush_gpu_tlb(adev, 0);
>> +     return 0;
>> +}
>> +
>> +/**
>> + * amdgpu_sysvm_init - init the driver info for managing the gart
>> + *
>> + * @adev: amdgpu_device pointer
>> + *
>> + * Allocate the dummy page and init the gart driver info (all asics).
>> + * Returns 0 for success, error for failure.
>> + */
>> +int amdgpu_sysvm_init(struct amdgpu_device *adev)
>> +{
>> +     int r;
>> +
>> +     if (adev->dummy_page.page)
>> +             return 0;
>> +
>> +     /* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
>> +     if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) {
>> +             DRM_ERROR("Page size is smaller than GPU page size!\n");
>> +             return -EINVAL;
>> +     }
>> +     r = amdgpu_dummy_page_init(adev);
>> +     if (r)
>> +             return r;
>> +     /* Compute table size */
>> +     adev->sysvm.num_cpu_pages = adev->mc.sysvm_size / PAGE_SIZE;
>> +     adev->sysvm.num_gpu_pages = adev->mc.sysvm_size / AMDGPU_GPU_PAGE_SIZE;
>> +     DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
>> +              adev->sysvm.num_cpu_pages, adev->sysvm.num_gpu_pages);
>> +
>> +#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
>> +     /* Allocate pages table */
>> +     adev->sysvm.pages = vzalloc(sizeof(void *) * adev->sysvm.num_cpu_pages);
>> +     if (adev->sysvm.pages == NULL) {
>> +             amdgpu_sysvm_fini(adev);
>> +             return -ENOMEM;
>> +     }
>> +#endif
>> +
>> +     return 0;
>> +}
>> +
>> +/**
>> + * amdgpu_sysvm_fini - tear down the driver info for managing the gart
>> + *
>> + * @adev: amdgpu_device pointer
>> + *
>> + * Tear down the gart driver info and free the dummy page (all asics).
>> + */
>> +void amdgpu_sysvm_fini(struct amdgpu_device *adev)
>> +{
>> +     if (adev->sysvm.ready) {
>> +             /* unbind pages */
>> +             amdgpu_sysvm_unbind(adev, 0, adev->sysvm.num_cpu_pages);
>> +     }
>> +     adev->sysvm.ready = false;
>> +#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
>> +     vfree(adev->sysvm.pages);
>> +     adev->sysvm.pages = NULL;
>> +#endif
>> +     amdgpu_dummy_page_fini(adev);
>> +}
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
>> index d02e611..651712e 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
>> @@ -32,8 +32,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
>>  {
>>       struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
>>       struct amdgpu_bo *vram_obj = NULL;
>> -     struct amdgpu_bo **gtt_obj = NULL;
>> -     uint64_t gtt_addr, vram_addr;
>> +     struct amdgpu_bo **sysvm_obj = NULL;
>> +     uint64_t sysvm_addr, vram_addr;
>>       unsigned n, size;
>>       int i, r;
>>
>> @@ -42,7 +42,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
>>       /* Number of tests =
>>        * (Total GTT - IB pool - writeback page - ring buffers) / test size
>>        */
>> -     n = adev->mc.gtt_size - AMDGPU_IB_POOL_SIZE*64*1024;
>> +     n = adev->mc.sysvm_size - AMDGPU_IB_POOL_SIZE*64*1024;
>>       for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
>>               if (adev->rings[i])
>>                       n -= adev->rings[i]->ring_size;
>> @@ -52,8 +52,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
>>               n -= adev->irq.ih.ring_size;
>>       n /= size;
>>
>> -     gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
>> -     if (!gtt_obj) {
>> +     sysvm_obj = kzalloc(n * sizeof(*sysvm_obj), GFP_KERNEL);
>> +     if (!sysvm_obj) {
>>               DRM_ERROR("Failed to allocate %d pointers\n", n);
>>               r = 1;
>>               goto out_cleanup;
>> @@ -75,42 +75,42 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
>>               goto out_unres;
>>       }
>>       for (i = 0; i < n; i++) {
>> -             void *gtt_map, *vram_map;
>> -             void **gtt_start, **gtt_end;
>> +             void *sysvm_map, *vram_map;
>> +             void **sysvm_start, **sysvm_end;
>>               void **vram_start, **vram_end;
>>               struct dma_fence *fence = NULL;
>>
>>               r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
>>                                    AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
>> -                                  NULL, gtt_obj + i);
>> +                                  NULL, sysvm_obj + i);
>>               if (r) {
>>                       DRM_ERROR("Failed to create GTT object %d\n", i);
>>                       goto out_lclean;
>>               }
>>
>> -             r = amdgpu_bo_reserve(gtt_obj[i], false);
>> +             r = amdgpu_bo_reserve(sysvm_obj[i], false);
>>               if (unlikely(r != 0))
>>                       goto out_lclean_unref;
>> -             r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, &gtt_addr);
>> +             r = amdgpu_bo_pin(sysvm_obj[i], AMDGPU_GEM_DOMAIN_GTT, &sysvm_addr);
>>               if (r) {
>>                       DRM_ERROR("Failed to pin GTT object %d\n", i);
>>                       goto out_lclean_unres;
>>               }
>>
>> -             r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
>> +             r = amdgpu_bo_kmap(sysvm_obj[i], &sysvm_map);
>>               if (r) {
>>                       DRM_ERROR("Failed to map GTT object %d\n", i);
>>                       goto out_lclean_unpin;
>>               }
>>
>> -             for (gtt_start = gtt_map, gtt_end = gtt_map + size;
>> -                  gtt_start < gtt_end;
>> -                  gtt_start++)
>> -                     *gtt_start = gtt_start;
>> +             for (sysvm_start = sysvm_map, sysvm_end = sysvm_map + size;
>> +                  sysvm_start < sysvm_end;
>> +                  sysvm_start++)
>> +                     *sysvm_start = sysvm_start;
>>
>> -             amdgpu_bo_kunmap(gtt_obj[i]);
>> +             amdgpu_bo_kunmap(sysvm_obj[i]);
>>
>> -             r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
>> +             r = amdgpu_copy_buffer(ring, sysvm_addr, vram_addr,
>>                                      size, NULL, &fence, false, false);
>>
>>               if (r) {
>> @@ -132,21 +132,21 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
>>                       goto out_lclean_unpin;
>>               }
>>
>> -             for (gtt_start = gtt_map, gtt_end = gtt_map + size,
>> +             for (sysvm_start = sysvm_map, sysvm_end = sysvm_map + size,
>>                    vram_start = vram_map, vram_end = vram_map + size;
>>                    vram_start < vram_end;
>> -                  gtt_start++, vram_start++) {
>> -                     if (*vram_start != gtt_start) {
>> +                  sysvm_start++, vram_start++) {
>> +                     if (*vram_start != sysvm_start) {
>>                               DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
>>                                         "expected 0x%p (GTT/VRAM offset "
>>                                         "0x%16llx/0x%16llx)\n",
>> -                                       i, *vram_start, gtt_start,
>> +                                       i, *vram_start, sysvm_start,
>>                                         (unsigned long long)
>> -                                       (gtt_addr - adev->mc.gtt_start +
>> -                                        (void*)gtt_start - gtt_map),
>> +                                       (sysvm_addr - adev->mc.sysvm_start +
>> +                                        (void*)sysvm_start - sysvm_map),
>>                                         (unsigned long long)
>>                                         (vram_addr - adev->mc.vram_start +
>> -                                        (void*)gtt_start - gtt_map));
>> +                                        (void*)sysvm_start - sysvm_map));
>>                               amdgpu_bo_kunmap(vram_obj);
>>                               goto out_lclean_unpin;
>>                       }
>> @@ -155,7 +155,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
>>
>>               amdgpu_bo_kunmap(vram_obj);
>>
>> -             r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr,
>> +             r = amdgpu_copy_buffer(ring, vram_addr, sysvm_addr,
>>                                      size, NULL, &fence, false, false);
>>
>>               if (r) {
>> @@ -171,49 +171,49 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
>>
>>               dma_fence_put(fence);
>>
>> -             r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
>> +             r = amdgpu_bo_kmap(sysvm_obj[i], &sysvm_map);
>>               if (r) {
>>                       DRM_ERROR("Failed to map GTT object after copy %d\n", i);
>>                       goto out_lclean_unpin;
>>               }
>>
>> -             for (gtt_start = gtt_map, gtt_end = gtt_map + size,
>> +             for (sysvm_start = sysvm_map, sysvm_end = sysvm_map + size,
>>                    vram_start = vram_map, vram_end = vram_map + size;
>> -                  gtt_start < gtt_end;
>> -                  gtt_start++, vram_start++) {
>> -                     if (*gtt_start != vram_start) {
>> +                  sysvm_start < sysvm_end;
>> +                  sysvm_start++, vram_start++) {
>> +                     if (*sysvm_start != vram_start) {
>>                               DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
>>                                         "expected 0x%p (VRAM/GTT offset "
>>                                         "0x%16llx/0x%16llx)\n",
>> -                                       i, *gtt_start, vram_start,
>> +                                       i, *sysvm_start, vram_start,
>>                                         (unsigned long long)
>>                                         (vram_addr - adev->mc.vram_start +
>>                                          (void*)vram_start - vram_map),
>>                                         (unsigned long long)
>> -                                       (gtt_addr - adev->mc.gtt_start +
>> +                                       (sysvm_addr - adev->mc.sysvm_start +
>>                                          (void*)vram_start - vram_map));
>> -                             amdgpu_bo_kunmap(gtt_obj[i]);
>> +                             amdgpu_bo_kunmap(sysvm_obj[i]);
>>                               goto out_lclean_unpin;
>>                       }
>>               }
>>
>> -             amdgpu_bo_kunmap(gtt_obj[i]);
>> +             amdgpu_bo_kunmap(sysvm_obj[i]);
>>
>>               DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
>> -                      gtt_addr - adev->mc.gtt_start);
>> +                      sysvm_addr - adev->mc.sysvm_start);
>>               continue;
>>
>>  out_lclean_unpin:
>> -             amdgpu_bo_unpin(gtt_obj[i]);
>> +             amdgpu_bo_unpin(sysvm_obj[i]);
>>  out_lclean_unres:
>> -             amdgpu_bo_unreserve(gtt_obj[i]);
>> +             amdgpu_bo_unreserve(sysvm_obj[i]);
>>  out_lclean_unref:
>> -             amdgpu_bo_unref(&gtt_obj[i]);
>> +             amdgpu_bo_unref(&sysvm_obj[i]);
>>  out_lclean:
>>               for (--i; i >= 0; --i) {
>> -                     amdgpu_bo_unpin(gtt_obj[i]);
>> -                     amdgpu_bo_unreserve(gtt_obj[i]);
>> -                     amdgpu_bo_unref(&gtt_obj[i]);
>> +                     amdgpu_bo_unpin(sysvm_obj[i]);
>> +                     amdgpu_bo_unreserve(sysvm_obj[i]);
>> +                     amdgpu_bo_unref(&sysvm_obj[i]);
>>               }
>>               if (fence)
>>                       dma_fence_put(fence);
>> @@ -226,7 +226,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
>>  out_unref:
>>       amdgpu_bo_unref(&vram_obj);
>>  out_cleanup:
>> -     kfree(gtt_obj);
>> +     kfree(sysvm_obj);
>>       if (r) {
>>               pr_warn("Error while testing BO move\n");
>>       }
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>> index 5c7a6c5..9240357 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>> @@ -102,7 +102,7 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
>>               goto error_bo;
>>       }
>>
>> -     mutex_init(&adev->mman.gtt_window_lock);
>> +     mutex_init(&adev->mman.sysvm_window_lock);
>>
>>       ring = adev->mman.buffer_funcs_ring;
>>       rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
>> @@ -130,7 +130,7 @@ static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
>>       if (adev->mman.mem_global_referenced) {
>>               amd_sched_entity_fini(adev->mman.entity.sched,
>>                                     &adev->mman.entity);
>> -             mutex_destroy(&adev->mman.gtt_window_lock);
>> +             mutex_destroy(&adev->mman.sysvm_window_lock);
>>               drm_global_item_unref(&adev->mman.bo_global_ref.ref);
>>               drm_global_item_unref(&adev->mman.mem_global_ref);
>>               adev->mman.mem_global_referenced = false;
>> @@ -158,7 +158,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
>>               break;
>>       case TTM_PL_TT:
>>               man->func = &amdgpu_gtt_mgr_func;
>> -             man->gpu_offset = adev->mc.gtt_start;
>> +             man->gpu_offset = adev->mc.sysvm_start;
>>               man->available_caching = TTM_PL_MASK_CACHING;
>>               man->default_caching = TTM_PL_FLAG_CACHED;
>>               man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
>> @@ -289,7 +289,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
>>       new_start = amdgpu_mm_node_addr(bo, new_mm, new_mem);
>>
>>       num_pages = new_mem->num_pages;
>> -     mutex_lock(&adev->mman.gtt_window_lock);
>> +     mutex_lock(&adev->mman.sysvm_window_lock);
>>       while (num_pages) {
>>               unsigned long cur_pages = min(min(old_size, new_size),
>>                                             (u64)AMDGPU_GTT_MAX_TRANSFER_SIZE);
>> @@ -341,14 +341,14 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
>>                       new_start += cur_pages * PAGE_SIZE;
>>               }
>>       }
>> -     mutex_unlock(&adev->mman.gtt_window_lock);
>> +     mutex_unlock(&adev->mman.sysvm_window_lock);
>>
>>       r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
>>       dma_fence_put(fence);
>>       return r;
>>
>>  error:
>> -     mutex_unlock(&adev->mman.gtt_window_lock);
>> +     mutex_unlock(&adev->mman.sysvm_window_lock);
>>
>>       if (fence)
>>               dma_fence_wait(fence, false);
>> @@ -693,10 +693,10 @@ static int amdgpu_ttm_do_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
>>       uint64_t flags;
>>       int r;
>>
>> -     spin_lock(&gtt->adev->gtt_list_lock);
>> +     spin_lock(&gtt->adev->sysvm_list_lock);
>>       flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, mem);
>>       gtt->offset = (u64)mem->start << PAGE_SHIFT;
>> -     r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
>> +     r = amdgpu_sysvm_bind(gtt->adev, gtt->offset, ttm->num_pages,
>>               ttm->pages, gtt->ttm.dma_address, flags);
>>
>>       if (r) {
>> @@ -705,9 +705,9 @@ static int amdgpu_ttm_do_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
>>               goto error_gart_bind;
>>       }
>>
>> -     list_add_tail(&gtt->list, &gtt->adev->gtt_list);
>> +     list_add_tail(&gtt->list, &gtt->adev->sysvm_list);
>>  error_gart_bind:
>> -     spin_unlock(&gtt->adev->gtt_list_lock);
>> +     spin_unlock(&gtt->adev->sysvm_list_lock);
>>       return r;
>>
>>  }
>> @@ -774,20 +774,20 @@ int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
>>       int r;
>>
>>       bo_mem.mem_type = TTM_PL_TT;
>> -     spin_lock(&adev->gtt_list_lock);
>> -     list_for_each_entry_safe(gtt, tmp, &adev->gtt_list, list) {
>> +     spin_lock(&adev->sysvm_list_lock);
>> +     list_for_each_entry_safe(gtt, tmp, &adev->sysvm_list, list) {
>>               flags = amdgpu_ttm_tt_pte_flags(gtt->adev, &gtt->ttm.ttm, &bo_mem);
>> -             r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
>> +             r = amdgpu_sysvm_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
>>                                    gtt->ttm.ttm.pages, gtt->ttm.dma_address,
>>                                    flags);
>>               if (r) {
>> -                     spin_unlock(&adev->gtt_list_lock);
>> +                     spin_unlock(&adev->sysvm_list_lock);
>>                       DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
>>                                 gtt->ttm.ttm.num_pages, gtt->offset);
>>                       return r;
>>               }
>>       }
>> -     spin_unlock(&adev->gtt_list_lock);
>> +     spin_unlock(&adev->sysvm_list_lock);
>>       return 0;
>>  }
>>
>> @@ -803,8 +803,8 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
>>               return 0;
>>
>>       /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
>> -     spin_lock(&gtt->adev->gtt_list_lock);
>> -     r = amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages);
>> +     spin_lock(&gtt->adev->sysvm_list_lock);
>> +     r = amdgpu_sysvm_unbind(gtt->adev, gtt->offset, ttm->num_pages);
>>       if (r) {
>>               DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
>>                         gtt->ttm.ttm.num_pages, gtt->offset);
>> @@ -812,7 +812,7 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
>>       }
>>       list_del_init(&gtt->list);
>>  error_unbind:
>> -     spin_unlock(&gtt->adev->gtt_list_lock);
>> +     spin_unlock(&gtt->adev->sysvm_list_lock);
>>       return r;
>>  }
>>
>> @@ -1037,7 +1037,7 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
>>                       flags |= AMDGPU_PTE_SNOOPED;
>>       }
>>
>> -     flags |= adev->gart.gart_pte_flags;
>> +     flags |= adev->sysvm.sysvm_pte_flags;
>>       flags |= AMDGPU_PTE_READABLE;
>>
>>       if (!amdgpu_ttm_tt_is_readonly(ttm))
>> @@ -1144,13 +1144,13 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
>>       DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
>>                (unsigned) (adev->mc.real_vram_size / (1024 * 1024)));
>>       r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT,
>> -                             adev->mc.gtt_size >> PAGE_SHIFT);
>> +                             adev->mc.sysvm_size >> PAGE_SHIFT);
>>       if (r) {
>>               DRM_ERROR("Failed initializing GTT heap.\n");
>>               return r;
>>       }
>>       DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
>> -              (unsigned)(adev->mc.gtt_size / (1024 * 1024)));
>> +              (unsigned)(adev->mc.sysvm_size / (1024 * 1024)));
>>
>>       adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
>>       adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
>> @@ -1223,7 +1223,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
>>       if (adev->gds.oa.total_size)
>>               ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
>>       ttm_bo_device_release(&adev->mman.bdev);
>> -     amdgpu_gart_fini(adev);
>> +     amdgpu_sysvm_fini(adev);
>>       amdgpu_ttm_global_fini(adev);
>>       adev->mman.initialized = false;
>>       DRM_INFO("amdgpu: ttm finalized\n");
>> @@ -1279,7 +1279,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
>>       BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
>>              AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
>>
>> -     *addr = adev->mc.gtt_start;
>> +     *addr = adev->mc.sysvm_start;
>>       *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
>>               AMDGPU_GPU_PAGE_SIZE;
>>
>> @@ -1296,7 +1296,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
>>       src_addr = num_dw * 4;
>>       src_addr += job->ibs[0].gpu_addr;
>>
>> -     dst_addr = adev->gart.table_addr;
>> +     dst_addr = adev->sysvm.table_addr;
>>       dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
>>       amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
>>                               dst_addr, num_bytes);
>> @@ -1306,7 +1306,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
>>
>>       dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
>>       flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
>> -     r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
>> +     r = amdgpu_sysvm_map(adev, 0, num_pages, dma_address, flags,
>>                           &job->ibs[0].ptr[num_dw]);
>>       if (r)
>>               goto error_free;
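
For reference, the window arithmetic used by amdgpu_map_buffer() above boils
down to the following (standalone sketch; constants and addresses are assumed
for illustration, the real code reads them from adev):

#include <stdio.h>
#include <stdint.h>

#define TRANSFER_WINDOW_PAGES 512u	/* assumed pages per transfer window */
#define GPU_PAGE_SIZE         4096u	/* assumed GPU page size */

int main(void)
{
	uint64_t sysvm_start = 0x0ull;		/* pretend start of the aperture */
	uint64_t table_addr  = 0x800000ull;	/* pretend page table location */
	unsigned window = 1;			/* second transfer window */

	/* GPU virtual address the window remaps, and the PTEs that back it. */
	uint64_t win_addr = sysvm_start +
			    (uint64_t)window * TRANSFER_WINDOW_PAGES * GPU_PAGE_SIZE;
	uint64_t pte_addr = table_addr +
			    (uint64_t)window * TRANSFER_WINDOW_PAGES * 8; /* 8 bytes/PTE */

	printf("window %u: va=0x%llx, PTEs at 0x%llx\n", window,
	       (unsigned long long)win_addr, (unsigned long long)pte_addr);
	return 0;
}
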
>> @@ -1522,7 +1522,7 @@ static int ttm_pl_tt = TTM_PL_TT;
>>
>>  static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
>>       {"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram},
>> -     {"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
>> +     {"amdgpu_sysvm_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
>>       {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
>>  #ifdef CONFIG_SWIOTLB
>>       {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
>> @@ -1574,9 +1574,9 @@ static const struct file_operations amdgpu_ttm_vram_fops = {
>>       .llseek = default_llseek
>>  };
>>
>> -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
>> +#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
>>
>> -static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
>> +static ssize_t amdgpu_ttm_sysvm_read(struct file *f, char __user *buf,
>>                                  size_t size, loff_t *pos)
>>  {
>>       struct amdgpu_device *adev = file_inode(f)->i_private;
>> @@ -1590,16 +1590,16 @@ static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
>>               struct page *page;
>>               void *ptr;
>>
>> -             if (p >= adev->gart.num_cpu_pages)
>> +             if (p >= adev->sysvm.num_cpu_pages)
>>                       return result;
>>
>> -             page = adev->gart.pages[p];
>> +             page = adev->sysvm.pages[p];
>>               if (page) {
>>                       ptr = kmap(page);
>>                       ptr += off;
>>
>>                       r = copy_to_user(buf, ptr, cur_size);
>> -                     kunmap(adev->gart.pages[p]);
>> +                     kunmap(adev->sysvm.pages[p]);
>>               } else
>>                       r = clear_user(buf, cur_size);
>>
>> @@ -1615,9 +1615,9 @@ static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
>>       return result;
>>  }
>>
>> -static const struct file_operations amdgpu_ttm_gtt_fops = {
>> +static const struct file_operations amdgpu_ttm_sysvm_fops = {
>>       .owner = THIS_MODULE,
>> -     .read = amdgpu_ttm_gtt_read,
>> +     .read = amdgpu_ttm_sysvm_read,
>>       .llseek = default_llseek
>>  };
>>
>> @@ -1640,12 +1640,12 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
>>       i_size_write(ent->d_inode, adev->mc.mc_vram_size);
>>       adev->mman.vram = ent;
>>
>> -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
>> -     ent = debugfs_create_file("amdgpu_gtt", S_IFREG | S_IRUGO, root,
>> -                               adev, &amdgpu_ttm_gtt_fops);
>> +#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
>> +     ent = debugfs_create_file("amdgpu_sysvm", S_IFREG | S_IRUGO, root,
>> +                               adev, &amdgpu_ttm_sysvm_fops);
>>       if (IS_ERR(ent))
>>               return PTR_ERR(ent);
>> -     i_size_write(ent->d_inode, adev->mc.gtt_size);
>> +     i_size_write(ent->d_inode, adev->mc.sysvm_size);
>>       adev->mman.gtt = ent;
>>
>>  #endif
>> @@ -1670,7 +1670,7 @@ static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
>>       debugfs_remove(adev->mman.vram);
>>       adev->mman.vram = NULL;
>>
>> -#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
>> +#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
>>       debugfs_remove(adev->mman.gtt);
>>       adev->mman.gtt = NULL;
>>  #endif
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
>> index 4f5c1da..1443038 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
>> @@ -53,7 +53,7 @@ struct amdgpu_mman {
>>       const struct amdgpu_buffer_funcs        *buffer_funcs;
>>       struct amdgpu_ring                      *buffer_funcs_ring;
>>
>> -     struct mutex                            gtt_window_lock;
>> +     struct mutex                            sysvm_window_lock;
>>       /* Scheduler entity for buffer moves */
>>       struct amd_sched_entity                 entity;
>>  };
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> index 1d1810d..8dbacec 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
>> @@ -982,14 +982,14 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
>>               value = params->pages_addr ?
>>                       amdgpu_vm_map_gart(params->pages_addr, addr) :
>>                       addr;
>> -             amdgpu_gart_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
>> +             amdgpu_sysvm_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
>>                                       i, value, flags);
>>               addr += incr;
>>       }
>>
>>       /* Flush HDP */
>>       mb();
>> -     amdgpu_gart_flush_gpu_tlb(params->adev, 0);
>> +     amdgpu_sysvm_flush_gpu_tlb(params->adev, 0);
>>  }
>>
>>  static int amdgpu_vm_bo_wait(struct amdgpu_device *adev, struct amdgpu_bo *bo)
>> @@ -1101,7 +1101,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
>>               }
>>
>>               pt = amdgpu_bo_gpu_offset(bo);
>> -             pt = amdgpu_gart_get_vm_pde(adev, pt);
>> +             pt = amdgpu_sysvm_get_vm_pde(adev, pt);
>>               if (parent->entries[pt_idx].addr == pt)
>>                       continue;
>>
>> @@ -1564,7 +1564,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
>>   *
>>   * @adev: amdgpu_device pointer
>>   * @exclusive: fence we need to sync to
>> - * @gtt_flags: flags as they are used for GTT
>> + * @sysvm_flags: flags as they are used in the SYSVM
>>   * @pages_addr: DMA addresses to use for mapping
>>   * @vm: requested vm
>>   * @mapping: mapped range and flags to use for the update
>> @@ -1578,7 +1578,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
>>   */
>>  static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
>>                                     struct dma_fence *exclusive,
>> -                                   uint64_t gtt_flags,
>> +                                   uint64_t sysvm_flags,
>>                                     dma_addr_t *pages_addr,
>>                                     struct amdgpu_vm *vm,
>>                                     struct amdgpu_bo_va_mapping *mapping,
>> @@ -1633,8 +1633,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
>>               }
>>
>>               if (pages_addr) {
>> -                     if (flags == gtt_flags)
>> -                             src = adev->gart.table_addr +
>> +                     if (flags == sysvm_flags)
>> +                             src = adev->sysvm.table_addr +
>>                                       (addr >> AMDGPU_GPU_PAGE_SHIFT) * 8;
>>                       else
>>                               max_entries = min(max_entries, 16ull * 1024ull);
>> @@ -1681,7 +1681,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
>>       struct amdgpu_vm *vm = bo_va->vm;
>>       struct amdgpu_bo_va_mapping *mapping;
>>       dma_addr_t *pages_addr = NULL;
>> -     uint64_t gtt_flags, flags;
>> +     uint64_t sysvm_flags, flags;
>>       struct ttm_mem_reg *mem;
>>       struct drm_mm_node *nodes;
>>       struct dma_fence *exclusive;
>> @@ -1706,12 +1706,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
>>
>>       if (bo_va->bo) {
>>               flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
>> -             gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
>> +             sysvm_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
>>                       adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ?
>>                       flags : 0;
>>       } else {
>>               flags = 0x0;
>> -             gtt_flags = ~0x0;
>> +             sysvm_flags = ~0x0;
>>       }
>>
>>       spin_lock(&vm->status_lock);
>> @@ -1721,7 +1721,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
>>
>>       list_for_each_entry(mapping, &bo_va->invalids, list) {
>>               r = amdgpu_vm_bo_split_mapping(adev, exclusive,
>> -                                            gtt_flags, pages_addr, vm,
>> +                                            sysvm_flags, pages_addr, vm,
>>                                              mapping, flags, nodes,
>>                                              &bo_va->last_pt_update);
>>               if (r)
>> @@ -1756,7 +1756,7 @@ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
>>
>>       spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
>>       enable = !!atomic_read(&adev->vm_manager.num_prt_users);
>> -     adev->gart.gart_funcs->set_prt(adev, enable);
>> +     adev->sysvm.sysvm_funcs->set_prt(adev, enable);
>>       spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
>>  }
>>
>> @@ -1765,7 +1765,7 @@ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
>>   */
>>  static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
>>  {
>> -     if (!adev->gart.gart_funcs->set_prt)
>> +     if (!adev->sysvm.sysvm_funcs->set_prt)
>>               return;
>>
>>       if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
>> @@ -1800,7 +1800,7 @@ static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
>>  {
>>       struct amdgpu_prt_cb *cb;
>>
>> -     if (!adev->gart.gart_funcs->set_prt)
>> +     if (!adev->sysvm.sysvm_funcs->set_prt)
>>               return;
>>
>>       cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
>> @@ -2498,7 +2498,7 @@ static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level)
>>  void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
>>  {
>>       struct amdgpu_bo_va_mapping *mapping, *tmp;
>> -     bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
>> +     bool prt_fini_needed = !!adev->sysvm.sysvm_funcs->set_prt;
>>       int i;
>>
>>       amd_sched_entity_fini(vm->entity.sched, &vm->entity);
>> diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
>> index 6986285..708fb84 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
>> @@ -3731,10 +3731,10 @@ static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
>>  {
>>       struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
>>       int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
>> -     uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
>> +     uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
>>       unsigned eng = ring->vm_inv_eng;
>>
>> -     pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
>> +     pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
>>       pd_addr |= AMDGPU_PTE_VALID;
>>
>>       gfx_v9_0_write_data_to_reg(ring, usepfp, true,
>> diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
>> index a42f483..1290434 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
>> @@ -40,8 +40,8 @@ static void gfxhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
>>  {
>>       uint64_t value;
>>
>> -     BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL));
>> -     value = adev->gart.table_addr - adev->mc.vram_start
>> +     BUG_ON(adev->sysvm.table_addr & (~0x0000FFFFFFFFF000ULL));
>> +     value = adev->sysvm.table_addr - adev->mc.vram_start
>>               + adev->vm_manager.vram_base_offset;
>>       value &= 0x0000FFFFFFFFF000ULL;
>>       value |= 0x1; /*valid bit*/
>> @@ -58,14 +58,14 @@ static void gfxhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
>>       gfxhub_v1_0_init_gart_pt_regs(adev);
>>
>>       WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
>> -                  (u32)(adev->mc.gtt_start >> 12));
>> +                  (u32)(adev->mc.sysvm_start >> 12));
>>       WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
>> -                  (u32)(adev->mc.gtt_start >> 44));
>> +                  (u32)(adev->mc.sysvm_start >> 44));
>>
>>       WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
>> -                  (u32)(adev->mc.gtt_end >> 12));
>> +                  (u32)(adev->mc.sysvm_end >> 12));
>>       WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
>> -                  (u32)(adev->mc.gtt_end >> 44));
>> +                  (u32)(adev->mc.sysvm_end >> 44));
>>  }
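
The register programming above splits the 48 bit aperture start/end across
two 32 bit registers: the >> 12 write carries bits 43:12 and the >> 44 write
carries the top bits. Quick standalone check with a made-up, page aligned
address (illustration only):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t sysvm_start = 0x0000123456789000ull;	/* made-up 48-bit address */

	uint32_t lo = (uint32_t)(sysvm_start >> 12);	/* bits 43:12 -> *_LO32 */
	uint32_t hi = (uint32_t)(sysvm_start >> 44);	/* bits 47:44 -> *_HI32 */

	printf("LO32=0x%08x HI32=0x%08x\n", lo, hi);
	return 0;
}
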
>>
>>  static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
>> @@ -228,7 +228,7 @@ static void gfxhub_v1_0_program_invalidation(struct amdgpu_device *adev)
>>       }
>>  }
>>
>> -int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
>> +int gfxhub_v1_0_sysvm_enable(struct amdgpu_device *adev)
>>  {
>>       if (amdgpu_sriov_vf(adev)) {
>>               /*
>> @@ -256,7 +256,7 @@ int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
>>       return 0;
>>  }
>>
>> -void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev)
>> +void gfxhub_v1_0_sysvm_disable(struct amdgpu_device *adev)
>>  {
>>       u32 tmp;
>>       u32 i;
>> diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
>> index d2dbb08..d194b7e 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
>> @@ -24,8 +24,8 @@
>>  #ifndef __GFXHUB_V1_0_H__
>>  #define __GFXHUB_V1_0_H__
>>
>> -int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev);
>> -void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev);
>> +int gfxhub_v1_0_sysvm_enable(struct amdgpu_device *adev);
>> +void gfxhub_v1_0_sysvm_disable(struct amdgpu_device *adev);
>>  void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
>>                                         bool value);
>>  void gfxhub_v1_0_init(struct amdgpu_device *adev);
>> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
>> index 5ed6788f..53c3b8a 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
>> @@ -36,7 +36,7 @@
>>  #include "dce/dce_6_0_sh_mask.h"
>>  #include "si_enums.h"
>>
>> -static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev);
>> +static void gmc_v6_0_set_sysvm_funcs(struct amdgpu_device *adev);
>>  static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
>>  static int gmc_v6_0_wait_for_idle(void *handle);
>>
>> @@ -216,7 +216,7 @@ static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
>>       return 0;
>>  }
>>
>> -static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
>> +static void gmc_v6_0_vram_sysvm_location(struct amdgpu_device *adev,
>>                                      struct amdgpu_mc *mc)
>>  {
>>       u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
>> @@ -228,8 +228,8 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
>>               mc->mc_vram_size = 0xFFC0000000ULL;
>>       }
>>       amdgpu_vram_location(adev, &adev->mc, base);
>> -     adev->mc.gtt_base_align = 0;
>> -     amdgpu_gtt_location(adev, mc);
>> +     adev->mc.sysvm_base_align = 0;
>> +     amdgpu_sysvm_location(adev, mc);
>>  }
>>
>>  static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
>> @@ -321,8 +321,8 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
>>       adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
>>       adev->mc.visible_vram_size = adev->mc.aper_size;
>>
>> -     amdgpu_gart_set_defaults(adev);
>> -     gmc_v6_0_vram_gtt_location(adev, &adev->mc);
>> +     amdgpu_sysvm_set_defaults(adev);
>> +     gmc_v6_0_vram_sysvm_location(adev, &adev->mc);
>>
>>       return 0;
>>  }
>> @@ -447,15 +447,15 @@ static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
>>       }
>>  }
>>
>> -static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
>> +static int gmc_v6_0_sysvm_enable(struct amdgpu_device *adev)
>>  {
>>       int r, i;
>>
>> -     if (adev->gart.robj == NULL) {
>> +     if (adev->sysvm.robj == NULL) {
>>               dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
>>               return -EINVAL;
>>       }
>> -     r = amdgpu_gart_table_vram_pin(adev);
>> +     r = amdgpu_sysvm_table_vram_pin(adev);
>>       if (r)
>>               return r;
>>       /* Setup TLB control */
>> @@ -482,9 +482,9 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
>>              (4UL << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
>>              (4UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
>>       /* setup context0 */
>> -     WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
>> -     WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
>> -     WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
>> +     WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.sysvm_start >> 12);
>> +     WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.sysvm_end >> 12);
>> +     WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->sysvm.table_addr >> 12);
>>       WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
>>                       (u32)(adev->dummy_page.addr >> 12));
>>       WREG32(mmVM_CONTEXT0_CNTL2, 0);
>> @@ -508,10 +508,10 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
>>       for (i = 1; i < 16; i++) {
>>               if (i < 8)
>>                       WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
>> -                            adev->gart.table_addr >> 12);
>> +                            adev->sysvm.table_addr >> 12);
>>               else
>>                       WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
>> -                            adev->gart.table_addr >> 12);
>> +                            adev->sysvm.table_addr >> 12);
>>       }
>>
>>       /* enable context1-15 */
>> @@ -530,9 +530,9 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
>>
>>       gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
>>       dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
>> -              (unsigned)(adev->mc.gtt_size >> 20),
>> -              (unsigned long long)adev->gart.table_addr);
>> -     adev->gart.ready = true;
>> +              (unsigned)(adev->mc.sysvm_size >> 20),
>> +              (unsigned long long)adev->sysvm.table_addr);
>> +     adev->sysvm.ready = true;
>>       return 0;
>>  }
>>
>> @@ -540,19 +540,19 @@ static int gmc_v6_0_gart_init(struct amdgpu_device *adev)
>>  {
>>       int r;
>>
>> -     if (adev->gart.robj) {
>> +     if (adev->sysvm.robj) {
>>               dev_warn(adev->dev, "gmc_v6_0 PCIE GART already initialized\n");
>>               return 0;
>>       }
>> -     r = amdgpu_gart_init(adev);
>> +     r = amdgpu_sysvm_init(adev);
>>       if (r)
>>               return r;
>> -     adev->gart.table_size = adev->gart.num_gpu_pages * 8;
>> -     adev->gart.gart_pte_flags = 0;
>> -     return amdgpu_gart_table_vram_alloc(adev);
>> +     adev->sysvm.table_size = adev->sysvm.num_gpu_pages * 8;
>> +     adev->sysvm.sysvm_pte_flags = 0;
>> +     return amdgpu_sysvm_table_vram_alloc(adev);
>>  }
>>
>> -static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
>> +static void gmc_v6_0_sysvm_disable(struct amdgpu_device *adev)
>>  {
>>       /*unsigned i;
>>
>> @@ -582,13 +582,13 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
>>       WREG32(mmVM_L2_CNTL3,
>>              VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
>>              (0UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
>> -     amdgpu_gart_table_vram_unpin(adev);
>> +     amdgpu_sysvm_table_vram_unpin(adev);
>>  }
>>
>>  static void gmc_v6_0_gart_fini(struct amdgpu_device *adev)
>>  {
>> -     amdgpu_gart_table_vram_free(adev);
>> -     amdgpu_gart_fini(adev);
>> +     amdgpu_sysvm_table_vram_free(adev);
>> +     amdgpu_sysvm_fini(adev);
>>  }
>>
>>  static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
>> @@ -762,7 +762,7 @@ static int gmc_v6_0_early_init(void *handle)
>>  {
>>       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>>
>> -     gmc_v6_0_set_gart_funcs(adev);
>> +     gmc_v6_0_set_sysvm_funcs(adev);
>>       gmc_v6_0_set_irq_funcs(adev);
>>
>>       return 0;
>> @@ -889,7 +889,7 @@ static int gmc_v6_0_hw_init(void *handle)
>>               }
>>       }
>>
>> -     r = gmc_v6_0_gart_enable(adev);
>> +     r = gmc_v6_0_sysvm_enable(adev);
>>       if (r)
>>               return r;
>>
>> @@ -901,7 +901,7 @@ static int gmc_v6_0_hw_fini(void *handle)
>>       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>>
>>       amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
>> -     gmc_v6_0_gart_disable(adev);
>> +     gmc_v6_0_sysvm_disable(adev);
>>
>>       return 0;
>>  }
>> @@ -1095,7 +1095,7 @@ static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
>>       .set_powergating_state = gmc_v6_0_set_powergating_state,
>>  };
>>
>> -static const struct amdgpu_gart_funcs gmc_v6_0_gart_funcs = {
>> +static const struct amdgpu_sysvm_funcs gmc_v6_0_sysvm_funcs = {
>>       .flush_gpu_tlb = gmc_v6_0_gart_flush_gpu_tlb,
>>       .set_pte_pde = gmc_v6_0_gart_set_pte_pde,
>>       .set_prt = gmc_v6_0_set_prt,
>> @@ -1108,10 +1108,10 @@ static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
>>       .process = gmc_v6_0_process_interrupt,
>>  };
>>
>> -static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev)
>> +static void gmc_v6_0_set_sysvm_funcs(struct amdgpu_device *adev)
>>  {
>> -     if (adev->gart.gart_funcs == NULL)
>> -             adev->gart.gart_funcs = &gmc_v6_0_gart_funcs;
>> +     if (adev->sysvm.sysvm_funcs == NULL)
>> +             adev->sysvm.sysvm_funcs = &gmc_v6_0_sysvm_funcs;
>>  }
>>
>>  static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
>> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
>> index 15f2c0f..2329bdb 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
>> @@ -39,7 +39,7 @@
>>
>>  #include "amdgpu_atombios.h"
>>
>> -static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
>> +static void gmc_v7_0_set_sysvm_funcs(struct amdgpu_device *adev);
>>  static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
>>  static int gmc_v7_0_wait_for_idle(void *handle);
>>
>> @@ -231,7 +231,7 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
>>       return 0;
>>  }
>>
>> -static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
>> +static void gmc_v7_0_vram_sysvm_location(struct amdgpu_device *adev,
>>                                      struct amdgpu_mc *mc)
>>  {
>>       u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
>> @@ -244,8 +244,8 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
>>               mc->mc_vram_size = 0xFFC0000000ULL;
>>       }
>>       amdgpu_vram_location(adev, &adev->mc, base);
>> -     adev->mc.gtt_base_align = 0;
>> -     amdgpu_gtt_location(adev, mc);
>> +     adev->mc.sysvm_base_align = 0;
>> +     amdgpu_sysvm_location(adev, mc);
>>  }
>>
>>  /**
>> @@ -373,8 +373,8 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
>>       if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
>>               adev->mc.visible_vram_size = adev->mc.real_vram_size;
>>
>> -     amdgpu_gart_set_defaults(adev);
>> -     gmc_v7_0_vram_gtt_location(adev, &adev->mc);
>> +     amdgpu_sysvm_set_defaults(adev);
>> +     gmc_v7_0_vram_sysvm_location(adev, &adev->mc);
>>
>>       return 0;
>>  }
>> @@ -536,7 +536,7 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
>>  }
>>
>>  /**
>> - * gmc_v7_0_gart_enable - gart enable
>> + * gmc_v7_0_sysvm_enable - gart enable
>>   *
>>   * @adev: amdgpu_device pointer
>>   *
>> @@ -546,16 +546,16 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
>>   * and GPUVM for FSA64 clients (CIK).
>>   * Returns 0 for success, errors for failure.
>>   */
>> -static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
>> +static int gmc_v7_0_sysvm_enable(struct amdgpu_device *adev)
>>  {
>>       int r, i;
>>       u32 tmp;
>>
>> -     if (adev->gart.robj == NULL) {
>> +     if (adev->sysvm.robj == NULL) {
>>               dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
>>               return -EINVAL;
>>       }
>> -     r = amdgpu_gart_table_vram_pin(adev);
>> +     r = amdgpu_sysvm_table_vram_pin(adev);
>>       if (r)
>>               return r;
>>       /* Setup TLB control */
>> @@ -585,9 +585,9 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
>>       tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
>>       WREG32(mmVM_L2_CNTL3, tmp);
>>       /* setup context0 */
>> -     WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
>> -     WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
>> -     WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
>> +     WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.sysvm_start >> 12);
>> +     WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.sysvm_end >> 12);
>> +     WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->sysvm.table_addr >> 12);
>>       WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
>>                       (u32)(adev->dummy_page.addr >> 12));
>>       WREG32(mmVM_CONTEXT0_CNTL2, 0);
>> @@ -611,10 +611,10 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
>>       for (i = 1; i < 16; i++) {
>>               if (i < 8)
>>                       WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
>> -                            adev->gart.table_addr >> 12);
>> +                            adev->sysvm.table_addr >> 12);
>>               else
>>                       WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
>> -                            adev->gart.table_addr >> 12);
>> +                            adev->sysvm.table_addr >> 12);
>>       }
>>
>>       /* enable context1-15 */
>> @@ -640,9 +640,9 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
>>
>>       gmc_v7_0_gart_flush_gpu_tlb(adev, 0);
>>       DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
>> -              (unsigned)(adev->mc.gtt_size >> 20),
>> -              (unsigned long long)adev->gart.table_addr);
>> -     adev->gart.ready = true;
>> +              (unsigned)(adev->mc.sysvm_size >> 20),
>> +              (unsigned long long)adev->sysvm.table_addr);
>> +     adev->sysvm.ready = true;
>>       return 0;
>>  }
>>
>> @@ -650,27 +650,27 @@ static int gmc_v7_0_gart_init(struct amdgpu_device *adev)
>>  {
>>       int r;
>>
>> -     if (adev->gart.robj) {
>> +     if (adev->sysvm.robj) {
>>               WARN(1, "R600 PCIE GART already initialized\n");
>>               return 0;
>>       }
>>       /* Initialize common gart structure */
>> -     r = amdgpu_gart_init(adev);
>> +     r = amdgpu_sysvm_init(adev);
>>       if (r)
>>               return r;
>> -     adev->gart.table_size = adev->gart.num_gpu_pages * 8;
>> -     adev->gart.gart_pte_flags = 0;
>> -     return amdgpu_gart_table_vram_alloc(adev);
>> +     adev->sysvm.table_size = adev->sysvm.num_gpu_pages * 8;
>> +     adev->sysvm.sysvm_pte_flags = 0;
>> +     return amdgpu_sysvm_table_vram_alloc(adev);
>>  }
>>
>>  /**
>> - * gmc_v7_0_gart_disable - gart disable
>> + * gmc_v7_0_sysvm_disable - gart disable
>>   *
>>   * @adev: amdgpu_device pointer
>>   *
>>   * This disables all VM page table (CIK).
>>   */
>> -static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
>> +static void gmc_v7_0_sysvm_disable(struct amdgpu_device *adev)
>>  {
>>       u32 tmp;
>>
>> @@ -688,7 +688,7 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
>>       tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
>>       WREG32(mmVM_L2_CNTL, tmp);
>>       WREG32(mmVM_L2_CNTL2, 0);
>> -     amdgpu_gart_table_vram_unpin(adev);
>> +     amdgpu_sysvm_table_vram_unpin(adev);
>>  }
>>
>>  /**
>> @@ -700,8 +700,8 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
>>   */
>>  static void gmc_v7_0_gart_fini(struct amdgpu_device *adev)
>>  {
>> -     amdgpu_gart_table_vram_free(adev);
>> -     amdgpu_gart_fini(adev);
>> +     amdgpu_sysvm_table_vram_free(adev);
>> +     amdgpu_sysvm_fini(adev);
>>  }
>>
>>  /**
>> @@ -885,7 +885,7 @@ static int gmc_v7_0_early_init(void *handle)
>>  {
>>       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>>
>> -     gmc_v7_0_set_gart_funcs(adev);
>> +     gmc_v7_0_set_sysvm_funcs(adev);
>>       gmc_v7_0_set_irq_funcs(adev);
>>
>>       adev->mc.shared_aperture_start = 0x2000000000000000ULL;
>> @@ -1036,7 +1036,7 @@ static int gmc_v7_0_hw_init(void *handle)
>>               }
>>       }
>>
>> -     r = gmc_v7_0_gart_enable(adev);
>> +     r = gmc_v7_0_sysvm_enable(adev);
>>       if (r)
>>               return r;
>>
>> @@ -1048,7 +1048,7 @@ static int gmc_v7_0_hw_fini(void *handle)
>>       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>>
>>       amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
>> -     gmc_v7_0_gart_disable(adev);
>> +     gmc_v7_0_sysvm_disable(adev);
>>
>>       return 0;
>>  }
>> @@ -1270,7 +1270,7 @@ static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
>>       .set_powergating_state = gmc_v7_0_set_powergating_state,
>>  };
>>
>> -static const struct amdgpu_gart_funcs gmc_v7_0_gart_funcs = {
>> +static const struct amdgpu_sysvm_funcs gmc_v7_0_sysvm_funcs = {
>>       .flush_gpu_tlb = gmc_v7_0_gart_flush_gpu_tlb,
>>       .set_pte_pde = gmc_v7_0_gart_set_pte_pde,
>>       .set_prt = gmc_v7_0_set_prt,
>> @@ -1283,10 +1283,10 @@ static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
>>       .process = gmc_v7_0_process_interrupt,
>>  };
>>
>> -static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev)
>> +static void gmc_v7_0_set_sysvm_funcs(struct amdgpu_device *adev)
>>  {
>> -     if (adev->gart.gart_funcs == NULL)
>> -             adev->gart.gart_funcs = &gmc_v7_0_gart_funcs;
>> +     if (adev->sysvm.sysvm_funcs == NULL)
>> +             adev->sysvm.sysvm_funcs = &gmc_v7_0_sysvm_funcs;
>>  }
>>
>>  static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
>> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
>> index 213af65..cf8f8d2 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
>> @@ -41,7 +41,7 @@
>>  #include "amdgpu_atombios.h"
>>
>>
>> -static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
>> +static void gmc_v8_0_set_sysvm_funcs(struct amdgpu_device *adev);
>>  static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
>>  static int gmc_v8_0_wait_for_idle(void *handle);
>>
>> @@ -393,7 +393,7 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
>>       return 0;
>>  }
>>
>> -static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
>> +static void gmc_v8_0_vram_sysvm_location(struct amdgpu_device *adev,
>>                                      struct amdgpu_mc *mc)
>>  {
>>       u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
>> @@ -406,8 +406,8 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
>>               mc->mc_vram_size = 0xFFC0000000ULL;
>>       }
>>       amdgpu_vram_location(adev, &adev->mc, base);
>> -     adev->mc.gtt_base_align = 0;
>> -     amdgpu_gtt_location(adev, mc);
>> +     adev->mc.sysvm_base_align = 0;
>> +     amdgpu_sysvm_location(adev, mc);
>>  }
>>
>>  /**
>> @@ -535,8 +535,8 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
>>       if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
>>               adev->mc.visible_vram_size = adev->mc.real_vram_size;
>>
>> -     amdgpu_gart_set_defaults(adev);
>> -     gmc_v8_0_vram_gtt_location(adev, &adev->mc);
>> +     amdgpu_sysvm_set_defaults(adev);
>> +     gmc_v8_0_vram_sysvm_location(adev, &adev->mc);
>>
>>       return 0;
>>  }
>> @@ -722,7 +722,7 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
>>  }
>>
>>  /**
>> - * gmc_v8_0_gart_enable - gart enable
>> + * gmc_v8_0_sysvm_enable - gart enable
>>   *
>>   * @adev: amdgpu_device pointer
>>   *
>> @@ -732,16 +732,16 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
>>   * and GPUVM for FSA64 clients (CIK).
>>   * Returns 0 for success, errors for failure.
>>   */
>> -static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
>> +static int gmc_v8_0_sysvm_enable(struct amdgpu_device *adev)
>>  {
>>       int r, i;
>>       u32 tmp;
>>
>> -     if (adev->gart.robj == NULL) {
>> +     if (adev->sysvm.robj == NULL) {
>>               dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
>>               return -EINVAL;
>>       }
>> -     r = amdgpu_gart_table_vram_pin(adev);
>> +     r = amdgpu_sysvm_table_vram_pin(adev);
>>       if (r)
>>               return r;
>>       /* Setup TLB control */
>> @@ -787,9 +787,9 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
>>       tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
>>       WREG32(mmVM_L2_CNTL4, tmp);
>>       /* setup context0 */
>> -     WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
>> -     WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
>> -     WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
>> +     WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.sysvm_start >> 12);
>> +     WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.sysvm_end >> 12);
>> +     WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->sysvm.table_addr >> 12);
>>       WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
>>                       (u32)(adev->dummy_page.addr >> 12));
>>       WREG32(mmVM_CONTEXT0_CNTL2, 0);
>> @@ -813,10 +813,10 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
>>       for (i = 1; i < 16; i++) {
>>               if (i < 8)
>>                       WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
>> -                            adev->gart.table_addr >> 12);
>> +                            adev->sysvm.table_addr >> 12);
>>               else
>>                       WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
>> -                            adev->gart.table_addr >> 12);
>> +                            adev->sysvm.table_addr >> 12);
>>       }
>>
>>       /* enable context1-15 */
>> @@ -843,9 +843,9 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
>>
>>       gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
>>       DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
>> -              (unsigned)(adev->mc.gtt_size >> 20),
>> -              (unsigned long long)adev->gart.table_addr);
>> -     adev->gart.ready = true;
>> +              (unsigned)(adev->mc.sysvm_size >> 20),
>> +              (unsigned long long)adev->sysvm.table_addr);
>> +     adev->sysvm.ready = true;
>>       return 0;
>>  }
>>
>> @@ -853,27 +853,27 @@ static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
>>  {
>>       int r;
>>
>> -     if (adev->gart.robj) {
>> +     if (adev->sysvm.robj) {
>>               WARN(1, "R600 PCIE GART already initialized\n");
>>               return 0;
>>       }
>>       /* Initialize common gart structure */
>> -     r = amdgpu_gart_init(adev);
>> +     r = amdgpu_sysvm_init(adev);
>>       if (r)
>>               return r;
>> -     adev->gart.table_size = adev->gart.num_gpu_pages * 8;
>> -     adev->gart.gart_pte_flags = AMDGPU_PTE_EXECUTABLE;
>> -     return amdgpu_gart_table_vram_alloc(adev);
>> +     adev->sysvm.table_size = adev->sysvm.num_gpu_pages * 8;
>> +     adev->sysvm.sysvm_pte_flags = AMDGPU_PTE_EXECUTABLE;
>> +     return amdgpu_sysvm_table_vram_alloc(adev);
>>  }
>>
>>  /**
>> - * gmc_v8_0_gart_disable - gart disable
>> + * gmc_v8_0_sysvm_disable - gart disable
>>   *
>>   * @adev: amdgpu_device pointer
>>   *
>>   * This disables all VM page table (CIK).
>>   */
>> -static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
>> +static void gmc_v8_0_sysvm_disable(struct amdgpu_device *adev)
>>  {
>>       u32 tmp;
>>
>> @@ -891,7 +891,7 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
>>       tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
>>       WREG32(mmVM_L2_CNTL, tmp);
>>       WREG32(mmVM_L2_CNTL2, 0);
>> -     amdgpu_gart_table_vram_unpin(adev);
>> +     amdgpu_sysvm_table_vram_unpin(adev);
>>  }
>>
>>  /**
>> @@ -903,8 +903,8 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
>>   */
>>  static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
>>  {
>> -     amdgpu_gart_table_vram_free(adev);
>> -     amdgpu_gart_fini(adev);
>> +     amdgpu_sysvm_table_vram_free(adev);
>> +     amdgpu_sysvm_fini(adev);
>>  }
>>
>>  /**
>> @@ -962,7 +962,7 @@ static int gmc_v8_0_early_init(void *handle)
>>  {
>>       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>>
>> -     gmc_v8_0_set_gart_funcs(adev);
>> +     gmc_v8_0_set_sysvm_funcs(adev);
>>       gmc_v8_0_set_irq_funcs(adev);
>>
>>       adev->mc.shared_aperture_start = 0x2000000000000000ULL;
>> @@ -1128,7 +1128,7 @@ static int gmc_v8_0_hw_init(void *handle)
>>               }
>>       }
>>
>> -     r = gmc_v8_0_gart_enable(adev);
>> +     r = gmc_v8_0_sysvm_enable(adev);
>>       if (r)
>>               return r;
>>
>> @@ -1140,7 +1140,7 @@ static int gmc_v8_0_hw_fini(void *handle)
>>       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>>
>>       amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
>> -     gmc_v8_0_gart_disable(adev);
>> +     gmc_v8_0_sysvm_disable(adev);
>>
>>       return 0;
>>  }
>> @@ -1590,7 +1590,7 @@ static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
>>       .get_clockgating_state = gmc_v8_0_get_clockgating_state,
>>  };
>>
>> -static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = {
>> +static const struct amdgpu_sysvm_funcs gmc_v8_0_sysvm_funcs = {
>>       .flush_gpu_tlb = gmc_v8_0_gart_flush_gpu_tlb,
>>       .set_pte_pde = gmc_v8_0_gart_set_pte_pde,
>>       .set_prt = gmc_v8_0_set_prt,
>> @@ -1603,10 +1603,10 @@ static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
>>       .process = gmc_v8_0_process_interrupt,
>>  };
>>
>> -static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev)
>> +static void gmc_v8_0_set_sysvm_funcs(struct amdgpu_device *adev)
>>  {
>> -     if (adev->gart.gart_funcs == NULL)
>> -             adev->gart.gart_funcs = &gmc_v8_0_gart_funcs;
>> +     if (adev->sysvm.sysvm_funcs == NULL)
>> +             adev->sysvm.sysvm_funcs = &gmc_v8_0_sysvm_funcs;
>>  }
>>
>>  static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
>> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>> index dbb43d9..f067465 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
>> @@ -365,7 +365,7 @@ static u64 gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, u64 addr)
>>       return addr;
>>  }
>>
>> -static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
>> +static const struct amdgpu_sysvm_funcs gmc_v9_0_sysvm_funcs = {
>>       .flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
>>       .set_pte_pde = gmc_v9_0_gart_set_pte_pde,
>>       .get_invalidate_req = gmc_v9_0_get_invalidate_req,
>> @@ -373,17 +373,17 @@ static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
>>       .get_vm_pde = gmc_v9_0_get_vm_pde
>>  };
>>
>> -static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
>> +static void gmc_v9_0_set_sysvm_funcs(struct amdgpu_device *adev)
>>  {
>> -     if (adev->gart.gart_funcs == NULL)
>> -             adev->gart.gart_funcs = &gmc_v9_0_gart_funcs;
>> +     if (adev->sysvm.sysvm_funcs == NULL)
>> +             adev->sysvm.sysvm_funcs = &gmc_v9_0_sysvm_funcs;
>>  }
>>
>>  static int gmc_v9_0_early_init(void *handle)
>>  {
>>       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>>
>> -     gmc_v9_0_set_gart_funcs(adev);
>> +     gmc_v9_0_set_sysvm_funcs(adev);
>>       gmc_v9_0_set_irq_funcs(adev);
>>
>>       return 0;
>> @@ -412,15 +412,15 @@ static int gmc_v9_0_late_init(void *handle)
>>       return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
>>  }
>>
>> -static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
>> +static void gmc_v9_0_vram_sysvm_location(struct amdgpu_device *adev,
>>                                       struct amdgpu_mc *mc)
>>  {
>>       u64 base = 0;
>>       if (!amdgpu_sriov_vf(adev))
>>               base = mmhub_v1_0_get_fb_location(adev);
>>       amdgpu_vram_location(adev, &adev->mc, base);
>> -     adev->mc.gtt_base_align = 0;
>> -     amdgpu_gtt_location(adev, mc);
>> +     adev->mc.sysvm_base_align = 0;
>> +     amdgpu_sysvm_location(adev, mc);
>>       /* base offset of vram pages */
>>       if (adev->flags & AMD_IS_APU)
>>               adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
>> @@ -494,8 +494,8 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
>>       if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
>>               adev->mc.visible_vram_size = adev->mc.real_vram_size;
>>
>> -     amdgpu_gart_set_defaults(adev);
>> -     gmc_v9_0_vram_gtt_location(adev, &adev->mc);
>> +     amdgpu_sysvm_set_defaults(adev);
>> +     gmc_v9_0_vram_sysvm_location(adev, &adev->mc);
>>
>>       return 0;
>>  }
>> @@ -504,18 +504,18 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
>>  {
>>       int r;
>>
>> -     if (adev->gart.robj) {
>> +     if (adev->sysvm.robj) {
>>               WARN(1, "VEGA10 PCIE GART already initialized\n");
>>               return 0;
>>       }
>>       /* Initialize common gart structure */
>> -     r = amdgpu_gart_init(adev);
>> +     r = amdgpu_sysvm_init(adev);
>>       if (r)
>>               return r;
>> -     adev->gart.table_size = adev->gart.num_gpu_pages * 8;
>> -     adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
>> +     adev->sysvm.table_size = adev->sysvm.num_gpu_pages * 8;
>> +     adev->sysvm.sysvm_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
>>                                AMDGPU_PTE_EXECUTABLE;
>> -     return amdgpu_gart_table_vram_alloc(adev);
>> +     return amdgpu_sysvm_table_vram_alloc(adev);
>>  }
>>
>>  static int gmc_v9_0_sw_init(void *handle)
>> @@ -640,8 +640,8 @@ static int gmc_v9_0_sw_init(void *handle)
>>   */
>>  static void gmc_v9_0_gart_fini(struct amdgpu_device *adev)
>>  {
>> -     amdgpu_gart_table_vram_free(adev);
>> -     amdgpu_gart_fini(adev);
>> +     amdgpu_sysvm_table_vram_free(adev);
>> +     amdgpu_sysvm_fini(adev);
>>  }
>>
>>  static int gmc_v9_0_sw_fini(void *handle)
>> @@ -669,11 +669,11 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
>>  }
>>
>>  /**
>> - * gmc_v9_0_gart_enable - gart enable
>> + * gmc_v9_0_sysvm_enable - gart enable
>>   *
>>   * @adev: amdgpu_device pointer
>>   */
>> -static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
>> +static int gmc_v9_0_sysvm_enable(struct amdgpu_device *adev)
>>  {
>>       int r;
>>       bool value;
>> @@ -683,11 +683,11 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
>>               golden_settings_vega10_hdp,
>>               (const u32)ARRAY_SIZE(golden_settings_vega10_hdp));
>>
>> -     if (adev->gart.robj == NULL) {
>> +     if (adev->sysvm.robj == NULL) {
>>               dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
>>               return -EINVAL;
>>       }
>> -     r = amdgpu_gart_table_vram_pin(adev);
>> +     r = amdgpu_sysvm_table_vram_pin(adev);
>>       if (r)
>>               return r;
>>
>> @@ -706,11 +706,11 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
>>               break;
>>       }
>>
>> -     r = gfxhub_v1_0_gart_enable(adev);
>> +     r = gfxhub_v1_0_sysvm_enable(adev);
>>       if (r)
>>               return r;
>>
>> -     r = mmhub_v1_0_gart_enable(adev);
>> +     r = mmhub_v1_0_sysvm_enable(adev);
>>       if (r)
>>               return r;
>>
>> @@ -733,9 +733,9 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
>>       gmc_v9_0_gart_flush_gpu_tlb(adev, 0);
>>
>>       DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
>> -              (unsigned)(adev->mc.gtt_size >> 20),
>> -              (unsigned long long)adev->gart.table_addr);
>> -     adev->gart.ready = true;
>> +              (unsigned)(adev->mc.sysvm_size >> 20),
>> +              (unsigned long long)adev->sysvm.table_addr);
>> +     adev->sysvm.ready = true;
>>       return 0;
>>  }
>>
>> @@ -747,23 +747,23 @@ static int gmc_v9_0_hw_init(void *handle)
>>       /* The sequence of these two function calls matters.*/
>>       gmc_v9_0_init_golden_registers(adev);
>>
>> -     r = gmc_v9_0_gart_enable(adev);
>> +     r = gmc_v9_0_sysvm_enable(adev);
>>
>>       return r;
>>  }
>>
>>  /**
>> - * gmc_v9_0_gart_disable - gart disable
>> + * gmc_v9_0_sysvm_disable - gart disable
>>   *
>>   * @adev: amdgpu_device pointer
>>   *
>>   * This disables all VM page table.
>>   */
>> -static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
>> +static void gmc_v9_0_sysvm_disable(struct amdgpu_device *adev)
>>  {
>> -     gfxhub_v1_0_gart_disable(adev);
>> -     mmhub_v1_0_gart_disable(adev);
>> -     amdgpu_gart_table_vram_unpin(adev);
>> +     gfxhub_v1_0_sysvm_disable(adev);
>> +     mmhub_v1_0_sysvm_disable(adev);
>> +     amdgpu_sysvm_table_vram_unpin(adev);
>>  }
>>
>>  static int gmc_v9_0_hw_fini(void *handle)
>> @@ -777,7 +777,7 @@ static int gmc_v9_0_hw_fini(void *handle)
>>       }
>>
>>       amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
>> -     gmc_v9_0_gart_disable(adev);
>> +     gmc_v9_0_sysvm_disable(adev);
>>
>>       return 0;
>>  }
>> diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
>> index 9804318..fbc8f6e 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
>> @@ -51,8 +51,8 @@ static void mmhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
>>  {
>>       uint64_t value;
>>
>> -     BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL));
>> -     value = adev->gart.table_addr - adev->mc.vram_start +
>> +     BUG_ON(adev->sysvm.table_addr & (~0x0000FFFFFFFFF000ULL));
>> +     value = adev->sysvm.table_addr - adev->mc.vram_start +
>>               adev->vm_manager.vram_base_offset;
>>       value &= 0x0000FFFFFFFFF000ULL;
>>       value |= 0x1; /* valid bit */
>> @@ -69,14 +69,14 @@ static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
>>       mmhub_v1_0_init_gart_pt_regs(adev);
>>
>>       WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
>> -                  (u32)(adev->mc.gtt_start >> 12));
>> +                  (u32)(adev->mc.sysvm_start >> 12));
>>       WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
>> -                  (u32)(adev->mc.gtt_start >> 44));
>> +                  (u32)(adev->mc.sysvm_start >> 44));
>>
>>       WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
>> -                  (u32)(adev->mc.gtt_end >> 12));
>> +                  (u32)(adev->mc.sysvm_end >> 12));
>>       WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
>> -                  (u32)(adev->mc.gtt_end >> 44));
>> +                  (u32)(adev->mc.sysvm_end >> 44));
>>  }
>>
>>  static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
>> @@ -462,7 +462,7 @@ void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
>>       }
>>  }
>>
>> -int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
>> +int mmhub_v1_0_sysvm_enable(struct amdgpu_device *adev)
>>  {
>>       if (amdgpu_sriov_vf(adev)) {
>>               /*
>> @@ -490,7 +490,7 @@ int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
>>       return 0;
>>  }
>>
>> -void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
>> +void mmhub_v1_0_sysvm_disable(struct amdgpu_device *adev)
>>  {
>>       u32 tmp;
>>       u32 i;
>> diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
>> index 57bb940..23128e5 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
>> @@ -24,8 +24,8 @@
>>  #define __MMHUB_V1_0_H__
>>
>>  u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev);
>> -int mmhub_v1_0_gart_enable(struct amdgpu_device *adev);
>> -void mmhub_v1_0_gart_disable(struct amdgpu_device *adev);
>> +int mmhub_v1_0_sysvm_enable(struct amdgpu_device *adev);
>> +void mmhub_v1_0_sysvm_disable(struct amdgpu_device *adev);
>>  void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
>>                                        bool value);
>>  void mmhub_v1_0_init(struct amdgpu_device *adev);
>> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
>> index 4a65697..056b169 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
>> @@ -1121,10 +1121,10 @@ static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
>>                                        unsigned vm_id, uint64_t pd_addr)
>>  {
>>       struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
>> -     uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
>> +     uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
>>       unsigned eng = ring->vm_inv_eng;
>>
>> -     pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
>> +     pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
>>       pd_addr |= AMDGPU_PTE_VALID;
>>
>>       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
>> diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
>> index 987b958..95913fd 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
>> @@ -1312,11 +1312,11 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
>>                                       unsigned vm_id, uint64_t pd_addr)
>>  {
>>       struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
>> -     uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
>> +     uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
>>       uint32_t data0, data1, mask;
>>       unsigned eng = ring->vm_inv_eng;
>>
>> -     pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
>> +     pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
>>       pd_addr |= AMDGPU_PTE_VALID;
>>
>>       data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
>> @@ -1353,10 +1353,10 @@ static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
>>                        unsigned int vm_id, uint64_t pd_addr)
>>  {
>>       struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
>> -     uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
>> +     uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
>>       unsigned eng = ring->vm_inv_eng;
>>
>> -     pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
>> +     pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
>>       pd_addr |= AMDGPU_PTE_VALID;
>>
>>       amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
>> diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
>> index 1ecd6bb..b869423 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
>> @@ -952,10 +952,10 @@ static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
>>                        unsigned int vm_id, uint64_t pd_addr)
>>  {
>>       struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
>> -     uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
>> +     uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
>>       unsigned eng = ring->vm_inv_eng;
>>
>> -     pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
>> +     pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
>>       pd_addr |= AMDGPU_PTE_VALID;
>>
>>       amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
>> diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
>> index 21e7b88..2ca49af 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
>> @@ -878,11 +878,11 @@ static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
>>                                       unsigned vm_id, uint64_t pd_addr)
>>  {
>>       struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
>> -     uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
>> +     uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
>>       uint32_t data0, data1, mask;
>>       unsigned eng = ring->vm_inv_eng;
>>
>> -     pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
>> +     pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
>>       pd_addr |= AMDGPU_PTE_VALID;
>>
>>       data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
>> @@ -1010,10 +1010,10 @@ static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
>>                        unsigned int vm_id, uint64_t pd_addr)
>>  {
>>       struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
>> -     uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
>> +     uint32_t req = ring->adev->sysvm.sysvm_funcs->get_invalidate_req(vm_id);
>>       unsigned eng = ring->vm_inv_eng;
>>
>> -     pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
>> +     pd_addr = amdgpu_sysvm_get_vm_pde(ring->adev, pd_addr);
>>       pd_addr |= AMDGPU_PTE_VALID;
>>
>>       amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
>
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 08/11] drm/amdgpu: move SYSVM struct and function into amdgpu_sysvm.h
       [not found]     ` <1499075076-1851-8-git-send-email-deathsimple-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
@ 2017-07-06 16:24       ` Alex Deucher
  0 siblings, 0 replies; 30+ messages in thread
From: Alex Deucher @ 2017-07-06 16:24 UTC (permalink / raw)
  To: Christian König; +Cc: amd-gfx list

On Mon, Jul 3, 2017 at 5:44 AM, Christian König <deathsimple@vodafone.de> wrote:
> From: Christian König <christian.koenig@amd.com>
>
> No functional change.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>

I agree with what the patch does, but I'd prefer gart to sysvm for the naming.
Acked-by: Alex Deucher <alexander.deucher@amd.com>

> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu.h       | 48 +------------------
>  drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.h | 77 +++++++++++++++++++++++++++++++
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h   |  1 +
>  3 files changed, 79 insertions(+), 47 deletions(-)
>  create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.h
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index abe191f..a2c0eac 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -69,6 +69,7 @@
>
>  #include "gpu_scheduler.h"
>  #include "amdgpu_virt.h"
> +#include "amdgpu_sysvm.h"
>
>  /*
>   * Modules parameters.
> @@ -534,53 +535,6 @@ int amdgpu_fence_slab_init(void);
>  void amdgpu_fence_slab_fini(void);
>
>  /*
> - * GART structures, functions & helpers
> - */
> -struct amdgpu_mc;
> -
> -#define AMDGPU_GPU_PAGE_SIZE 4096
> -#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
> -#define AMDGPU_GPU_PAGE_SHIFT 12
> -#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
> -
> -struct amdgpu_sysvm {
> -       dma_addr_t                      table_addr;
> -       struct amdgpu_bo                *robj;
> -       void                            *ptr;
> -       unsigned                        num_gpu_pages;
> -       unsigned                        num_cpu_pages;
> -       unsigned                        table_size;
> -#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
> -       struct page                     **pages;
> -#endif
> -       bool                            ready;
> -
> -       /* Asic default pte flags */
> -       uint64_t                        sysvm_pte_flags;
> -
> -       const struct amdgpu_sysvm_funcs *sysvm_funcs;
> -};
> -
> -void amdgpu_sysvm_set_defaults(struct amdgpu_device *adev);
> -int amdgpu_sysvm_table_ram_alloc(struct amdgpu_device *adev);
> -void amdgpu_sysvm_table_ram_free(struct amdgpu_device *adev);
> -int amdgpu_sysvm_table_vram_alloc(struct amdgpu_device *adev);
> -void amdgpu_sysvm_table_vram_free(struct amdgpu_device *adev);
> -int amdgpu_sysvm_table_vram_pin(struct amdgpu_device *adev);
> -void amdgpu_sysvm_table_vram_unpin(struct amdgpu_device *adev);
> -int amdgpu_sysvm_init(struct amdgpu_device *adev);
> -void amdgpu_sysvm_fini(struct amdgpu_device *adev);
> -int amdgpu_sysvm_unbind(struct amdgpu_device *adev, uint64_t offset,
> -                       int pages);
> -int amdgpu_sysvm_map(struct amdgpu_device *adev, uint64_t offset,
> -                   int pages, dma_addr_t *dma_addr, uint64_t flags,
> -                   void *dst);
> -int amdgpu_sysvm_bind(struct amdgpu_device *adev, uint64_t offset,
> -                    int pages, struct page **pagelist,
> -                    dma_addr_t *dma_addr, uint64_t flags);
> -int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
> -
> -/*
>   * VMHUB structures, functions & helpers
>   */
>  struct amdgpu_vmhub {
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.h
> new file mode 100644
> index 0000000..7846765
> --- /dev/null
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.h
> @@ -0,0 +1,77 @@
> +/*
> + * Copyright 2017 Advanced Micro Devices, Inc.
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the "Software"),
> + * to deal in the Software without restriction, including without limitation
> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> + * and/or sell copies of the Software, and to permit persons to whom the
> + * Software is furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice shall be included in
> + * all copies or substantial portions of the Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
> + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
> + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
> + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
> + * OTHER DEALINGS IN THE SOFTWARE.
> + *
> + */
> +
> +#ifndef __AMDGPU_SYSVM_H__
> +#define __AMDGPU_SYSVM_H__
> +
> +#include <linux/types.h>
> +
> +/*
> + * SYSVM structures, functions & helpers
> + */
> +struct amdgpu_device;
> +struct amdgpu_bo;
> +struct amdgpu_sysvm_funcs;
> +
> +#define AMDGPU_GPU_PAGE_SIZE 4096
> +#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
> +#define AMDGPU_GPU_PAGE_SHIFT 12
> +#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
> +
> +struct amdgpu_sysvm {
> +       dma_addr_t                      table_addr;
> +       struct amdgpu_bo                *robj;
> +       void                            *ptr;
> +       unsigned                        num_gpu_pages;
> +       unsigned                        num_cpu_pages;
> +       unsigned                        table_size;
> +#ifdef CONFIG_DRM_AMDGPU_SYSVM_DEBUGFS
> +       struct page                     **pages;
> +#endif
> +       bool                            ready;
> +
> +       /* Asic default pte flags */
> +       uint64_t                        sysvm_pte_flags;
> +
> +       const struct amdgpu_sysvm_funcs *sysvm_funcs;
> +};
> +
> +void amdgpu_sysvm_set_defaults(struct amdgpu_device *adev);
> +int amdgpu_sysvm_table_ram_alloc(struct amdgpu_device *adev);
> +void amdgpu_sysvm_table_ram_free(struct amdgpu_device *adev);
> +int amdgpu_sysvm_table_vram_alloc(struct amdgpu_device *adev);
> +void amdgpu_sysvm_table_vram_free(struct amdgpu_device *adev);
> +int amdgpu_sysvm_table_vram_pin(struct amdgpu_device *adev);
> +void amdgpu_sysvm_table_vram_unpin(struct amdgpu_device *adev);
> +int amdgpu_sysvm_init(struct amdgpu_device *adev);
> +void amdgpu_sysvm_fini(struct amdgpu_device *adev);
> +int amdgpu_sysvm_unbind(struct amdgpu_device *adev, uint64_t offset,
> +                       int pages);
> +int amdgpu_sysvm_map(struct amdgpu_device *adev, uint64_t offset,
> +                   int pages, dma_addr_t *dma_addr, uint64_t flags,
> +                   void *dst);
> +int amdgpu_sysvm_bind(struct amdgpu_device *adev, uint64_t offset,
> +                    int pages, struct page **pagelist,
> +                    dma_addr_t *dma_addr, uint64_t flags);
> +
> +#endif
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
> index 1443038..9cd435c 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
> @@ -80,5 +80,6 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
>  int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
>  bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
>  int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem);
> +int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
>
>  #endif
> --
> 2.7.4
>
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 09/11] drm/amdgpu: move amdgpu_sysvm_location into amdgpu_sysvm.c as well
       [not found]     ` <1499075076-1851-9-git-send-email-deathsimple-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
@ 2017-07-06 16:25       ` Alex Deucher
  0 siblings, 0 replies; 30+ messages in thread
From: Alex Deucher @ 2017-07-06 16:25 UTC (permalink / raw)
  To: Christian König; +Cc: amd-gfx list

On Mon, Jul 3, 2017 at 5:44 AM, Christian König <deathsimple@vodafone.de> wrote:
> From: Christian König <christian.koenig@amd.com>
>
> No intended functional change.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>

I think I'd prefer to keep this together with the vram_location
function.  Maybe move both of them?

Alex
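
As a rough sketch of that suggestion, both placement helpers could then sit
next to each other in amdgpu_sysvm.h (signatures as they already appear in
this series, only regrouped here):

    void amdgpu_vram_location(struct amdgpu_device *adev,
                              struct amdgpu_mc *mc, u64 base);
    void amdgpu_sysvm_location(struct amdgpu_device *adev,
                               struct amdgpu_mc *mc);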

> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu.h        |  1 -
>  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 36 ----------------------------
>  drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c  | 38 ++++++++++++++++++++++++++++++
>  drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.h  |  2 ++
>  4 files changed, 40 insertions(+), 37 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index a2c0eac..1ed6b7a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -1862,7 +1862,6 @@ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
>  uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
>                                  struct ttm_mem_reg *mem);
>  void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
> -void amdgpu_sysvm_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
>  void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
>  int amdgpu_ttm_init(struct amdgpu_device *adev);
>  void amdgpu_ttm_fini(struct amdgpu_device *adev);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index 46a82d3..228b262 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -666,42 +666,6 @@ void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64
>                         mc->vram_end, mc->real_vram_size >> 20);
>  }
>
> -/**
> - * amdgpu_sysvm_location - try to find SYSVM location
> - * @adev: amdgpu device structure holding all necessary informations
> - * @mc: memory controller structure holding memory informations
> - *
> - * Function will place try to place SYSVM before or after VRAM.
> - *
> - * If SYSVM size is bigger than space left then we ajust SYSVM size.
> - * Thus function will never fails.
> - *
> - * FIXME: when reducing SYSVM size align new size on power of 2.
> - */
> -void amdgpu_sysvm_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
> -{
> -       u64 size_af, size_bf;
> -
> -       size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->sysvm_base_align) & ~mc->sysvm_base_align;
> -       size_bf = mc->vram_start & ~mc->sysvm_base_align;
> -       if (size_bf > size_af) {
> -               if (mc->sysvm_size > size_bf) {
> -                       dev_warn(adev->dev, "limiting SYSVM\n");
> -                       mc->sysvm_size = size_bf;
> -               }
> -               mc->sysvm_start = 0;
> -       } else {
> -               if (mc->sysvm_size > size_af) {
> -                       dev_warn(adev->dev, "limiting SYSVM\n");
> -                       mc->sysvm_size = size_af;
> -               }
> -               mc->sysvm_start = (mc->vram_end + 1 + mc->sysvm_base_align) & ~mc->sysvm_base_align;
> -       }
> -       mc->sysvm_end = mc->sysvm_start + mc->sysvm_size - 1;
> -       dev_info(adev->dev, "SYSVM: %lluM 0x%016llX - 0x%016llX\n",
> -                       mc->sysvm_size >> 20, mc->sysvm_start, mc->sysvm_end);
> -}
> -
>  /*
>   * GPU helpers function.
>   */
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c
> index 50fc8d7..ff436ad 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c
> @@ -73,6 +73,44 @@ void amdgpu_sysvm_set_defaults(struct amdgpu_device *adev)
>  }
>
>  /**
> + * amdgpu_sysvm_location - try to find SYSVM location
> + * @adev: amdgpu device structure holding all necessary informations
> + * @mc: memory controller structure holding memory informations
> + *
> + * Function will place try to place SYSVM before or after VRAM.
> + *
> + * If SYSVM size is bigger than space left then we ajust SYSVM size.
> + * Thus function will never fails.
> + *
> + * FIXME: when reducing SYSVM size align new size on power of 2.
> + */
> +void amdgpu_sysvm_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
> +{
> +       u64 size_af, size_bf;
> +
> +       size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->sysvm_base_align) &
> +               ~mc->sysvm_base_align;
> +       size_bf = mc->vram_start & ~mc->sysvm_base_align;
> +       if (size_bf > size_af) {
> +               if (mc->sysvm_size > size_bf) {
> +                       dev_warn(adev->dev, "limiting SYSVM\n");
> +                       mc->sysvm_size = size_bf;
> +               }
> +               mc->sysvm_start = 0;
> +       } else {
> +               if (mc->sysvm_size > size_af) {
> +                       dev_warn(adev->dev, "limiting SYSVM\n");
> +                       mc->sysvm_size = size_af;
> +               }
> +               mc->sysvm_start = (mc->vram_end + 1 + mc->sysvm_base_align) &
> +                       ~mc->sysvm_base_align;
> +       }
> +       mc->sysvm_end = mc->sysvm_start + mc->sysvm_size - 1;
> +       dev_info(adev->dev, "SYSVM: %lluM 0x%016llX - 0x%016llX\n",
> +                       mc->sysvm_size >> 20, mc->sysvm_start, mc->sysvm_end);
> +}
> +
> +/**
>   * amdgpu_sysvm_table_ram_alloc - allocate system ram for gart page table
>   *
>   * @adev: amdgpu_device pointer
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.h
> index 7846765..2336ece 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.h
> @@ -32,6 +32,7 @@
>  struct amdgpu_device;
>  struct amdgpu_bo;
>  struct amdgpu_sysvm_funcs;
> +struct amdgpu_mc;
>
>  #define AMDGPU_GPU_PAGE_SIZE 4096
>  #define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
> @@ -57,6 +58,7 @@ struct amdgpu_sysvm {
>  };
>
>  void amdgpu_sysvm_set_defaults(struct amdgpu_device *adev);
> +void amdgpu_sysvm_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
>  int amdgpu_sysvm_table_ram_alloc(struct amdgpu_device *adev);
>  void amdgpu_sysvm_table_ram_free(struct amdgpu_device *adev);
>  int amdgpu_sysvm_table_vram_alloc(struct amdgpu_device *adev);
> --
> 2.7.4
>
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 10/11] drm/amdgpu: setup GTT size directly from module parameter
       [not found]     ` <1499075076-1851-10-git-send-email-deathsimple-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
@ 2017-07-06 16:26       ` Alex Deucher
  0 siblings, 0 replies; 30+ messages in thread
From: Alex Deucher @ 2017-07-06 16:26 UTC (permalink / raw)
  To: Christian König; +Cc: amd-gfx list

On Mon, Jul 3, 2017 at 5:44 AM, Christian König <deathsimple@vodafone.de> wrote:
> From: Christian König <christian.koenig@amd.com>
>
> Instead of relying on the sysvm_size to be the same as the module parameter.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
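
As a quick illustration of the new sizing in the hunk below: with gartsize=512
the GTT heap becomes 512 << 20 = 512 MB, while with the default gartsize of -1
the max() picks the larger of AMDGPU_DEFAULT_GTT_SIZE_MB and the VRAM size, so
a board with 8 GB of VRAM ends up with at least an 8 GB GTT.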

> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 12 +++++++++---
>  1 file changed, 9 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> index 9240357..72dd83e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> @@ -1097,6 +1097,7 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
>
>  int amdgpu_ttm_init(struct amdgpu_device *adev)
>  {
> +       uint64_t gtt_size;
>         int r;
>
>         r = amdgpu_ttm_global_init(adev);
> @@ -1143,14 +1144,19 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
>         }
>         DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
>                  (unsigned) (adev->mc.real_vram_size / (1024 * 1024)));
> -       r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT,
> -                               adev->mc.sysvm_size >> PAGE_SHIFT);
> +
> +       if (amdgpu_gart_size == -1)
> +               gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
> +                              adev->mc.mc_vram_size);
> +       else
> +               gtt_size = (uint64_t)amdgpu_gart_size << 20;
> +       r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
>         if (r) {
>                 DRM_ERROR("Failed initializing GTT heap.\n");
>                 return r;
>         }
>         DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
> -                (unsigned)(adev->mc.sysvm_size / (1024 * 1024)));
> +                (unsigned)(gtt_size / (1024 * 1024)));
>
>         adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
>         adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
> --
> 2.7.4
>
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 11/11] drm/amdgpu: add sysvm_size
       [not found]     ` <1499075076-1851-11-git-send-email-deathsimple-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
@ 2017-07-06 16:28       ` Alex Deucher
  0 siblings, 0 replies; 30+ messages in thread
From: Alex Deucher @ 2017-07-06 16:28 UTC (permalink / raw)
  To: Christian König; +Cc: amd-gfx list

On Mon, Jul 3, 2017 at 5:44 AM, Christian König <deathsimple@vodafone.de> wrote:
> From: Christian König <christian.koenig@amd.com>
>
> Limit the size of the SYSVM. This saves us a bunch of visible VRAM,
> but also limits the maximum BO size we can swap out.

Update the description.  The limitation is removed now.

>
> v2: rebased and cleaned up after GART to SYSVM rename.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
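
For reference, the parameter added below is set like any other amdgpu module
option, e.g. amdgpu.sysvmsize=512 on the kernel command line (the value is in
megabytes); per the amdgpu_check_arguments() hunk, values smaller than 32 are
bumped up to 32.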

> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu.h         | 1 +
>  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  | 6 ++++++
>  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c     | 4 ++++
>  drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 6 ++++--
>  drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c   | 9 +--------
>  5 files changed, 16 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index 1ed6b7a..81de31a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -77,6 +77,7 @@
>  extern int amdgpu_modeset;
>  extern int amdgpu_vram_limit;
>  extern int amdgpu_gart_size;
> +extern unsigned amdgpu_sysvm_size;
>  extern int amdgpu_moverate;
>  extern int amdgpu_benchmarking;
>  extern int amdgpu_testing;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index 228b262..daded9c 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -1086,6 +1086,12 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
>                 }
>         }
>
> +       if (amdgpu_sysvm_size < 32) {
> +               dev_warn(adev->dev, "sysvm size (%d) too small\n",
> +                                amdgpu_sysvm_size);
> +               amdgpu_sysvm_size = 32;
> +       }
> +
>         amdgpu_check_vm_size(adev);
>
>         amdgpu_check_block_size(adev);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> index 4bf4a80..56f9867 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> @@ -75,6 +75,7 @@
>
>  int amdgpu_vram_limit = 0;
>  int amdgpu_gart_size = -1; /* auto */
> +unsigned amdgpu_sysvm_size = 256;
>  int amdgpu_moverate = -1; /* auto */
>  int amdgpu_benchmarking = 0;
>  int amdgpu_testing = 0;
> @@ -124,6 +125,9 @@ module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
>  MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc., -1 = auto)");
>  module_param_named(gartsize, amdgpu_gart_size, int, 0600);
>
> +MODULE_PARM_DESC(sysvmsize, "Size of the system VM in megabytes (default 256)");
> +module_param_named(sysvmsize, amdgpu_sysvm_size, int, 0600);
> +
>  MODULE_PARM_DESC(moverate, "Maximum buffer migration rate in MB/s. (32, 64, etc., -1=auto, 0=1=disabled)");
>  module_param_named(moverate, amdgpu_moverate, int, 0600);
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
> index f46a97d..bbf6bd0 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
> @@ -42,6 +42,7 @@ struct amdgpu_gtt_mgr {
>  static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
>                                unsigned long p_size)
>  {
> +       struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
>         struct amdgpu_gtt_mgr *mgr;
>         uint64_t start, size;
>
> @@ -50,7 +51,7 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
>                 return -ENOMEM;
>
>         start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
> -       size = p_size - start;
> +       size = (adev->mc.sysvm_size >> PAGE_SHIFT) - start;
>         drm_mm_init(&mgr->mm, start, size);
>         spin_lock_init(&mgr->lock);
>         mgr->available = p_size;
> @@ -112,6 +113,7 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
>                          const struct ttm_place *place,
>                          struct ttm_mem_reg *mem)
>  {
> +       struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
>         struct amdgpu_gtt_mgr *mgr = man->priv;
>         struct drm_mm_node *node = mem->mm_node;
>         enum drm_mm_insert_mode mode;
> @@ -129,7 +131,7 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
>         if (place && place->lpfn)
>                 lpfn = place->lpfn;
>         else
> -               lpfn = man->size;
> +               lpfn = adev->sysvm.num_cpu_pages;
>
>         mode = DRM_MM_INSERT_BEST;
>         if (place && place->flags & TTM_PL_FLAG_TOPDOWN)
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c
> index ff436ad..711e4b6 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sysvm.c
> @@ -62,14 +62,7 @@
>   */
>  void amdgpu_sysvm_set_defaults(struct amdgpu_device *adev)
>  {
> -       /* unless the user had overridden it, set the gart
> -        * size equal to the 1024 or vram, whichever is larger.
> -        */
> -       if (amdgpu_gart_size == -1)
> -               adev->mc.sysvm_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
> -                                       adev->mc.mc_vram_size);
> -       else
> -               adev->mc.sysvm_size = (uint64_t)amdgpu_gart_size << 20;
> +       adev->mc.sysvm_size = (uint64_t)amdgpu_sysvm_size << 20;
>  }
>
>  /**
> --
> 2.7.4
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
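
Tying the quoted hunks together, a minimal user-space sketch of what the
series ends up computing: the sysvmsize module parameter (megabytes, default
256, clamped to at least 32) becomes adev->mc.sysvm_size in bytes, and the
GTT manager then manages that range in pages minus the two reserved transfer
windows from patch 01/11. The transfer-window constants follow the patches;
the standalone program, the assumed PAGE_SHIFT, and the stderr warning
standing in for dev_warn() are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12				/* assumed 4 KiB pages */
/* Constants from patch 01/11: two 512-page transfer windows are reserved
 * at the start of the GART/SYSVM address space. */
#define AMDGPU_GTT_MAX_TRANSFER_SIZE	512
#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS	2

/* sysvmsize=<MB> module parameter, default 256, clamped to a 32 MB minimum. */
static unsigned int amdgpu_sysvm_size = 256;

int main(void)
{
	uint64_t sysvm_size, start, size;

	if (amdgpu_sysvm_size < 32) {
		fprintf(stderr, "sysvm size (%u) too small, forcing 32 MB\n",
			amdgpu_sysvm_size);
		amdgpu_sysvm_size = 32;
	}
	sysvm_size = (uint64_t)amdgpu_sysvm_size << 20;

	/* GTT manager range as in the quoted amdgpu_gtt_mgr_init() hunk:
	 * skip the transfer windows, manage the rest of the SYSVM in pages. */
	start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
	size  = (sysvm_size >> PAGE_SHIFT) - start;

	printf("sysvm_size = %llu MiB, drm_mm range: [%llu, %llu) pages\n",
	       (unsigned long long)(sysvm_size >> 20),
	       (unsigned long long)start,
	       (unsigned long long)(start + size));
	return 0;
}

With the defaults this yields a 256 MiB SYSVM managed as pages 1024..65535;
per the patch, booting with e.g. sysvmsize=512 would double the managed range
without touching the separately sized GTT heap.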

^ permalink raw reply	[flat|nested] 30+ messages in thread

end of thread, other threads:[~2017-07-06 16:28 UTC | newest]

Thread overview: 30+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-07-03  9:44 [PATCH 01/11] drm/amdgpu: reserve the first 2x512 of GART Christian König
     [not found] ` <1499075076-1851-1-git-send-email-deathsimple-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
2017-07-03  9:44   ` [PATCH 02/11] drm/amdgpu: add amdgpu_gart_map function v2 Christian König
     [not found]     ` <1499075076-1851-2-git-send-email-deathsimple-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
2017-07-06 16:16       ` Alex Deucher
2017-07-03  9:44   ` [PATCH 03/11] drm/amdgpu: use the GTT windows for BO moves v2 Christian König
     [not found]     ` <1499075076-1851-3-git-send-email-deathsimple-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
2017-07-06 16:17       ` Alex Deucher
2017-07-03  9:44   ` [PATCH 04/11] drm/amdgpu: stop mapping BOs to GTT Christian König
     [not found]     ` <1499075076-1851-4-git-send-email-deathsimple-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
2017-07-06 16:18       ` Alex Deucher
2017-07-03  9:44   ` [PATCH 05/11] drm/amdgpu: remove maximum BO size limitation v2 Christian König
     [not found]     ` <1499075076-1851-5-git-send-email-deathsimple-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
2017-07-06 16:18       ` Alex Deucher
2017-07-03  9:44   ` [PATCH 06/11] drm/amdgpu: use TTM values instead of MC values for the info queries Christian König
     [not found]     ` <1499075076-1851-6-git-send-email-deathsimple-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
2017-07-06 16:19       ` Alex Deucher
2017-07-03  9:44   ` [PATCH 07/11] drm/amdgpu: rename GART to SYSVM Christian König
     [not found]     ` <1499075076-1851-7-git-send-email-deathsimple-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
2017-07-04  3:13       ` Zhou, David(ChunMing)
     [not found]         ` <MWHPR1201MB0206D4883B42434777D43C12B4D70-3iK1xFAIwjrUF/YbdlDdgWrFom/aUZj6nBOFsp37pqbUKgpGm//BTAC/G2K4zDHf@public.gmane.org>
2017-07-04  7:43           ` Christian König
     [not found]             ` <b838bbac-38df-5ee4-5447-9edba988ea8a-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
2017-07-04  7:59               ` zhoucm1
2017-07-04  8:09       ` Huang Rui
2017-07-04  8:46         ` Christian König
2017-07-04 21:11       ` Felix Kuehling
     [not found]         ` <61109920-9d05-cb27-67b3-51a1b46b15bc-5C7GfCeVMHo@public.gmane.org>
2017-07-05  0:57           ` Michel Dänzer
     [not found]             ` <06b47744-60a4-f79e-e120-60d7bcff8526-otUistvHUpPR7s880joybQ@public.gmane.org>
2017-07-05  7:22               ` Christian König
2017-07-06 16:22           ` Alex Deucher
2017-07-03  9:44   ` [PATCH 08/11] drm/amdgpu: move SYSVM struct and function into amdgpu_sysvm.h Christian König
     [not found]     ` <1499075076-1851-8-git-send-email-deathsimple-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
2017-07-06 16:24       ` Alex Deucher
2017-07-03  9:44   ` [PATCH 09/11] drm/amdgpu: move amdgpu_sysvm_location into amdgpu_sysvm.c as well Christian König
     [not found]     ` <1499075076-1851-9-git-send-email-deathsimple-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
2017-07-06 16:25       ` Alex Deucher
2017-07-03  9:44   ` [PATCH 10/11] drm/amdgpu: setup GTT size directly from module parameter Christian König
     [not found]     ` <1499075076-1851-10-git-send-email-deathsimple-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
2017-07-06 16:26       ` Alex Deucher
2017-07-03  9:44   ` [PATCH 11/11] drm/amdgpu: add sysvm_size Christian König
     [not found]     ` <1499075076-1851-11-git-send-email-deathsimple-ANTagKRnAhcb1SvskN2V4Q@public.gmane.org>
2017-07-06 16:28       ` Alex Deucher
2017-07-06 16:15   ` [PATCH 01/11] drm/amdgpu: reserve the first 2x512 of GART Alex Deucher
