* [PATCH] drm/ttm: cleanup BO size handling v3
@ 2020-12-10 14:52 Christian König
  2020-12-14 16:55 ` Daniel Vetter
  0 siblings, 1 reply; 3+ messages in thread
From: Christian König @ 2020-12-10 14:52 UTC (permalink / raw)
  To: dri-devel

Based on an idea from Dave, but cleaned up a bit.

We had multiple fields for essentially the same thing.

Now bo->base.size is the original size of the BO in
arbitrary units, usually bytes.

bo->mem.num_pages is the size in number of pages in the
resource domain of bo->mem.mem_type.

v2: use the GEM object size instead of the BO size
v3: fix printks in some places

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com> (v1)
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
---
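A minimal sketch of the size/num_pages relationship described above
(illustration only; assumes a BO that went through ttm_bo_init() and sits in a
page-granular domain):

	size_t bytes = bo->base.size;             /* creation size, in bytes */
	unsigned long pages = bo->mem.num_pages;  /* pages in bo->mem.mem_type */

	/* illustration: the two agree up to page rounding */
	WARN_ON(pages != (PAGE_ALIGN(bytes) >> PAGE_SHIFT));
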
 drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c   |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c       |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c    |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.h    |  4 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h     |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c       |  6 ++--
 drivers/gpu/drm/amd/amdgpu/mes_v10_1.c        |  2 +-
 drivers/gpu/drm/nouveau/nouveau_bo.c          | 10 +++---
 drivers/gpu/drm/nouveau/nouveau_display.c     |  8 ++---
 drivers/gpu/drm/nouveau/nouveau_prime.c       |  4 +--
 drivers/gpu/drm/nouveau/nv17_fence.c          |  2 +-
 drivers/gpu/drm/nouveau/nv50_fence.c          |  2 +-
 drivers/gpu/drm/qxl/qxl_object.h              |  2 +-
 drivers/gpu/drm/radeon/radeon_cs.c            |  3 +-
 drivers/gpu/drm/radeon/radeon_object.c        | 13 ++++---
 drivers/gpu/drm/radeon/radeon_object.h        |  4 +--
 drivers/gpu/drm/radeon/radeon_prime.c         |  4 +--
 drivers/gpu/drm/radeon/radeon_trace.h         |  2 +-
 drivers/gpu/drm/radeon/radeon_ttm.c           |  2 +-
 drivers/gpu/drm/ttm/ttm_bo.c                  | 35 ++++++-------------
 drivers/gpu/drm/ttm/ttm_bo_util.c             | 12 +++----
 drivers/gpu/drm/ttm/ttm_bo_vm.c               |  6 ++--
 drivers/gpu/drm/ttm/ttm_tt.c                  |  2 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_blit.c          |  4 +--
 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c            |  6 ++--
 drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c       |  2 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c       |  4 +--
 drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c |  5 ++-
 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c           |  2 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c    |  8 ++---
 drivers/gpu/drm/vmwgfx/vmwgfx_resource.c      |  2 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_shader.c        |  3 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c          |  4 +--
 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c       |  7 ++--
 include/drm/ttm/ttm_bo_api.h                  |  9 ++---
 include/drm/ttm/ttm_resource.h                |  1 -
 36 files changed, 84 insertions(+), 104 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index e5919efca870..c4c93f19d273 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -269,7 +269,7 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
 	case TTM_PL_TT:
 		sgt = drm_prime_pages_to_sg(obj->dev,
 					    bo->tbo.ttm->pages,
-					    bo->tbo.num_pages);
+					    bo->tbo.ttm->num_pages);
 		if (IS_ERR(sgt))
 			return sgt;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 056cb87d09ea..52bcd1b5582f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -121,7 +121,7 @@ uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 
-	if (bo->num_pages != 1 || bo->ttm->caching == ttm_cached)
+	if (bo->ttm->num_pages != 1 || bo->ttm->caching == ttm_cached)
 		return AMDGPU_BO_INVALID_OFFSET;
 
 	if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index c6c9723d3d8a..381ecc4788d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -787,7 +787,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
 	if (r < 0)
 		return r;
 
-	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
+	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
 	if (r)
 		return r;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index ed47cbac4f75..a99a5cde42dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -174,12 +174,12 @@ static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
 
 static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
 {
-	return bo->tbo.num_pages << PAGE_SHIFT;
+	return bo->tbo.base.size;
 }
 
 static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
 {
-	return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
+	return bo->tbo.base.size / AMDGPU_GPU_PAGE_SIZE;
 }
 
 static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index ee9480d14cbc..20715ddbb746 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -127,7 +127,7 @@ TRACE_EVENT(amdgpu_bo_create,
 
 	    TP_fast_assign(
 			   __entry->bo = bo;
-			   __entry->pages = bo->tbo.num_pages;
+			   __entry->pages = bo->tbo.mem.num_pages;
 			   __entry->type = bo->tbo.mem.mem_type;
 			   __entry->prefer = bo->preferred_domains;
 			   __entry->allow = bo->allowed_domains;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index ec93d4fdabbd..5346891a3f73 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -636,7 +636,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
 
 out:
 	/* update statistics */
-	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
+	atomic64_add(bo->base.size, &adev->num_bytes_moved);
 	amdgpu_bo_move_notify(bo, evict, new_mem);
 	return 0;
 }
@@ -2131,7 +2131,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 			return r;
 	}
 
-	num_pages = bo->tbo.num_pages;
+	num_pages = bo->tbo.mem.num_pages;
 	mm_node = bo->tbo.mem.mm_node;
 	num_loops = 0;
 	while (num_pages) {
@@ -2161,7 +2161,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 		}
 	}
 
-	num_pages = bo->tbo.num_pages;
+	num_pages = bo->tbo.mem.num_pages;
 	mm_node = bo->tbo.mem.mm_node;
 
 	while (num_pages) {
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
index 985e454463e1..7f30629f21a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
@@ -554,7 +554,7 @@ static int mes_v10_1_allocate_eop_buf(struct amdgpu_device *adev)
 		return r;
 	}
 
-	memset(eop, 0, adev->mes.eop_gpu_obj->tbo.mem.size);
+	memset(eop, 0, adev->mes.eop_gpu_obj->tbo.base.size);
 
 	amdgpu_bo_kunmap(adev->mes.eop_gpu_obj);
 	amdgpu_bo_unreserve(adev->mes.eop_gpu_obj);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index bc542ac4c4b6..c1a675b8fc43 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -473,10 +473,10 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
 
 	switch (bo->mem.mem_type) {
 	case TTM_PL_VRAM:
-		drm->gem.vram_available -= bo->mem.size;
+		drm->gem.vram_available -= bo->base.size;
 		break;
 	case TTM_PL_TT:
-		drm->gem.gart_available -= bo->mem.size;
+		drm->gem.gart_available -= bo->base.size;
 		break;
 	default:
 		break;
@@ -504,10 +504,10 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
 	if (!nvbo->bo.pin_count) {
 		switch (bo->mem.mem_type) {
 		case TTM_PL_VRAM:
-			drm->gem.vram_available += bo->mem.size;
+			drm->gem.vram_available += bo->base.size;
 			break;
 		case TTM_PL_TT:
-			drm->gem.gart_available += bo->mem.size;
+			drm->gem.gart_available += bo->base.size;
 			break;
 		default:
 			break;
@@ -913,7 +913,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_resource *new_reg,
 		return 0;
 
 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
-		*new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size,
+		*new_tile = nv10_bo_set_tiling(dev, offset, bo->base.size,
 					       nvbo->mode, nvbo->zeta);
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index bceb48a2dfca..17831ee897ea 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -286,11 +286,11 @@ nouveau_check_bl_size(struct nouveau_drm *drm, struct nouveau_bo *nvbo,
 
 	bl_size = bw * bh * (1 << tile_mode) * gob_size;
 
-	DRM_DEBUG_KMS("offset=%u stride=%u h=%u tile_mode=0x%02x bw=%u bh=%u gob_size=%u bl_size=%llu size=%lu\n",
+	DRM_DEBUG_KMS("offset=%u stride=%u h=%u tile_mode=0x%02x bw=%u bh=%u gob_size=%u bl_size=%llu size=%zu\n",
 		      offset, stride, h, tile_mode, bw, bh, gob_size, bl_size,
-		      nvbo->bo.mem.size);
+		      nvbo->bo.base.size);
 
-	if (bl_size + offset > nvbo->bo.mem.size)
+	if (bl_size + offset > nvbo->bo.base.size)
 		return -ERANGE;
 
 	return 0;
@@ -363,7 +363,7 @@ nouveau_framebuffer_new(struct drm_device *dev,
 		} else {
 			uint32_t size = mode_cmd->pitches[i] * height;
 
-			if (size + mode_cmd->offsets[i] > nvbo->bo.mem.size)
+			if (size + mode_cmd->offsets[i] > nvbo->bo.base.size)
 				return -ERANGE;
 		}
 	}
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 2f16b5249283..347488685f74 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -30,9 +30,9 @@
 struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
 	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
-	int npages = nvbo->bo.num_pages;
 
-	return drm_prime_pages_to_sg(obj->dev, nvbo->bo.ttm->pages, npages);
+	return drm_prime_pages_to_sg(obj->dev, nvbo->bo.ttm->pages,
+				     nvbo->bo.ttm->num_pages);
 }
 
 struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 1253fdec712d..b1cd8d7dd87d 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -80,7 +80,7 @@ nv17_fence_context_new(struct nouveau_channel *chan)
 	struct nv10_fence_chan *fctx;
 	struct ttm_resource *reg = &priv->bo->bo.mem;
 	u32 start = reg->start * PAGE_SIZE;
-	u32 limit = start + reg->size - 1;
+	u32 limit = start + priv->bo->bo.base.size - 1;
 	int ret = 0;
 
 	fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 447238e3cbe7..1625826505f6 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -39,7 +39,7 @@ nv50_fence_context_new(struct nouveau_channel *chan)
 	struct nv10_fence_chan *fctx;
 	struct ttm_resource *reg = &priv->bo->bo.mem;
 	u32 start = reg->start * PAGE_SIZE;
-	u32 limit = start + reg->size - 1;
+	u32 limit = start + priv->bo->bo.base.size - 1;
 	int ret;
 
 	fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index ebf24c9d2bf2..e60a8f88e226 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -50,7 +50,7 @@ static inline void qxl_bo_unreserve(struct qxl_bo *bo)
 
 static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
 {
-	return bo->tbo.num_pages << PAGE_SHIFT;
+	return bo->tbo.base.size;
 }
 
 static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 21ce2f9502c0..4f35c32957be 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -401,7 +401,8 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a,
 	struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
 
 	/* Sort A before B if A is smaller. */
-	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
+	return (int)la->robj->tbo.mem.num_pages -
+		(int)lb->robj->tbo.mem.num_pages;
 }
 
 /**
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index ab81e35cb060..9a999930e3dd 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -54,20 +54,19 @@ static void radeon_update_memory_usage(struct radeon_bo *bo,
 				       unsigned mem_type, int sign)
 {
 	struct radeon_device *rdev = bo->rdev;
-	u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;
 
 	switch (mem_type) {
 	case TTM_PL_TT:
 		if (sign > 0)
-			atomic64_add(size, &rdev->gtt_usage);
+			atomic64_add(bo->tbo.base.size, &rdev->gtt_usage);
 		else
-			atomic64_sub(size, &rdev->gtt_usage);
+			atomic64_sub(bo->tbo.base.size, &rdev->gtt_usage);
 		break;
 	case TTM_PL_VRAM:
 		if (sign > 0)
-			atomic64_add(size, &rdev->vram_usage);
+			atomic64_add(bo->tbo.base.size, &rdev->vram_usage);
 		else
-			atomic64_sub(size, &rdev->vram_usage);
+			atomic64_sub(bo->tbo.base.size, &rdev->vram_usage);
 		break;
 	}
 }
@@ -256,7 +255,7 @@ int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
 		}
 		return 0;
 	}
-	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
+	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
 	if (r) {
 		return r;
 	}
@@ -610,7 +609,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
 out:
 	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
 			       bo->tbo.mem.start << PAGE_SHIFT,
-			       bo->tbo.num_pages << PAGE_SHIFT);
+			       bo->tbo.base.size);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index d606e9a935e3..9896d8231fe5 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -109,12 +109,12 @@ static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
 
 static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
 {
-	return bo->tbo.num_pages << PAGE_SHIFT;
+	return bo->tbo.base.size;
 }
 
 static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
 {
-	return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
+	return bo->tbo.base.size / RADEON_GPU_PAGE_SIZE;
 }
 
 static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index 088d39a51c0d..0a09dbaba289 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -34,9 +34,9 @@
 struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
 	struct radeon_bo *bo = gem_to_radeon_bo(obj);
-	int npages = bo->tbo.num_pages;
 
-	return drm_prime_pages_to_sg(obj->dev, bo->tbo.ttm->pages, npages);
+	return drm_prime_pages_to_sg(obj->dev, bo->tbo.ttm->pages,
+				     bo->tbo.ttm->num_pages);
 }
 
 struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index c93f3ab3c4e3..1729cb9a95c5 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -22,7 +22,7 @@ TRACE_EVENT(radeon_bo_create,
 
 	    TP_fast_assign(
 			   __entry->bo = bo;
-			   __entry->pages = bo->tbo.num_pages;
+			   __entry->pages = bo->tbo.mem.num_pages;
 			   ),
 	    TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
 );
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index a3432c6343ba..ea365ac6f4f1 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -274,7 +274,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
 
 out:
 	/* update statistics */
-	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved);
+	atomic64_add(bo->base.size, &rdev->num_bytes_moved);
 	radeon_bo_move_notify(bo, evict, new_mem);
 	return 0;
 }
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 02cc5d247c0d..2196e3d3ef29 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -72,9 +72,9 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
 	struct ttm_resource_manager *man;
 	int i, mem_type;
 
-	drm_printf(&p, "No space for %p (%lu pages, %luK, %luM)\n",
-		   bo, bo->mem.num_pages, bo->mem.size >> 10,
-		   bo->mem.size >> 20);
+	drm_printf(&p, "No space for %p (%lu pages, %zuK, %zuM)\n",
+		   bo, bo->mem.num_pages, bo->base.size >> 10,
+		   bo->base.size >> 20);
 	for (i = 0; i < placement->num_placement; i++) {
 		mem_type = placement->placement[i].mem_type;
 		drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
@@ -268,7 +268,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 		goto out_err;
 	}
 
-	ctx->bytes_moved += bo->num_pages << PAGE_SHIFT;
+	ctx->bytes_moved += bo->base.size;
 	return 0;
 
 out_err:
@@ -985,8 +985,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 
 	memset(&hop, 0, sizeof(hop));
 
-	mem.num_pages = bo->num_pages;
-	mem.size = mem.num_pages << PAGE_SHIFT;
+	mem.num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT;
 	mem.page_alignment = bo->mem.page_alignment;
 	mem.bus.offset = 0;
 	mem.bus.addr = NULL;
@@ -1102,7 +1101,7 @@ EXPORT_SYMBOL(ttm_bo_validate);
 
 int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 			 struct ttm_buffer_object *bo,
-			 unsigned long size,
+			 size_t size,
 			 enum ttm_bo_type type,
 			 struct ttm_placement *placement,
 			 uint32_t page_alignment,
@@ -1113,9 +1112,8 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 			 void (*destroy) (struct ttm_buffer_object *))
 {
 	struct ttm_mem_global *mem_glob = &ttm_mem_glob;
-	int ret = 0;
-	unsigned long num_pages;
 	bool locked;
+	int ret = 0;
 
 	ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
 	if (ret) {
@@ -1127,16 +1125,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 		return -ENOMEM;
 	}
 
-	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	if (num_pages == 0) {
-		pr_err("Illegal buffer object size\n");
-		if (destroy)
-			(*destroy)(bo);
-		else
-			kfree(bo);
-		ttm_mem_global_free(mem_glob, acc_size);
-		return -EINVAL;
-	}
 	bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
 
 	kref_init(&bo->kref);
@@ -1145,10 +1133,8 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	INIT_LIST_HEAD(&bo->swap);
 	bo->bdev = bdev;
 	bo->type = type;
-	bo->num_pages = num_pages;
-	bo->mem.size = num_pages << PAGE_SHIFT;
 	bo->mem.mem_type = TTM_PL_SYSTEM;
-	bo->mem.num_pages = bo->num_pages;
+	bo->mem.num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	bo->mem.mm_node = NULL;
 	bo->mem.page_alignment = page_alignment;
 	bo->mem.bus.offset = 0;
@@ -1166,9 +1152,10 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	}
 	if (!ttm_bo_uses_embedded_gem_object(bo)) {
 		/*
-		 * bo.gem is not initialized, so we have to setup the
+		 * bo.base is not initialized, so we have to setup the
 		 * struct elements we want use regardless.
 		 */
+		bo->base.size = size;
 		dma_resv_init(&bo->base._resv);
 		drm_vma_node_reset(&bo->base.vma_node);
 	}
@@ -1210,7 +1197,7 @@ EXPORT_SYMBOL(ttm_bo_init_reserved);
 
 int ttm_bo_init(struct ttm_bo_device *bdev,
 		struct ttm_buffer_object *bo,
-		unsigned long size,
+		size_t size,
 		enum ttm_bo_type type,
 		struct ttm_placement *placement,
 		uint32_t page_alignment,
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 5bbc1339d28e..398d5013fc39 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -431,9 +431,9 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
 
 	map->virtual = NULL;
 	map->bo = bo;
-	if (num_pages > bo->num_pages)
+	if (num_pages > bo->mem.num_pages)
 		return -EINVAL;
-	if (start_page > bo->num_pages)
+	if ((start_page + num_pages) > bo->mem.num_pages)
 		return -EINVAL;
 
 	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
@@ -485,14 +485,14 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
 
 	if (mem->bus.is_iomem) {
 		void __iomem *vaddr_iomem;
-		size_t size = bo->num_pages << PAGE_SHIFT;
 
 		if (mem->bus.addr)
 			vaddr_iomem = (void __iomem *)mem->bus.addr;
 		else if (mem->bus.caching == ttm_write_combined)
-			vaddr_iomem = ioremap_wc(mem->bus.offset, size);
+			vaddr_iomem = ioremap_wc(mem->bus.offset,
+						 bo->base.size);
 		else
-			vaddr_iomem = ioremap(mem->bus.offset, size);
+			vaddr_iomem = ioremap(mem->bus.offset, bo->base.size);
 
 		if (!vaddr_iomem)
 			return -ENOMEM;
@@ -517,7 +517,7 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
 		 * or to make the buffer object look contiguous.
 		 */
 		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
-		vaddr = vmap(ttm->pages, bo->num_pages, 0, prot);
+		vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot);
 		if (!vaddr)
 			return -ENOMEM;
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 144a4940b6b6..6dc96cf66744 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -198,7 +198,7 @@ static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
 
 	/* Fault should not cross bo boundary. */
 	page_offset &= ~(fault_page_size - 1);
-	if (page_offset + fault_page_size > bo->num_pages)
+	if (page_offset + fault_page_size > bo->mem.num_pages)
 		goto out_fallback;
 
 	if (bo->mem.bus.is_iomem)
@@ -306,7 +306,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 	page_last = vma_pages(vma) + vma->vm_pgoff -
 		drm_vma_node_start(&bo->base.vma_node);
 
-	if (unlikely(page_offset >= bo->num_pages))
+	if (unlikely(page_offset >= bo->mem.num_pages))
 		return VM_FAULT_SIGBUS;
 
 	prot = ttm_io_prot(bo, &bo->mem, prot);
@@ -469,7 +469,7 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
 		 << PAGE_SHIFT);
 	int ret;
 
-	if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
+	if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->mem.num_pages)
 		return -EIO;
 
 	ret = ttm_bo_reserve(bo, true, false, NULL);
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 77ba784425dd..7f75a13163f0 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -129,7 +129,7 @@ static void ttm_tt_init_fields(struct ttm_tt *ttm,
 			       uint32_t page_flags,
 			       enum ttm_caching caching)
 {
-	ttm->num_pages = bo->num_pages;
+	ttm->num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT;
 	ttm->caching = ttm_cached;
 	ttm->page_flags = page_flags;
 	ttm->dma_address = NULL;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index f21881e087db..9f2779ddcf08 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -482,8 +482,8 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
 	d.src_addr = NULL;
 	d.dst_pages = dst->ttm->pages;
 	d.src_pages = src->ttm->pages;
-	d.dst_num_pages = dst->num_pages;
-	d.src_num_pages = src->num_pages;
+	d.dst_num_pages = dst->mem.num_pages;
+	d.src_num_pages = src->mem.num_pages;
 	d.dst_prot = ttm_io_prot(dst, &dst->mem, PAGE_KERNEL);
 	d.src_prot = ttm_io_prot(src, &src->mem, PAGE_KERNEL);
 	d.diff = diff;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 263d76ae43f0..63dbc44eebe0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -223,7 +223,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
 	uint32_t new_flags;
 
 	place = vmw_vram_placement.placement[0];
-	place.lpfn = bo->num_pages;
+	place.lpfn = bo->mem.num_pages;
 	placement.num_placement = 1;
 	placement.placement = &place;
 	placement.num_busy_placement = 1;
@@ -244,7 +244,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
 	 * that situation.
 	 */
 	if (bo->mem.mem_type == TTM_PL_VRAM &&
-	    bo->mem.start < bo->num_pages &&
+	    bo->mem.start < bo->mem.num_pages &&
 	    bo->mem.start > 0 &&
 	    buf->base.pin_count == 0) {
 		ctx.interruptible = false;
@@ -391,7 +391,7 @@ void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
 	if (virtual)
 		return virtual;
 
-	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
+	ret = ttm_bo_kmap(bo, 0, bo->mem.num_pages, &vbo->map);
 	if (ret)
 		DRM_ERROR("Buffer object map failed: %d.\n", ret);
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 984d8884357d..a077e420d2ff 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -430,7 +430,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 	 * Do a page by page copy of COTables. This eliminates slow vmap()s.
 	 * This should really be a TTM utility.
 	 */
-	for (i = 0; i < old_bo->num_pages; ++i) {
+	for (i = 0; i < old_bo->mem.num_pages; ++i) {
 		bool dummy;
 
 		ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index e67e2e8f6e6f..6c016001721d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1042,7 +1042,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
 
 	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
 
-		if (unlikely(new_query_bo->base.num_pages > 4)) {
+		if (unlikely(new_query_bo->base.mem.num_pages > 4)) {
 			VMW_DEBUG_USER("Query buffer too large.\n");
 			return -EINVAL;
 		}
@@ -1541,7 +1541,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 		return ret;
 
 	/* Make sure DMA doesn't cross BO boundaries. */
-	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
+	bo_size = vmw_bo->base.base.size;
 	if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
 		VMW_DEBUG_USER("Invalid DMA offset.\n");
 		return -EINVAL;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index 8fe26e32f920..1774960d1b89 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -64,20 +64,19 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
 	spin_lock(&gman->lock);
 
 	if (gman->max_gmr_pages > 0) {
-		gman->used_gmr_pages += bo->num_pages;
+		gman->used_gmr_pages += mem->num_pages;
 		if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
 			goto nospace;
 	}
 
 	mem->mm_node = gman;
 	mem->start = id;
-	mem->num_pages = bo->num_pages;
 
 	spin_unlock(&gman->lock);
 	return 0;
 
 nospace:
-	gman->used_gmr_pages -= bo->num_pages;
+	gman->used_gmr_pages -= mem->num_pages;
 	spin_unlock(&gman->lock);
 	ida_free(&gman->gmr_ida, id);
 	return -ENOSPC;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index bc67f2b930e1..7dc96125e5c2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1220,7 +1220,7 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
 	int ret;
 
 	requested_size = mode_cmd->height * mode_cmd->pitches[0];
-	if (unlikely(requested_size > bo->base.num_pages * PAGE_SIZE)) {
+	if (unlikely(requested_size > bo->base.base.size)) {
 		DRM_ERROR("Screen buffer object size is too small "
 			  "for requested mode.\n");
 		return -EINVAL;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index 0b76b3d17d4c..0a900afc66ff 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -232,7 +232,7 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
 int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
 {
 	struct vmw_bo_dirty *dirty = vbo->dirty;
-	pgoff_t num_pages = vbo->base.num_pages;
+	pgoff_t num_pages = vbo->base.mem.num_pages;
 	size_t size, acc_size;
 	int ret;
 	static struct ttm_operation_ctx ctx = {
@@ -413,7 +413,7 @@ vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf)
 		return ret;
 
 	page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node);
-	if (unlikely(page_offset >= bo->num_pages)) {
+	if (unlikely(page_offset >= bo->mem.num_pages)) {
 		ret = VM_FAULT_SIGBUS;
 		goto out_unlock;
 	}
@@ -456,7 +456,7 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
 
 		page_offset = vmf->pgoff -
 			drm_vma_node_start(&bo->base.vma_node);
-		if (page_offset >= bo->num_pages ||
+		if (page_offset >= bo->mem.num_pages ||
 		    vmw_resources_clean(vbo, page_offset,
 					page_offset + PAGE_SIZE,
 					&allowed_prefault)) {
@@ -531,7 +531,7 @@ vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
 
 		page_offset = vmf->pgoff -
 			drm_vma_node_start(&bo->base.vma_node);
-		if (page_offset >= bo->num_pages ||
+		if (page_offset >= bo->mem.num_pages ||
 		    vmw_resources_clean(vbo, page_offset,
 					page_offset + PAGE_SIZE,
 					&allowed_prefault)) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 00b535831a7a..f6e8fdfc76e5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -360,7 +360,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
 	int ret;
 
 	if (likely(res->backup)) {
-		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
+		BUG_ON(res->backup->base.base.size < size);
 		return 0;
 	}
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index f328aa5839a2..e76a720f841e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -856,8 +856,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
 			return ret;
 		}
 
-		if ((u64)buffer->base.num_pages * PAGE_SIZE <
-		    (u64)size + (u64)offset) {
+		if ((u64)buffer->base.base.size < (u64)size + (u64)offset) {
 			VMW_DEBUG_USER("Illegal buffer- or shader size.\n");
 			ret = -EINVAL;
 			goto out_bad_arg;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 5b04ec047ef3..27ab2c50312b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -512,7 +512,7 @@ static void vmw_stdu_bo_fifo_commit(struct vmw_kms_dirty *dirty)
 	cmd->body.host.mipmap = 0;
 	cmd->body.transfer = ddirty->transfer;
 	suffix->suffixSize = sizeof(*suffix);
-	suffix->maximumOffset = ddirty->buf->base.num_pages * PAGE_SIZE;
+	suffix->maximumOffset = ddirty->buf->base.base.size;
 
 	if (ddirty->transfer == SVGA3D_WRITE_HOST_VRAM) {
 		blit_size += sizeof(struct vmw_stdu_update);
@@ -1238,7 +1238,7 @@ static uint32_t vmw_stdu_bo_populate_update(struct vmw_du_update_plane  *update,
 	vfbbo = container_of(update->vfb, typeof(*vfbbo), base);
 
 	suffix->suffixSize = sizeof(*suffix);
-	suffix->maximumOffset = vfbbo->buffer->base.num_pages * PAGE_SIZE;
+	suffix->maximumOffset = vfbbo->buffer->base.base.size;
 
 	vmw_stdu_populate_update(&suffix[1], stdu->base.unit, bb->x1, bb->x2,
 				 bb->y1, bb->y2);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 3914bfee0533..fa9be30bec6c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1550,8 +1550,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
 					 &res->backup,
 					 &user_srf->backup_base);
 		if (ret == 0) {
-			if (res->backup->base.num_pages * PAGE_SIZE <
-			    res->backup_size) {
+			if (res->backup->base.base.size < res->backup_size) {
 				VMW_DEBUG_USER("Surface backup buffer too small.\n");
 				vmw_bo_unreference(&res->backup);
 				ret = -EINVAL;
@@ -1614,7 +1613,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
 	if (res->backup) {
 		rep->buffer_map_handle =
 			drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
-		rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
+		rep->buffer_size = res->backup->base.base.size;
 		rep->buffer_handle = backup_handle;
 	} else {
 		rep->buffer_map_handle = 0;
@@ -1692,7 +1691,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
 	rep->crep.buffer_handle = backup_handle;
 	rep->crep.buffer_map_handle =
 		drm_vma_node_offset_addr(&srf->res.backup->base.base.vma_node);
-	rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;
+	rep->crep.buffer_size = srf->res.backup->base.base.size;
 
 	rep->creq.version = drm_vmw_gb_surface_v1;
 	rep->creq.svga3d_flags_upper_32_bits =
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 79b9367e0ffd..0d4e3fccaa8a 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -125,7 +125,6 @@ struct ttm_buffer_object {
 	struct ttm_bo_device *bdev;
 	enum ttm_bo_type type;
 	void (*destroy) (struct ttm_buffer_object *);
-	unsigned long num_pages;
 	size_t acc_size;
 
 	/**
@@ -397,13 +396,11 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
 
 int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 			 struct ttm_buffer_object *bo,
-			 unsigned long size,
-			 enum ttm_bo_type type,
+			 size_t size, enum ttm_bo_type type,
 			 struct ttm_placement *placement,
 			 uint32_t page_alignment,
 			 struct ttm_operation_ctx *ctx,
-			 size_t acc_size,
-			 struct sg_table *sg,
+			 size_t acc_size, struct sg_table *sg,
 			 struct dma_resv *resv,
 			 void (*destroy) (struct ttm_buffer_object *));
 
@@ -445,7 +442,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
  * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
  */
 int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo,
-		unsigned long size, enum ttm_bo_type type,
+		size_t size, enum ttm_bo_type type,
 		struct ttm_placement *placement,
 		uint32_t page_alignment, bool interrubtible, size_t acc_size,
 		struct sg_table *sg, struct dma_resv *resv,
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index f48a70d39ac5..ad6da99770e9 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -171,7 +171,6 @@ struct ttm_bus_placement {
 struct ttm_resource {
 	void *mm_node;
 	unsigned long start;
-	unsigned long size;
 	unsigned long num_pages;
 	uint32_t page_alignment;
 	uint32_t mem_type;
-- 
2.25.1


* Re: [PATCH] drm/ttm: cleanup BO size handling v3
  2020-12-10 14:52 [PATCH] drm/ttm: cleanup BO size handling v3 Christian König
@ 2020-12-14 16:55 ` Daniel Vetter
  2020-12-14 20:34   ` Christian König
  0 siblings, 1 reply; 3+ messages in thread
From: Daniel Vetter @ 2020-12-14 16:55 UTC (permalink / raw)
  To: Christian König; +Cc: dri-devel

I think you forgot to compile test with amdkfd :-)

drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c: In function ‘add_bo_to_vm’:
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c:456:37: error:
‘struct ttm_resource’ has no member named ‘size’
 456 |  unsigned long bo_size = bo->tbo.mem.size;
     |                                     ^
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c: In function
‘amdgpu_amdkfd_gpuvm_free_memory_of_gpu’:
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c:1284:42: error:
‘struct ttm_resource’ has no member named ‘size’
1284 |  unsigned long bo_size = mem->bo->tbo.mem.size;
     |                                          ^
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c: In function
‘amdgpu_amdkfd_gpuvm_map_memory_to_gpu’:
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c:1405:23: error:
‘struct ttm_resource’ has no member named ‘size’
1405 |  bo_size = bo->tbo.mem.size;
     |                       ^
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c: In function
‘amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu’:
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c:1509:42: error:
‘struct ttm_resource’ has no member named ‘size’
1509 |  unsigned long bo_size = mem->bo->tbo.mem.size;
     |                                          ^
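
Presumably the fixup just mirrors what the patch does everywhere else and reads
the size from the GEM object instead, along the lines of (sketch only, not
compile-tested):

	unsigned long bo_size = bo->tbo.base.size;	/* was bo->tbo.mem.size */

and the same for the mem->bo->tbo.mem.size hits.
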
Also we really need gitlab CI to stop this kind of stuff ...
-Daniel

On Thu, Dec 10, 2020 at 3:52 PM Christian König
<ckoenig.leichtzumerken@gmail.com> wrote:
>
> Based on an idea from Dave, but cleaned up a bit.
>
> We had multiple fields for essentially the same thing.
>
> Now bo->base.size is the original size of the BO in
> arbitrary units, usually bytes.
>
> bo->mem.num_pages is the size in number of pages in the
> resource domain of bo->mem.mem_type.
>
> v2: use the GEM object size instead of the BO size
> v3: fix printks in some places
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
> Reviewed-by: Huang Rui <ray.huang@amd.com> (v1)
> Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
> ---
> @@ -431,9 +431,9 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
>
>         map->virtual = NULL;
>         map->bo = bo;
> -       if (num_pages > bo->num_pages)
> +       if (num_pages > bo->mem.num_pages)
>                 return -EINVAL;
> -       if (start_page > bo->num_pages)
> +       if ((start_page + num_pages) > bo->mem.num_pages)
>                 return -EINVAL;
>
>         ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
> @@ -485,14 +485,14 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
>
>         if (mem->bus.is_iomem) {
>                 void __iomem *vaddr_iomem;
> -               size_t size = bo->num_pages << PAGE_SHIFT;
>
>                 if (mem->bus.addr)
>                         vaddr_iomem = (void __iomem *)mem->bus.addr;
>                 else if (mem->bus.caching == ttm_write_combined)
> -                       vaddr_iomem = ioremap_wc(mem->bus.offset, size);
> +                       vaddr_iomem = ioremap_wc(mem->bus.offset,
> +                                                bo->base.size);
>                 else
> -                       vaddr_iomem = ioremap(mem->bus.offset, size);
> +                       vaddr_iomem = ioremap(mem->bus.offset, bo->base.size);
>
>                 if (!vaddr_iomem)
>                         return -ENOMEM;
> @@ -517,7 +517,7 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
>                  * or to make the buffer object look contiguous.
>                  */
>                 prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
> -               vaddr = vmap(ttm->pages, bo->num_pages, 0, prot);
> +               vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot);
>                 if (!vaddr)
>                         return -ENOMEM;
>
> diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
> index 144a4940b6b6..6dc96cf66744 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
> @@ -198,7 +198,7 @@ static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
>
>         /* Fault should not cross bo boundary. */
>         page_offset &= ~(fault_page_size - 1);
> -       if (page_offset + fault_page_size > bo->num_pages)
> +       if (page_offset + fault_page_size > bo->mem.num_pages)
>                 goto out_fallback;
>
>         if (bo->mem.bus.is_iomem)
> @@ -306,7 +306,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
>         page_last = vma_pages(vma) + vma->vm_pgoff -
>                 drm_vma_node_start(&bo->base.vma_node);
>
> -       if (unlikely(page_offset >= bo->num_pages))
> +       if (unlikely(page_offset >= bo->mem.num_pages))
>                 return VM_FAULT_SIGBUS;
>
>         prot = ttm_io_prot(bo, &bo->mem, prot);
> @@ -469,7 +469,7 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
>                  << PAGE_SHIFT);
>         int ret;
>
> -       if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
> +       if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->mem.num_pages)
>                 return -EIO;
>
>         ret = ttm_bo_reserve(bo, true, false, NULL);
> diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
> index 77ba784425dd..7f75a13163f0 100644
> --- a/drivers/gpu/drm/ttm/ttm_tt.c
> +++ b/drivers/gpu/drm/ttm/ttm_tt.c
> @@ -129,7 +129,7 @@ static void ttm_tt_init_fields(struct ttm_tt *ttm,
>                                uint32_t page_flags,
>                                enum ttm_caching caching)
>  {
> -       ttm->num_pages = bo->num_pages;
> +       ttm->num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT;
>         ttm->caching = ttm_cached;
>         ttm->page_flags = page_flags;
>         ttm->dma_address = NULL;
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
> index f21881e087db..9f2779ddcf08 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
> @@ -482,8 +482,8 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
>         d.src_addr = NULL;
>         d.dst_pages = dst->ttm->pages;
>         d.src_pages = src->ttm->pages;
> -       d.dst_num_pages = dst->num_pages;
> -       d.src_num_pages = src->num_pages;
> +       d.dst_num_pages = dst->mem.num_pages;
> +       d.src_num_pages = src->mem.num_pages;
>         d.dst_prot = ttm_io_prot(dst, &dst->mem, PAGE_KERNEL);
>         d.src_prot = ttm_io_prot(src, &src->mem, PAGE_KERNEL);
>         d.diff = diff;
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
> index 263d76ae43f0..63dbc44eebe0 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
> @@ -223,7 +223,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
>         uint32_t new_flags;
>
>         place = vmw_vram_placement.placement[0];
> -       place.lpfn = bo->num_pages;
> +       place.lpfn = bo->mem.num_pages;
>         placement.num_placement = 1;
>         placement.placement = &place;
>         placement.num_busy_placement = 1;
> @@ -244,7 +244,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
>          * that situation.
>          */
>         if (bo->mem.mem_type == TTM_PL_VRAM &&
> -           bo->mem.start < bo->num_pages &&
> +           bo->mem.start < bo->mem.num_pages &&
>             bo->mem.start > 0 &&
>             buf->base.pin_count == 0) {
>                 ctx.interruptible = false;
> @@ -391,7 +391,7 @@ void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
>         if (virtual)
>                 return virtual;
>
> -       ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
> +       ret = ttm_bo_kmap(bo, 0, bo->mem.num_pages, &vbo->map);
>         if (ret)
>                 DRM_ERROR("Buffer object map failed: %d.\n", ret);
>
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
> index 984d8884357d..a077e420d2ff 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
> @@ -430,7 +430,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
>          * Do a page by page copy of COTables. This eliminates slow vmap()s.
>          * This should really be a TTM utility.
>          */
> -       for (i = 0; i < old_bo->num_pages; ++i) {
> +       for (i = 0; i < old_bo->mem.num_pages; ++i) {
>                 bool dummy;
>
>                 ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
> index e67e2e8f6e6f..6c016001721d 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
> @@ -1042,7 +1042,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
>
>         if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
>
> -               if (unlikely(new_query_bo->base.num_pages > 4)) {
> +               if (unlikely(new_query_bo->base.mem.num_pages > 4)) {
>                         VMW_DEBUG_USER("Query buffer too large.\n");
>                         return -EINVAL;
>                 }
> @@ -1541,7 +1541,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
>                 return ret;
>
>         /* Make sure DMA doesn't cross BO boundaries. */
> -       bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
> +       bo_size = vmw_bo->base.base.size;
>         if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
>                 VMW_DEBUG_USER("Invalid DMA offset.\n");
>                 return -EINVAL;
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
> index 8fe26e32f920..1774960d1b89 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
> @@ -64,20 +64,19 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
>         spin_lock(&gman->lock);
>
>         if (gman->max_gmr_pages > 0) {
> -               gman->used_gmr_pages += bo->num_pages;
> +               gman->used_gmr_pages += mem->num_pages;
>                 if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
>                         goto nospace;
>         }
>
>         mem->mm_node = gman;
>         mem->start = id;
> -       mem->num_pages = bo->num_pages;
>
>         spin_unlock(&gman->lock);
>         return 0;
>
>  nospace:
> -       gman->used_gmr_pages -= bo->num_pages;
> +       gman->used_gmr_pages -= mem->num_pages;
>         spin_unlock(&gman->lock);
>         ida_free(&gman->gmr_ida, id);
>         return -ENOSPC;
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
> index bc67f2b930e1..7dc96125e5c2 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
> @@ -1220,7 +1220,7 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
>         int ret;
>
>         requested_size = mode_cmd->height * mode_cmd->pitches[0];
> -       if (unlikely(requested_size > bo->base.num_pages * PAGE_SIZE)) {
> +       if (unlikely(requested_size > bo->base.base.size)) {
>                 DRM_ERROR("Screen buffer object size is too small "
>                           "for requested mode.\n");
>                 return -EINVAL;
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
> index 0b76b3d17d4c..0a900afc66ff 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
> @@ -232,7 +232,7 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
>  int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
>  {
>         struct vmw_bo_dirty *dirty = vbo->dirty;
> -       pgoff_t num_pages = vbo->base.num_pages;
> +       pgoff_t num_pages = vbo->base.mem.num_pages;
>         size_t size, acc_size;
>         int ret;
>         static struct ttm_operation_ctx ctx = {
> @@ -413,7 +413,7 @@ vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf)
>                 return ret;
>
>         page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node);
> -       if (unlikely(page_offset >= bo->num_pages)) {
> +       if (unlikely(page_offset >= bo->mem.num_pages)) {
>                 ret = VM_FAULT_SIGBUS;
>                 goto out_unlock;
>         }
> @@ -456,7 +456,7 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
>
>                 page_offset = vmf->pgoff -
>                         drm_vma_node_start(&bo->base.vma_node);
> -               if (page_offset >= bo->num_pages ||
> +               if (page_offset >= bo->mem.num_pages ||
>                     vmw_resources_clean(vbo, page_offset,
>                                         page_offset + PAGE_SIZE,
>                                         &allowed_prefault)) {
> @@ -531,7 +531,7 @@ vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
>
>                 page_offset = vmf->pgoff -
>                         drm_vma_node_start(&bo->base.vma_node);
> -               if (page_offset >= bo->num_pages ||
> +               if (page_offset >= bo->mem.num_pages ||
>                     vmw_resources_clean(vbo, page_offset,
>                                         page_offset + PAGE_SIZE,
>                                         &allowed_prefault)) {
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> index 00b535831a7a..f6e8fdfc76e5 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> @@ -360,7 +360,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
>         int ret;
>
>         if (likely(res->backup)) {
> -               BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
> +               BUG_ON(res->backup->base.base.size < size);
>                 return 0;
>         }
>
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
> index f328aa5839a2..e76a720f841e 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
> @@ -856,8 +856,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
>                         return ret;
>                 }
>
> -               if ((u64)buffer->base.num_pages * PAGE_SIZE <
> -                   (u64)size + (u64)offset) {
> +               if ((u64)buffer->base.base.size < (u64)size + (u64)offset) {
>                         VMW_DEBUG_USER("Illegal buffer- or shader size.\n");
>                         ret = -EINVAL;
>                         goto out_bad_arg;
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
> index 5b04ec047ef3..27ab2c50312b 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
> @@ -512,7 +512,7 @@ static void vmw_stdu_bo_fifo_commit(struct vmw_kms_dirty *dirty)
>         cmd->body.host.mipmap = 0;
>         cmd->body.transfer = ddirty->transfer;
>         suffix->suffixSize = sizeof(*suffix);
> -       suffix->maximumOffset = ddirty->buf->base.num_pages * PAGE_SIZE;
> +       suffix->maximumOffset = ddirty->buf->base.base.size;
>
>         if (ddirty->transfer == SVGA3D_WRITE_HOST_VRAM) {
>                 blit_size += sizeof(struct vmw_stdu_update);
> @@ -1238,7 +1238,7 @@ static uint32_t vmw_stdu_bo_populate_update(struct vmw_du_update_plane  *update,
>         vfbbo = container_of(update->vfb, typeof(*vfbbo), base);
>
>         suffix->suffixSize = sizeof(*suffix);
> -       suffix->maximumOffset = vfbbo->buffer->base.num_pages * PAGE_SIZE;
> +       suffix->maximumOffset = vfbbo->buffer->base.base.size;
>
>         vmw_stdu_populate_update(&suffix[1], stdu->base.unit, bb->x1, bb->x2,
>                                  bb->y1, bb->y2);
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
> index 3914bfee0533..fa9be30bec6c 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
> @@ -1550,8 +1550,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
>                                          &res->backup,
>                                          &user_srf->backup_base);
>                 if (ret == 0) {
> -                       if (res->backup->base.num_pages * PAGE_SIZE <
> -                           res->backup_size) {
> +                       if (res->backup->base.base.size < res->backup_size) {
>                                 VMW_DEBUG_USER("Surface backup buffer too small.\n");
>                                 vmw_bo_unreference(&res->backup);
>                                 ret = -EINVAL;
> @@ -1614,7 +1613,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
>         if (res->backup) {
>                 rep->buffer_map_handle =
>                         drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
> -               rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
> +               rep->buffer_size = res->backup->base.base.size;
>                 rep->buffer_handle = backup_handle;
>         } else {
>                 rep->buffer_map_handle = 0;
> @@ -1692,7 +1691,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
>         rep->crep.buffer_handle = backup_handle;
>         rep->crep.buffer_map_handle =
>                 drm_vma_node_offset_addr(&srf->res.backup->base.base.vma_node);
> -       rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;
> +       rep->crep.buffer_size = srf->res.backup->base.base.size;
>
>         rep->creq.version = drm_vmw_gb_surface_v1;
>         rep->creq.svga3d_flags_upper_32_bits =
> diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
> index 79b9367e0ffd..0d4e3fccaa8a 100644
> --- a/include/drm/ttm/ttm_bo_api.h
> +++ b/include/drm/ttm/ttm_bo_api.h
> @@ -125,7 +125,6 @@ struct ttm_buffer_object {
>         struct ttm_bo_device *bdev;
>         enum ttm_bo_type type;
>         void (*destroy) (struct ttm_buffer_object *);
> -       unsigned long num_pages;
>         size_t acc_size;
>
>         /**
> @@ -397,13 +396,11 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
>
>  int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
>                          struct ttm_buffer_object *bo,
> -                        unsigned long size,
> -                        enum ttm_bo_type type,
> +                        size_t size, enum ttm_bo_type type,
>                          struct ttm_placement *placement,
>                          uint32_t page_alignment,
>                          struct ttm_operation_ctx *ctx,
> -                        size_t acc_size,
> -                        struct sg_table *sg,
> +                        size_t acc_size, struct sg_table *sg,
>                          struct dma_resv *resv,
>                          void (*destroy) (struct ttm_buffer_object *));
>
> @@ -445,7 +442,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
>   * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
>   */
>  int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo,
> -               unsigned long size, enum ttm_bo_type type,
> +               size_t size, enum ttm_bo_type type,
>                 struct ttm_placement *placement,
>                 uint32_t page_alignment, bool interrubtible, size_t acc_size,
>                 struct sg_table *sg, struct dma_resv *resv,
> diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
> index f48a70d39ac5..ad6da99770e9 100644
> --- a/include/drm/ttm/ttm_resource.h
> +++ b/include/drm/ttm/ttm_resource.h
> @@ -171,7 +171,6 @@ struct ttm_bus_placement {
>  struct ttm_resource {
>         void *mm_node;
>         unsigned long start;
> -       unsigned long size;
>         unsigned long num_pages;
>         uint32_t page_alignment;
>         uint32_t mem_type;
> --
> 2.25.1
>



-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch


* Re: [PATCH] drm/ttm: cleanup BO size handling v3
  2020-12-14 16:55 ` Daniel Vetter
@ 2020-12-14 20:34   ` Christian König
  0 siblings, 0 replies; 3+ messages in thread
From: Christian König @ 2020-12-14 20:34 UTC (permalink / raw)
  To: Daniel Vetter; +Cc: dri-devel

Turned out I did test it, but then forgot to git add the changes
before pushing....

It was a really long day/week/year :)

Christian.

On 14.12.20 at 17:55, Daniel Vetter wrote:
> I think you forgot to compile test with amdkfd :-)
>
> drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c: In function ‘add_bo_to_vm’:
> drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c:456:37: error:
> ‘struct ttm_resource’ has no member named ‘size’
>   456 |  unsigned long bo_size = bo->tbo.mem.size;
>       |                                     ^
> drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c: In function
> ‘amdgpu_amdkfd_gpuvm_free_memory_of_gpu’:
> drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c:1284:42: error:
> ‘struct ttm_resource’ has no member named ‘size’
> 1284 |  unsigned long bo_size = mem->bo->tbo.mem.size;
>       |                                          ^
> drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c: In function
> ‘amdgpu_amdkfd_gpuvm_map_memory_to_gpu’:
> drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c:1405:23: error:
> ‘struct ttm_resource’ has no member named ‘size’
> 1405 |  bo_size = bo->tbo.mem.size;
>       |                       ^
> drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c: In function
> ‘amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu’:
> drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c:1509:42: error:
> ‘struct ttm_resource’ has no member named ‘size’
> 1509 |  unsigned long bo_size = mem->bo->tbo.mem.size;
>       |                                          ^
> Also we really need gitlab CI to stop this kind of stuff ...
> -Daniel
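
The amdkfd call sites listed above presumably just need the same mechanical
conversion as the rest of the series: the byte size now lives on the GEM
object (bo->tbo.base.size, which is what amdgpu_bo_size() returns after this
patch) rather than on the removed mem.size field. As a minimal stand-alone
sketch of the bookkeeping the series moves to (plain userspace C, illustration
only; the sketch_bo names and the 4K page size are made up here, not taken
from the patch):

#include <stdio.h>
#include <stddef.h>

/* Illustration only: 4K pages assumed; the kernel gets this from asm/page.h. */
#define SK_PAGE_SHIFT	12
#define SK_PAGE_SIZE	(1UL << SK_PAGE_SHIFT)
#define SK_PAGE_ALIGN(x) (((x) + SK_PAGE_SIZE - 1) & ~(SK_PAGE_SIZE - 1))

/*
 * Model of the v3 layout: the BO keeps its original size in bytes
 * (bo->base.size) and the resource only keeps the page count derived
 * from it (bo->mem.num_pages); there is no separate mem.size any more.
 */
struct sketch_bo {
	size_t base_size;		/* stands in for bo->base.size */
	unsigned long mem_num_pages;	/* stands in for bo->mem.num_pages */
};

static void sketch_bo_init(struct sketch_bo *bo, size_t size)
{
	bo->base_size = size;
	bo->mem_num_pages = SK_PAGE_ALIGN(size) >> SK_PAGE_SHIFT;
}

/* What a former mem.size reader (e.g. the amdkfd call sites) would use now. */
static size_t sketch_bo_size(const struct sketch_bo *bo)
{
	return bo->base_size;
}

int main(void)
{
	struct sketch_bo bo;

	sketch_bo_init(&bo, 6000);	/* deliberately not page aligned */
	printf("bytes=%zu pages=%lu\n", sketch_bo_size(&bo), bo.mem_num_pages);
	return 0;
}

That keeps a single authoritative byte size on the GEM object while
mem.num_pages stays in whatever units the resource domain uses.
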
>
> On Thu, Dec 10, 2020 at 3:52 PM Christian König
> <ckoenig.leichtzumerken@gmail.com> wrote:
>> Based on an idea from Dave, but cleaned up a bit.
>>
>> We had multiple fields for essentially the same thing.
>>
>> Now bo->base.size is the original size of the BO in
>> arbitrary units, usually bytes.
>>
>> bo->mem.num_pages is the size in number of pages in the
>> resource domain of bo->mem.mem_type.
>>
>> v2: use the GEM object size instead of the BO size
>> v3: fix printks in some places
>>
>> Signed-off-by: Christian König <christian.koenig@amd.com>
>> Reviewed-by: Huang Rui <ray.huang@amd.com> (v1)
>> Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
>> ---
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c   |  2 +-
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c       |  2 +-
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_object.c    |  2 +-
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_object.h    |  4 +--
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h     |  2 +-
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c       |  6 ++--
>>   drivers/gpu/drm/amd/amdgpu/mes_v10_1.c        |  2 +-
>>   drivers/gpu/drm/nouveau/nouveau_bo.c          | 10 +++---
>>   drivers/gpu/drm/nouveau/nouveau_display.c     |  8 ++---
>>   drivers/gpu/drm/nouveau/nouveau_prime.c       |  4 +--
>>   drivers/gpu/drm/nouveau/nv17_fence.c          |  2 +-
>>   drivers/gpu/drm/nouveau/nv50_fence.c          |  2 +-
>>   drivers/gpu/drm/qxl/qxl_object.h              |  2 +-
>>   drivers/gpu/drm/radeon/radeon_cs.c            |  3 +-
>>   drivers/gpu/drm/radeon/radeon_object.c        | 13 ++++---
>>   drivers/gpu/drm/radeon/radeon_object.h        |  4 +--
>>   drivers/gpu/drm/radeon/radeon_prime.c         |  4 +--
>>   drivers/gpu/drm/radeon/radeon_trace.h         |  2 +-
>>   drivers/gpu/drm/radeon/radeon_ttm.c           |  2 +-
>>   drivers/gpu/drm/ttm/ttm_bo.c                  | 35 ++++++-------------
>>   drivers/gpu/drm/ttm/ttm_bo_util.c             | 12 +++----
>>   drivers/gpu/drm/ttm/ttm_bo_vm.c               |  6 ++--
>>   drivers/gpu/drm/ttm/ttm_tt.c                  |  2 +-
>>   drivers/gpu/drm/vmwgfx/vmwgfx_blit.c          |  4 +--
>>   drivers/gpu/drm/vmwgfx/vmwgfx_bo.c            |  6 ++--
>>   drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c       |  2 +-
>>   drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c       |  4 +--
>>   drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c |  5 ++-
>>   drivers/gpu/drm/vmwgfx/vmwgfx_kms.c           |  2 +-
>>   drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c    |  8 ++---
>>   drivers/gpu/drm/vmwgfx/vmwgfx_resource.c      |  2 +-
>>   drivers/gpu/drm/vmwgfx/vmwgfx_shader.c        |  3 +-
>>   drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c          |  4 +--
>>   drivers/gpu/drm/vmwgfx/vmwgfx_surface.c       |  7 ++--
>>   include/drm/ttm/ttm_bo_api.h                  |  9 ++---
>>   include/drm/ttm/ttm_resource.h                |  1 -
>>   36 files changed, 84 insertions(+), 104 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
>> index e5919efca870..c4c93f19d273 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
>> @@ -269,7 +269,7 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
>>          case TTM_PL_TT:
>>                  sgt = drm_prime_pages_to_sg(obj->dev,
>>                                              bo->tbo.ttm->pages,
>> -                                           bo->tbo.num_pages);
>> +                                           bo->tbo.ttm->num_pages);
>>                  if (IS_ERR(sgt))
>>                          return sgt;
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
>> index 056cb87d09ea..52bcd1b5582f 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
>> @@ -121,7 +121,7 @@ uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
>>   {
>>          struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
>>
>> -       if (bo->num_pages != 1 || bo->ttm->caching == ttm_cached)
>> +       if (bo->ttm->num_pages != 1 || bo->ttm->caching == ttm_cached)
>>                  return AMDGPU_BO_INVALID_OFFSET;
>>
>>          if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
>> index c6c9723d3d8a..381ecc4788d5 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
>> @@ -787,7 +787,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
>>          if (r < 0)
>>                  return r;
>>
>> -       r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
>> +       r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
>>          if (r)
>>                  return r;
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
>> index ed47cbac4f75..a99a5cde42dd 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
>> @@ -174,12 +174,12 @@ static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
>>
>>   static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
>>   {
>> -       return bo->tbo.num_pages << PAGE_SHIFT;
>> +       return bo->tbo.base.size;
>>   }
>>
>>   static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
>>   {
>> -       return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
>> +       return bo->tbo.base.size / AMDGPU_GPU_PAGE_SIZE;
>>   }
>>
>>   static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
>> index ee9480d14cbc..20715ddbb746 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
>> @@ -127,7 +127,7 @@ TRACE_EVENT(amdgpu_bo_create,
>>
>>              TP_fast_assign(
>>                             __entry->bo = bo;
>> -                          __entry->pages = bo->tbo.num_pages;
>> +                          __entry->pages = bo->tbo.mem.num_pages;
>>                             __entry->type = bo->tbo.mem.mem_type;
>>                             __entry->prefer = bo->preferred_domains;
>>                             __entry->allow = bo->allowed_domains;
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>> index ec93d4fdabbd..5346891a3f73 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>> @@ -636,7 +636,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
>>
>>   out:
>>          /* update statistics */
>> -       atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
>> +       atomic64_add(bo->base.size, &adev->num_bytes_moved);
>>          amdgpu_bo_move_notify(bo, evict, new_mem);
>>          return 0;
>>   }
>> @@ -2131,7 +2131,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
>>                          return r;
>>          }
>>
>> -       num_pages = bo->tbo.num_pages;
>> +       num_pages = bo->tbo.mem.num_pages;
>>          mm_node = bo->tbo.mem.mm_node;
>>          num_loops = 0;
>>          while (num_pages) {
>> @@ -2161,7 +2161,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
>>                  }
>>          }
>>
>> -       num_pages = bo->tbo.num_pages;
>> +       num_pages = bo->tbo.mem.num_pages;
>>          mm_node = bo->tbo.mem.mm_node;
>>
>>          while (num_pages) {
>> diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
>> index 985e454463e1..7f30629f21a2 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
>> @@ -554,7 +554,7 @@ static int mes_v10_1_allocate_eop_buf(struct amdgpu_device *adev)
>>                  return r;
>>          }
>>
>> -       memset(eop, 0, adev->mes.eop_gpu_obj->tbo.mem.size);
>> +       memset(eop, 0, adev->mes.eop_gpu_obj->tbo.base.size);
>>
>>          amdgpu_bo_kunmap(adev->mes.eop_gpu_obj);
>>          amdgpu_bo_unreserve(adev->mes.eop_gpu_obj);
>> diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
>> index bc542ac4c4b6..c1a675b8fc43 100644
>> --- a/drivers/gpu/drm/nouveau/nouveau_bo.c
>> +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
>> @@ -473,10 +473,10 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
>>
>>          switch (bo->mem.mem_type) {
>>          case TTM_PL_VRAM:
>> -               drm->gem.vram_available -= bo->mem.size;
>> +               drm->gem.vram_available -= bo->base.size;
>>                  break;
>>          case TTM_PL_TT:
>> -               drm->gem.gart_available -= bo->mem.size;
>> +               drm->gem.gart_available -= bo->base.size;
>>                  break;
>>          default:
>>                  break;
>> @@ -504,10 +504,10 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
>>          if (!nvbo->bo.pin_count) {
>>                  switch (bo->mem.mem_type) {
>>                  case TTM_PL_VRAM:
>> -                       drm->gem.vram_available += bo->mem.size;
>> +                       drm->gem.vram_available += bo->base.size;
>>                          break;
>>                  case TTM_PL_TT:
>> -                       drm->gem.gart_available += bo->mem.size;
>> +                       drm->gem.gart_available += bo->base.size;
>>                          break;
>>                  default:
>>                          break;
>> @@ -913,7 +913,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_resource *new_reg,
>>                  return 0;
>>
>>          if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
>> -               *new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size,
>> +               *new_tile = nv10_bo_set_tiling(dev, offset, bo->base.size,
>>                                                 nvbo->mode, nvbo->zeta);
>>          }
>>
>> diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
>> index bceb48a2dfca..17831ee897ea 100644
>> --- a/drivers/gpu/drm/nouveau/nouveau_display.c
>> +++ b/drivers/gpu/drm/nouveau/nouveau_display.c
>> @@ -286,11 +286,11 @@ nouveau_check_bl_size(struct nouveau_drm *drm, struct nouveau_bo *nvbo,
>>
>>          bl_size = bw * bh * (1 << tile_mode) * gob_size;
>>
>> -       DRM_DEBUG_KMS("offset=%u stride=%u h=%u tile_mode=0x%02x bw=%u bh=%u gob_size=%u bl_size=%llu size=%lu\n",
>> +       DRM_DEBUG_KMS("offset=%u stride=%u h=%u tile_mode=0x%02x bw=%u bh=%u gob_size=%u bl_size=%llu size=%zu\n",
>>                        offset, stride, h, tile_mode, bw, bh, gob_size, bl_size,
>> -                     nvbo->bo.mem.size);
>> +                     nvbo->bo.base.size);
>>
>> -       if (bl_size + offset > nvbo->bo.mem.size)
>> +       if (bl_size + offset > nvbo->bo.base.size)
>>                  return -ERANGE;
>>
>>          return 0;
>> @@ -363,7 +363,7 @@ nouveau_framebuffer_new(struct drm_device *dev,
>>                  } else {
>>                          uint32_t size = mode_cmd->pitches[i] * height;
>>
>> -                       if (size + mode_cmd->offsets[i] > nvbo->bo.mem.size)
>> +                       if (size + mode_cmd->offsets[i] > nvbo->bo.base.size)
>>                                  return -ERANGE;
>>                  }
>>          }
>> diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
>> index 2f16b5249283..347488685f74 100644
>> --- a/drivers/gpu/drm/nouveau/nouveau_prime.c
>> +++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
>> @@ -30,9 +30,9 @@
>>   struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *obj)
>>   {
>>          struct nouveau_bo *nvbo = nouveau_gem_object(obj);
>> -       int npages = nvbo->bo.num_pages;
>>
>> -       return drm_prime_pages_to_sg(obj->dev, nvbo->bo.ttm->pages, npages);
>> +       return drm_prime_pages_to_sg(obj->dev, nvbo->bo.ttm->pages,
>> +                                    nvbo->bo.ttm->num_pages);
>>   }
>>
>>   struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
>> diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
>> index 1253fdec712d..b1cd8d7dd87d 100644
>> --- a/drivers/gpu/drm/nouveau/nv17_fence.c
>> +++ b/drivers/gpu/drm/nouveau/nv17_fence.c
>> @@ -80,7 +80,7 @@ nv17_fence_context_new(struct nouveau_channel *chan)
>>          struct nv10_fence_chan *fctx;
>>          struct ttm_resource *reg = &priv->bo->bo.mem;
>>          u32 start = reg->start * PAGE_SIZE;
>> -       u32 limit = start + reg->size - 1;
>> +       u32 limit = start + priv->bo->bo.base.size - 1;
>>          int ret = 0;
>>
>>          fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
>> diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
>> index 447238e3cbe7..1625826505f6 100644
>> --- a/drivers/gpu/drm/nouveau/nv50_fence.c
>> +++ b/drivers/gpu/drm/nouveau/nv50_fence.c
>> @@ -39,7 +39,7 @@ nv50_fence_context_new(struct nouveau_channel *chan)
>>          struct nv10_fence_chan *fctx;
>>          struct ttm_resource *reg = &priv->bo->bo.mem;
>>          u32 start = reg->start * PAGE_SIZE;
>> -       u32 limit = start + reg->size - 1;
>> +       u32 limit = start + priv->bo->bo.base.size - 1;
>>          int ret;
>>
>>          fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
>> diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
>> index ebf24c9d2bf2..e60a8f88e226 100644
>> --- a/drivers/gpu/drm/qxl/qxl_object.h
>> +++ b/drivers/gpu/drm/qxl/qxl_object.h
>> @@ -50,7 +50,7 @@ static inline void qxl_bo_unreserve(struct qxl_bo *bo)
>>
>>   static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
>>   {
>> -       return bo->tbo.num_pages << PAGE_SHIFT;
>> +       return bo->tbo.base.size;
>>   }
>>
>>   static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
>> diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
>> index 21ce2f9502c0..4f35c32957be 100644
>> --- a/drivers/gpu/drm/radeon/radeon_cs.c
>> +++ b/drivers/gpu/drm/radeon/radeon_cs.c
>> @@ -401,7 +401,8 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a,
>>          struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
>>
>>          /* Sort A before B if A is smaller. */
>> -       return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
>> +       return (int)la->robj->tbo.mem.num_pages -
>> +               (int)lb->robj->tbo.mem.num_pages;
>>   }
>>
>>   /**
>> diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
>> index ab81e35cb060..9a999930e3dd 100644
>> --- a/drivers/gpu/drm/radeon/radeon_object.c
>> +++ b/drivers/gpu/drm/radeon/radeon_object.c
>> @@ -54,20 +54,19 @@ static void radeon_update_memory_usage(struct radeon_bo *bo,
>>                                         unsigned mem_type, int sign)
>>   {
>>          struct radeon_device *rdev = bo->rdev;
>> -       u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;
>>
>>          switch (mem_type) {
>>          case TTM_PL_TT:
>>                  if (sign > 0)
>> -                       atomic64_add(size, &rdev->gtt_usage);
>> +                       atomic64_add(bo->tbo.base.size, &rdev->gtt_usage);
>>                  else
>> -                       atomic64_sub(size, &rdev->gtt_usage);
>> +                       atomic64_sub(bo->tbo.base.size, &rdev->gtt_usage);
>>                  break;
>>          case TTM_PL_VRAM:
>>                  if (sign > 0)
>> -                       atomic64_add(size, &rdev->vram_usage);
>> +                       atomic64_add(bo->tbo.base.size, &rdev->vram_usage);
>>                  else
>> -                       atomic64_sub(size, &rdev->vram_usage);
>> +                       atomic64_sub(bo->tbo.base.size, &rdev->vram_usage);
>>                  break;
>>          }
>>   }
>> @@ -256,7 +255,7 @@ int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
>>                  }
>>                  return 0;
>>          }
>> -       r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
>> +       r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
>>          if (r) {
>>                  return r;
>>          }
>> @@ -610,7 +609,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
>>   out:
>>          radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
>>                                 bo->tbo.mem.start << PAGE_SHIFT,
>> -                              bo->tbo.num_pages << PAGE_SHIFT);
>> +                              bo->tbo.base.size);
>>          return 0;
>>   }
>>
>> diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
>> index d606e9a935e3..9896d8231fe5 100644
>> --- a/drivers/gpu/drm/radeon/radeon_object.h
>> +++ b/drivers/gpu/drm/radeon/radeon_object.h
>> @@ -109,12 +109,12 @@ static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
>>
>>   static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
>>   {
>> -       return bo->tbo.num_pages << PAGE_SHIFT;
>> +       return bo->tbo.base.size;
>>   }
>>
>>   static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
>>   {
>> -       return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
>> +       return bo->tbo.base.size / RADEON_GPU_PAGE_SIZE;
>>   }
>>
>>   static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
>> diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
>> index 088d39a51c0d..0a09dbaba289 100644
>> --- a/drivers/gpu/drm/radeon/radeon_prime.c
>> +++ b/drivers/gpu/drm/radeon/radeon_prime.c
>> @@ -34,9 +34,9 @@
>>   struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj)
>>   {
>>          struct radeon_bo *bo = gem_to_radeon_bo(obj);
>> -       int npages = bo->tbo.num_pages;
>>
>> -       return drm_prime_pages_to_sg(obj->dev, bo->tbo.ttm->pages, npages);
>> +       return drm_prime_pages_to_sg(obj->dev, bo->tbo.ttm->pages,
>> +                                    bo->tbo.ttm->num_pages);
>>   }
>>
>>   struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
>> diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
>> index c93f3ab3c4e3..1729cb9a95c5 100644
>> --- a/drivers/gpu/drm/radeon/radeon_trace.h
>> +++ b/drivers/gpu/drm/radeon/radeon_trace.h
>> @@ -22,7 +22,7 @@ TRACE_EVENT(radeon_bo_create,
>>
>>              TP_fast_assign(
>>                             __entry->bo = bo;
>> -                          __entry->pages = bo->tbo.num_pages;
>> +                          __entry->pages = bo->tbo.mem.num_pages;
>>                             ),
>>              TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
>>   );
>> diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
>> index a3432c6343ba..ea365ac6f4f1 100644
>> --- a/drivers/gpu/drm/radeon/radeon_ttm.c
>> +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
>> @@ -274,7 +274,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
>>
>>   out:
>>          /* update statistics */
>> -       atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved);
>> +       atomic64_add(bo->base.size, &rdev->num_bytes_moved);
>>          radeon_bo_move_notify(bo, evict, new_mem);
>>          return 0;
>>   }
>> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
>> index 02cc5d247c0d..2196e3d3ef29 100644
>> --- a/drivers/gpu/drm/ttm/ttm_bo.c
>> +++ b/drivers/gpu/drm/ttm/ttm_bo.c
>> @@ -72,9 +72,9 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
>>          struct ttm_resource_manager *man;
>>          int i, mem_type;
>>
>> -       drm_printf(&p, "No space for %p (%lu pages, %luK, %luM)\n",
>> -                  bo, bo->mem.num_pages, bo->mem.size >> 10,
>> -                  bo->mem.size >> 20);
>> +       drm_printf(&p, "No space for %p (%lu pages, %zuK, %zuM)\n",
>> +                  bo, bo->mem.num_pages, bo->base.size >> 10,
>> +                  bo->base.size >> 20);
>>          for (i = 0; i < placement->num_placement; i++) {
>>                  mem_type = placement->placement[i].mem_type;
>>                  drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
>> @@ -268,7 +268,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
>>                  goto out_err;
>>          }
>>
>> -       ctx->bytes_moved += bo->num_pages << PAGE_SHIFT;
>> +       ctx->bytes_moved += bo->base.size;
>>          return 0;
>>
>>   out_err:
>> @@ -985,8 +985,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
>>
>>          memset(&hop, 0, sizeof(hop));
>>
>> -       mem.num_pages = bo->num_pages;
>> -       mem.size = mem.num_pages << PAGE_SHIFT;
>> +       mem.num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT;
>>          mem.page_alignment = bo->mem.page_alignment;
>>          mem.bus.offset = 0;
>>          mem.bus.addr = NULL;
>> @@ -1102,7 +1101,7 @@ EXPORT_SYMBOL(ttm_bo_validate);
>>
>>   int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
>>                           struct ttm_buffer_object *bo,
>> -                        unsigned long size,
>> +                        size_t size,
>>                           enum ttm_bo_type type,
>>                           struct ttm_placement *placement,
>>                           uint32_t page_alignment,
>> @@ -1113,9 +1112,8 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
>>                           void (*destroy) (struct ttm_buffer_object *))
>>   {
>>          struct ttm_mem_global *mem_glob = &ttm_mem_glob;
>> -       int ret = 0;
>> -       unsigned long num_pages;
>>          bool locked;
>> +       int ret = 0;
>>
>>          ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
>>          if (ret) {
>> @@ -1127,16 +1125,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
>>                  return -ENOMEM;
>>          }
>>
>> -       num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
>> -       if (num_pages == 0) {
>> -               pr_err("Illegal buffer object size\n");
>> -               if (destroy)
>> -                       (*destroy)(bo);
>> -               else
>> -                       kfree(bo);
>> -               ttm_mem_global_free(mem_glob, acc_size);
>> -               return -EINVAL;
>> -       }
>>          bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
>>
>>          kref_init(&bo->kref);
>> @@ -1145,10 +1133,8 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
>>          INIT_LIST_HEAD(&bo->swap);
>>          bo->bdev = bdev;
>>          bo->type = type;
>> -       bo->num_pages = num_pages;
>> -       bo->mem.size = num_pages << PAGE_SHIFT;
>>          bo->mem.mem_type = TTM_PL_SYSTEM;
>> -       bo->mem.num_pages = bo->num_pages;
>> +       bo->mem.num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
>>          bo->mem.mm_node = NULL;
>>          bo->mem.page_alignment = page_alignment;
>>          bo->mem.bus.offset = 0;
>> @@ -1166,9 +1152,10 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
>>          }
>>          if (!ttm_bo_uses_embedded_gem_object(bo)) {
>>                  /*
>> -                * bo.gem is not initialized, so we have to setup the
>> +                * bo.base is not initialized, so we have to setup the
>>                   * struct elements we want use regardless.
>>                   */
>> +               bo->base.size = size;
>>                  dma_resv_init(&bo->base._resv);
>>                  drm_vma_node_reset(&bo->base.vma_node);
>>          }
>> @@ -1210,7 +1197,7 @@ EXPORT_SYMBOL(ttm_bo_init_reserved);
>>
>>   int ttm_bo_init(struct ttm_bo_device *bdev,
>>                  struct ttm_buffer_object *bo,
>> -               unsigned long size,
>> +               size_t size,
>>                  enum ttm_bo_type type,
>>                  struct ttm_placement *placement,
>>                  uint32_t page_alignment,
>> diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
>> index 5bbc1339d28e..398d5013fc39 100644
>> --- a/drivers/gpu/drm/ttm/ttm_bo_util.c
>> +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
>> @@ -431,9 +431,9 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
>>
>>          map->virtual = NULL;
>>          map->bo = bo;
>> -       if (num_pages > bo->num_pages)
>> +       if (num_pages > bo->mem.num_pages)
>>                  return -EINVAL;
>> -       if (start_page > bo->num_pages)
>> +       if ((start_page + num_pages) > bo->mem.num_pages)
>>                  return -EINVAL;
>>
>>          ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
>> @@ -485,14 +485,14 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
>>
>>          if (mem->bus.is_iomem) {
>>                  void __iomem *vaddr_iomem;
>> -               size_t size = bo->num_pages << PAGE_SHIFT;
>>
>>                  if (mem->bus.addr)
>>                          vaddr_iomem = (void __iomem *)mem->bus.addr;
>>                  else if (mem->bus.caching == ttm_write_combined)
>> -                       vaddr_iomem = ioremap_wc(mem->bus.offset, size);
>> +                       vaddr_iomem = ioremap_wc(mem->bus.offset,
>> +                                                bo->base.size);
>>                  else
>> -                       vaddr_iomem = ioremap(mem->bus.offset, size);
>> +                       vaddr_iomem = ioremap(mem->bus.offset, bo->base.size);
>>
>>                  if (!vaddr_iomem)
>>                          return -ENOMEM;
>> @@ -517,7 +517,7 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
>>                   * or to make the buffer object look contiguous.
>>                   */
>>                  prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
>> -               vaddr = vmap(ttm->pages, bo->num_pages, 0, prot);
>> +               vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot);
>>                  if (!vaddr)
>>                          return -ENOMEM;
>>
>> diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
>> index 144a4940b6b6..6dc96cf66744 100644
>> --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
>> +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
>> @@ -198,7 +198,7 @@ static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
>>
>>          /* Fault should not cross bo boundary. */
>>          page_offset &= ~(fault_page_size - 1);
>> -       if (page_offset + fault_page_size > bo->num_pages)
>> +       if (page_offset + fault_page_size > bo->mem.num_pages)
>>                  goto out_fallback;
>>
>>          if (bo->mem.bus.is_iomem)
>> @@ -306,7 +306,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
>>          page_last = vma_pages(vma) + vma->vm_pgoff -
>>                  drm_vma_node_start(&bo->base.vma_node);
>>
>> -       if (unlikely(page_offset >= bo->num_pages))
>> +       if (unlikely(page_offset >= bo->mem.num_pages))
>>                  return VM_FAULT_SIGBUS;
>>
>>          prot = ttm_io_prot(bo, &bo->mem, prot);
>> @@ -469,7 +469,7 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
>>                   << PAGE_SHIFT);
>>          int ret;
>>
>> -       if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
>> +       if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->mem.num_pages)
>>                  return -EIO;
>>
>>          ret = ttm_bo_reserve(bo, true, false, NULL);
>> diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
>> index 77ba784425dd..7f75a13163f0 100644
>> --- a/drivers/gpu/drm/ttm/ttm_tt.c
>> +++ b/drivers/gpu/drm/ttm/ttm_tt.c
>> @@ -129,7 +129,7 @@ static void ttm_tt_init_fields(struct ttm_tt *ttm,
>>                                 uint32_t page_flags,
>>                                 enum ttm_caching caching)
>>   {
>> -       ttm->num_pages = bo->num_pages;
>> +       ttm->num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT;
>>          ttm->caching = ttm_cached;
>>          ttm->page_flags = page_flags;
>>          ttm->dma_address = NULL;
>> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
>> index f21881e087db..9f2779ddcf08 100644
>> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
>> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
>> @@ -482,8 +482,8 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
>>          d.src_addr = NULL;
>>          d.dst_pages = dst->ttm->pages;
>>          d.src_pages = src->ttm->pages;
>> -       d.dst_num_pages = dst->num_pages;
>> -       d.src_num_pages = src->num_pages;
>> +       d.dst_num_pages = dst->mem.num_pages;
>> +       d.src_num_pages = src->mem.num_pages;
>>          d.dst_prot = ttm_io_prot(dst, &dst->mem, PAGE_KERNEL);
>>          d.src_prot = ttm_io_prot(src, &src->mem, PAGE_KERNEL);
>>          d.diff = diff;
>> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
>> index 263d76ae43f0..63dbc44eebe0 100644
>> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
>> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
>> @@ -223,7 +223,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
>>          uint32_t new_flags;
>>
>>          place = vmw_vram_placement.placement[0];
>> -       place.lpfn = bo->num_pages;
>> +       place.lpfn = bo->mem.num_pages;
>>          placement.num_placement = 1;
>>          placement.placement = &place;
>>          placement.num_busy_placement = 1;
>> @@ -244,7 +244,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
>>           * that situation.
>>           */
>>          if (bo->mem.mem_type == TTM_PL_VRAM &&
>> -           bo->mem.start < bo->num_pages &&
>> +           bo->mem.start < bo->mem.num_pages &&
>>              bo->mem.start > 0 &&
>>              buf->base.pin_count == 0) {
>>                  ctx.interruptible = false;
>> @@ -391,7 +391,7 @@ void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
>>          if (virtual)
>>                  return virtual;
>>
>> -       ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
>> +       ret = ttm_bo_kmap(bo, 0, bo->mem.num_pages, &vbo->map);
>>          if (ret)
>>                  DRM_ERROR("Buffer object map failed: %d.\n", ret);
>>
>> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
>> index 984d8884357d..a077e420d2ff 100644
>> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
>> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
>> @@ -430,7 +430,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
>>           * Do a page by page copy of COTables. This eliminates slow vmap()s.
>>           * This should really be a TTM utility.
>>           */
>> -       for (i = 0; i < old_bo->num_pages; ++i) {
>> +       for (i = 0; i < old_bo->mem.num_pages; ++i) {
>>                  bool dummy;
>>
>>                  ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
>> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
>> index e67e2e8f6e6f..6c016001721d 100644
>> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
>> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
>> @@ -1042,7 +1042,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
>>
>>          if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
>>
>> -               if (unlikely(new_query_bo->base.num_pages > 4)) {
>> +               if (unlikely(new_query_bo->base.mem.num_pages > 4)) {
>>                          VMW_DEBUG_USER("Query buffer too large.\n");
>>                          return -EINVAL;
>>                  }
>> @@ -1541,7 +1541,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
>>                  return ret;
>>
>>          /* Make sure DMA doesn't cross BO boundaries. */
>> -       bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
>> +       bo_size = vmw_bo->base.base.size;
>>          if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
>>                  VMW_DEBUG_USER("Invalid DMA offset.\n");
>>                  return -EINVAL;
>> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
>> index 8fe26e32f920..1774960d1b89 100644
>> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
>> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
>> @@ -64,20 +64,19 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
>>          spin_lock(&gman->lock);
>>
>>          if (gman->max_gmr_pages > 0) {
>> -               gman->used_gmr_pages += bo->num_pages;
>> +               gman->used_gmr_pages += mem->num_pages;
>>                  if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
>>                          goto nospace;
>>          }
>>
>>          mem->mm_node = gman;
>>          mem->start = id;
>> -       mem->num_pages = bo->num_pages;
>>
>>          spin_unlock(&gman->lock);
>>          return 0;
>>
>>   nospace:
>> -       gman->used_gmr_pages -= bo->num_pages;
>> +       gman->used_gmr_pages -= mem->num_pages;
>>          spin_unlock(&gman->lock);
>>          ida_free(&gman->gmr_ida, id);
>>          return -ENOSPC;
>> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
>> index bc67f2b930e1..7dc96125e5c2 100644
>> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
>> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
>> @@ -1220,7 +1220,7 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
>>          int ret;
>>
>>          requested_size = mode_cmd->height * mode_cmd->pitches[0];
>> -       if (unlikely(requested_size > bo->base.num_pages * PAGE_SIZE)) {
>> +       if (unlikely(requested_size > bo->base.base.size)) {
>>                  DRM_ERROR("Screen buffer object size is too small "
>>                            "for requested mode.\n");
>>                  return -EINVAL;
>> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
>> index 0b76b3d17d4c..0a900afc66ff 100644
>> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
>> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
>> @@ -232,7 +232,7 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
>>   int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
>>   {
>>          struct vmw_bo_dirty *dirty = vbo->dirty;
>> -       pgoff_t num_pages = vbo->base.num_pages;
>> +       pgoff_t num_pages = vbo->base.mem.num_pages;
>>          size_t size, acc_size;
>>          int ret;
>>          static struct ttm_operation_ctx ctx = {
>> @@ -413,7 +413,7 @@ vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf)
>>                  return ret;
>>
>>          page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node);
>> -       if (unlikely(page_offset >= bo->num_pages)) {
>> +       if (unlikely(page_offset >= bo->mem.num_pages)) {
>>                  ret = VM_FAULT_SIGBUS;
>>                  goto out_unlock;
>>          }
>> @@ -456,7 +456,7 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
>>
>>                  page_offset = vmf->pgoff -
>>                          drm_vma_node_start(&bo->base.vma_node);
>> -               if (page_offset >= bo->num_pages ||
>> +               if (page_offset >= bo->mem.num_pages ||
>>                      vmw_resources_clean(vbo, page_offset,
>>                                          page_offset + PAGE_SIZE,
>>                                          &allowed_prefault)) {
>> @@ -531,7 +531,7 @@ vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
>>
>>                  page_offset = vmf->pgoff -
>>                          drm_vma_node_start(&bo->base.vma_node);
>> -               if (page_offset >= bo->num_pages ||
>> +               if (page_offset >= bo->mem.num_pages ||
>>                      vmw_resources_clean(vbo, page_offset,
>>                                          page_offset + PAGE_SIZE,
>>                                          &allowed_prefault)) {
>> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
>> index 00b535831a7a..f6e8fdfc76e5 100644
>> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
>> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
>> @@ -360,7 +360,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
>>          int ret;
>>
>>          if (likely(res->backup)) {
>> -               BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
>> +               BUG_ON(res->backup->base.base.size < size);
>>                  return 0;
>>          }
>>
>> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
>> index f328aa5839a2..e76a720f841e 100644
>> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
>> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
>> @@ -856,8 +856,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
>>                          return ret;
>>                  }
>>
>> -               if ((u64)buffer->base.num_pages * PAGE_SIZE <
>> -                   (u64)size + (u64)offset) {
>> +               if ((u64)buffer->base.base.size < (u64)size + (u64)offset) {
>>                          VMW_DEBUG_USER("Illegal buffer- or shader size.\n");
>>                          ret = -EINVAL;
>>                          goto out_bad_arg;
>> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
>> index 5b04ec047ef3..27ab2c50312b 100644
>> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
>> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
>> @@ -512,7 +512,7 @@ static void vmw_stdu_bo_fifo_commit(struct vmw_kms_dirty *dirty)
>>          cmd->body.host.mipmap = 0;
>>          cmd->body.transfer = ddirty->transfer;
>>          suffix->suffixSize = sizeof(*suffix);
>> -       suffix->maximumOffset = ddirty->buf->base.num_pages * PAGE_SIZE;
>> +       suffix->maximumOffset = ddirty->buf->base.base.size;
>>
>>          if (ddirty->transfer == SVGA3D_WRITE_HOST_VRAM) {
>>                  blit_size += sizeof(struct vmw_stdu_update);
>> @@ -1238,7 +1238,7 @@ static uint32_t vmw_stdu_bo_populate_update(struct vmw_du_update_plane  *update,
>>          vfbbo = container_of(update->vfb, typeof(*vfbbo), base);
>>
>>          suffix->suffixSize = sizeof(*suffix);
>> -       suffix->maximumOffset = vfbbo->buffer->base.num_pages * PAGE_SIZE;
>> +       suffix->maximumOffset = vfbbo->buffer->base.base.size;
>>
>>          vmw_stdu_populate_update(&suffix[1], stdu->base.unit, bb->x1, bb->x2,
>>                                   bb->y1, bb->y2);
>> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
>> index 3914bfee0533..fa9be30bec6c 100644
>> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
>> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
>> @@ -1550,8 +1550,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
>>                                           &res->backup,
>>                                           &user_srf->backup_base);
>>                  if (ret == 0) {
>> -                       if (res->backup->base.num_pages * PAGE_SIZE <
>> -                           res->backup_size) {
>> +                       if (res->backup->base.base.size < res->backup_size) {
>>                                  VMW_DEBUG_USER("Surface backup buffer too small.\n");
>>                                  vmw_bo_unreference(&res->backup);
>>                                  ret = -EINVAL;
>> @@ -1614,7 +1613,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
>>          if (res->backup) {
>>                  rep->buffer_map_handle =
>>                          drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
>> -               rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
>> +               rep->buffer_size = res->backup->base.base.size;
>>                  rep->buffer_handle = backup_handle;
>>          } else {
>>                  rep->buffer_map_handle = 0;
>> @@ -1692,7 +1691,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
>>          rep->crep.buffer_handle = backup_handle;
>>          rep->crep.buffer_map_handle =
>>                  drm_vma_node_offset_addr(&srf->res.backup->base.base.vma_node);
>> -       rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;
>> +       rep->crep.buffer_size = srf->res.backup->base.base.size;
>>
>>          rep->creq.version = drm_vmw_gb_surface_v1;
>>          rep->creq.svga3d_flags_upper_32_bits =
>> diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
>> index 79b9367e0ffd..0d4e3fccaa8a 100644
>> --- a/include/drm/ttm/ttm_bo_api.h
>> +++ b/include/drm/ttm/ttm_bo_api.h
>> @@ -125,7 +125,6 @@ struct ttm_buffer_object {
>>          struct ttm_bo_device *bdev;
>>          enum ttm_bo_type type;
>>          void (*destroy) (struct ttm_buffer_object *);
>> -       unsigned long num_pages;
>>          size_t acc_size;
>>
>>          /**
>> @@ -397,13 +396,11 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
>>
>>   int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
>>                           struct ttm_buffer_object *bo,
>> -                        unsigned long size,
>> -                        enum ttm_bo_type type,
>> +                        size_t size, enum ttm_bo_type type,
>>                           struct ttm_placement *placement,
>>                           uint32_t page_alignment,
>>                           struct ttm_operation_ctx *ctx,
>> -                        size_t acc_size,
>> -                        struct sg_table *sg,
>> +                        size_t acc_size, struct sg_table *sg,
>>                           struct dma_resv *resv,
>>                           void (*destroy) (struct ttm_buffer_object *));
>>
>> @@ -445,7 +442,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
>>    * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
>>    */
>>   int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo,
>> -               unsigned long size, enum ttm_bo_type type,
>> +               size_t size, enum ttm_bo_type type,
>>                  struct ttm_placement *placement,
>>                  uint32_t page_alignment, bool interrubtible, size_t acc_size,
>>                  struct sg_table *sg, struct dma_resv *resv,
>> diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
>> index f48a70d39ac5..ad6da99770e9 100644
>> --- a/include/drm/ttm/ttm_resource.h
>> +++ b/include/drm/ttm/ttm_resource.h
>> @@ -171,7 +171,6 @@ struct ttm_bus_placement {
>>   struct ttm_resource {
>>          void *mm_node;
>>          unsigned long start;
>> -       unsigned long size;
>>          unsigned long num_pages;
>>          uint32_t page_alignment;
>>          uint32_t mem_type;
>> --
>> 2.25.1
>>
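
For readers tracking the conversion pattern, a minimal, purely illustrative
sketch of what callers look like after this cleanup: byte sizes come from the
embedded GEM object (bo->base.size) and page counts from the current resource
(bo->mem.num_pages). The helper names below are hypothetical and not part of
the patch, just the idiom the series converts drivers to:

/*
 * Illustrative helpers only, assuming the post-cleanup fields:
 * bo->base.size is the BO size in bytes (the GEM object size),
 * bo->mem.num_pages is the size in pages of the current resource.
 */
#include <linux/types.h>
#include <drm/drm_gem.h>
#include <drm/ttm/ttm_bo_api.h>

/* Reject an access that would cross the BO boundary (in bytes). */
static inline bool example_access_in_bounds(struct ttm_buffer_object *bo,
					    u64 offset, u64 bytes)
{
	return offset + bytes <= bo->base.size;
}

/* Map the whole BO, sized in pages of its current resource domain. */
static inline int example_map_whole_bo(struct ttm_buffer_object *bo,
				       struct ttm_bo_kmap_obj *map)
{
	return ttm_bo_kmap(bo, 0, bo->mem.num_pages, map);
}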

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

