* [PATCH 1/6] drm/i915: Rename the magic polymorphic macro __I915__
From: Chris Wilson @ 2016-03-18 21:16 UTC
  To: intel-gfx

To free up the name for a generic to_i915() convenience macro, rename
the existing __I915__ superconvenience macro.
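
With the rename in place, a polymorphic to_i915() can accept whichever
pointer the caller already has. An illustrative sketch of intended
caller code (not part of this patch):

	struct drm_i915_private *dev_priv;

	dev_priv = to_i915(dev);	/* from a struct drm_device * */
	dev_priv = to_i915(dev_priv);	/* idempotent on drm_i915_private * */
	to_i915(&unknown_struct);	/* any other type: compile-time BUILD_BUG() */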

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_drv.h | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f330a53c19b9..8606e2c7db04 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1972,14 +1972,14 @@ struct drm_i915_private {
 	 */
 };
 
-static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
+static inline struct drm_i915_private *__to_i915(const struct drm_device *dev)
 {
 	return dev->dev_private;
 }
 
 static inline struct drm_i915_private *dev_to_i915(struct device *dev)
 {
-	return to_i915(dev_get_drvdata(dev));
+	return __to_i915(dev_get_drvdata(dev));
 }
 
 static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
@@ -2449,19 +2449,19 @@ struct drm_i915_cmd_table {
 };
 
 /* Note that the (struct drm_i915_private *) cast is just to shut up gcc. */
-#define __I915__(p) ({ \
+#define to_i915(p) ({ \
 	struct drm_i915_private *__p; \
 	if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_private)) \
 		__p = (struct drm_i915_private *)p; \
 	else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \
-		__p = to_i915((struct drm_device *)p); \
+		__p = __to_i915((struct drm_device *)p); \
 	else \
 		BUILD_BUG(); \
 	__p; \
 })
-#define INTEL_INFO(p) 	(&__I915__(p)->info)
+#define INTEL_INFO(p) 	(&to_i915(p)->info)
 #define INTEL_DEVID(p)	(INTEL_INFO(p)->device_id)
-#define INTEL_REVID(p)	(__I915__(p)->dev->pdev->revision)
+#define INTEL_REVID(p)	(to_i915(p)->dev->pdev->revision)
 
 #define REVID_FOREVER		0xff
 /*
@@ -2587,7 +2587,7 @@ struct drm_i915_cmd_table {
 #define HAS_LLC(dev)		(INTEL_INFO(dev)->has_llc)
 #define HAS_SNOOP(dev)		(INTEL_INFO(dev)->has_snoop)
 #define HAS_WT(dev)		((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
-				 __I915__(dev)->ellc_size)
+				 to_i915(dev)->ellc_size)
 #define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
 
 #define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 6)
@@ -2667,11 +2667,11 @@ struct drm_i915_cmd_table {
 #define INTEL_PCH_P2X_DEVICE_ID_TYPE		0x7100
 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE		0x2900 /* qemu q35 has 2918 */
 
-#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
+#define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type)
 #define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
 #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
-#define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
-#define HAS_PCH_LPT_H(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
+#define HAS_PCH_LPT_LP(dev) (to_i915(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
+#define HAS_PCH_LPT_H(dev) (to_i915(dev)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
 #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
 #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
-- 
2.8.0.rc3


* [PATCH 2/6] drm/i915: Allow passing any known pointer to for_each_engine()
From: Chris Wilson @ 2016-03-18 21:16 UTC
  To: intel-gfx

Rather than require the caller to first obtain a drm_i915_private,
allow them to pass anything from which we know how to derive such a
pointer using to_i915().

Note this fixes a macro bug in for_each_engine_masked(), which was not
using its dev_priv__ parameter.
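
The old macro body expanded the literal token dev_priv rather than its
dev_priv__ parameter, so it only compiled when the caller happened to
have a local with exactly that name. An illustrative sketch:

	struct drm_i915_private *i915 = to_i915(dev);
	struct intel_engine_cs *engine;
	int count = 0;

	/* Previously failed to build: the macro referenced 'dev_priv',
	 * which is not in scope here. */
	for_each_engine_masked(engine, i915, 0x3)
		count++;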

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h         | 8 ++++----
 drivers/gpu/drm/i915/i915_gem_context.c | 4 ++--
 drivers/gpu/drm/i915/intel_mocs.c       | 3 +--
 3 files changed, 7 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8606e2c7db04..0c9fe00d3e83 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1988,12 +1988,12 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
 }
 
 /* Iterate over initialised rings */
-#define for_each_engine(ring__, dev_priv__, i__) \
+#define for_each_engine(ring__, ptr__, i__) \
 	for ((i__) = 0; (i__) < I915_NUM_ENGINES; (i__)++) \
-		for_each_if ((((ring__) = &(dev_priv__)->engine[(i__)]), intel_engine_initialized((ring__))))
+		for_each_if ((((ring__) = &to_i915(ptr__)->engine[(i__)]), intel_engine_initialized((ring__))))
 
-#define for_each_engine_masked(engine__, dev_priv__, mask__) \
-	for ((engine__) = &dev_priv->engine[0]; (engine__) < &dev_priv->engine[I915_NUM_ENGINES]; (engine__)++) \
+#define for_each_engine_masked(engine__, ptr__, mask__) \
+	for ((engine__) = &to_i915(ptr__)->engine[0]; (engine__) < &to_i915(ptr__)->engine[I915_NUM_ENGINES]; (engine__)++) \
 		for_each_if (intel_engine_flag((engine__)) & (mask__) && intel_engine_initialized((engine__)))
 
 enum hdmi_force_audio {
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 394e525e55f1..a8afd0cee7f7 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -553,7 +553,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 
 			intel_ring_emit(engine,
 					MI_LOAD_REGISTER_IMM(num_rings));
-			for_each_engine(signaller, to_i915(engine->dev), i) {
+			for_each_engine(signaller, engine->dev, i) {
 				if (signaller == engine)
 					continue;
 
@@ -582,7 +582,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 
 			intel_ring_emit(engine,
 					MI_LOAD_REGISTER_IMM(num_rings));
-			for_each_engine(signaller, to_i915(engine->dev), i) {
+			for_each_engine(signaller, engine->dev, i) {
 				if (signaller == engine)
 					continue;
 
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 3c725dde16ed..45200b93e9bb 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -323,12 +323,11 @@ int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
 	int ret;
 
 	if (get_mocs_settings(req->engine->dev, &t)) {
-		struct drm_i915_private *dev_priv = req->i915;
 		struct intel_engine_cs *engine;
 		enum intel_engine_id ring_id;
 
 		/* Program the control registers */
-		for_each_engine(engine, dev_priv, ring_id) {
+		for_each_engine(engine, req->i915, ring_id) {
 			ret = emit_mocs_control_table(req, &t, ring_id);
 			if (ret)
 				return ret;
-- 
2.8.0.rc3


* [PATCH 3/6] drm/i915: Extend magic to_i915() to work with drm_i915_gem_object
From: Chris Wilson @ 2016-03-18 21:16 UTC
  To: intel-gfx

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_debugfs.c        |  7 +++---
 drivers/gpu/drm/i915/i915_drv.h            | 15 ++++++++-----
 drivers/gpu/drm/i915/i915_gem.c            | 34 +++++++++++++-----------------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 17 ++++++---------
 drivers/gpu/drm/i915/i915_gem_fence.c      |  8 +++----
 drivers/gpu/drm/i915/i915_gem_gtt.c        |  2 +-
 drivers/gpu/drm/i915/i915_gem_stolen.c     |  4 +---
 drivers/gpu/drm/i915/i915_gem_tiling.c     |  4 ++--
 drivers/gpu/drm/i915/i915_gem_userptr.c    |  4 ++--
 drivers/gpu/drm/i915/intel_ringbuffer.c    |  2 +-
 10 files changed, 46 insertions(+), 51 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e0ba3e38000f..33ddcdf6d046 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -128,7 +128,6 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
 static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	struct intel_engine_cs *engine;
 	struct i915_vma *vma;
 	int pin_count = 0;
@@ -143,13 +142,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		   obj->base.size / 1024,
 		   obj->base.read_domains,
 		   obj->base.write_domain);
-	for_each_engine(engine, dev_priv, i)
+	for_each_engine(engine, obj, i)
 		seq_printf(m, "%x ",
 				i915_gem_request_get_seqno(obj->last_read_req[i]));
 	seq_printf(m, "] %x %x%s%s%s",
 		   i915_gem_request_get_seqno(obj->last_write_req),
 		   i915_gem_request_get_seqno(obj->last_fenced_req),
-		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
+		   i915_cache_level_str(to_i915(obj), obj->cache_level),
 		   obj->dirty ? " dirty" : "",
 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 	if (obj->base.name)
@@ -339,7 +338,7 @@ static int per_file_stats(int id, void *ptr, void *data)
 	if (obj->base.name || obj->base.dma_buf)
 		stats->shared += obj->base.size;
 
-	if (USES_FULL_PPGTT(obj->base.dev)) {
+	if (USES_FULL_PPGTT(obj)) {
 		list_for_each_entry(vma, &obj->vma_list, obj_link) {
 			struct i915_hw_ppgtt *ppgtt;
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0c9fe00d3e83..92365f047e53 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2186,6 +2186,12 @@ struct drm_i915_gem_object {
 };
 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
 
+static inline struct drm_i915_private *
+__obj_to_i915(const struct drm_i915_gem_object *obj)
+{
+	return __to_i915(obj->base.dev);
+}
+
 void i915_gem_track_fb(struct drm_i915_gem_object *old,
 		       struct drm_i915_gem_object *new,
 		       unsigned frontbuffer_bits);
@@ -2455,6 +2461,8 @@ struct drm_i915_cmd_table {
 		__p = (struct drm_i915_private *)p; \
 	else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \
 		__p = __to_i915((struct drm_device *)p); \
+	else if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_gem_object)) \
+		__p = __obj_to_i915((struct drm_i915_gem_object *)p); \
 	else \
 		BUILD_BUG(); \
 	__p; \
@@ -3132,8 +3140,7 @@ i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
 bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
 
 /* Some GGTT VM helpers */
-#define i915_obj_to_ggtt(obj) \
-	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->ggtt.base)
+#define i915_obj_to_ggtt(obj) (&to_i915(obj)->ggtt.base)
 
 static inline struct i915_hw_ppgtt *
 i915_vm_to_ppgtt(struct i915_address_space *vm)
@@ -3282,9 +3289,7 @@ void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
 /* i915_gem_tiling.c */
 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-
-	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
+	return to_i915(obj)->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
 		obj->tiling_mode != I915_TILING_NONE;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8588c83abb35..710a6bbc985e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -361,14 +361,12 @@ out:
 
 void *i915_gem_object_alloc(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
+	return kmem_cache_zalloc(to_i915(dev)->objects, GFP_KERNEL);
 }
 
 void i915_gem_object_free(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-	kmem_cache_free(dev_priv->objects, obj);
+	kmem_cache_free(to_i915(obj)->objects, obj);
 }
 
 static int
@@ -2028,7 +2026,7 @@ i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
 
 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(obj);
 	int ret;
 
 	if (drm_vma_node_has_offset(&obj->base.vma_node))
@@ -2241,7 +2239,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 static int
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(obj);
 	int page_count, i;
 	struct address_space *mapping;
 	struct sg_table *st;
@@ -2372,7 +2370,7 @@ err_pages:
 int
 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(obj);
 	const struct drm_i915_gem_object_ops *ops = obj->ops;
 	int ret;
 
@@ -2449,8 +2447,7 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
 	 * so that we don't steal from recently used but inactive objects
 	 * (unless we are forced to ofc!)
 	 */
-	list_move_tail(&obj->global_list,
-		       &to_i915(obj->base.dev)->mm.bound_list);
+	list_move_tail(&obj->global_list, &to_i915(obj)->mm.bound_list);
 
 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 		if (!list_empty(&vma->vm_link))
@@ -3172,7 +3169,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 		return 0;
 
 	if (!i915_semaphore_is_enabled(obj->base.dev)) {
-		struct drm_i915_private *i915 = to_i915(obj->base.dev);
+		struct drm_i915_private *i915 = to_i915(obj);
 		ret = __i915_wait_request(from_req,
 					  atomic_read(&i915->gpu_error.reset_counter),
 					  i915->mm.interruptible,
@@ -3312,7 +3309,7 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
 static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(obj);
 	int ret;
 
 	if (list_empty(&vma->obj_link))
@@ -3772,7 +3769,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	vma = i915_gem_obj_to_ggtt(obj);
 	if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
 		list_move_tail(&vma->vm_link,
-			       &to_i915(obj->base.dev)->ggtt.base.inactive_list);
+			       &to_i915(obj)->ggtt.base.inactive_list);
 
 	return 0;
 }
@@ -4010,7 +4007,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
 	 */
 	ret = i915_gem_object_set_cache_level(obj,
-					      HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
+					      HAS_WT(obj) ? I915_CACHE_WT : I915_CACHE_NONE);
 	if (ret)
 		goto err_unpin_display;
 
@@ -4209,7 +4206,7 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
 		     (vma->node.start & (fence_alignment - 1)) == 0);
 
 	mappable = (vma->node.start + fence_size <=
-		    to_i915(obj->base.dev)->ggtt.mappable_end);
+		    to_i915(obj)->ggtt.mappable_end);
 
 	obj->map_and_fenceable = mappable && fenceable;
 }
@@ -4221,7 +4218,7 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
 		       uint32_t alignment,
 		       uint64_t flags)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(obj);
 	struct i915_vma *vma;
 	unsigned bound;
 	int ret;
@@ -4456,7 +4453,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 	obj->fence_reg = I915_FENCE_REG_NONE;
 	obj->madv = I915_MADV_WILLNEED;
 
-	i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
+	i915_gem_info_add_obj(to_i915(obj), obj->base.size);
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
@@ -4545,8 +4542,7 @@ static bool discard_backing_storage(struct drm_i915_gem_object *obj)
 void i915_gem_free_object(struct drm_gem_object *gem_obj)
 {
 	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(obj);
 	struct i915_vma *vma, *next;
 
 	intel_runtime_pm_get(dev_priv);
@@ -4647,7 +4643,7 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
 
 	list_del(&vma->obj_link);
 
-	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
+	kmem_cache_free(to_i915(vma->obj)->vmas, vma);
 }
 
 static void
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 374a0cb7a092..39ed403b9de3 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -241,7 +241,7 @@ static void eb_destroy(struct eb_vmas *eb)
 
 static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
 {
-	return (HAS_LLC(obj->base.dev) ||
+	return (HAS_LLC(obj) ||
 		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
 		obj->cache_level != I915_CACHE_NONE);
 }
@@ -312,8 +312,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
 		   struct drm_i915_gem_relocation_entry *reloc,
 		   uint64_t target_offset)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(obj);
 	uint64_t delta = relocation_target(reloc, target_offset);
 	uint64_t offset;
 	void __iomem *reloc_page;
@@ -334,7 +333,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
 					      offset & PAGE_MASK);
 	iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
 
-	if (INTEL_INFO(dev)->gen >= 8) {
+	if (INTEL_INFO(obj)->gen >= 8) {
 		offset += sizeof(uint32_t);
 
 		if (offset_in_page(offset) == 0) {
@@ -367,7 +366,6 @@ relocate_entry_clflush(struct drm_i915_gem_object *obj,
 		       struct drm_i915_gem_relocation_entry *reloc,
 		       uint64_t target_offset)
 {
-	struct drm_device *dev = obj->base.dev;
 	uint32_t page_offset = offset_in_page(reloc->offset);
 	uint64_t delta = relocation_target(reloc, target_offset);
 	char *vaddr;
@@ -381,7 +379,7 @@ relocate_entry_clflush(struct drm_i915_gem_object *obj,
 				reloc->offset >> PAGE_SHIFT));
 	clflush_write32(vaddr + page_offset, lower_32_bits(delta));
 
-	if (INTEL_INFO(dev)->gen >= 8) {
+	if (INTEL_INFO(obj)->gen >= 8) {
 		page_offset = offset_in_page(page_offset + sizeof(uint32_t));
 
 		if (page_offset == 0) {
@@ -403,7 +401,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 				   struct eb_vmas *eb,
 				   struct drm_i915_gem_relocation_entry *reloc)
 {
-	struct drm_device *dev = obj->base.dev;
 	struct drm_gem_object *target_obj;
 	struct drm_i915_gem_object *target_i915_obj;
 	struct i915_vma *target_vma;
@@ -422,7 +419,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
 	 * pipe_control writes because the gpu doesn't properly redirect them
 	 * through the ppgtt for non_secure batchbuffers. */
-	if (unlikely(IS_GEN6(dev) &&
+	if (unlikely(IS_GEN6(obj) &&
 	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
 		ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
 				    PIN_GLOBAL);
@@ -464,7 +461,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 
 	/* Check that the relocation address is valid... */
 	if (unlikely(reloc->offset >
-		obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
+		obj->base.size - (INTEL_INFO(obj)->gen >= 8 ? 8 : 4))) {
 		DRM_DEBUG("Relocation beyond object bounds: "
 			  "obj %p target %d offset %d size %d.\n",
 			  obj, reloc->target_handle,
@@ -672,7 +669,7 @@ need_reloc_mappable(struct i915_vma *vma)
 		return false;
 
 	/* See also use_cpu_reloc() */
-	if (HAS_LLC(vma->obj->base.dev))
+	if (HAS_LLC(vma->obj))
 		return false;
 
 	if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index 598198543dcd..1ef75bc2220c 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -229,7 +229,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
 					 struct drm_i915_fence_reg *fence,
 					 bool enable)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(obj);
 	int reg = fence_number(dev_priv, fence);
 
 	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
@@ -286,7 +286,7 @@ i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
 int
 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(obj);
 	struct drm_i915_fence_reg *fence;
 	int ret;
 
@@ -433,7 +433,7 @@ bool
 i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
 {
 	if (obj->fence_reg != I915_FENCE_REG_NONE) {
-		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+		struct drm_i915_private *dev_priv = to_i915(obj);
 		struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
 
 		WARN_ON(!ggtt_vma ||
@@ -457,7 +457,7 @@ void
 i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
 {
 	if (obj->fence_reg != I915_FENCE_REG_NONE) {
-		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+		struct drm_i915_private *dev_priv = to_i915(obj);
 		WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
 		dev_priv->fence_regs[obj->fence_reg].pin_count--;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 0715bb74d306..6447a5f9661e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -3305,7 +3305,7 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
 	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
 		return ERR_PTR(-EINVAL);
 
-	vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
+	vma = kmem_cache_zalloc(to_i915(obj)->vmas, GFP_KERNEL);
 	if (vma == NULL)
 		return ERR_PTR(-ENOMEM);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index de891c928b2f..224389d077c7 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -540,10 +540,8 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
 static void
 i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-
 	if (obj->stolen) {
-		i915_gem_stolen_remove_node(dev_priv, obj->stolen);
+		i915_gem_stolen_remove_node(to_i915(obj), obj->stolen);
 		kfree(obj->stolen);
 		obj->stolen = NULL;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 7410f6c962e7..bc4cb7f4fe80 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -122,10 +122,10 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
 	if (tiling_mode == I915_TILING_NONE)
 		return true;
 
-	if (INTEL_INFO(obj->base.dev)->gen >= 4)
+	if (INTEL_INFO(obj)->gen >= 4)
 		return true;
 
-	if (INTEL_INFO(obj->base.dev)->gen == 3) {
+	if (INTEL_INFO(obj)->gen == 3) {
 		if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
 			return false;
 	} else {
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 54088a4d6498..f0e3ade59177 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -303,7 +303,7 @@ __i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
 static int
 i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct drm_i915_private *dev_priv = to_i915(obj);
 	struct i915_mm_struct *mm;
 	int ret = 0;
 
@@ -376,7 +376,7 @@ i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
 
 	kref_put_mutex(&obj->userptr.mm->kref,
 		       __i915_mm_struct_free,
-		       &to_i915(obj->base.dev)->mm_lock);
+		       &to_i915(obj)->mm_lock);
 	obj->userptr.mm = NULL;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index df0ef5bba8e5..f981bddc9bbf 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2069,7 +2069,7 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
 
 void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 {
-	if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
+	if (HAS_LLC(ringbuf->obj) && !ringbuf->obj->stolen)
 		vunmap(ringbuf->virtual_start);
 	else
 		iounmap(ringbuf->virtual_start);
-- 
2.8.0.rc3


* [PATCH 4/6] drm/i915: Use to_i915() instead of guc_to_i915()
From: Chris Wilson @ 2016-03-18 21:16 UTC
  To: intel-gfx

Save a few characters by using the consistent to_i915() interface to
obtain our drm_i915_private struct from an intel_guc pointer.
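
Since intel_guc is embedded directly in drm_i915_private, the
derivation is a plain container_of(). An illustrative sketch of the
relationship (layout as in the driver):

	struct drm_i915_private {
		/* ... */
		struct intel_guc guc;
	};

	/* to_i915(guc) now expands to __guc_to_i915(guc), i.e.
	 * container_of(guc, struct drm_i915_private, guc) */
	struct drm_i915_private *dev_priv = to_i915(guc);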

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_drv.h            |  4 +++-
 drivers/gpu/drm/i915/i915_guc_submission.c | 23 ++++++++++-------------
 2 files changed, 13 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 92365f047e53..d5fa42c96110 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1982,7 +1982,7 @@ static inline struct drm_i915_private *dev_to_i915(struct device *dev)
 	return __to_i915(dev_get_drvdata(dev));
 }
 
-static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
+static inline struct drm_i915_private *__guc_to_i915(struct intel_guc *guc)
 {
 	return container_of(guc, struct drm_i915_private, guc);
 }
@@ -2463,6 +2463,8 @@ struct drm_i915_cmd_table {
 		__p = __to_i915((struct drm_device *)p); \
 	else if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_gem_object)) \
 		__p = __obj_to_i915((struct drm_i915_gem_object *)p); \
+	else if (__builtin_types_compatible_p(typeof(*p), struct intel_guc)) \
+		__p = __guc_to_i915((struct intel_guc *)p); \
 	else \
 		BUILD_BUG(); \
 	__p; \
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index ae1f58d073f2..850aee78c40f 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -77,7 +77,7 @@ static inline bool host2guc_action_response(struct drm_i915_private *dev_priv,
 
 static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
 {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct drm_i915_private *dev_priv = to_i915(guc);
 	u32 status;
 	int i;
 	int ret;
@@ -152,7 +152,7 @@ static int host2guc_release_doorbell(struct intel_guc *guc,
 static int host2guc_sample_forcewake(struct intel_guc *guc,
 				     struct i915_guc_client *client)
 {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct drm_i915_private *dev_priv = to_i915(guc);
 	struct drm_device *dev = dev_priv->dev;
 	u32 data[2];
 
@@ -254,7 +254,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
 static void guc_disable_doorbell(struct intel_guc *guc,
 				 struct i915_guc_client *client)
 {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct drm_i915_private *dev_priv = to_i915(guc);
 	struct guc_doorbell_info *doorbell;
 	void *base;
 	i915_reg_t drbreg = GEN8_DRBREGL(client->doorbell_id);
@@ -376,7 +376,6 @@ static void guc_init_proc_desc(struct intel_guc *guc,
 static void guc_init_ctx_desc(struct intel_guc *guc,
 			      struct i915_guc_client *client)
 {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 	struct intel_engine_cs *engine;
 	struct intel_context *ctx = client->owner;
 	struct guc_context_desc desc;
@@ -390,7 +389,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
 	desc.priority = client->priority;
 	desc.db_id = client->doorbell_id;
 
-	for_each_engine(engine, dev_priv, i) {
+	for_each_engine(engine, guc, i) {
 		struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id];
 		struct drm_i915_gem_object *obj;
 		uint64_t ctx_desc;
@@ -772,7 +771,6 @@ err:
 
 static void guc_create_log(struct intel_guc *guc)
 {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 	struct drm_i915_gem_object *obj;
 	unsigned long offset;
 	uint32_t size, flags;
@@ -791,7 +789,7 @@ static void guc_create_log(struct intel_guc *guc)
 
 	obj = guc->log_obj;
 	if (!obj) {
-		obj = gem_allocate_guc_obj(dev_priv->dev, size);
+		obj = gem_allocate_guc_obj(to_i915(guc)->dev, size);
 		if (!obj) {
 			/* logging will be off */
 			i915.guc_log_level = -1;
@@ -835,7 +833,6 @@ static void init_guc_policies(struct guc_policies *policies)
 
 static void guc_create_ads(struct intel_guc *guc)
 {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 	struct drm_i915_gem_object *obj;
 	struct guc_ads *ads;
 	struct guc_policies *policies;
@@ -851,7 +848,7 @@ static void guc_create_ads(struct intel_guc *guc)
 
 	obj = guc->ads_obj;
 	if (!obj) {
-		obj = gem_allocate_guc_obj(dev_priv->dev, PAGE_ALIGN(size));
+		obj = gem_allocate_guc_obj(to_i915(guc)->dev, PAGE_ALIGN(size));
 		if (!obj)
 			return;
 
@@ -868,10 +865,10 @@ static void guc_create_ads(struct intel_guc *guc)
 	 * so its address won't change after we've told the GuC where
 	 * to find it.
 	 */
-	engine = &dev_priv->engine[RCS];
-	ads->golden_context_lrca = engine->status_page.gfx_addr;
+	ads->golden_context_lrca =
+		to_i915(guc)->engine[RCS].status_page.gfx_addr;
 
-	for_each_engine(engine, dev_priv, i)
+	for_each_engine(engine, guc, i)
 		ads->eng_state_size[engine->guc_id] = intel_lr_context_size(engine);
 
 	/* GuC scheduling policies */
@@ -884,7 +881,7 @@ static void guc_create_ads(struct intel_guc *guc)
 	/* MMIO reg state */
 	reg_state = (void *)policies + sizeof(struct guc_policies);
 
-	for_each_engine(engine, dev_priv, i) {
+	for_each_engine(engine, guc, i) {
 		reg_state->mmio_white_list[engine->guc_id].mmio_start =
 			engine->mmio_base + GUC_MMIO_WHITE_LIST_START;
 
-- 
2.8.0.rc3


* [PATCH 5/6] drm/i915: Teach to_i915() how to extract drm_i915_private from requests
From: Chris Wilson @ 2016-03-18 21:16 UTC
  To: intel-gfx

Since drm_i915_gem_request already contains a backpointer to
drm_i915_private, this is a fairly trivial operation. However, using a
consistent interface lends convenience when we need to query device
properties, for example.
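
An illustrative sketch of the two routes to the same pointer:

	/* before: two hops via the engine and its drm_device */
	dev_priv = req->engine->dev->dev_private;

	/* after: the request's own backpointer, via the polymorphic macro */
	dev_priv = to_i915(req);	/* __request_to_i915(req) -> req->i915 */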

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_drv.h            |  8 ++++++
 drivers/gpu/drm/i915/i915_gem.c            | 16 +++++-------
 drivers/gpu/drm/i915/i915_gem_context.c    | 21 ++++++++--------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  3 +--
 drivers/gpu/drm/i915/i915_gem_gtt.c        |  3 +--
 drivers/gpu/drm/i915/intel_lrc.c           | 24 ++++++++----------
 drivers/gpu/drm/i915/intel_mocs.c          |  2 +-
 drivers/gpu/drm/i915/intel_pm.c            |  3 +--
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 40 +++++++++++++-----------------
 9 files changed, 56 insertions(+), 64 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d5fa42c96110..d6840d380ca5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2293,6 +2293,12 @@ struct drm_i915_gem_request {
 
 };
 
+static inline struct drm_i915_private *
+__request_to_i915(struct drm_i915_gem_request *request)
+{
+	return request->i915;
+}
+
 struct drm_i915_gem_request * __must_check
 i915_gem_request_alloc(struct intel_engine_cs *engine,
 		       struct intel_context *ctx);
@@ -2465,6 +2471,8 @@ struct drm_i915_cmd_table {
 		__p = __obj_to_i915((struct drm_i915_gem_object *)p); \
 	else if (__builtin_types_compatible_p(typeof(*p), struct intel_guc)) \
 		__p = __guc_to_i915((struct intel_guc *)p); \
+	else if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_gem_request)) \
+		__p = __request_to_i915((struct drm_i915_gem_request *)(p)); \
 	else \
 		BUILD_BUG(); \
 	__p; \
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 710a6bbc985e..7e98cf884972 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1455,18 +1455,16 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
 int
 i915_wait_request(struct drm_i915_gem_request *req)
 {
-	struct drm_device *dev;
 	struct drm_i915_private *dev_priv;
 	bool interruptible;
 	int ret;
 
 	BUG_ON(req == NULL);
 
-	dev = req->engine->dev;
-	dev_priv = dev->dev_private;
+	dev_priv = to_i915(req);
 	interruptible = dev_priv->mm.interruptible;
 
-	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+	BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
 	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
 	if (ret)
@@ -2674,13 +2672,13 @@ void i915_gem_request_free(struct kref *req_ref)
 		i915_gem_request_remove_from_client(req);
 
 	if (ctx) {
-		if (i915.enable_execlists && ctx != req->i915->kernel_context)
+		if (i915.enable_execlists && ctx != to_i915(req)->kernel_context)
 			intel_lr_context_unpin(ctx, req->engine);
 
 		i915_gem_context_unreference(ctx);
 	}
 
-	kmem_cache_free(req->i915->requests, req);
+	kmem_cache_free(to_i915(req)->requests, req);
 }
 
 static inline int
@@ -4692,12 +4690,10 @@ err:
 int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
 {
 	struct intel_engine_cs *engine = req->engine;
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
+	u32 *remap_info = to_i915(req)->l3_parity.remap_info[slice];
 	int i, ret;
 
-	if (!HAS_L3_DPF(dev) || !remap_info)
+	if (!HAS_L3_DPF(req) || !remap_info)
 		return 0;
 
 	ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a8afd0cee7f7..ccaa106f6936 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -515,7 +515,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	const int num_rings =
 		/* Use an extended w/a on ivb+ if signalling from other rings */
 		i915_semaphore_is_enabled(engine->dev) ?
-		hweight32(INTEL_INFO(engine->dev)->ring_mask) - 1 :
+		hweight32(INTEL_INFO(req)->ring_mask) - 1 :
 		0;
 	int len, i, ret;
 
@@ -524,21 +524,21 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	 * explicitly, so we rely on the value at ring init, stored in
 	 * itlb_before_ctx_switch.
 	 */
-	if (IS_GEN6(engine->dev)) {
+	if (IS_GEN6(req)) {
 		ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
 		if (ret)
 			return ret;
 	}
 
 	/* These flags are for resource streamer on HSW+ */
-	if (IS_HASWELL(engine->dev) || INTEL_INFO(engine->dev)->gen >= 8)
+	if (IS_HASWELL(req) || INTEL_INFO(req)->gen >= 8)
 		flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
-	else if (INTEL_INFO(engine->dev)->gen < 8)
+	else if (INTEL_INFO(req)->gen < 8)
 		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
 
 
 	len = 4;
-	if (INTEL_INFO(engine->dev)->gen >= 7)
+	if (INTEL_INFO(req)->gen >= 7)
 		len += 2 + (num_rings ? 4*num_rings + 2 : 0);
 
 	ret = intel_ring_begin(req, len);
@@ -546,14 +546,14 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 		return ret;
 
 	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
-	if (INTEL_INFO(engine->dev)->gen >= 7) {
+	if (INTEL_INFO(req)->gen >= 7) {
 		intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 
 			intel_ring_emit(engine,
 					MI_LOAD_REGISTER_IMM(num_rings));
-			for_each_engine(signaller, engine->dev, i) {
+			for_each_engine(signaller, req, i) {
 				if (signaller == engine)
 					continue;
 
@@ -576,13 +576,13 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	 */
 	intel_ring_emit(engine, MI_NOOP);
 
-	if (INTEL_INFO(engine->dev)->gen >= 7) {
+	if (INTEL_INFO(req)->gen >= 7) {
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 
 			intel_ring_emit(engine,
 					MI_LOAD_REGISTER_IMM(num_rings));
-			for_each_engine(signaller, engine->dev, i) {
+			for_each_engine(signaller, req, i) {
 				if (signaller == engine)
 					continue;
 
@@ -829,10 +829,9 @@ unpin_out:
 int i915_switch_context(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *engine = req->engine;
-	struct drm_i915_private *dev_priv = req->i915;
 
 	WARN_ON(i915.enable_execlists);
-	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+	WARN_ON(!mutex_is_locked(&to_i915(req)->dev->struct_mutex));
 
 	if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
 		if (req->ctx != engine->last_context) {
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 39ed403b9de3..28614c4ecbc5 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1096,7 +1096,6 @@ void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 				   struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, vmas, exec_list) {
@@ -1123,7 +1122,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
 			i915_gem_request_assign(&obj->last_fenced_req, req);
 			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
-				struct drm_i915_private *dev_priv = to_i915(engine->dev);
+				struct drm_i915_private *dev_priv = to_i915(req);
 				list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
 					       &dev_priv->mm.fence_list);
 			}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 6447a5f9661e..8eb64f5ed78c 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2192,8 +2192,7 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
 
 int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
 {
-	struct drm_i915_private *dev_priv = req->i915;
-	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+	struct i915_hw_ppgtt *ppgtt = to_i915(req)->mm.aliasing_ppgtt;
 
 	if (i915.enable_execlists)
 		return 0;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 3a23b9549f7b..0789f4581f7d 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -361,8 +361,7 @@ static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
 {
 
 	struct intel_engine_cs *engine = rq0->engine;
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(rq0);
 	uint64_t desc[2];
 
 	if (rq1) {
@@ -616,7 +615,7 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
 	struct drm_i915_gem_request *cursor;
 	int num_elements = 0;
 
-	if (request->ctx != request->i915->kernel_context)
+	if (request->ctx != to_i915(request)->kernel_context)
 		intel_lr_context_pin(request->ctx, engine);
 
 	i915_gem_request_reference(request);
@@ -712,14 +711,14 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
 		 * going any further, as the i915_add_request() call
 		 * later on mustn't fail ...
 		 */
-		struct intel_guc *guc = &request->i915->guc;
+		struct intel_guc *guc = &to_i915(request)->guc;
 
 		ret = i915_guc_wq_check_space(guc->execbuf_client);
 		if (ret)
 			return ret;
 	}
 
-	if (request->ctx != request->i915->kernel_context)
+	if (request->ctx != to_i915(request)->kernel_context)
 		ret = intel_lr_context_pin(request->ctx, request->engine);
 
 	return ret;
@@ -780,7 +779,7 @@ static int
 intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 {
 	struct intel_ringbuffer *ringbuf = request->ringbuf;
-	struct drm_i915_private *dev_priv = request->i915;
+	struct drm_i915_private *dev_priv = to_i915(request);
 	struct intel_engine_cs *engine = request->engine;
 
 	intel_logical_ring_advance(ringbuf);
@@ -802,7 +801,7 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 	if (engine->last_context != request->ctx) {
 		if (engine->last_context)
 			intel_lr_context_unpin(engine->last_context, engine);
-		if (request->ctx != request->i915->kernel_context) {
+		if (request->ctx != to_i915(request)->kernel_context) {
 			intel_lr_context_pin(request->ctx, engine);
 			engine->last_context = request->ctx;
 		} else {
@@ -898,7 +897,7 @@ int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 	int ret;
 
 	WARN_ON(req == NULL);
-	dev_priv = req->i915;
+	dev_priv = to_i915(req);
 
 	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
 				   dev_priv->mm.interruptible);
@@ -1042,7 +1041,7 @@ void intel_execlists_retire_requests(struct intel_engine_cs *engine)
 		struct drm_i915_gem_object *ctx_obj =
 				ctx->engine[engine->id].state;
 
-		if (ctx_obj && (ctx != req->i915->kernel_context))
+		if (ctx_obj && (ctx != to_i915(req)->kernel_context))
 			intel_lr_context_unpin(ctx, engine);
 
 		list_del(&req->execlist_link);
@@ -1177,8 +1176,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	int ret, i;
 	struct intel_engine_cs *engine = req->engine;
 	struct intel_ringbuffer *ringbuf = req->ringbuf;
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(req);
 	struct i915_workarounds *w = &dev_priv->workarounds;
 
 	if (w->count == 0)
@@ -1690,8 +1688,8 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 	 * not needed in 48-bit.*/
 	if (req->ctx->ppgtt &&
 	    (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
-		if (!USES_FULL_48BIT_PPGTT(req->i915) &&
-		    !intel_vgpu_active(req->i915->dev)) {
+		if (!USES_FULL_48BIT_PPGTT(req) &&
+		    !intel_vgpu_active(to_i915(req)->dev)) {
 			ret = intel_logical_ring_emit_pdps(req);
 			if (ret)
 				return ret;
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 45200b93e9bb..9aa3d8750d97 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -327,7 +327,7 @@ int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
 		enum intel_engine_id ring_id;
 
 		/* Program the control registers */
-		for_each_engine(engine, req->i915, ring_id) {
+		for_each_engine(engine, req, ring_id) {
 			ret = emit_mocs_control_table(req, &t, ring_id);
 			if (ret)
 				return ret;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 521cf4564329..46e2fdda6101 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -7385,8 +7385,7 @@ static void __intel_rps_boost_work(struct work_struct *work)
 	struct drm_i915_gem_request *req = boost->req;
 
 	if (!i915_gem_request_completed(req, true))
-		gen6_rps_boost(to_i915(req->engine->dev), NULL,
-			       req->emitted_jiffies);
+		gen6_rps_boost(to_i915(req), NULL, req->emitted_jiffies);
 
 	i915_gem_request_unreference__unlocked(req);
 	kfree(boost);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index f981bddc9bbf..8a293a25588c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -107,7 +107,6 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32	flush_domains)
 {
 	struct intel_engine_cs *engine = req->engine;
-	struct drm_device *dev = engine->dev;
 	u32 cmd;
 	int ret;
 
@@ -146,7 +145,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
 		cmd |= MI_EXE_FLUSH;
 
 	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
-	    (IS_G4X(dev) || IS_GEN5(dev)))
+	    (IS_G4X(req) || IS_GEN5(req)))
 		cmd |= MI_INVALIDATE_ISP;
 
 	ret = intel_ring_begin(req, 2);
@@ -705,8 +704,7 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
 	int ret, i;
 	struct intel_engine_cs *engine = req->engine;
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(req);
 	struct i915_workarounds *w = &dev_priv->workarounds;
 
 	if (w->count == 0)
@@ -1272,12 +1270,11 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
 {
 #define MBOX_UPDATE_DWORDS 8
 	struct intel_engine_cs *signaller = signaller_req->engine;
-	struct drm_device *dev = signaller->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(signaller_req);
 	struct intel_engine_cs *waiter;
 	int i, ret, num_rings;
 
-	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
 	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
 #undef MBOX_UPDATE_DWORDS
 
@@ -1313,12 +1310,11 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 {
 #define MBOX_UPDATE_DWORDS 6
 	struct intel_engine_cs *signaller = signaller_req->engine;
-	struct drm_device *dev = signaller->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(signaller_req);
 	struct intel_engine_cs *waiter;
 	int i, ret, num_rings;
 
-	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
 	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
 #undef MBOX_UPDATE_DWORDS
 
@@ -1351,13 +1347,12 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 		       unsigned int num_dwords)
 {
 	struct intel_engine_cs *signaller = signaller_req->engine;
-	struct drm_device *dev = signaller->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(signaller_req);
 	struct intel_engine_cs *useless;
 	int i, ret, num_rings;
 
 #define MBOX_UPDATE_DWORDS 3
-	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
 	num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
 #undef MBOX_UPDATE_DWORDS
 
@@ -1437,7 +1432,7 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
 	       u32 seqno)
 {
 	struct intel_engine_cs *waiter = waiter_req->engine;
-	struct drm_i915_private *dev_priv = waiter->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(waiter_req);
 	int ret;
 
 	ret = intel_ring_begin(waiter_req, 4);
@@ -2372,8 +2367,8 @@ int intel_engine_idle(struct intel_engine_cs *engine)
 
 	/* Make sure we do not trigger any retires */
 	return __i915_wait_request(req,
-				   atomic_read(&to_i915(engine->dev)->gpu_error.reset_counter),
-				   to_i915(engine->dev)->mm.interruptible,
+				   atomic_read(&to_i915(req)->gpu_error.reset_counter),
+				   to_i915(req)->mm.interruptible,
 				   NULL, NULL);
 }
 
@@ -2501,7 +2496,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req,
 
 	WARN_ON(req == NULL);
 	engine = req->engine;
-	dev_priv = req->i915;
+	dev_priv = to_i915(req);
 
 	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
 				   dev_priv->mm.interruptible);
@@ -2600,7 +2595,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
 		return ret;
 
 	cmd = MI_FLUSH_DW;
-	if (INTEL_INFO(engine->dev)->gen >= 8)
+	if (INTEL_INFO(req)->gen >= 8)
 		cmd += 1;
 
 	/* We always require a command barrier so that subsequent
@@ -2622,7 +2617,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
 	intel_ring_emit(engine, cmd);
 	intel_ring_emit(engine,
 			I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
-	if (INTEL_INFO(engine->dev)->gen >= 8) {
+	if (INTEL_INFO(req)->gen >= 8) {
 		intel_ring_emit(engine, 0); /* upper addr */
 		intel_ring_emit(engine, 0); /* value */
 	} else  {
@@ -2639,7 +2634,7 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			      unsigned dispatch_flags)
 {
 	struct intel_engine_cs *engine = req->engine;
-	bool ppgtt = USES_PPGTT(engine->dev) &&
+	bool ppgtt = USES_PPGTT(req) &&
 			!(dispatch_flags & I915_DISPATCH_SECURE);
 	int ret;
 
@@ -2713,7 +2708,6 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
 			   u32 invalidate, u32 flush)
 {
 	struct intel_engine_cs *engine = req->engine;
-	struct drm_device *dev = engine->dev;
 	uint32_t cmd;
 	int ret;
 
@@ -2722,7 +2716,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
 		return ret;
 
 	cmd = MI_FLUSH_DW;
-	if (INTEL_INFO(dev)->gen >= 8)
+	if (INTEL_INFO(req)->gen >= 8)
 		cmd += 1;
 
 	/* We always require a command barrier so that subsequent
@@ -2743,7 +2737,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
 	intel_ring_emit(engine, cmd);
 	intel_ring_emit(engine,
 			I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
-	if (INTEL_INFO(dev)->gen >= 8) {
+	if (INTEL_INFO(req)->gen >= 8) {
 		intel_ring_emit(engine, 0); /* upper addr */
 		intel_ring_emit(engine, 0); /* value */
 	} else  {
-- 
2.8.0.rc3


* [PATCH 6/6] drm/i915: Teach to_i915() how to extract drm_i915_private from engines
From: Chris Wilson @ 2016-03-18 21:16 UTC
  To: intel-gfx

This is primarily intended to simplify later patches that add various
backpointers to the structs, but in the meantime we can enjoy the
little syntactic conveniences.
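
Both new cases still resolve through engine->dev for now. An
illustrative sketch:

	struct drm_i915_private *dev_priv;

	dev_priv = to_i915(engine);	/* __engine_to_i915(): engine->dev */
	dev_priv = to_i915(ringbuf);	/* intel_ringbuffer: via ringbuf->engine */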

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_cmd_parser.c     |  12 +-
 drivers/gpu/drm/i915/i915_drv.h            |   9 ++
 drivers/gpu/drm/i915/i915_gem.c            |   9 +-
 drivers/gpu/drm/i915/i915_gem_context.c    |  19 ++-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |   2 +-
 drivers/gpu/drm/i915/i915_irq.c            |  26 ++--
 drivers/gpu/drm/i915/intel_display.c       |   2 +-
 drivers/gpu/drm/i915/intel_lrc.c           | 122 ++++++++--------
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 219 +++++++++++++----------------
 drivers/gpu/drm/i915/intel_uncore.c        |   4 +-
 10 files changed, 195 insertions(+), 229 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 2c50142be559..efbba19bb0b2 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -696,12 +696,12 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
 	int cmd_table_count;
 	int ret;
 
-	if (!IS_GEN7(engine->dev))
+	if (!IS_GEN7(engine))
 		return 0;
 
 	switch (engine->id) {
 	case RCS:
-		if (IS_HASWELL(engine->dev)) {
+		if (IS_HASWELL(engine)) {
 			cmd_tables = hsw_render_ring_cmds;
 			cmd_table_count =
 				ARRAY_SIZE(hsw_render_ring_cmds);
@@ -713,7 +713,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
 		engine->reg_table = gen7_render_regs;
 		engine->reg_count = ARRAY_SIZE(gen7_render_regs);
 
-		if (IS_HASWELL(engine->dev)) {
+		if (IS_HASWELL(engine)) {
 			engine->master_reg_table = hsw_master_regs;
 			engine->master_reg_count = ARRAY_SIZE(hsw_master_regs);
 		} else {
@@ -729,7 +729,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
 		engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
 		break;
 	case BCS:
-		if (IS_HASWELL(engine->dev)) {
+		if (IS_HASWELL(engine)) {
 			cmd_tables = hsw_blt_ring_cmds;
 			cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
 		} else {
@@ -740,7 +740,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
 		engine->reg_table = gen7_blt_regs;
 		engine->reg_count = ARRAY_SIZE(gen7_blt_regs);
 
-		if (IS_HASWELL(engine->dev)) {
+		if (IS_HASWELL(engine)) {
 			engine->master_reg_table = hsw_master_regs;
 			engine->master_reg_count = ARRAY_SIZE(hsw_master_regs);
 		} else {
@@ -968,7 +968,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *engine)
 	if (!engine->needs_cmd_parser)
 		return false;
 
-	if (!USES_PPGTT(engine->dev))
+	if (!USES_PPGTT(engine))
 		return false;
 
 	return (i915.enable_cmd_parser == 1);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d6840d380ca5..85c8d93cbb0f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1987,6 +1987,11 @@ static inline struct drm_i915_private *__guc_to_i915(struct intel_guc *guc)
 	return container_of(guc, struct drm_i915_private, guc);
 }
 
+static inline struct drm_i915_private *__engine_to_i915(struct intel_engine_cs *engine)
+{
+	return __to_i915(engine->dev);
+}
+
 /* Iterate over initialised rings */
 #define for_each_engine(ring__, ptr__, i__) \
 	for ((i__) = 0; (i__) < I915_NUM_ENGINES; (i__)++) \
@@ -2473,6 +2478,10 @@ struct drm_i915_cmd_table {
 		__p = __guc_to_i915((struct intel_guc *)p); \
 	else if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_gem_request)) \
 		__p = __request_to_i915((struct drm_i915_gem_request *)(p)); \
+	else if (__builtin_types_compatible_p(typeof(*p), struct intel_engine_cs)) \
+		__p = __engine_to_i915((struct intel_engine_cs *)(p)); \
+	else if (__builtin_types_compatible_p(typeof(*p), struct intel_ringbuffer)) \
+		__p = __engine_to_i915(((struct intel_ringbuffer *)(p))->engine); \
 	else \
 		BUILD_BUG(); \
 	__p; \
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 7e98cf884972..045c609e8e9e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1242,8 +1242,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 			struct intel_rps_client *rps)
 {
 	struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(req);
 	const bool irq_test_in_progress =
 		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine);
 	int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
@@ -2545,7 +2544,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 		return;
 
 	engine = request->engine;
-	dev_priv = request->i915;
+	dev_priv = to_i915(request);
 	ringbuf = request->ringbuf;
 
 	/*
@@ -2686,7 +2685,7 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
 			 struct intel_context *ctx,
 			 struct drm_i915_gem_request **req_out)
 {
-	struct drm_i915_private *dev_priv = to_i915(engine->dev);
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	struct drm_i915_gem_request *req;
 	int ret;
 
@@ -2767,7 +2766,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	int err;
 
 	if (ctx == NULL)
-		ctx = to_i915(engine->dev)->kernel_context;
+		ctx = to_i915(engine)->kernel_context;
 	err = __i915_gem_request_alloc(engine, ctx, &req);
 	return err ? ERR_PTR(err) : req;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index ccaa106f6936..8ee8eff055d5 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -107,11 +107,11 @@ static size_t get_context_alignment(struct drm_device *dev)
 
 static int get_context_size(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ret;
 	u32 reg;
 
-	switch (INTEL_INFO(dev)->gen) {
+	switch (INTEL_INFO(dev_priv)->gen) {
 	case 6:
 		reg = I915_READ(CXT_SIZE);
 		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
@@ -617,12 +617,12 @@ static inline bool should_skip_switch(struct intel_engine_cs *engine,
 static bool
 needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 
 	if (!to->ppgtt)
 		return false;
 
-	if (INTEL_INFO(engine->dev)->gen < 8)
+	if (INTEL_INFO(dev_priv)->gen < 8)
 		return true;
 
 	if (engine != &dev_priv->engine[RCS])
@@ -635,12 +635,12 @@ static bool
 needs_pd_load_post(struct intel_engine_cs *engine, struct intel_context *to,
 		   u32 hw_flags)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 
 	if (!to->ppgtt)
 		return false;
 
-	if (!IS_GEN8(engine->dev))
+	if (!IS_GEN8(dev_priv))
 		return false;
 
 	if (engine != &dev_priv->engine[RCS])
@@ -656,13 +656,12 @@ static int do_switch(struct drm_i915_gem_request *req)
 {
 	struct intel_context *to = req->ctx;
 	struct intel_engine_cs *engine = req->engine;
-	struct drm_i915_private *dev_priv = req->i915;
 	struct intel_context *from = engine->last_context;
 	u32 hw_flags = 0;
 	bool uninitialized = false;
 	int ret, i;
 
-	if (from != NULL && engine == &dev_priv->engine[RCS]) {
+	if (from != NULL && engine->id == RCS) {
 		BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
 		BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
 	}
@@ -671,7 +670,7 @@ static int do_switch(struct drm_i915_gem_request *req)
 		return 0;
 
 	/* Trying to pin first makes error handling easier. */
-	if (engine == &dev_priv->engine[RCS]) {
+	if (engine->id == RCS) {
 		ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
 					    get_context_alignment(engine->dev),
 					    0);
@@ -700,7 +699,7 @@ static int do_switch(struct drm_i915_gem_request *req)
 		to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
 	}
 
-	if (engine != &dev_priv->engine[RCS]) {
+	if (engine->id != RCS) {
 		if (from)
 			i915_gem_context_unreference(from);
 		goto done;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 28614c4ecbc5..f093fd40c07a 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -720,7 +720,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
 	struct i915_address_space *vm;
 	struct list_head ordered_vmas;
 	struct list_head pinned_vmas;
-	bool has_fenced_gpu_access = INTEL_INFO(engine->dev)->gen < 4;
+	bool has_fenced_gpu_access = INTEL_INFO(engine)->gen < 4;
 	int retry;
 
 	i915_gem_retire_requests_ring(engine);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 8f3e3309c3ab..2a376e49e153 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2827,12 +2827,11 @@ static struct intel_engine_cs *
 semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
 				 u64 offset)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 	struct intel_engine_cs *signaller;
 	int i;
 
-	if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
-		for_each_engine(signaller, dev_priv, i) {
+	if (INTEL_INFO(engine)->gen >= 8) {
+		for_each_engine(signaller, engine, i) {
 			if (engine == signaller)
 				continue;
 
@@ -2842,7 +2841,7 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
 	} else {
 		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
 
-		for_each_engine(signaller, dev_priv, i) {
+		for_each_engine(signaller, engine, i) {
 			if(engine == signaller)
 				continue;
 
@@ -2860,7 +2859,7 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
 static struct intel_engine_cs *
 semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	u32 cmd, ipehr, head;
 	u64 offset = 0;
 	int i, backwards;
@@ -2898,7 +2897,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
 	 * ringbuffer itself.
 	 */
 	head = I915_READ_HEAD(engine) & HEAD_ADDR;
-	backwards = (INTEL_INFO(engine->dev)->gen >= 8) ? 5 : 4;
+	backwards = (INTEL_INFO(engine)->gen >= 8) ? 5 : 4;
 
 	for (i = backwards; i; --i) {
 		/*
@@ -2920,7 +2919,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
 		return NULL;
 
 	*seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1;
-	if (INTEL_INFO(engine->dev)->gen >= 8) {
+	if (INTEL_INFO(engine)->gen >= 8) {
 		offset = ioread32(engine->buffer->virtual_start + head + 12);
 		offset <<= 32;
 		offset = ioread32(engine->buffer->virtual_start + head + 8);
@@ -2930,7 +2929,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
 
 static int semaphore_passed(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	struct intel_engine_cs *signaller;
 	u32 seqno;
 
@@ -3014,8 +3013,7 @@ head_stuck(struct intel_engine_cs *engine, u64 acthd)
 static enum intel_ring_hangcheck_action
 ring_stuck(struct intel_engine_cs *engine, u64 acthd)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	enum intel_ring_hangcheck_action ha;
 	u32 tmp;
 
@@ -3023,7 +3021,7 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
 	if (ha != HANGCHECK_HUNG)
 		return ha;
 
-	if (IS_GEN2(dev))
+	if (IS_GEN2(engine))
 		return HANGCHECK_HUNG;
 
 	/* Is the chip hanging on a WAIT_FOR_EVENT?
@@ -3033,19 +3031,19 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
 	 */
 	tmp = I915_READ_CTL(engine);
 	if (tmp & RING_WAIT) {
-		i915_handle_error(dev, false,
+		i915_handle_error(engine->dev, false,
 				  "Kicking stuck wait on %s",
 				  engine->name);
 		I915_WRITE_CTL(engine, tmp);
 		return HANGCHECK_KICK;
 	}
 
-	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
+	if (INTEL_INFO(engine)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
 		switch (semaphore_passed(engine)) {
 		default:
 			return HANGCHECK_HUNG;
 		case 1:
-			i915_handle_error(dev, false,
+			i915_handle_error(engine->dev, false,
 					  "Kicking stuck semaphore on %s",
 					  engine->name);
 			I915_WRITE_CTL(engine, tmp);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 74b0165238dc..d6191b3c53c7 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11231,7 +11231,7 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
 	if (engine == NULL)
 		return true;
 
-	if (INTEL_INFO(engine->dev)->gen < 5)
+	if (INTEL_INFO(engine)->gen < 5)
 		return false;
 
 	if (i915.use_mmio_flip < 0)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 0789f4581f7d..5e3246b05a1e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -268,19 +268,18 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
 static void
 logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-
-	if (IS_GEN8(dev) || IS_GEN9(dev))
+	if (IS_GEN8(engine) || IS_GEN9(engine))
 		engine->idle_lite_restore_wa = ~0;
 
-	engine->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
-					IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
-					(engine->id == VCS || engine->id == VCS2);
+	engine->disable_lite_restore_wa =
+		(IS_SKL_REVID(engine, 0, SKL_REVID_B0) ||
+		 IS_BXT_REVID(engine, 0, BXT_REVID_A1)) &&
+		(engine->id == VCS || engine->id == VCS2);
 
 	engine->ctx_desc_template = GEN8_CTX_VALID;
-	engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) <<
-				   GEN8_CTX_ADDRESSING_MODE_SHIFT;
-	if (IS_GEN8(dev))
+	engine->ctx_desc_template |=
+		GEN8_CTX_ADDRESSING_MODE(engine) << GEN8_CTX_ADDRESSING_MODE_SHIFT;
+	if (IS_GEN8(engine))
 		engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
 	engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
 
@@ -397,9 +396,8 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
 
 static void execlists_update_context(struct drm_i915_gem_request *rq)
 {
-	struct intel_engine_cs *engine = rq->engine;
 	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
-	uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;
+	uint32_t *reg_state = rq->ctx->engine[rq->engine->id].lrc_reg_state;
 
 	reg_state[CTX_RING_TAIL+1] = rq->tail;
 
@@ -408,14 +406,14 @@ static void execlists_update_context(struct drm_i915_gem_request *rq)
 	 * PML4 is allocated during ppgtt init, so this is not needed
 	 * in 48-bit mode.
 	 */
-	if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
+	if (ppgtt && !USES_FULL_48BIT_PPGTT(rq))
 		execlists_update_context_pdps(ppgtt, reg_state);
 }
 
 static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
 				      struct drm_i915_gem_request *rq1)
 {
-	struct drm_i915_private *dev_priv = rq0->i915;
+	struct drm_i915_private *dev_priv = to_i915(rq0);
 
 	/* BUG_ON(!irqs_disabled());  */
 
@@ -444,7 +442,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
 	 * If irqs are not active generate a warning as batches that finish
 	 * without the irqs may get lost and a GPU Hang may occur.
 	 */
-	WARN_ON(!intel_irqs_enabled(engine->dev->dev_private));
+	WARN_ON(!intel_irqs_enabled(to_i915(engine)));
 
 	/* Try to read in pairs */
 	list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
@@ -519,7 +517,7 @@ static u32
 get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
 		   u32 *context_id)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	u32 status;
 
 	read_pointer %= GEN8_CSB_ENTRIES;
@@ -544,7 +542,7 @@ get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
  */
 void intel_lrc_irq_handler(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	u32 status_pointer;
 	unsigned int read_pointer, write_pointer;
 	u32 csb[GEN8_CSB_ENTRIES][2];
@@ -1051,14 +1049,14 @@ void intel_execlists_retire_requests(struct intel_engine_cs *engine)
 
 void intel_logical_ring_stop(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	int ret;
 
 	if (!intel_engine_initialized(engine))
 		return;
 
 	ret = intel_engine_idle(engine);
-	if (ret && !i915_reset_in_progress(&to_i915(engine->dev)->gpu_error))
+	if (ret && !i915_reset_in_progress(&to_i915(engine)->gpu_error))
 		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
 			  engine->name, ret);
 
@@ -1090,8 +1088,7 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
 static int intel_lr_context_do_pin(struct intel_context *ctx,
 				   struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = ctx->i915;
 	struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
 	struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf;
 	struct page *lrc_state_page;
@@ -1248,7 +1245,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
 	 * this batch updates GEN8_L3SQCREG4 with default value we need to
 	 * set this bit here to retain the WA during flush.
 	 */
-	if (IS_SKL_REVID(engine->dev, 0, SKL_REVID_E0))
+	if (IS_SKL_REVID(engine, 0, SKL_REVID_E0))
 		l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
 
 	wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
@@ -1337,7 +1334,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
 	wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 
 	/* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
-	if (IS_BROADWELL(engine->dev)) {
+	if (IS_BROADWELL(engine)) {
 		int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
 		if (rc < 0)
 			return rc;
@@ -1409,12 +1406,11 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
 				    uint32_t *offset)
 {
 	int ret;
-	struct drm_device *dev = engine->dev;
 	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
 
 	/* WaDisableCtxRestoreArbitration:skl,bxt */
-	if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
-	    IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+	if (IS_SKL_REVID(engine, 0, SKL_REVID_D0) ||
+	    IS_BXT_REVID(engine, 0, BXT_REVID_A1))
 		wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 
 	/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
@@ -1435,12 +1431,11 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
 			       uint32_t *const batch,
 			       uint32_t *offset)
 {
-	struct drm_device *dev = engine->dev;
 	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
 
 	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
-	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
-	    IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+	if (IS_SKL_REVID(engine, 0, SKL_REVID_B0) ||
+	    IS_BXT_REVID(engine, 0, BXT_REVID_A1)) {
 		wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
 		wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
 		wa_ctx_emit(batch, index,
@@ -1449,8 +1444,8 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
 	}
 
 	/* WaDisableCtxRestoreArbitration:skl,bxt */
-	if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
-	    IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+	if (IS_SKL_REVID(engine, 0, SKL_REVID_D0) ||
+	    IS_BXT_REVID(engine, 0, BXT_REVID_A1))
 		wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
 
 	wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
@@ -1500,9 +1495,9 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
 	WARN_ON(engine->id != RCS);
 
 	/* update this when WA for higher Gen are added */
-	if (INTEL_INFO(engine->dev)->gen > 9) {
+	if (INTEL_INFO(engine)->gen > 9) {
 		DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
-			  INTEL_INFO(engine->dev)->gen);
+			  INTEL_INFO(engine)->gen);
 		return 0;
 	}
 
@@ -1522,7 +1517,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
 	batch = kmap_atomic(page);
 	offset = 0;
 
-	if (INTEL_INFO(engine->dev)->gen == 8) {
+	if (INTEL_INFO(engine)->gen == 8) {
 		ret = gen8_init_indirectctx_bb(engine,
 					       &wa_ctx->indirect_ctx,
 					       batch,
@@ -1536,7 +1531,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
 					  &offset);
 		if (ret)
 			goto out;
-	} else if (INTEL_INFO(engine->dev)->gen == 9) {
+	} else if (INTEL_INFO(engine)->gen == 9) {
 		ret = gen9_init_indirectctx_bb(engine,
 					       &wa_ctx->indirect_ctx,
 					       batch,
@@ -1562,8 +1557,7 @@ out:
 
 static int gen8_init_common_ring(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	unsigned int next_context_status_buffer_hw;
 
 	lrc_setup_hardware_status_page(engine,
@@ -1611,8 +1605,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
 
 static int gen8_init_render_ring(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	int ret;
 
 	ret = gen8_init_common_ring(engine);
@@ -1717,8 +1710,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 
 static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	unsigned long flags;
 
 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1737,8 +1729,7 @@ static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
 
 static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1755,8 +1746,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
 {
 	struct intel_ringbuffer *ringbuf = request->ringbuf;
 	struct intel_engine_cs *engine = ringbuf->engine;
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(request);
 	uint32_t cmd;
 	int ret;
 
@@ -1824,7 +1814,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 		 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
 		 * pipe control.
 		 */
-		if (IS_GEN9(engine->dev))
+		if (IS_GEN9(request))
 			vf_flush_wa = true;
 	}
 
@@ -2014,7 +2004,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
 	if (!intel_engine_initialized(engine))
 		return;
 
-	dev_priv = engine->dev->dev_private;
+	dev_priv = to_i915(engine);
 
 	if (engine->buffer) {
 		intel_logical_ring_stop(engine);
@@ -2292,7 +2282,7 @@ cleanup_render_ring:
 }
 
 static u32
-make_rpcs(struct drm_device *dev)
+make_rpcs(struct intel_engine_cs *engine)
 {
 	u32 rpcs = 0;
 
@@ -2300,7 +2290,7 @@ make_rpcs(struct drm_device *dev)
 	 * No explicit RPCS request is needed to ensure full
 	 * slice/subslice/EU enablement prior to Gen9.
 	*/
-	if (INTEL_INFO(dev)->gen < 9)
+	if (INTEL_INFO(engine)->gen < 9)
 		return 0;
 
 	/*
@@ -2309,24 +2299,24 @@ make_rpcs(struct drm_device *dev)
 	 * must make an explicit request through RPCS for full
 	 * enablement.
 	*/
-	if (INTEL_INFO(dev)->has_slice_pg) {
+	if (INTEL_INFO(engine)->has_slice_pg) {
 		rpcs |= GEN8_RPCS_S_CNT_ENABLE;
-		rpcs |= INTEL_INFO(dev)->slice_total <<
+		rpcs |= INTEL_INFO(engine)->slice_total <<
 			GEN8_RPCS_S_CNT_SHIFT;
 		rpcs |= GEN8_RPCS_ENABLE;
 	}
 
-	if (INTEL_INFO(dev)->has_subslice_pg) {
+	if (INTEL_INFO(engine)->has_subslice_pg) {
 		rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
-		rpcs |= INTEL_INFO(dev)->subslice_per_slice <<
+		rpcs |= INTEL_INFO(engine)->subslice_per_slice <<
 			GEN8_RPCS_SS_CNT_SHIFT;
 		rpcs |= GEN8_RPCS_ENABLE;
 	}
 
-	if (INTEL_INFO(dev)->has_eu_pg) {
-		rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
+	if (INTEL_INFO(engine)->has_eu_pg) {
+		rpcs |= INTEL_INFO(engine)->eu_per_subslice <<
 			GEN8_RPCS_EU_MIN_SHIFT;
-		rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
+		rpcs |= INTEL_INFO(engine)->eu_per_subslice <<
 			GEN8_RPCS_EU_MAX_SHIFT;
 		rpcs |= GEN8_RPCS_ENABLE;
 	}
@@ -2338,9 +2328,9 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
 {
 	u32 indirect_ctx_offset;
 
-	switch (INTEL_INFO(engine->dev)->gen) {
+	switch (INTEL_INFO(engine)->gen) {
 	default:
-		MISSING_CASE(INTEL_INFO(engine->dev)->gen);
+		MISSING_CASE(INTEL_INFO(engine)->gen);
 		/* fall through */
 	case 9:
 		indirect_ctx_offset =
@@ -2360,8 +2350,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
 		    struct intel_engine_cs *engine,
 		    struct intel_ringbuffer *ringbuf)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = ctx->i915;
 	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
 	struct page *page;
 	uint32_t *reg_state;
@@ -2400,7 +2389,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
 		       RING_CONTEXT_CONTROL(engine),
 		       _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
 					  CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
-					  (HAS_RESOURCE_STREAMER(dev) ?
+					  (HAS_RESOURCE_STREAMER(engine) ?
 					    CTX_CTRL_RS_CTX_ENABLE : 0)));
 	ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
 		       0);
@@ -2471,7 +2460,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
 	ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0),
 		       0);
 
-	if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
+	if (USES_FULL_48BIT_PPGTT(engine)) {
 		/* 64b PPGTT (48bit canonical)
 		 * PDP0_DESCRIPTOR contains the base address to PML4 and
 		 * other PDP Descriptors are ignored.
@@ -2489,7 +2478,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
 	if (engine->id == RCS) {
 		reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
 		ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
-			       make_rpcs(dev));
+			       make_rpcs(engine));
 	}
 
 	kunmap_atomic(reg_state);
@@ -2546,11 +2535,11 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
 {
 	int ret = 0;
 
-	WARN_ON(INTEL_INFO(engine->dev)->gen < 8);
+	WARN_ON(INTEL_INFO(engine)->gen < 8);
 
 	switch (engine->id) {
 	case RCS:
-		if (INTEL_INFO(engine->dev)->gen >= 9)
+		if (INTEL_INFO(engine)->gen >= 9)
 			ret = GEN9_LR_CONTEXT_RENDER_SIZE;
 		else
 			ret = GEN8_LR_CONTEXT_RENDER_SIZE;
@@ -2569,7 +2558,7 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
 static void lrc_setup_hardware_status_page(struct intel_engine_cs *engine,
 					   struct drm_i915_gem_object *default_ctx_obj)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	struct page *page;
 
 	/* The HWSP is part of the default context object in LRC mode. */
@@ -2601,7 +2590,6 @@ static void lrc_setup_hardware_status_page(struct intel_engine_cs *engine,
 int intel_lr_context_deferred_alloc(struct intel_context *ctx,
 				    struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
 	struct drm_i915_gem_object *ctx_obj;
 	uint32_t context_size;
 	struct intel_ringbuffer *ringbuf;
@@ -2615,7 +2603,7 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
 	/* One extra page as the sharing data between driver and GuC */
 	context_size += PAGE_SIZE * LRC_PPHWSP_PN;
 
-	ctx_obj = i915_gem_alloc_object(dev, context_size);
+	ctx_obj = i915_gem_alloc_object(engine->dev, context_size);
 	if (!ctx_obj) {
 		DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 8a293a25588c..5173d52b76de 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -61,7 +61,7 @@ int intel_ring_space(struct intel_ringbuffer *ringbuf)
 
 bool intel_engine_stopped(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine);
 }
 
@@ -431,19 +431,19 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
 static void ring_write_tail(struct intel_engine_cs *engine,
 			    u32 value)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	I915_WRITE_TAIL(engine, value);
 }
 
 u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	u64 acthd;
 
-	if (INTEL_INFO(engine->dev)->gen >= 8)
+	if (INTEL_INFO(engine)->gen >= 8)
 		acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
 					 RING_ACTHD_UDW(engine->mmio_base));
-	else if (INTEL_INFO(engine->dev)->gen >= 4)
+	else if (INTEL_INFO(engine)->gen >= 4)
 		acthd = I915_READ(RING_ACTHD(engine->mmio_base));
 	else
 		acthd = I915_READ(ACTHD);
@@ -453,25 +453,24 @@ u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
 
 static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	u32 addr;
 
 	addr = dev_priv->status_page_dmah->busaddr;
-	if (INTEL_INFO(engine->dev)->gen >= 4)
+	if (INTEL_INFO(engine)->gen >= 4)
 		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
 	I915_WRITE(HWS_PGA, addr);
 }
 
 static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	i915_reg_t mmio;
 
 	/* The ring status page addresses are no longer next to the rest of
 	 * the ring registers as of gen7.
 	 */
-	if (IS_GEN7(dev)) {
+	if (IS_GEN7(engine)) {
 		switch (engine->id) {
 		case RCS:
 			mmio = RENDER_HWS_PGA_GEN7;
@@ -491,7 +490,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
 			mmio = VEBOX_HWS_PGA_GEN7;
 			break;
 		}
-	} else if (IS_GEN6(engine->dev)) {
+	} else if (IS_GEN6(engine)) {
 		mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
 	} else {
 		/* XXX: gen8 returns to sanity */
@@ -508,7 +507,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
 	 * arises: do we still need this and if so how should we go about
 	 * invalidating the TLB?
 	 */
-	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
+	if (INTEL_INFO(engine)->gen >= 6 && INTEL_INFO(engine)->gen < 8) {
 		i915_reg_t reg = RING_INSTPM(engine->mmio_base);
 
 		/* ring should be idle before issuing a sync flush*/
@@ -526,9 +525,9 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
 
 static bool stop_ring(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = to_i915(engine->dev);
+	struct drm_i915_private *dev_priv = to_i915(engine);
 
-	if (!IS_GEN2(engine->dev)) {
+	if (!IS_GEN2(engine)) {
 		I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
 		if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
 			DRM_ERROR("%s : timed out trying to stop ring\n",
@@ -546,7 +545,7 @@ static bool stop_ring(struct intel_engine_cs *engine)
 	I915_WRITE_HEAD(engine, 0);
 	engine->write_tail(engine, 0);
 
-	if (!IS_GEN2(engine->dev)) {
+	if (!IS_GEN2(engine)) {
 		(void)I915_READ_CTL(engine);
 		I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
 	}
@@ -556,8 +555,7 @@ static bool stop_ring(struct intel_engine_cs *engine)
 
 static int init_ring_common(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	struct intel_ringbuffer *ringbuf = engine->buffer;
 	struct drm_i915_gem_object *obj = ringbuf->obj;
 	int ret = 0;
@@ -587,7 +585,7 @@ static int init_ring_common(struct intel_engine_cs *engine)
 		}
 	}
 
-	if (I915_NEED_GFX_HWS(dev))
+	if (I915_NEED_GFX_HWS(dev_priv))
 		intel_ring_setup_status_page(engine);
 	else
 		ring_setup_phys_status_page(engine);
@@ -644,12 +642,10 @@ out:
 void
 intel_fini_pipe_control(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-
 	if (engine->scratch.obj == NULL)
 		return;
 
-	if (INTEL_INFO(dev)->gen >= 5) {
+	if (INTEL_INFO(engine)->gen >= 5) {
 		kunmap(sg_page(engine->scratch.obj->pages->sgl));
 		i915_gem_object_ggtt_unpin(engine->scratch.obj);
 	}
@@ -794,7 +790,7 @@ static int wa_add(struct drm_i915_private *dev_priv,
 static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
 				 i915_reg_t reg)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	struct i915_workarounds *wa = &dev_priv->workarounds;
 	const uint32_t index = wa->hw_whitelist_count[engine->id];
 
@@ -810,8 +806,7 @@ static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
 
 static int gen8_init_workarounds(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 
 	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
 
@@ -862,9 +857,8 @@ static int gen8_init_workarounds(struct intel_engine_cs *engine)
 
 static int bdw_init_workarounds(struct intel_engine_cs *engine)
 {
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	int ret;
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	ret = gen8_init_workarounds(engine);
 	if (ret)
@@ -884,16 +878,15 @@ static int bdw_init_workarounds(struct intel_engine_cs *engine)
 			  /* WaForceContextSaveRestoreNonCoherent:bdw */
 			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
 			  /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
-			  (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
+			  (IS_BDW_GT3(engine) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
 
 	return 0;
 }
 
 static int chv_init_workarounds(struct intel_engine_cs *engine)
 {
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	int ret;
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	ret = gen8_init_workarounds(engine);
 	if (ret)
@@ -910,8 +903,7 @@ static int chv_init_workarounds(struct intel_engine_cs *engine)
 
 static int gen9_init_workarounds(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	uint32_t tmp;
 	int ret;
 
@@ -934,14 +926,14 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
 			  GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
 
 	/* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
-	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
-	    IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+	if (IS_SKL_REVID(engine, 0, SKL_REVID_B0) ||
+	    IS_BXT_REVID(engine, 0, BXT_REVID_A1))
 		WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
 				  GEN9_DG_MIRROR_FIX_ENABLE);
 
 	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
-	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
-	    IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+	if (IS_SKL_REVID(engine, 0, SKL_REVID_B0) ||
+	    IS_BXT_REVID(engine, 0, BXT_REVID_A1)) {
 		WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
 				  GEN9_RHWO_OPTIMIZATION_DISABLE);
 		/*
@@ -952,7 +944,8 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
 	}
 
 	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
-	if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER) || IS_BROXTON(dev))
+	if (IS_SKL_REVID(engine, SKL_REVID_C0, REVID_FOREVER) ||
+	    IS_BROXTON(engine))
 		WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
 				  GEN9_ENABLE_YV12_BUGFIX);
 
@@ -966,20 +959,20 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
 			  GEN9_CCS_TLB_PREFETCH_ENABLE);
 
 	/* WaDisableMaskBasedCammingInRCC:skl,bxt */
-	if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_C0) ||
-	    IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+	if (IS_SKL_REVID(engine, SKL_REVID_C0, SKL_REVID_C0) ||
+	    IS_BXT_REVID(engine, 0, BXT_REVID_A1))
 		WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
 				  PIXEL_MASK_CAMMING_DISABLE);
 
 	/* WaForceContextSaveRestoreNonCoherent:skl,bxt */
 	tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
-	if (IS_SKL_REVID(dev, SKL_REVID_F0, SKL_REVID_F0) ||
-	    IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
+	if (IS_SKL_REVID(engine, SKL_REVID_F0, SKL_REVID_F0) ||
+	    IS_BXT_REVID(engine, BXT_REVID_B0, REVID_FOREVER))
 		tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
 	WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
 
 	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */
-	if (IS_SKYLAKE(dev) || IS_BXT_REVID(dev, 0, BXT_REVID_B0))
+	if (IS_SKYLAKE(engine) || IS_BXT_REVID(engine, 0, BXT_REVID_B0))
 		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
 				  GEN8_SAMPLER_POWER_BYPASS_DIS);
 
@@ -1005,8 +998,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
 
 static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	u8 vals[3] = { 0, 0, 0 };
 	unsigned int i;
 
@@ -1047,9 +1039,8 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
 
 static int skl_init_workarounds(struct intel_engine_cs *engine)
 {
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	int ret;
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	ret = gen9_init_workarounds(engine);
 	if (ret)
@@ -1060,12 +1051,12 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
 	 * until D0 which is the default case so this is equivalent to
 	 * !WaDisablePerCtxtPreemptionGranularityControl:skl
 	 */
-	if (IS_SKL_REVID(dev, SKL_REVID_E0, REVID_FOREVER)) {
+	if (IS_SKL_REVID(engine, SKL_REVID_E0, REVID_FOREVER)) {
 		I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
 			   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
 	}
 
-	if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) {
+	if (IS_SKL_REVID(engine, 0, SKL_REVID_D0)) {
 		/* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
 		I915_WRITE(FF_SLICE_CS_CHICKEN2,
 			   _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
@@ -1074,23 +1065,23 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
 	/* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
 	 * involving this register should also be added to WA batch as required.
 	 */
-	if (IS_SKL_REVID(dev, 0, SKL_REVID_E0))
+	if (IS_SKL_REVID(engine, 0, SKL_REVID_E0))
 		/* WaDisableLSQCROPERFforOCL:skl */
 		I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
 			   GEN8_LQSC_RO_PERF_DIS);
 
 	/* WaEnableGapsTsvCreditFix:skl */
-	if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER)) {
+	if (IS_SKL_REVID(engine, SKL_REVID_C0, REVID_FOREVER)) {
 		I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
 					   GEN9_GAPS_TSV_CREDIT_DISABLE));
 	}
 
 	/* WaDisablePowerCompilerClockGating:skl */
-	if (IS_SKL_REVID(dev, SKL_REVID_B0, SKL_REVID_B0))
+	if (IS_SKL_REVID(engine, SKL_REVID_B0, SKL_REVID_B0))
 		WA_SET_BIT_MASKED(HIZ_CHICKEN,
 				  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
 
-	if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) {
+	if (IS_SKL_REVID(engine, 0, SKL_REVID_F0)) {
 		/*
 		 *Use Force Non-Coherent whenever executing a 3D context. This
 		 * is a workaround for a possible hang in the unlikely event
@@ -1106,13 +1097,13 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
 	}
 
 	/* WaBarrierPerformanceFixDisable:skl */
-	if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0))
+	if (IS_SKL_REVID(engine, SKL_REVID_C0, SKL_REVID_D0))
 		WA_SET_BIT_MASKED(HDC_CHICKEN0,
 				  HDC_FENCE_DEST_SLM_DISABLE |
 				  HDC_BARRIER_PERFORMANCE_DISABLE);
 
 	/* WaDisableSbeCacheDispatchPortSharing:skl */
-	if (IS_SKL_REVID(dev, 0, SKL_REVID_F0))
+	if (IS_SKL_REVID(engine, 0, SKL_REVID_F0))
 		WA_SET_BIT_MASKED(
 			GEN7_HALF_SLICE_CHICKEN1,
 			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
@@ -1127,9 +1118,8 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
 
 static int bxt_init_workarounds(struct intel_engine_cs *engine)
 {
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	int ret;
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	ret = gen9_init_workarounds(engine);
 	if (ret)
@@ -1137,11 +1127,11 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
 
 	/* WaStoreMultiplePTEenable:bxt */
 	/* This is a requirement according to Hardware specification */
-	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+	if (IS_BXT_REVID(engine, 0, BXT_REVID_A1))
 		I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
 
 	/* WaSetClckGatingDisableMedia:bxt */
-	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+	if (IS_BXT_REVID(engine, 0, BXT_REVID_A1)) {
 		I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
 					    ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
 	}
@@ -1151,7 +1141,7 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
 			  STALL_DOP_GATING_DISABLE);
 
 	/* WaDisableSbeCacheDispatchPortSharing:bxt */
-	if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
+	if (IS_BXT_REVID(engine, 0, BXT_REVID_B0)) {
 		WA_SET_BIT_MASKED(
 			GEN7_HALF_SLICE_CHICKEN1,
 			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
@@ -1161,7 +1151,7 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
 	/* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
 	/* WaDisableObjectLevelPreemtionForInstanceId:bxt */
 	/* WaDisableLSQCROPERFforOCL:bxt */
-	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+	if (IS_BXT_REVID(engine, 0, BXT_REVID_A1)) {
 		ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
 		if (ret)
 			return ret;
@@ -1176,24 +1166,23 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
 
 int init_workarounds_ring(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 
 	WARN_ON(engine->id != RCS);
 
 	dev_priv->workarounds.count = 0;
 	dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
 
-	if (IS_BROADWELL(dev))
+	if (IS_BROADWELL(engine))
 		return bdw_init_workarounds(engine);
 
-	if (IS_CHERRYVIEW(dev))
+	if (IS_CHERRYVIEW(engine))
 		return chv_init_workarounds(engine);
 
-	if (IS_SKYLAKE(dev))
+	if (IS_SKYLAKE(engine))
 		return skl_init_workarounds(engine);
 
-	if (IS_BROXTON(dev))
+	if (IS_BROXTON(engine))
 		return bxt_init_workarounds(engine);
 
 	return 0;
@@ -1201,14 +1190,13 @@ int init_workarounds_ring(struct intel_engine_cs *engine)
 
 static int init_render_ring(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	int ret = init_ring_common(engine);
 	if (ret)
 		return ret;
 
 	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
-	if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
+	if (INTEL_INFO(engine)->gen >= 4 && INTEL_INFO(engine)->gen < 7)
 		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
 
 	/* We need to disable the AsyncFlip performance optimisations in order
@@ -1217,22 +1205,22 @@ static int init_render_ring(struct intel_engine_cs *engine)
 	 *
 	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
 	 */
-	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
+	if (INTEL_INFO(engine)->gen >= 6 && INTEL_INFO(engine)->gen < 8)
 		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
 
 	/* Required for the hardware to program scanline values for waiting */
 	/* WaEnableFlushTlbInvalidationMode:snb */
-	if (INTEL_INFO(dev)->gen == 6)
+	if (INTEL_INFO(engine)->gen == 6)
 		I915_WRITE(GFX_MODE,
 			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
 
 	/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
-	if (IS_GEN7(dev))
+	if (IS_GEN7(engine))
 		I915_WRITE(GFX_MODE_GEN7,
 			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
 			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
 
-	if (IS_GEN6(dev)) {
+	if (IS_GEN6(engine)) {
 		/* From the Sandybridge PRM, volume 1 part 3, page 24:
 		 * "If this bit is set, STCunit will have LRA as replacement
 		 *  policy. [...] This bit must be reset.  LRA replacement
@@ -1242,19 +1230,18 @@ static int init_render_ring(struct intel_engine_cs *engine)
 			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
 	}
 
-	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
+	if (INTEL_INFO(engine)->gen >= 6 && INTEL_INFO(engine)->gen < 8)
 		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
-	if (HAS_L3_DPF(dev))
-		I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev));
+	if (HAS_L3_DPF(engine))
+		I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(engine));
 
 	return init_workarounds_ring(engine);
 }
 
 static void render_ring_cleanup(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 
 	if (dev_priv->semaphore_obj) {
 		i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
@@ -1414,8 +1401,7 @@ gen6_add_request(struct drm_i915_gem_request *req)
 static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
 					      u32 seqno)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	return dev_priv->last_seqno < seqno;
+	return to_i915(dev)->last_seqno < seqno;
 }
 
 /**
@@ -1562,7 +1548,7 @@ gen6_ring_get_seqno(struct intel_engine_cs *engine, bool lazy_coherency)
 	 * ivb (and maybe also on snb) by reading from a CS register (like
 	 * ACTHD) before reading the status page. */
 	if (!lazy_coherency) {
-		struct drm_i915_private *dev_priv = engine->dev->dev_private;
+		struct drm_i915_private *dev_priv = to_i915(engine);
 		POSTING_READ(RING_ACTHD(engine->mmio_base));
 	}
 
@@ -1596,8 +1582,7 @@ pc_render_set_seqno(struct intel_engine_cs *engine, u32 seqno)
 static bool
 gen5_ring_get_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	unsigned long flags;
 
 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1614,8 +1599,7 @@ gen5_ring_get_irq(struct intel_engine_cs *engine)
 static void
 gen5_ring_put_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1627,8 +1611,7 @@ gen5_ring_put_irq(struct intel_engine_cs *engine)
 static bool
 i9xx_ring_get_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	unsigned long flags;
 
 	if (!intel_irqs_enabled(dev_priv))
@@ -1648,8 +1631,7 @@ i9xx_ring_get_irq(struct intel_engine_cs *engine)
 static void
 i9xx_ring_put_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1664,8 +1646,7 @@ i9xx_ring_put_irq(struct intel_engine_cs *engine)
 static bool
 i8xx_ring_get_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	unsigned long flags;
 
 	if (!intel_irqs_enabled(dev_priv))
@@ -1685,8 +1666,7 @@ i8xx_ring_get_irq(struct intel_engine_cs *engine)
 static void
 i8xx_ring_put_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1739,8 +1719,7 @@ i9xx_add_request(struct drm_i915_gem_request *req)
 static bool
 gen6_ring_get_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	unsigned long flags;
 
 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1748,10 +1727,10 @@ gen6_ring_get_irq(struct intel_engine_cs *engine)
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (engine->irq_refcount++ == 0) {
-		if (HAS_L3_DPF(dev) && engine->id == RCS)
+		if (HAS_L3_DPF(engine) && engine->id == RCS)
 			I915_WRITE_IMR(engine,
 				       ~(engine->irq_enable_mask |
-					 GT_PARITY_ERROR(dev)));
+					 GT_PARITY_ERROR(engine)));
 		else
 			I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
 		gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
@@ -1764,14 +1743,13 @@ gen6_ring_get_irq(struct intel_engine_cs *engine)
 static void
 gen6_ring_put_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--engine->irq_refcount == 0) {
-		if (HAS_L3_DPF(dev) && engine->id == RCS)
-			I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev));
+		if (HAS_L3_DPF(engine) && engine->id == RCS)
+			I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(engine));
 		else
 			I915_WRITE_IMR(engine, ~0);
 		gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
@@ -1782,8 +1760,7 @@ gen6_ring_put_irq(struct intel_engine_cs *engine)
 static bool
 hsw_vebox_get_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	unsigned long flags;
 
 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1802,8 +1779,7 @@ hsw_vebox_get_irq(struct intel_engine_cs *engine)
 static void
 hsw_vebox_put_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1817,8 +1793,7 @@ hsw_vebox_put_irq(struct intel_engine_cs *engine)
 static bool
 gen8_ring_get_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	unsigned long flags;
 
 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1826,7 +1801,7 @@ gen8_ring_get_irq(struct intel_engine_cs *engine)
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (engine->irq_refcount++ == 0) {
-		if (HAS_L3_DPF(dev) && engine->id == RCS) {
+		if (HAS_L3_DPF(engine) && engine->id == RCS) {
 			I915_WRITE_IMR(engine,
 				       ~(engine->irq_enable_mask |
 					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
@@ -1843,13 +1818,12 @@ gen8_ring_get_irq(struct intel_engine_cs *engine)
 static void
 gen8_ring_put_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--engine->irq_refcount == 0) {
-		if (HAS_L3_DPF(dev) && engine->id == RCS) {
+		if (HAS_L3_DPF(engine) && engine->id == RCS) {
 			I915_WRITE_IMR(engine,
 				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
 		} else {
@@ -1971,7 +1945,7 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
 
 static void cleanup_phys_status_page(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = to_i915(engine->dev);
+	struct drm_i915_private *dev_priv = to_i915(engine);
 
 	if (!dev_priv->status_page_dmah)
 		return;
@@ -2013,7 +1987,7 @@ static int init_status_page(struct intel_engine_cs *engine)
 			goto err_unref;
 
 		flags = 0;
-		if (!HAS_LLC(engine->dev))
+		if (!HAS_LLC(engine))
 			/* On g33, we cannot place HWS above 256MiB, so
 			 * restrict its pinning to the low mappable arena.
 			 * Though this restriction is not documented for
@@ -2047,7 +2021,7 @@ err_unref:
 
 static int init_phys_status_page(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 
 	if (!dev_priv->status_page_dmah) {
 		dev_priv->status_page_dmah =
@@ -2193,7 +2167,7 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
 	 * of the buffer.
 	 */
 	ring->effective_size = size;
-	if (IS_I830(engine->dev) || IS_845G(engine->dev))
+	if (IS_I830(engine) || IS_845G(engine))
 		ring->effective_size -= 2 * CACHELINE_BYTES;
 
 	ring->last_retired_head = -1;
@@ -2282,11 +2256,11 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
 	if (!intel_engine_initialized(engine))
 		return;
 
-	dev_priv = to_i915(engine->dev);
+	dev_priv = to_i915(engine);
 
 	if (engine->buffer) {
 		intel_stop_engine(engine);
-		WARN_ON(!IS_GEN2(engine->dev) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
+		WARN_ON(!IS_GEN2(engine) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
 
 		intel_unpin_ringbuffer_obj(engine->buffer);
 		intel_ringbuffer_free(engine->buffer);
@@ -2296,7 +2270,7 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
 	if (engine->cleanup)
 		engine->cleanup(engine);
 
-	if (I915_NEED_GFX_HWS(engine->dev)) {
+	if (I915_NEED_GFX_HWS(engine)) {
 		cleanup_status_page(engine);
 	} else {
 		WARN_ON(engine->id != RCS);
@@ -2536,13 +2510,12 @@ int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 
 void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 
-	if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
+	if (INTEL_INFO(engine)->gen == 6 || INTEL_INFO(engine)->gen == 7) {
 		I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
 		I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
-		if (HAS_VEBOX(dev))
+		if (HAS_VEBOX(engine))
 			I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
 	}
 
@@ -2553,7 +2526,7 @@ void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
 static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
 				     u32 value)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 
        /* Every tail move must follow the sequence below */
 
@@ -3167,7 +3140,7 @@ intel_stop_engine(struct intel_engine_cs *engine)
 		return;
 
 	ret = intel_engine_idle(engine);
-	if (ret && !i915_reset_in_progress(&to_i915(engine->dev)->gpu_error))
+	if (ret && !i915_reset_in_progress(&to_i915(engine)->gpu_error))
 		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
 			  engine->name, ret);
 
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 512b7faedefd..44f921be04e7 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1583,8 +1583,8 @@ static int wait_for_register_fw(struct drm_i915_private *dev_priv,
 
 static int gen8_request_engine_reset(struct intel_engine_cs *engine)
 {
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	int ret;
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 
 	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
 		      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
@@ -1602,7 +1602,7 @@ static int gen8_request_engine_reset(struct intel_engine_cs *engine)
 
 static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 
 	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
 		      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
-- 
2.8.0.rc3

* Re: [PATCH 3/6] drm/i915: Extend magic to_i915() to work with drm_i915_gem_object
  2016-03-18 21:16 ` [PATCH 3/6] drm/i915: Extend magic to_i915() to work with drm_i915_gem_object Chris Wilson
@ 2016-03-21  9:47   ` Daniel Vetter
  2016-03-21 13:01     ` Jani Nikula
  2016-03-21  9:55   ` Tvrtko Ursulin
  1 sibling, 1 reply; 16+ messages in thread
From: Daniel Vetter @ 2016-03-21  9:47 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

On Fri, Mar 18, 2016 at 09:16:21PM +0000, Chris Wilson wrote:
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>

I'm not sure whether this is too much magic ... If it gathers raving
applause and support from others then I'm ok ;-)
-Daniel
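
The "magic" in question is GCC's __builtin_types_compatible_p() folding
the type test away at compile time. A minimal standalone sketch of the
same dispatch (illustrative names only, relying on GCC statement
expressions; the driver's macro covers more types and uses BUILD_BUG()
to turn an unknown type into a build failure):

	#include <stdio.h>

	struct foo { int value; };
	struct bar { struct foo foo; };

	/* Pick a conversion based on the static type of p; the untaken
	 * branches are constant-folded away, and an unrecognised type
	 * falls through to the trap at runtime. */
	#define to_foo(p) ({ \
		struct foo *__f; \
		if (__builtin_types_compatible_p(typeof(*(p)), struct foo)) \
			__f = (struct foo *)(p); \
		else if (__builtin_types_compatible_p(typeof(*(p)), struct bar)) \
			__f = &((struct bar *)(p))->foo; \
		else \
			__builtin_trap(); \
		__f; \
	})

	int main(void)
	{
		struct bar b = { .foo = { .value = 42 } };
		struct foo f = { .value = 7 };

		/* prints "42 7": both pointer types reach the same helper */
		printf("%d %d\n", to_foo(&b)->value, to_foo(&f)->value);
		return 0;
	}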
> ---
>  drivers/gpu/drm/i915/i915_debugfs.c        |  7 +++---
>  drivers/gpu/drm/i915/i915_drv.h            | 15 ++++++++-----
>  drivers/gpu/drm/i915/i915_gem.c            | 34 +++++++++++++-----------------
>  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 17 ++++++---------
>  drivers/gpu/drm/i915/i915_gem_fence.c      |  8 +++----
>  drivers/gpu/drm/i915/i915_gem_gtt.c        |  2 +-
>  drivers/gpu/drm/i915/i915_gem_stolen.c     |  4 +---
>  drivers/gpu/drm/i915/i915_gem_tiling.c     |  4 ++--
>  drivers/gpu/drm/i915/i915_gem_userptr.c    |  4 ++--
>  drivers/gpu/drm/i915/intel_ringbuffer.c    |  2 +-
>  10 files changed, 46 insertions(+), 51 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index e0ba3e38000f..33ddcdf6d046 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -128,7 +128,6 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
>  static void
>  describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
>  {
> -	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
>  	struct intel_engine_cs *engine;
>  	struct i915_vma *vma;
>  	int pin_count = 0;
> @@ -143,13 +142,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
>  		   obj->base.size / 1024,
>  		   obj->base.read_domains,
>  		   obj->base.write_domain);
> -	for_each_engine(engine, dev_priv, i)
> +	for_each_engine(engine, obj, i)
>  		seq_printf(m, "%x ",
>  				i915_gem_request_get_seqno(obj->last_read_req[i]));
>  	seq_printf(m, "] %x %x%s%s%s",
>  		   i915_gem_request_get_seqno(obj->last_write_req),
>  		   i915_gem_request_get_seqno(obj->last_fenced_req),
> -		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
> +		   i915_cache_level_str(to_i915(obj), obj->cache_level),
>  		   obj->dirty ? " dirty" : "",
>  		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
>  	if (obj->base.name)
> @@ -339,7 +338,7 @@ static int per_file_stats(int id, void *ptr, void *data)
>  	if (obj->base.name || obj->base.dma_buf)
>  		stats->shared += obj->base.size;
>  
> -	if (USES_FULL_PPGTT(obj->base.dev)) {
> +	if (USES_FULL_PPGTT(obj)) {
>  		list_for_each_entry(vma, &obj->vma_list, obj_link) {
>  			struct i915_hw_ppgtt *ppgtt;
>  
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 0c9fe00d3e83..92365f047e53 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -2186,6 +2186,12 @@ struct drm_i915_gem_object {
>  };
>  #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
>  
> +static inline struct drm_i915_private *
> +__obj_to_i915(const struct drm_i915_gem_object *obj)
> +{
> +	return __to_i915(obj->base.dev);
> +}
> +
>  void i915_gem_track_fb(struct drm_i915_gem_object *old,
>  		       struct drm_i915_gem_object *new,
>  		       unsigned frontbuffer_bits);
> @@ -2455,6 +2461,8 @@ struct drm_i915_cmd_table {
>  		__p = (struct drm_i915_private *)p; \
>  	else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \
>  		__p = __to_i915((struct drm_device *)p); \
> +	else if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_gem_object)) \
> +		__p = __obj_to_i915((struct drm_i915_gem_object *)p); \
>  	else \
>  		BUILD_BUG(); \
>  	__p; \
> @@ -3132,8 +3140,7 @@ i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
>  bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
>  
>  /* Some GGTT VM helpers */
> -#define i915_obj_to_ggtt(obj) \
> -	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->ggtt.base)
> +#define i915_obj_to_ggtt(obj) (&to_i915(obj)->ggtt.base)
>  
>  static inline struct i915_hw_ppgtt *
>  i915_vm_to_ppgtt(struct i915_address_space *vm)
> @@ -3282,9 +3289,7 @@ void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
>  /* i915_gem_tiling.c */
>  static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
>  {
> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> -
> -	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
> +	return to_i915(obj)->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
>  		obj->tiling_mode != I915_TILING_NONE;
>  }
>  
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 8588c83abb35..710a6bbc985e 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -361,14 +361,12 @@ out:
>  
>  void *i915_gem_object_alloc(struct drm_device *dev)
>  {
> -	struct drm_i915_private *dev_priv = dev->dev_private;
> -	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
> +	return kmem_cache_zalloc(to_i915(dev)->objects, GFP_KERNEL);
>  }
>  
>  void i915_gem_object_free(struct drm_i915_gem_object *obj)
>  {
> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> -	kmem_cache_free(dev_priv->objects, obj);
> +	kmem_cache_free(to_i915(obj)->objects, obj);
>  }
>  
>  static int
> @@ -2028,7 +2026,7 @@ i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
>  
>  static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
>  {
> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> +	struct drm_i915_private *dev_priv = to_i915(obj);
>  	int ret;
>  
>  	if (drm_vma_node_has_offset(&obj->base.vma_node))
> @@ -2241,7 +2239,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
>  static int
>  i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
>  {
> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> +	struct drm_i915_private *dev_priv = to_i915(obj);
>  	int page_count, i;
>  	struct address_space *mapping;
>  	struct sg_table *st;
> @@ -2372,7 +2370,7 @@ err_pages:
>  int
>  i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
>  {
> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> +	struct drm_i915_private *dev_priv = to_i915(obj);
>  	const struct drm_i915_gem_object_ops *ops = obj->ops;
>  	int ret;
>  
> @@ -2449,8 +2447,7 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
>  	 * so that we don't steal from recently used but inactive objects
>  	 * (unless we are forced to ofc!)
>  	 */
> -	list_move_tail(&obj->global_list,
> -		       &to_i915(obj->base.dev)->mm.bound_list);
> +	list_move_tail(&obj->global_list, &to_i915(obj)->mm.bound_list);
>  
>  	list_for_each_entry(vma, &obj->vma_list, obj_link) {
>  		if (!list_empty(&vma->vm_link))
> @@ -3172,7 +3169,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
>  		return 0;
>  
>  	if (!i915_semaphore_is_enabled(obj->base.dev)) {
> -		struct drm_i915_private *i915 = to_i915(obj->base.dev);
> +		struct drm_i915_private *i915 = to_i915(obj);
>  		ret = __i915_wait_request(from_req,
>  					  atomic_read(&i915->gpu_error.reset_counter),
>  					  i915->mm.interruptible,
> @@ -3312,7 +3309,7 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
>  static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
>  {
>  	struct drm_i915_gem_object *obj = vma->obj;
> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> +	struct drm_i915_private *dev_priv = to_i915(obj);
>  	int ret;
>  
>  	if (list_empty(&vma->obj_link))
> @@ -3772,7 +3769,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
>  	vma = i915_gem_obj_to_ggtt(obj);
>  	if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
>  		list_move_tail(&vma->vm_link,
> -			       &to_i915(obj->base.dev)->ggtt.base.inactive_list);
> +			       &to_i915(obj)->ggtt.base.inactive_list);
>  
>  	return 0;
>  }
> @@ -4010,7 +4007,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
>  	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
>  	 */
>  	ret = i915_gem_object_set_cache_level(obj,
> -					      HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
> +					      HAS_WT(obj) ? I915_CACHE_WT : I915_CACHE_NONE);
>  	if (ret)
>  		goto err_unpin_display;
>  
> @@ -4209,7 +4206,7 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
>  		     (vma->node.start & (fence_alignment - 1)) == 0);
>  
>  	mappable = (vma->node.start + fence_size <=
> -		    to_i915(obj->base.dev)->ggtt.mappable_end);
> +		    to_i915(obj)->ggtt.mappable_end);
>  
>  	obj->map_and_fenceable = mappable && fenceable;
>  }
> @@ -4221,7 +4218,7 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
>  		       uint32_t alignment,
>  		       uint64_t flags)
>  {
> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> +	struct drm_i915_private *dev_priv = to_i915(obj);
>  	struct i915_vma *vma;
>  	unsigned bound;
>  	int ret;
> @@ -4456,7 +4453,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
>  	obj->fence_reg = I915_FENCE_REG_NONE;
>  	obj->madv = I915_MADV_WILLNEED;
>  
> -	i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
> +	i915_gem_info_add_obj(to_i915(obj), obj->base.size);
>  }
>  
>  static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
> @@ -4545,8 +4542,7 @@ static bool discard_backing_storage(struct drm_i915_gem_object *obj)
>  void i915_gem_free_object(struct drm_gem_object *gem_obj)
>  {
>  	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
> -	struct drm_device *dev = obj->base.dev;
> -	struct drm_i915_private *dev_priv = dev->dev_private;
> +	struct drm_i915_private *dev_priv = to_i915(obj);
>  	struct i915_vma *vma, *next;
>  
>  	intel_runtime_pm_get(dev_priv);
> @@ -4647,7 +4643,7 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
>  
>  	list_del(&vma->obj_link);
>  
> -	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
> +	kmem_cache_free(to_i915(vma->obj)->vmas, vma);
>  }
>  
>  static void
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index 374a0cb7a092..39ed403b9de3 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -241,7 +241,7 @@ static void eb_destroy(struct eb_vmas *eb)
>  
>  static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
>  {
> -	return (HAS_LLC(obj->base.dev) ||
> +	return (HAS_LLC(obj) ||
>  		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
>  		obj->cache_level != I915_CACHE_NONE);
>  }
> @@ -312,8 +312,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
>  		   struct drm_i915_gem_relocation_entry *reloc,
>  		   uint64_t target_offset)
>  {
> -	struct drm_device *dev = obj->base.dev;
> -	struct drm_i915_private *dev_priv = dev->dev_private;
> +	struct drm_i915_private *dev_priv = to_i915(obj);
>  	uint64_t delta = relocation_target(reloc, target_offset);
>  	uint64_t offset;
>  	void __iomem *reloc_page;
> @@ -334,7 +333,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
>  					      offset & PAGE_MASK);
>  	iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
>  
> -	if (INTEL_INFO(dev)->gen >= 8) {
> +	if (INTEL_INFO(obj)->gen >= 8) {
>  		offset += sizeof(uint32_t);
>  
>  		if (offset_in_page(offset) == 0) {
> @@ -367,7 +366,6 @@ relocate_entry_clflush(struct drm_i915_gem_object *obj,
>  		       struct drm_i915_gem_relocation_entry *reloc,
>  		       uint64_t target_offset)
>  {
> -	struct drm_device *dev = obj->base.dev;
>  	uint32_t page_offset = offset_in_page(reloc->offset);
>  	uint64_t delta = relocation_target(reloc, target_offset);
>  	char *vaddr;
> @@ -381,7 +379,7 @@ relocate_entry_clflush(struct drm_i915_gem_object *obj,
>  				reloc->offset >> PAGE_SHIFT));
>  	clflush_write32(vaddr + page_offset, lower_32_bits(delta));
>  
> -	if (INTEL_INFO(dev)->gen >= 8) {
> +	if (INTEL_INFO(obj)->gen >= 8) {
>  		page_offset = offset_in_page(page_offset + sizeof(uint32_t));
>  
>  		if (page_offset == 0) {
> @@ -403,7 +401,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
>  				   struct eb_vmas *eb,
>  				   struct drm_i915_gem_relocation_entry *reloc)
>  {
> -	struct drm_device *dev = obj->base.dev;
>  	struct drm_gem_object *target_obj;
>  	struct drm_i915_gem_object *target_i915_obj;
>  	struct i915_vma *target_vma;
> @@ -422,7 +419,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
>  	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
>  	 * pipe_control writes because the gpu doesn't properly redirect them
>  	 * through the ppgtt for non_secure batchbuffers. */
> -	if (unlikely(IS_GEN6(dev) &&
> +	if (unlikely(IS_GEN6(obj) &&
>  	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
>  		ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
>  				    PIN_GLOBAL);
> @@ -464,7 +461,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
>  
>  	/* Check that the relocation address is valid... */
>  	if (unlikely(reloc->offset >
> -		obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
> +		obj->base.size - (INTEL_INFO(obj)->gen >= 8 ? 8 : 4))) {
>  		DRM_DEBUG("Relocation beyond object bounds: "
>  			  "obj %p target %d offset %d size %d.\n",
>  			  obj, reloc->target_handle,
> @@ -672,7 +669,7 @@ need_reloc_mappable(struct i915_vma *vma)
>  		return false;
>  
>  	/* See also use_cpu_reloc() */
> -	if (HAS_LLC(vma->obj->base.dev))
> +	if (HAS_LLC(vma->obj))
>  		return false;
>  
>  	if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
> diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
> index 598198543dcd..1ef75bc2220c 100644
> --- a/drivers/gpu/drm/i915/i915_gem_fence.c
> +++ b/drivers/gpu/drm/i915/i915_gem_fence.c
> @@ -229,7 +229,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
>  					 struct drm_i915_fence_reg *fence,
>  					 bool enable)
>  {
> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> +	struct drm_i915_private *dev_priv = to_i915(obj);
>  	int reg = fence_number(dev_priv, fence);
>  
>  	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
> @@ -286,7 +286,7 @@ i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
>  int
>  i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
>  {
> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> +	struct drm_i915_private *dev_priv = to_i915(obj);
>  	struct drm_i915_fence_reg *fence;
>  	int ret;
>  
> @@ -433,7 +433,7 @@ bool
>  i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
>  {
>  	if (obj->fence_reg != I915_FENCE_REG_NONE) {
> -		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> +		struct drm_i915_private *dev_priv = to_i915(obj);
>  		struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
>  
>  		WARN_ON(!ggtt_vma ||
> @@ -457,7 +457,7 @@ void
>  i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
>  {
>  	if (obj->fence_reg != I915_FENCE_REG_NONE) {
> -		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> +		struct drm_i915_private *dev_priv = to_i915(obj);
>  		WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
>  		dev_priv->fence_regs[obj->fence_reg].pin_count--;
>  	}
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index 0715bb74d306..6447a5f9661e 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -3305,7 +3305,7 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
>  	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
>  		return ERR_PTR(-EINVAL);
>  
> -	vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
> +	vma = kmem_cache_zalloc(to_i915(obj)->vmas, GFP_KERNEL);
>  	if (vma == NULL)
>  		return ERR_PTR(-ENOMEM);
>  
> diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
> index de891c928b2f..224389d077c7 100644
> --- a/drivers/gpu/drm/i915/i915_gem_stolen.c
> +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
> @@ -540,10 +540,8 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
>  static void
>  i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
>  {
> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> -
>  	if (obj->stolen) {
> -		i915_gem_stolen_remove_node(dev_priv, obj->stolen);
> +		i915_gem_stolen_remove_node(to_i915(obj), obj->stolen);
>  		kfree(obj->stolen);
>  		obj->stolen = NULL;
>  	}
> diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
> index 7410f6c962e7..bc4cb7f4fe80 100644
> --- a/drivers/gpu/drm/i915/i915_gem_tiling.c
> +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
> @@ -122,10 +122,10 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
>  	if (tiling_mode == I915_TILING_NONE)
>  		return true;
>  
> -	if (INTEL_INFO(obj->base.dev)->gen >= 4)
> +	if (INTEL_INFO(obj)->gen >= 4)
>  		return true;
>  
> -	if (INTEL_INFO(obj->base.dev)->gen == 3) {
> +	if (INTEL_INFO(obj)->gen == 3) {
>  		if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
>  			return false;
>  	} else {
> diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
> index 54088a4d6498..f0e3ade59177 100644
> --- a/drivers/gpu/drm/i915/i915_gem_userptr.c
> +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
> @@ -303,7 +303,7 @@ __i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
>  static int
>  i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
>  {
> -	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
> +	struct drm_i915_private *dev_priv = to_i915(obj);
>  	struct i915_mm_struct *mm;
>  	int ret = 0;
>  
> @@ -376,7 +376,7 @@ i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
>  
>  	kref_put_mutex(&obj->userptr.mm->kref,
>  		       __i915_mm_struct_free,
> -		       &to_i915(obj->base.dev)->mm_lock);
> +		       &to_i915(obj)->mm_lock);
>  	obj->userptr.mm = NULL;
>  }
>  
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index df0ef5bba8e5..f981bddc9bbf 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -2069,7 +2069,7 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
>  
>  void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
>  {
> -	if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
> +	if (HAS_LLC(ringbuf->obj) && !ringbuf->obj->stolen)
>  		vunmap(ringbuf->virtual_start);
>  	else
>  		iounmap(ringbuf->virtual_start);
> -- 
> 2.8.0.rc3
> 
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 3/6] drm/i915: Extend magic to_i915() to work with drm_i915_gem_object
  2016-03-18 21:16 ` [PATCH 3/6] drm/i915: Extend magic to_i915() to work with drm_i915_gem_object Chris Wilson
  2016-03-21  9:47   ` Daniel Vetter
@ 2016-03-21  9:55   ` Tvrtko Ursulin
  2016-03-21 10:04     ` Chris Wilson
  1 sibling, 1 reply; 16+ messages in thread
From: Tvrtko Ursulin @ 2016-03-21  9:55 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 18/03/16 21:16, Chris Wilson wrote:
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/i915_debugfs.c        |  7 +++---
>   drivers/gpu/drm/i915/i915_drv.h            | 15 ++++++++-----
>   drivers/gpu/drm/i915/i915_gem.c            | 34 +++++++++++++-----------------
>   drivers/gpu/drm/i915/i915_gem_execbuffer.c | 17 ++++++---------
>   drivers/gpu/drm/i915/i915_gem_fence.c      |  8 +++----
>   drivers/gpu/drm/i915/i915_gem_gtt.c        |  2 +-
>   drivers/gpu/drm/i915/i915_gem_stolen.c     |  4 +---
>   drivers/gpu/drm/i915/i915_gem_tiling.c     |  4 ++--
>   drivers/gpu/drm/i915/i915_gem_userptr.c    |  4 ++--
>   drivers/gpu/drm/i915/intel_ringbuffer.c    |  2 +-
>   10 files changed, 46 insertions(+), 51 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index e0ba3e38000f..33ddcdf6d046 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -128,7 +128,6 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
>   static void
>   describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
>   {
> -	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
>   	struct intel_engine_cs *engine;
>   	struct i915_vma *vma;
>   	int pin_count = 0;
> @@ -143,13 +142,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
>   		   obj->base.size / 1024,
>   		   obj->base.read_domains,
>   		   obj->base.write_domain);
> -	for_each_engine(engine, dev_priv, i)
> +	for_each_engine(engine, obj, i)
>   		seq_printf(m, "%x ",
>   				i915_gem_request_get_seqno(obj->last_read_req[i]));
>   	seq_printf(m, "] %x %x%s%s%s",
>   		   i915_gem_request_get_seqno(obj->last_write_req),
>   		   i915_gem_request_get_seqno(obj->last_fenced_req),
> -		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
> +		   i915_cache_level_str(to_i915(obj), obj->cache_level),
>   		   obj->dirty ? " dirty" : "",
>   		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
>   	if (obj->base.name)
> @@ -339,7 +338,7 @@ static int per_file_stats(int id, void *ptr, void *data)
>   	if (obj->base.name || obj->base.dma_buf)
>   		stats->shared += obj->base.size;
>
> -	if (USES_FULL_PPGTT(obj->base.dev)) {
> +	if (USES_FULL_PPGTT(obj)) {
>   		list_for_each_entry(vma, &obj->vma_list, obj_link) {
>   			struct i915_hw_ppgtt *ppgtt;
>
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 0c9fe00d3e83..92365f047e53 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -2186,6 +2186,12 @@ struct drm_i915_gem_object {
>   };
>   #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
>
> +static inline struct drm_i915_private *
> +__obj_to_i915(const struct drm_i915_gem_object *obj)
> +{
> +	return __to_i915(obj->base.dev);
> +}
> +
>   void i915_gem_track_fb(struct drm_i915_gem_object *old,
>   		       struct drm_i915_gem_object *new,
>   		       unsigned frontbuffer_bits);
> @@ -2455,6 +2461,8 @@ struct drm_i915_cmd_table {
>   		__p = (struct drm_i915_private *)p; \
>   	else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \
>   		__p = __to_i915((struct drm_device *)p); \
> +	else if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_gem_object)) \
> +		__p = __obj_to_i915((struct drm_i915_gem_object *)p); \
>   	else \
>   		BUILD_BUG(); \
>   	__p; \
> @@ -3132,8 +3140,7 @@ i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
>   bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
>
>   /* Some GGTT VM helpers */
> -#define i915_obj_to_ggtt(obj) \
> -	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->ggtt.base)
> +#define i915_obj_to_ggtt(obj) (&to_i915(obj)->ggtt.base)
>
>   static inline struct i915_hw_ppgtt *
>   i915_vm_to_ppgtt(struct i915_address_space *vm)
> @@ -3282,9 +3289,7 @@ void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
>   /* i915_gem_tiling.c */
>   static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
>   {
> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> -
> -	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
> +	return to_i915(obj)->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
>   		obj->tiling_mode != I915_TILING_NONE;
>   }
>
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 8588c83abb35..710a6bbc985e 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -361,14 +361,12 @@ out:
>
>   void *i915_gem_object_alloc(struct drm_device *dev)
>   {
> -	struct drm_i915_private *dev_priv = dev->dev_private;
> -	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
> +	return kmem_cache_zalloc(to_i915(dev)->objects, GFP_KERNEL);
>   }
>
>   void i915_gem_object_free(struct drm_i915_gem_object *obj)
>   {
> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> -	kmem_cache_free(dev_priv->objects, obj);
> +	kmem_cache_free(to_i915(obj)->objects, obj);
>   }
>
>   static int
> @@ -2028,7 +2026,7 @@ i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
>
>   static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
>   {
> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> +	struct drm_i915_private *dev_priv = to_i915(obj);
>   	int ret;
>
>   	if (drm_vma_node_has_offset(&obj->base.vma_node))
> @@ -2241,7 +2239,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
>   static int
>   i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
>   {
> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> +	struct drm_i915_private *dev_priv = to_i915(obj);
>   	int page_count, i;
>   	struct address_space *mapping;
>   	struct sg_table *st;
> @@ -2372,7 +2370,7 @@ err_pages:
>   int
>   i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
>   {
> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> +	struct drm_i915_private *dev_priv = to_i915(obj);
>   	const struct drm_i915_gem_object_ops *ops = obj->ops;
>   	int ret;
>
> @@ -2449,8 +2447,7 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
>   	 * so that we don't steal from recently used but inactive objects
>   	 * (unless we are forced to ofc!)
>   	 */
> -	list_move_tail(&obj->global_list,
> -		       &to_i915(obj->base.dev)->mm.bound_list);
> +	list_move_tail(&obj->global_list, &to_i915(obj)->mm.bound_list);
>
>   	list_for_each_entry(vma, &obj->vma_list, obj_link) {
>   		if (!list_empty(&vma->vm_link))
> @@ -3172,7 +3169,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
>   		return 0;
>
>   	if (!i915_semaphore_is_enabled(obj->base.dev)) {
> -		struct drm_i915_private *i915 = to_i915(obj->base.dev);
> +		struct drm_i915_private *i915 = to_i915(obj);
>   		ret = __i915_wait_request(from_req,
>   					  atomic_read(&i915->gpu_error.reset_counter),
>   					  i915->mm.interruptible,
> @@ -3312,7 +3309,7 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
>   static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
>   {
>   	struct drm_i915_gem_object *obj = vma->obj;
> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> +	struct drm_i915_private *dev_priv = to_i915(obj);
>   	int ret;
>
>   	if (list_empty(&vma->obj_link))
> @@ -3772,7 +3769,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
>   	vma = i915_gem_obj_to_ggtt(obj);
>   	if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
>   		list_move_tail(&vma->vm_link,
> -			       &to_i915(obj->base.dev)->ggtt.base.inactive_list);
> +			       &to_i915(obj)->ggtt.base.inactive_list);
>
>   	return 0;
>   }
> @@ -4010,7 +4007,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
>   	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
>   	 */
>   	ret = i915_gem_object_set_cache_level(obj,
> -					      HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
> +					      HAS_WT(obj) ? I915_CACHE_WT : I915_CACHE_NONE);
>   	if (ret)
>   		goto err_unpin_display;
>
> @@ -4209,7 +4206,7 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
>   		     (vma->node.start & (fence_alignment - 1)) == 0);
>
>   	mappable = (vma->node.start + fence_size <=
> -		    to_i915(obj->base.dev)->ggtt.mappable_end);
> +		    to_i915(obj)->ggtt.mappable_end);
>
>   	obj->map_and_fenceable = mappable && fenceable;
>   }
> @@ -4221,7 +4218,7 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
>   		       uint32_t alignment,
>   		       uint64_t flags)
>   {
> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> +	struct drm_i915_private *dev_priv = to_i915(obj);
>   	struct i915_vma *vma;
>   	unsigned bound;
>   	int ret;
> @@ -4456,7 +4453,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
>   	obj->fence_reg = I915_FENCE_REG_NONE;
>   	obj->madv = I915_MADV_WILLNEED;
>
> -	i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
> +	i915_gem_info_add_obj(to_i915(obj), obj->base.size);
>   }
>
>   static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
> @@ -4545,8 +4542,7 @@ static bool discard_backing_storage(struct drm_i915_gem_object *obj)
>   void i915_gem_free_object(struct drm_gem_object *gem_obj)
>   {
>   	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
> -	struct drm_device *dev = obj->base.dev;
> -	struct drm_i915_private *dev_priv = dev->dev_private;
> +	struct drm_i915_private *dev_priv = to_i915(obj);
>   	struct i915_vma *vma, *next;
>
>   	intel_runtime_pm_get(dev_priv);
> @@ -4647,7 +4643,7 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
>
>   	list_del(&vma->obj_link);
>
> -	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
> +	kmem_cache_free(to_i915(vma->obj)->vmas, vma);
>   }
>
>   static void
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index 374a0cb7a092..39ed403b9de3 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -241,7 +241,7 @@ static void eb_destroy(struct eb_vmas *eb)
>
>   static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
>   {
> -	return (HAS_LLC(obj->base.dev) ||
> +	return (HAS_LLC(obj) ||
>   		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
>   		obj->cache_level != I915_CACHE_NONE);
>   }
> @@ -312,8 +312,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
>   		   struct drm_i915_gem_relocation_entry *reloc,
>   		   uint64_t target_offset)
>   {
> -	struct drm_device *dev = obj->base.dev;
> -	struct drm_i915_private *dev_priv = dev->dev_private;
> +	struct drm_i915_private *dev_priv = to_i915(obj);
>   	uint64_t delta = relocation_target(reloc, target_offset);
>   	uint64_t offset;
>   	void __iomem *reloc_page;
> @@ -334,7 +333,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
>   					      offset & PAGE_MASK);
>   	iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
>
> -	if (INTEL_INFO(dev)->gen >= 8) {
> +	if (INTEL_INFO(obj)->gen >= 8) {
>   		offset += sizeof(uint32_t);
>
>   		if (offset_in_page(offset) == 0) {
> @@ -367,7 +366,6 @@ relocate_entry_clflush(struct drm_i915_gem_object *obj,
>   		       struct drm_i915_gem_relocation_entry *reloc,
>   		       uint64_t target_offset)
>   {
> -	struct drm_device *dev = obj->base.dev;
>   	uint32_t page_offset = offset_in_page(reloc->offset);
>   	uint64_t delta = relocation_target(reloc, target_offset);
>   	char *vaddr;
> @@ -381,7 +379,7 @@ relocate_entry_clflush(struct drm_i915_gem_object *obj,
>   				reloc->offset >> PAGE_SHIFT));
>   	clflush_write32(vaddr + page_offset, lower_32_bits(delta));
>
> -	if (INTEL_INFO(dev)->gen >= 8) {
> +	if (INTEL_INFO(obj)->gen >= 8) {
>   		page_offset = offset_in_page(page_offset + sizeof(uint32_t));
>
>   		if (page_offset == 0) {
> @@ -403,7 +401,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
>   				   struct eb_vmas *eb,
>   				   struct drm_i915_gem_relocation_entry *reloc)
>   {
> -	struct drm_device *dev = obj->base.dev;
>   	struct drm_gem_object *target_obj;
>   	struct drm_i915_gem_object *target_i915_obj;
>   	struct i915_vma *target_vma;
> @@ -422,7 +419,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
>   	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
>   	 * pipe_control writes because the gpu doesn't properly redirect them
>   	 * through the ppgtt for non_secure batchbuffers. */
> -	if (unlikely(IS_GEN6(dev) &&
> +	if (unlikely(IS_GEN6(obj) &&
>   	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
>   		ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
>   				    PIN_GLOBAL);
> @@ -464,7 +461,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
>
>   	/* Check that the relocation address is valid... */
>   	if (unlikely(reloc->offset >
> -		obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
> +		obj->base.size - (INTEL_INFO(obj)->gen >= 8 ? 8 : 4))) {
>   		DRM_DEBUG("Relocation beyond object bounds: "
>   			  "obj %p target %d offset %d size %d.\n",
>   			  obj, reloc->target_handle,
> @@ -672,7 +669,7 @@ need_reloc_mappable(struct i915_vma *vma)
>   		return false;
>
>   	/* See also use_cpu_reloc() */
> -	if (HAS_LLC(vma->obj->base.dev))
> +	if (HAS_LLC(vma->obj))

Things like HAS_LLC(obj) and HAS_LLC(engine) are IMHO illogical and 
non-intuitive.

to_i915(various) makes sense, because any i915 object is exactly that - 
an i915 object. for_each_engine is also OK. But going further than that 
is not desirable.
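
To illustrate with hypothetical call sites (not from this series;
do_cpu_relocation() is made up):

	/* Fine: "give me the i915 device behind this object". */
	struct drm_i915_private *dev_priv = to_i915(obj);

	/* Jarring: the object does not itself have an LLC, the
	 * device it belongs to does.
	 */
	if (HAS_LLC(obj))
		do_cpu_relocation(obj);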

Regards,

Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 3/6] drm/i915: Extend magic to_i915() to work with drm_i915_gem_object
  2016-03-21  9:55   ` Tvrtko Ursulin
@ 2016-03-21 10:04     ` Chris Wilson
  0 siblings, 0 replies; 16+ messages in thread
From: Chris Wilson @ 2016-03-21 10:04 UTC (permalink / raw)
  To: Tvrtko Ursulin; +Cc: intel-gfx

On Mon, Mar 21, 2016 at 09:55:10AM +0000, Tvrtko Ursulin wrote:
> >  	/* See also use_cpu_reloc() */
> >-	if (HAS_LLC(vma->obj->base.dev))
> >+	if (HAS_LLC(vma->obj))
> 
> Things like HAS_LLC(obj) and HAS_LLC(engine) are IMHO illogical and
> non-intuitive.

"Does this object/engine have last-level cache coherency?" seems to make
sense to me. The goal has been to shift these to using dev_priv, for two
reasons: on the hotpaths, careless pointer dereferencing adds extra memory
loads that are not immediately obvious unless you are familiar with the
macros; and widespread dev -> dev_private -> dev round trips add a few
extra pages of object size just from the extra mov instructions!

INTEL_INFO() is definitely a more subjective matter of taste, but anything
other than INTEL_INFO(i915) is a stopgap imo.
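
As a sketch of the cost (illustrative functions, not code from the
driver):

	/* Each macro use re-derives dev_priv: obj->base.dev is one
	 * dependent load and dev->dev_private a second, before the
	 * feature flag itself can be read.
	 */
	static bool example_chased(struct drm_i915_gem_object *obj)
	{
		return HAS_LLC(obj->base.dev) && HAS_WT(obj->base.dev);
	}

	/* dev_priv is already at hand (typically in a register), so
	 * each feature test is a single load.
	 */
	static bool example_cached(struct drm_i915_private *dev_priv)
	{
		return HAS_LLC(dev_priv) && HAS_WT(dev_priv);
	}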
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* ✗ Fi.CI.BAT: warning for series starting with [1/6] drm/i915: Rename the magic polymorphic macro __I915__
  2016-03-18 21:16 [PATCH 1/6] drm/i915: Rename the magic polymorphic macro __I915__ Chris Wilson
                   ` (4 preceding siblings ...)
  2016-03-18 21:16 ` [PATCH 6/6] drm/i915: Teach to_i915() how to extract drm_i915_private from engines Chris Wilson
@ 2016-03-21 12:13 ` Patchwork
  5 siblings, 0 replies; 16+ messages in thread
From: Patchwork @ 2016-03-21 12:13 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [1/6] drm/i915: Rename the magic polymorphic macro __I915__
URL   : https://patchwork.freedesktop.org/series/4650/
State : warning

== Summary ==

Series 4650v1 Series without cover letter
http://patchwork.freedesktop.org/api/1.0/series/4650/revisions/1/mbox/

Test gem_ringfill:
        Subgroup basic-default-s3:
                dmesg-warn -> PASS       (bsw-nuc-2)
Test kms_flip:
        Subgroup basic-flip-vs-dpms:
                pass       -> DMESG-WARN (ilk-hp8440p) UNSTABLE
        Subgroup basic-flip-vs-wf_vblank:
                fail       -> PASS       (byt-nuc)
Test kms_force_connector_basic:
        Subgroup force-load-detect:
                pass       -> DMESG-WARN (snb-x220t)
Test kms_pipe_crc_basic:
        Subgroup nonblocking-crc-pipe-a:
                dmesg-warn -> PASS       (snb-x220t)
        Subgroup suspend-read-crc-pipe-c:
                pass       -> DMESG-WARN (bsw-nuc-2)
                incomplete -> PASS       (hsw-gt2)
Test pm_rpm:
        Subgroup basic-rte:
                pass       -> DMESG-WARN (bsw-nuc-2)

bdw-nuci7        total:194  pass:182  dwarn:0   dfail:0   fail:0   skip:12 
bdw-ultra        total:194  pass:173  dwarn:0   dfail:0   fail:0   skip:21 
bsw-nuc-2        total:194  pass:155  dwarn:2   dfail:0   fail:0   skip:37 
byt-nuc          total:194  pass:159  dwarn:0   dfail:0   fail:0   skip:35 
hsw-brixbox      total:194  pass:172  dwarn:0   dfail:0   fail:0   skip:22 
hsw-gt2          total:194  pass:176  dwarn:1   dfail:0   fail:0   skip:17 
ilk-hp8440p      total:194  pass:130  dwarn:1   dfail:0   fail:0   skip:63 
ivb-t430s        total:194  pass:169  dwarn:0   dfail:0   fail:0   skip:25 
snb-dellxps      total:194  pass:159  dwarn:1   dfail:0   fail:0   skip:34 
snb-x220t        total:194  pass:159  dwarn:1   dfail:0   fail:1   skip:33 

Results at /archive/results/CI_IGT_test/Patchwork_1652/

e7a7673e9840fe8b50a5a2894c75565ec7858a00 drm-intel-nightly: 2016y-03m-19d-10h-09m-53s UTC integration manifest
fbb3d3722b619f6f6c9324bae561fca5b2dfce2a drm/i915: Teach to_i915() how to extract drm_i915_private from engines
28093cabed7845945d2a11f4caf178e3a03f77bb drm/i915: Teach to_i915() how to extract drm_i915_private from requests
8457d817df25f95902c327776ce80d22dce7de89 drm/i915: Use to_i915() instead of guc_to_i915()
135238c386d70ca2dd0415c183151eaf6e6bc5aa drm/i915: Extend magic to_i915() to work with drm_i915_gem_object
523bef5ba430cb1d27ae227c033cd87fc27c110b drm/i915: Allow passing any known pointer to for_each_engine()
61f0a9367ceaa2db44cca01ba417c2da3977ca48 drm/i915: Rename the magic polymorphic macro __I915__

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 3/6] drm/i915: Extend magic to_i915() to work with drm_i915_gem_object
  2016-03-21  9:47   ` Daniel Vetter
@ 2016-03-21 13:01     ` Jani Nikula
  2016-03-21 17:44       ` Daniel Vetter
  0 siblings, 1 reply; 16+ messages in thread
From: Jani Nikula @ 2016-03-21 13:01 UTC (permalink / raw)
  To: Daniel Vetter, Chris Wilson; +Cc: intel-gfx

On Mon, 21 Mar 2016, Daniel Vetter <daniel@ffwll.ch> wrote:
> On Fri, Mar 18, 2016 at 09:16:21PM +0000, Chris Wilson wrote:
>> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
>
> I'm not sure whether this is too much magic ... If it gathers raving
> applause and support from others then I'm ok ;-)

I'm not thrilled, like I said in [1].

If you guys really insist on having this, please at least make all the
*other* macros require dev_priv, and use to_i915() at the call sites.
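
Concretely, something like this (a sketch of the convention, not a
patch):

	/* Feature macros accept only dev_priv ... */
	#define HAS_LLC(dev_priv)	((dev_priv)->info.has_llc)

	/* ... and the conversion happens explicitly at the call site: */
	static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
	{
		return HAS_LLC(to_i915(obj->base.dev)) ||
		       obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		       obj->cache_level != I915_CACHE_NONE;
	}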

BR,
Jani.



[1] http://mid.gmane.org/871t79hriq.fsf@intel.com

> -Daniel
>> ---
>>  drivers/gpu/drm/i915/i915_debugfs.c        |  7 +++---
>>  drivers/gpu/drm/i915/i915_drv.h            | 15 ++++++++-----
>>  drivers/gpu/drm/i915/i915_gem.c            | 34 +++++++++++++-----------------
>>  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 17 ++++++---------
>>  drivers/gpu/drm/i915/i915_gem_fence.c      |  8 +++----
>>  drivers/gpu/drm/i915/i915_gem_gtt.c        |  2 +-
>>  drivers/gpu/drm/i915/i915_gem_stolen.c     |  4 +---
>>  drivers/gpu/drm/i915/i915_gem_tiling.c     |  4 ++--
>>  drivers/gpu/drm/i915/i915_gem_userptr.c    |  4 ++--
>>  drivers/gpu/drm/i915/intel_ringbuffer.c    |  2 +-
>>  10 files changed, 46 insertions(+), 51 deletions(-)
>> 
>> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
>> index e0ba3e38000f..33ddcdf6d046 100644
>> --- a/drivers/gpu/drm/i915/i915_debugfs.c
>> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
>> @@ -128,7 +128,6 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
>>  static void
>>  describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
>>  {
>> -	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
>>  	struct intel_engine_cs *engine;
>>  	struct i915_vma *vma;
>>  	int pin_count = 0;
>> @@ -143,13 +142,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
>>  		   obj->base.size / 1024,
>>  		   obj->base.read_domains,
>>  		   obj->base.write_domain);
>> -	for_each_engine(engine, dev_priv, i)
>> +	for_each_engine(engine, obj, i)
>>  		seq_printf(m, "%x ",
>>  				i915_gem_request_get_seqno(obj->last_read_req[i]));
>>  	seq_printf(m, "] %x %x%s%s%s",
>>  		   i915_gem_request_get_seqno(obj->last_write_req),
>>  		   i915_gem_request_get_seqno(obj->last_fenced_req),
>> -		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
>> +		   i915_cache_level_str(to_i915(obj), obj->cache_level),
>>  		   obj->dirty ? " dirty" : "",
>>  		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
>>  	if (obj->base.name)
>> @@ -339,7 +338,7 @@ static int per_file_stats(int id, void *ptr, void *data)
>>  	if (obj->base.name || obj->base.dma_buf)
>>  		stats->shared += obj->base.size;
>>  
>> -	if (USES_FULL_PPGTT(obj->base.dev)) {
>> +	if (USES_FULL_PPGTT(obj)) {
>>  		list_for_each_entry(vma, &obj->vma_list, obj_link) {
>>  			struct i915_hw_ppgtt *ppgtt;
>>  
>> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
>> index 0c9fe00d3e83..92365f047e53 100644
>> --- a/drivers/gpu/drm/i915/i915_drv.h
>> +++ b/drivers/gpu/drm/i915/i915_drv.h
>> @@ -2186,6 +2186,12 @@ struct drm_i915_gem_object {
>>  };
>>  #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
>>  
>> +static inline struct drm_i915_private *
>> +__obj_to_i915(const struct drm_i915_gem_object *obj)
>> +{
>> +	return __to_i915(obj->base.dev);
>> +}
>> +
>>  void i915_gem_track_fb(struct drm_i915_gem_object *old,
>>  		       struct drm_i915_gem_object *new,
>>  		       unsigned frontbuffer_bits);
>> @@ -2455,6 +2461,8 @@ struct drm_i915_cmd_table {
>>  		__p = (struct drm_i915_private *)p; \
>>  	else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \
>>  		__p = __to_i915((struct drm_device *)p); \
>> +	else if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_gem_object)) \
>> +		__p = __obj_to_i915((struct drm_i915_gem_object *)p); \
>>  	else \
>>  		BUILD_BUG(); \
>>  	__p; \
>> @@ -3132,8 +3140,7 @@ i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
>>  bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
>>  
>>  /* Some GGTT VM helpers */
>> -#define i915_obj_to_ggtt(obj) \
>> -	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->ggtt.base)
>> +#define i915_obj_to_ggtt(obj) (&to_i915(obj)->ggtt.base)
>>  
>>  static inline struct i915_hw_ppgtt *
>>  i915_vm_to_ppgtt(struct i915_address_space *vm)
>> @@ -3282,9 +3289,7 @@ void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
>>  /* i915_gem_tiling.c */
>>  static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
>>  {
>> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
>> -
>> -	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
>> +	return to_i915(obj)->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
>>  		obj->tiling_mode != I915_TILING_NONE;
>>  }
>>  
>> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
>> index 8588c83abb35..710a6bbc985e 100644
>> --- a/drivers/gpu/drm/i915/i915_gem.c
>> +++ b/drivers/gpu/drm/i915/i915_gem.c
>> @@ -361,14 +361,12 @@ out:
>>  
>>  void *i915_gem_object_alloc(struct drm_device *dev)
>>  {
>> -	struct drm_i915_private *dev_priv = dev->dev_private;
>> -	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
>> +	return kmem_cache_zalloc(to_i915(dev)->objects, GFP_KERNEL);
>>  }
>>  
>>  void i915_gem_object_free(struct drm_i915_gem_object *obj)
>>  {
>> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
>> -	kmem_cache_free(dev_priv->objects, obj);
>> +	kmem_cache_free(to_i915(obj)->objects, obj);
>>  }
>>  
>>  static int
>> @@ -2028,7 +2026,7 @@ i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
>>  
>>  static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
>>  {
>> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
>> +	struct drm_i915_private *dev_priv = to_i915(obj);
>>  	int ret;
>>  
>>  	if (drm_vma_node_has_offset(&obj->base.vma_node))
>> @@ -2241,7 +2239,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
>>  static int
>>  i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
>>  {
>> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
>> +	struct drm_i915_private *dev_priv = to_i915(obj);
>>  	int page_count, i;
>>  	struct address_space *mapping;
>>  	struct sg_table *st;
>> @@ -2372,7 +2370,7 @@ err_pages:
>>  int
>>  i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
>>  {
>> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
>> +	struct drm_i915_private *dev_priv = to_i915(obj);
>>  	const struct drm_i915_gem_object_ops *ops = obj->ops;
>>  	int ret;
>>  
>> @@ -2449,8 +2447,7 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
>>  	 * so that we don't steal from recently used but inactive objects
>>  	 * (unless we are forced to ofc!)
>>  	 */
>> -	list_move_tail(&obj->global_list,
>> -		       &to_i915(obj->base.dev)->mm.bound_list);
>> +	list_move_tail(&obj->global_list, &to_i915(obj)->mm.bound_list);
>>  
>>  	list_for_each_entry(vma, &obj->vma_list, obj_link) {
>>  		if (!list_empty(&vma->vm_link))
>> @@ -3172,7 +3169,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
>>  		return 0;
>>  
>>  	if (!i915_semaphore_is_enabled(obj->base.dev)) {
>> -		struct drm_i915_private *i915 = to_i915(obj->base.dev);
>> +		struct drm_i915_private *i915 = to_i915(obj);
>>  		ret = __i915_wait_request(from_req,
>>  					  atomic_read(&i915->gpu_error.reset_counter),
>>  					  i915->mm.interruptible,
>> @@ -3312,7 +3309,7 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
>>  static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
>>  {
>>  	struct drm_i915_gem_object *obj = vma->obj;
>> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
>> +	struct drm_i915_private *dev_priv = to_i915(obj);
>>  	int ret;
>>  
>>  	if (list_empty(&vma->obj_link))
>> @@ -3772,7 +3769,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
>>  	vma = i915_gem_obj_to_ggtt(obj);
>>  	if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
>>  		list_move_tail(&vma->vm_link,
>> -			       &to_i915(obj->base.dev)->ggtt.base.inactive_list);
>> +			       &to_i915(obj)->ggtt.base.inactive_list);
>>  
>>  	return 0;
>>  }
>> @@ -4010,7 +4007,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
>>  	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
>>  	 */
>>  	ret = i915_gem_object_set_cache_level(obj,
>> -					      HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
>> +					      HAS_WT(obj) ? I915_CACHE_WT : I915_CACHE_NONE);
>>  	if (ret)
>>  		goto err_unpin_display;
>>  
>> @@ -4209,7 +4206,7 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
>>  		     (vma->node.start & (fence_alignment - 1)) == 0);
>>  
>>  	mappable = (vma->node.start + fence_size <=
>> -		    to_i915(obj->base.dev)->ggtt.mappable_end);
>> +		    to_i915(obj)->ggtt.mappable_end);
>>  
>>  	obj->map_and_fenceable = mappable && fenceable;
>>  }
>> @@ -4221,7 +4218,7 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
>>  		       uint32_t alignment,
>>  		       uint64_t flags)
>>  {
>> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
>> +	struct drm_i915_private *dev_priv = to_i915(obj);
>>  	struct i915_vma *vma;
>>  	unsigned bound;
>>  	int ret;
>> @@ -4456,7 +4453,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
>>  	obj->fence_reg = I915_FENCE_REG_NONE;
>>  	obj->madv = I915_MADV_WILLNEED;
>>  
>> -	i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
>> +	i915_gem_info_add_obj(to_i915(obj), obj->base.size);
>>  }
>>  
>>  static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
>> @@ -4545,8 +4542,7 @@ static bool discard_backing_storage(struct drm_i915_gem_object *obj)
>>  void i915_gem_free_object(struct drm_gem_object *gem_obj)
>>  {
>>  	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
>> -	struct drm_device *dev = obj->base.dev;
>> -	struct drm_i915_private *dev_priv = dev->dev_private;
>> +	struct drm_i915_private *dev_priv = to_i915(obj);
>>  	struct i915_vma *vma, *next;
>>  
>>  	intel_runtime_pm_get(dev_priv);
>> @@ -4647,7 +4643,7 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
>>  
>>  	list_del(&vma->obj_link);
>>  
>> -	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
>> +	kmem_cache_free(to_i915(vma->obj)->vmas, vma);
>>  }
>>  
>>  static void
>> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
>> index 374a0cb7a092..39ed403b9de3 100644
>> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
>> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
>> @@ -241,7 +241,7 @@ static void eb_destroy(struct eb_vmas *eb)
>>  
>>  static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
>>  {
>> -	return (HAS_LLC(obj->base.dev) ||
>> +	return (HAS_LLC(obj) ||
>>  		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
>>  		obj->cache_level != I915_CACHE_NONE);
>>  }
>> @@ -312,8 +312,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
>>  		   struct drm_i915_gem_relocation_entry *reloc,
>>  		   uint64_t target_offset)
>>  {
>> -	struct drm_device *dev = obj->base.dev;
>> -	struct drm_i915_private *dev_priv = dev->dev_private;
>> +	struct drm_i915_private *dev_priv = to_i915(obj);
>>  	uint64_t delta = relocation_target(reloc, target_offset);
>>  	uint64_t offset;
>>  	void __iomem *reloc_page;
>> @@ -334,7 +333,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
>>  					      offset & PAGE_MASK);
>>  	iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
>>  
>> -	if (INTEL_INFO(dev)->gen >= 8) {
>> +	if (INTEL_INFO(obj)->gen >= 8) {
>>  		offset += sizeof(uint32_t);
>>  
>>  		if (offset_in_page(offset) == 0) {
>> @@ -367,7 +366,6 @@ relocate_entry_clflush(struct drm_i915_gem_object *obj,
>>  		       struct drm_i915_gem_relocation_entry *reloc,
>>  		       uint64_t target_offset)
>>  {
>> -	struct drm_device *dev = obj->base.dev;
>>  	uint32_t page_offset = offset_in_page(reloc->offset);
>>  	uint64_t delta = relocation_target(reloc, target_offset);
>>  	char *vaddr;
>> @@ -381,7 +379,7 @@ relocate_entry_clflush(struct drm_i915_gem_object *obj,
>>  				reloc->offset >> PAGE_SHIFT));
>>  	clflush_write32(vaddr + page_offset, lower_32_bits(delta));
>>  
>> -	if (INTEL_INFO(dev)->gen >= 8) {
>> +	if (INTEL_INFO(obj)->gen >= 8) {
>>  		page_offset = offset_in_page(page_offset + sizeof(uint32_t));
>>  
>>  		if (page_offset == 0) {
>> @@ -403,7 +401,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
>>  				   struct eb_vmas *eb,
>>  				   struct drm_i915_gem_relocation_entry *reloc)
>>  {
>> -	struct drm_device *dev = obj->base.dev;
>>  	struct drm_gem_object *target_obj;
>>  	struct drm_i915_gem_object *target_i915_obj;
>>  	struct i915_vma *target_vma;
>> @@ -422,7 +419,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
>>  	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
>>  	 * pipe_control writes because the gpu doesn't properly redirect them
>>  	 * through the ppgtt for non_secure batchbuffers. */
>> -	if (unlikely(IS_GEN6(dev) &&
>> +	if (unlikely(IS_GEN6(obj) &&
>>  	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
>>  		ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
>>  				    PIN_GLOBAL);
>> @@ -464,7 +461,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
>>  
>>  	/* Check that the relocation address is valid... */
>>  	if (unlikely(reloc->offset >
>> -		obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
>> +		obj->base.size - (INTEL_INFO(obj)->gen >= 8 ? 8 : 4))) {
>>  		DRM_DEBUG("Relocation beyond object bounds: "
>>  			  "obj %p target %d offset %d size %d.\n",
>>  			  obj, reloc->target_handle,
>> @@ -672,7 +669,7 @@ need_reloc_mappable(struct i915_vma *vma)
>>  		return false;
>>  
>>  	/* See also use_cpu_reloc() */
>> -	if (HAS_LLC(vma->obj->base.dev))
>> +	if (HAS_LLC(vma->obj))
>>  		return false;
>>  
>>  	if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
>> diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
>> index 598198543dcd..1ef75bc2220c 100644
>> --- a/drivers/gpu/drm/i915/i915_gem_fence.c
>> +++ b/drivers/gpu/drm/i915/i915_gem_fence.c
>> @@ -229,7 +229,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
>>  					 struct drm_i915_fence_reg *fence,
>>  					 bool enable)
>>  {
>> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
>> +	struct drm_i915_private *dev_priv = to_i915(obj);
>>  	int reg = fence_number(dev_priv, fence);
>>  
>>  	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
>> @@ -286,7 +286,7 @@ i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
>>  int
>>  i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
>>  {
>> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
>> +	struct drm_i915_private *dev_priv = to_i915(obj);
>>  	struct drm_i915_fence_reg *fence;
>>  	int ret;
>>  
>> @@ -433,7 +433,7 @@ bool
>>  i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
>>  {
>>  	if (obj->fence_reg != I915_FENCE_REG_NONE) {
>> -		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
>> +		struct drm_i915_private *dev_priv = to_i915(obj);
>>  		struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
>>  
>>  		WARN_ON(!ggtt_vma ||
>> @@ -457,7 +457,7 @@ void
>>  i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
>>  {
>>  	if (obj->fence_reg != I915_FENCE_REG_NONE) {
>> -		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
>> +		struct drm_i915_private *dev_priv = to_i915(obj);
>>  		WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
>>  		dev_priv->fence_regs[obj->fence_reg].pin_count--;
>>  	}
>> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
>> index 0715bb74d306..6447a5f9661e 100644
>> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
>> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
>> @@ -3305,7 +3305,7 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
>>  	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
>>  		return ERR_PTR(-EINVAL);
>>  
>> -	vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
>> +	vma = kmem_cache_zalloc(to_i915(obj)->vmas, GFP_KERNEL);
>>  	if (vma == NULL)
>>  		return ERR_PTR(-ENOMEM);
>>  
>> diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
>> index de891c928b2f..224389d077c7 100644
>> --- a/drivers/gpu/drm/i915/i915_gem_stolen.c
>> +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
>> @@ -540,10 +540,8 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
>>  static void
>>  i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
>>  {
>> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
>> -
>>  	if (obj->stolen) {
>> -		i915_gem_stolen_remove_node(dev_priv, obj->stolen);
>> +		i915_gem_stolen_remove_node(to_i915(obj), obj->stolen);
>>  		kfree(obj->stolen);
>>  		obj->stolen = NULL;
>>  	}
>> diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
>> index 7410f6c962e7..bc4cb7f4fe80 100644
>> --- a/drivers/gpu/drm/i915/i915_gem_tiling.c
>> +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
>> @@ -122,10 +122,10 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
>>  	if (tiling_mode == I915_TILING_NONE)
>>  		return true;
>>  
>> -	if (INTEL_INFO(obj->base.dev)->gen >= 4)
>> +	if (INTEL_INFO(obj)->gen >= 4)
>>  		return true;
>>  
>> -	if (INTEL_INFO(obj->base.dev)->gen == 3) {
>> +	if (INTEL_INFO(obj)->gen == 3) {
>>  		if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
>>  			return false;
>>  	} else {
>> diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
>> index 54088a4d6498..f0e3ade59177 100644
>> --- a/drivers/gpu/drm/i915/i915_gem_userptr.c
>> +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
>> @@ -303,7 +303,7 @@ __i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
>>  static int
>>  i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
>>  {
>> -	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
>> +	struct drm_i915_private *dev_priv = to_i915(obj);
>>  	struct i915_mm_struct *mm;
>>  	int ret = 0;
>>  
>> @@ -376,7 +376,7 @@ i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
>>  
>>  	kref_put_mutex(&obj->userptr.mm->kref,
>>  		       __i915_mm_struct_free,
>> -		       &to_i915(obj->base.dev)->mm_lock);
>> +		       &to_i915(obj)->mm_lock);
>>  	obj->userptr.mm = NULL;
>>  }
>>  
>> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
>> index df0ef5bba8e5..f981bddc9bbf 100644
>> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
>> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
>> @@ -2069,7 +2069,7 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
>>  
>>  void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
>>  {
>> -	if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
>> +	if (HAS_LLC(ringbuf->obj) && !ringbuf->obj->stolen)
>>  		vunmap(ringbuf->virtual_start);
>>  	else
>>  		iounmap(ringbuf->virtual_start);
>> -- 
>> 2.8.0.rc3
>> 
>> _______________________________________________
>> Intel-gfx mailing list
>> Intel-gfx@lists.freedesktop.org
>> https://lists.freedesktop.org/mailman/listinfo/intel-gfx

-- 
Jani Nikula, Intel Open Source Technology Center
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 2/6] drm/i915: Allow passing any known pointer to for_each_engine()
  2016-03-18 21:16 ` [PATCH 2/6] drm/i915: Allow passing any known pointer to for_each_engine() Chris Wilson
@ 2016-03-21 15:44   ` Dave Gordon
  0 siblings, 0 replies; 16+ messages in thread
From: Dave Gordon @ 2016-03-21 15:44 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On 18/03/16 21:16, Chris Wilson wrote:
> Rather than require the user to grab a drm_i915_private, allow them to
> pass anything from which we know how to derive such a pointer using
> to_i915().
>
> Note this fixes a macro bug in for_each_engine_masked() which was not
> using its dev_priv__ parameter.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
> ---
>   drivers/gpu/drm/i915/i915_drv.h         | 8 ++++----
>   drivers/gpu/drm/i915/i915_gem_context.c | 4 ++--
>   drivers/gpu/drm/i915/intel_mocs.c       | 3 +--
>   3 files changed, 7 insertions(+), 8 deletions(-)

Hmm .. generally I'm quite keen on enhancing to_i915(), but I'm not sure 
about this iterator. The thing is that the array of engines is
associated with the device (or dev_priv/i915) as a whole, and not with
an object (etc). In particular, some objects can have associations with 
one or more specific engines, either permanently (e.g. the kernel 
context subobjects) or transiently (objects being used by workloads). It 
therefore seems counterintuitive to use such an object as the basis for 
iterating over all (initialised) engines; I might just as reasonably 
expect the macro to iterate over all (but only) the engines associated 
with the object (whatever that might mean in a particular case).

So I think for_each_engine() should continue to require the caller to 
provide a (struct drm_i915_private *) so that it's clear that we're 
operating on the whole device. But the partial simplification of
allowing to_i915(engine) rather than to_i915(engine->dev) is fine. And 
of course we still want the bug fix!
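
To illustrate (sketch only, not compile-tested, and assuming the later
patch in this series that teaches to_i915() about engine pointers):

    struct drm_i915_private *dev_priv = to_i915(engine);
    struct intel_engine_cs *signaller;
    int i;

    /* the iteration is explicitly over the whole device's engines */
    for_each_engine(signaller, dev_priv, i) {
        if (signaller == engine)
            continue;
        /* ... */
    }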

> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 8606e2c7db04..0c9fe00d3e83 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -1988,12 +1988,12 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
>   }
>
>   /* Iterate over initialised rings */
> -#define for_each_engine(ring__, dev_priv__, i__) \
> +#define for_each_engine(ring__, ptr__, i__) \
>   	for ((i__) = 0; (i__) < I915_NUM_ENGINES; (i__)++) \
> -		for_each_if ((((ring__) = &(dev_priv__)->engine[(i__)]), intel_engine_initialized((ring__))))
> +		for_each_if ((((ring__) = &to_i915(ptr__)->engine[(i__)]), intel_engine_initialized((ring__))))
>
> -#define for_each_engine_masked(engine__, dev_priv__, mask__) \
> -	for ((engine__) = &dev_priv->engine[0]; (engine__) < &dev_priv->engine[I915_NUM_ENGINES]; (engine__)++) \
> +#define for_each_engine_masked(engine__, ptr__, mask__) \
> +	for ((engine__) = &to_i915(ptr__)->engine[0]; (engine__) < &to_i915(ptr__)->engine[I915_NUM_ENGINES]; (engine__)++) \
>   		for_each_if (intel_engine_flag((engine__)) & (mask__) && intel_engine_initialized((engine__)))
>
>   enum hdmi_force_audio {
> diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
> index 394e525e55f1..a8afd0cee7f7 100644
> --- a/drivers/gpu/drm/i915/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/i915_gem_context.c
> @@ -553,7 +553,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
>
>   			intel_ring_emit(engine,
>   					MI_LOAD_REGISTER_IMM(num_rings));
> -			for_each_engine(signaller, to_i915(engine->dev), i) {
> +			for_each_engine(signaller, engine->dev, i) {

for_each_engine(signaller, to_i915(engine), i) ?

>   				if (signaller == engine)
>   					continue;
>
> @@ -582,7 +582,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
>
>   			intel_ring_emit(engine,
>   					MI_LOAD_REGISTER_IMM(num_rings));
> -			for_each_engine(signaller, to_i915(engine->dev), i) {
> +			for_each_engine(signaller, engine->dev, i) {

Also for_each_engine(signaller, to_i915(engine), i)

>   				if (signaller == engine)
>   					continue;
>
> diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
> index 3c725dde16ed..45200b93e9bb 100644
> --- a/drivers/gpu/drm/i915/intel_mocs.c
> +++ b/drivers/gpu/drm/i915/intel_mocs.c
> @@ -323,12 +323,11 @@ int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
>   	int ret;
>
>   	if (get_mocs_settings(req->engine->dev, &t)) {
> -		struct drm_i915_private *dev_priv = req->i915;
>   		struct intel_engine_cs *engine;
>   		enum intel_engine_id ring_id;
>
>   		/* Program the control registers */
> -		for_each_engine(engine, dev_priv, ring_id) {
> +		for_each_engine(engine, req->i915, ring_id) {

I'm quite happy with this one :)

.Dave.

>   			ret = emit_mocs_control_table(req, &t, ring_id);
>   			if (ret)
>   				return ret;
>

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

* Re: [PATCH 3/6] drm/i915: Extend magic to_i915() to work with drm_i915_gem_object
  2016-03-21 13:01     ` Jani Nikula
@ 2016-03-21 17:44       ` Daniel Vetter
  0 siblings, 0 replies; 16+ messages in thread
From: Daniel Vetter @ 2016-03-21 17:44 UTC (permalink / raw)
  To: Jani Nikula; +Cc: intel-gfx

On Mon, Mar 21, 2016 at 03:01:22PM +0200, Jani Nikula wrote:
> On Mon, 21 Mar 2016, Daniel Vetter <daniel@ffwll.ch> wrote:
> > On Fri, Mar 18, 2016 at 09:16:21PM +0000, Chris Wilson wrote:
> >> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> >
> > I'm not sure this is too much magic ... If it gathers raving applaus and
> > support from others then I'm ok ;-)
> 
> I'm not thrilled, like I said in [1].
> 
> If you guys really insist on having this, please at least make all the
> *other* macros require dev_priv, and use to_i915() at the call sites.

tbh personally leaning the exact same way, if someone really wants to hear
my bikeshed. I already dropped it in the first thread too: *_to_i915 seems
like a much more C-like approach.
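
Something like this, as an untested sketch (helper name purely
illustrative):

    /* one plainly named helper per source type, no type magic */
    static inline struct drm_i915_private *
    obj_to_i915(const struct drm_i915_gem_object *obj)
    {
        return obj->base.dev->dev_private;
    }

    /* the call site then spells out the conversion: */
    kmem_cache_free(obj_to_i915(obj)->objects, obj);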
-Daniel

> 
> BR,
> Jani.
> 
> 
> 
> [1] http://mid.gmane.org/871t79hriq.fsf@intel.com
> 
> > -Daniel
> >> ---
> >>  drivers/gpu/drm/i915/i915_debugfs.c        |  7 +++---
> >>  drivers/gpu/drm/i915/i915_drv.h            | 15 ++++++++-----
> >>  drivers/gpu/drm/i915/i915_gem.c            | 34 +++++++++++++-----------------
> >>  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 17 ++++++---------
> >>  drivers/gpu/drm/i915/i915_gem_fence.c      |  8 +++----
> >>  drivers/gpu/drm/i915/i915_gem_gtt.c        |  2 +-
> >>  drivers/gpu/drm/i915/i915_gem_stolen.c     |  4 +---
> >>  drivers/gpu/drm/i915/i915_gem_tiling.c     |  4 ++--
> >>  drivers/gpu/drm/i915/i915_gem_userptr.c    |  4 ++--
> >>  drivers/gpu/drm/i915/intel_ringbuffer.c    |  2 +-
> >>  10 files changed, 46 insertions(+), 51 deletions(-)
> >> 
> >> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> >> index e0ba3e38000f..33ddcdf6d046 100644
> >> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> >> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> >> @@ -128,7 +128,6 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
> >>  static void
> >>  describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
> >>  {
> >> -	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
> >>  	struct intel_engine_cs *engine;
> >>  	struct i915_vma *vma;
> >>  	int pin_count = 0;
> >> @@ -143,13 +142,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
> >>  		   obj->base.size / 1024,
> >>  		   obj->base.read_domains,
> >>  		   obj->base.write_domain);
> >> -	for_each_engine(engine, dev_priv, i)
> >> +	for_each_engine(engine, obj, i)
> >>  		seq_printf(m, "%x ",
> >>  				i915_gem_request_get_seqno(obj->last_read_req[i]));
> >>  	seq_printf(m, "] %x %x%s%s%s",
> >>  		   i915_gem_request_get_seqno(obj->last_write_req),
> >>  		   i915_gem_request_get_seqno(obj->last_fenced_req),
> >> -		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
> >> +		   i915_cache_level_str(to_i915(obj), obj->cache_level),
> >>  		   obj->dirty ? " dirty" : "",
> >>  		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
> >>  	if (obj->base.name)
> >> @@ -339,7 +338,7 @@ static int per_file_stats(int id, void *ptr, void *data)
> >>  	if (obj->base.name || obj->base.dma_buf)
> >>  		stats->shared += obj->base.size;
> >>  
> >> -	if (USES_FULL_PPGTT(obj->base.dev)) {
> >> +	if (USES_FULL_PPGTT(obj)) {
> >>  		list_for_each_entry(vma, &obj->vma_list, obj_link) {
> >>  			struct i915_hw_ppgtt *ppgtt;
> >>  
> >> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> >> index 0c9fe00d3e83..92365f047e53 100644
> >> --- a/drivers/gpu/drm/i915/i915_drv.h
> >> +++ b/drivers/gpu/drm/i915/i915_drv.h
> >> @@ -2186,6 +2186,12 @@ struct drm_i915_gem_object {
> >>  };
> >>  #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
> >>  
> >> +static inline struct drm_i915_private *
> >> +__obj_to_i915(const struct drm_i915_gem_object *obj)
> >> +{
> >> +	return __to_i915(obj->base.dev);
> >> +}
> >> +
> >>  void i915_gem_track_fb(struct drm_i915_gem_object *old,
> >>  		       struct drm_i915_gem_object *new,
> >>  		       unsigned frontbuffer_bits);
> >> @@ -2455,6 +2461,8 @@ struct drm_i915_cmd_table {
> >>  		__p = (struct drm_i915_private *)p; \
> >>  	else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \
> >>  		__p = __to_i915((struct drm_device *)p); \
> >> +	else if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_gem_object)) \
> >> +		__p = __obj_to_i915((struct drm_i915_gem_object *)p); \
> >>  	else \
> >>  		BUILD_BUG(); \
> >>  	__p; \
> >> @@ -3132,8 +3140,7 @@ i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
> >>  bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
> >>  
> >>  /* Some GGTT VM helpers */
> >> -#define i915_obj_to_ggtt(obj) \
> >> -	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->ggtt.base)
> >> +#define i915_obj_to_ggtt(obj) (&to_i915(obj)->ggtt.base)
> >>  
> >>  static inline struct i915_hw_ppgtt *
> >>  i915_vm_to_ppgtt(struct i915_address_space *vm)
> >> @@ -3282,9 +3289,7 @@ void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
> >>  /* i915_gem_tiling.c */
> >>  static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
> >>  {
> >> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> >> -
> >> -	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
> >> +	return to_i915(obj)->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
> >>  		obj->tiling_mode != I915_TILING_NONE;
> >>  }
> >>  
> >> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> >> index 8588c83abb35..710a6bbc985e 100644
> >> --- a/drivers/gpu/drm/i915/i915_gem.c
> >> +++ b/drivers/gpu/drm/i915/i915_gem.c
> >> @@ -361,14 +361,12 @@ out:
> >>  
> >>  void *i915_gem_object_alloc(struct drm_device *dev)
> >>  {
> >> -	struct drm_i915_private *dev_priv = dev->dev_private;
> >> -	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
> >> +	return kmem_cache_zalloc(to_i915(dev)->objects, GFP_KERNEL);
> >>  }
> >>  
> >>  void i915_gem_object_free(struct drm_i915_gem_object *obj)
> >>  {
> >> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> >> -	kmem_cache_free(dev_priv->objects, obj);
> >> +	kmem_cache_free(to_i915(obj)->objects, obj);
> >>  }
> >>  
> >>  static int
> >> @@ -2028,7 +2026,7 @@ i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
> >>  
> >>  static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
> >>  {
> >> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> >> +	struct drm_i915_private *dev_priv = to_i915(obj);
> >>  	int ret;
> >>  
> >>  	if (drm_vma_node_has_offset(&obj->base.vma_node))
> >> @@ -2241,7 +2239,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
> >>  static int
> >>  i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
> >>  {
> >> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> >> +	struct drm_i915_private *dev_priv = to_i915(obj);
> >>  	int page_count, i;
> >>  	struct address_space *mapping;
> >>  	struct sg_table *st;
> >> @@ -2372,7 +2370,7 @@ err_pages:
> >>  int
> >>  i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
> >>  {
> >> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> >> +	struct drm_i915_private *dev_priv = to_i915(obj);
> >>  	const struct drm_i915_gem_object_ops *ops = obj->ops;
> >>  	int ret;
> >>  
> >> @@ -2449,8 +2447,7 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
> >>  	 * so that we don't steal from recently used but inactive objects
> >>  	 * (unless we are forced to ofc!)
> >>  	 */
> >> -	list_move_tail(&obj->global_list,
> >> -		       &to_i915(obj->base.dev)->mm.bound_list);
> >> +	list_move_tail(&obj->global_list, &to_i915(obj)->mm.bound_list);
> >>  
> >>  	list_for_each_entry(vma, &obj->vma_list, obj_link) {
> >>  		if (!list_empty(&vma->vm_link))
> >> @@ -3172,7 +3169,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
> >>  		return 0;
> >>  
> >>  	if (!i915_semaphore_is_enabled(obj->base.dev)) {
> >> -		struct drm_i915_private *i915 = to_i915(obj->base.dev);
> >> +		struct drm_i915_private *i915 = to_i915(obj);
> >>  		ret = __i915_wait_request(from_req,
> >>  					  atomic_read(&i915->gpu_error.reset_counter),
> >>  					  i915->mm.interruptible,
> >> @@ -3312,7 +3309,7 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
> >>  static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
> >>  {
> >>  	struct drm_i915_gem_object *obj = vma->obj;
> >> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> >> +	struct drm_i915_private *dev_priv = to_i915(obj);
> >>  	int ret;
> >>  
> >>  	if (list_empty(&vma->obj_link))
> >> @@ -3772,7 +3769,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
> >>  	vma = i915_gem_obj_to_ggtt(obj);
> >>  	if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
> >>  		list_move_tail(&vma->vm_link,
> >> -			       &to_i915(obj->base.dev)->ggtt.base.inactive_list);
> >> +			       &to_i915(obj)->ggtt.base.inactive_list);
> >>  
> >>  	return 0;
> >>  }
> >> @@ -4010,7 +4007,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
> >>  	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
> >>  	 */
> >>  	ret = i915_gem_object_set_cache_level(obj,
> >> -					      HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
> >> +					      HAS_WT(obj) ? I915_CACHE_WT : I915_CACHE_NONE);
> >>  	if (ret)
> >>  		goto err_unpin_display;
> >>  
> >> @@ -4209,7 +4206,7 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
> >>  		     (vma->node.start & (fence_alignment - 1)) == 0);
> >>  
> >>  	mappable = (vma->node.start + fence_size <=
> >> -		    to_i915(obj->base.dev)->ggtt.mappable_end);
> >> +		    to_i915(obj)->ggtt.mappable_end);
> >>  
> >>  	obj->map_and_fenceable = mappable && fenceable;
> >>  }
> >> @@ -4221,7 +4218,7 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
> >>  		       uint32_t alignment,
> >>  		       uint64_t flags)
> >>  {
> >> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> >> +	struct drm_i915_private *dev_priv = to_i915(obj);
> >>  	struct i915_vma *vma;
> >>  	unsigned bound;
> >>  	int ret;
> >> @@ -4456,7 +4453,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
> >>  	obj->fence_reg = I915_FENCE_REG_NONE;
> >>  	obj->madv = I915_MADV_WILLNEED;
> >>  
> >> -	i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
> >> +	i915_gem_info_add_obj(to_i915(obj), obj->base.size);
> >>  }
> >>  
> >>  static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
> >> @@ -4545,8 +4542,7 @@ static bool discard_backing_storage(struct drm_i915_gem_object *obj)
> >>  void i915_gem_free_object(struct drm_gem_object *gem_obj)
> >>  {
> >>  	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
> >> -	struct drm_device *dev = obj->base.dev;
> >> -	struct drm_i915_private *dev_priv = dev->dev_private;
> >> +	struct drm_i915_private *dev_priv = to_i915(obj);
> >>  	struct i915_vma *vma, *next;
> >>  
> >>  	intel_runtime_pm_get(dev_priv);
> >> @@ -4647,7 +4643,7 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
> >>  
> >>  	list_del(&vma->obj_link);
> >>  
> >> -	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
> >> +	kmem_cache_free(to_i915(vma->obj)->vmas, vma);
> >>  }
> >>  
> >>  static void
> >> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> >> index 374a0cb7a092..39ed403b9de3 100644
> >> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> >> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> >> @@ -241,7 +241,7 @@ static void eb_destroy(struct eb_vmas *eb)
> >>  
> >>  static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
> >>  {
> >> -	return (HAS_LLC(obj->base.dev) ||
> >> +	return (HAS_LLC(obj) ||
> >>  		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
> >>  		obj->cache_level != I915_CACHE_NONE);
> >>  }
> >> @@ -312,8 +312,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
> >>  		   struct drm_i915_gem_relocation_entry *reloc,
> >>  		   uint64_t target_offset)
> >>  {
> >> -	struct drm_device *dev = obj->base.dev;
> >> -	struct drm_i915_private *dev_priv = dev->dev_private;
> >> +	struct drm_i915_private *dev_priv = to_i915(obj);
> >>  	uint64_t delta = relocation_target(reloc, target_offset);
> >>  	uint64_t offset;
> >>  	void __iomem *reloc_page;
> >> @@ -334,7 +333,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
> >>  					      offset & PAGE_MASK);
> >>  	iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
> >>  
> >> -	if (INTEL_INFO(dev)->gen >= 8) {
> >> +	if (INTEL_INFO(obj)->gen >= 8) {
> >>  		offset += sizeof(uint32_t);
> >>  
> >>  		if (offset_in_page(offset) == 0) {
> >> @@ -367,7 +366,6 @@ relocate_entry_clflush(struct drm_i915_gem_object *obj,
> >>  		       struct drm_i915_gem_relocation_entry *reloc,
> >>  		       uint64_t target_offset)
> >>  {
> >> -	struct drm_device *dev = obj->base.dev;
> >>  	uint32_t page_offset = offset_in_page(reloc->offset);
> >>  	uint64_t delta = relocation_target(reloc, target_offset);
> >>  	char *vaddr;
> >> @@ -381,7 +379,7 @@ relocate_entry_clflush(struct drm_i915_gem_object *obj,
> >>  				reloc->offset >> PAGE_SHIFT));
> >>  	clflush_write32(vaddr + page_offset, lower_32_bits(delta));
> >>  
> >> -	if (INTEL_INFO(dev)->gen >= 8) {
> >> +	if (INTEL_INFO(obj)->gen >= 8) {
> >>  		page_offset = offset_in_page(page_offset + sizeof(uint32_t));
> >>  
> >>  		if (page_offset == 0) {
> >> @@ -403,7 +401,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
> >>  				   struct eb_vmas *eb,
> >>  				   struct drm_i915_gem_relocation_entry *reloc)
> >>  {
> >> -	struct drm_device *dev = obj->base.dev;
> >>  	struct drm_gem_object *target_obj;
> >>  	struct drm_i915_gem_object *target_i915_obj;
> >>  	struct i915_vma *target_vma;
> >> @@ -422,7 +419,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
> >>  	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
> >>  	 * pipe_control writes because the gpu doesn't properly redirect them
> >>  	 * through the ppgtt for non_secure batchbuffers. */
> >> -	if (unlikely(IS_GEN6(dev) &&
> >> +	if (unlikely(IS_GEN6(obj) &&
> >>  	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
> >>  		ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
> >>  				    PIN_GLOBAL);
> >> @@ -464,7 +461,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
> >>  
> >>  	/* Check that the relocation address is valid... */
> >>  	if (unlikely(reloc->offset >
> >> -		obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
> >> +		obj->base.size - (INTEL_INFO(obj)->gen >= 8 ? 8 : 4))) {
> >>  		DRM_DEBUG("Relocation beyond object bounds: "
> >>  			  "obj %p target %d offset %d size %d.\n",
> >>  			  obj, reloc->target_handle,
> >> @@ -672,7 +669,7 @@ need_reloc_mappable(struct i915_vma *vma)
> >>  		return false;
> >>  
> >>  	/* See also use_cpu_reloc() */
> >> -	if (HAS_LLC(vma->obj->base.dev))
> >> +	if (HAS_LLC(vma->obj))
> >>  		return false;
> >>  
> >>  	if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
> >> diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
> >> index 598198543dcd..1ef75bc2220c 100644
> >> --- a/drivers/gpu/drm/i915/i915_gem_fence.c
> >> +++ b/drivers/gpu/drm/i915/i915_gem_fence.c
> >> @@ -229,7 +229,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
> >>  					 struct drm_i915_fence_reg *fence,
> >>  					 bool enable)
> >>  {
> >> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> >> +	struct drm_i915_private *dev_priv = to_i915(obj);
> >>  	int reg = fence_number(dev_priv, fence);
> >>  
> >>  	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
> >> @@ -286,7 +286,7 @@ i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
> >>  int
> >>  i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
> >>  {
> >> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> >> +	struct drm_i915_private *dev_priv = to_i915(obj);
> >>  	struct drm_i915_fence_reg *fence;
> >>  	int ret;
> >>  
> >> @@ -433,7 +433,7 @@ bool
> >>  i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
> >>  {
> >>  	if (obj->fence_reg != I915_FENCE_REG_NONE) {
> >> -		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> >> +		struct drm_i915_private *dev_priv = to_i915(obj);
> >>  		struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
> >>  
> >>  		WARN_ON(!ggtt_vma ||
> >> @@ -457,7 +457,7 @@ void
> >>  i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
> >>  {
> >>  	if (obj->fence_reg != I915_FENCE_REG_NONE) {
> >> -		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> >> +		struct drm_i915_private *dev_priv = to_i915(obj);
> >>  		WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
> >>  		dev_priv->fence_regs[obj->fence_reg].pin_count--;
> >>  	}
> >> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> >> index 0715bb74d306..6447a5f9661e 100644
> >> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> >> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> >> @@ -3305,7 +3305,7 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
> >>  	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
> >>  		return ERR_PTR(-EINVAL);
> >>  
> >> -	vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
> >> +	vma = kmem_cache_zalloc(to_i915(obj)->vmas, GFP_KERNEL);
> >>  	if (vma == NULL)
> >>  		return ERR_PTR(-ENOMEM);
> >>  
> >> diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
> >> index de891c928b2f..224389d077c7 100644
> >> --- a/drivers/gpu/drm/i915/i915_gem_stolen.c
> >> +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
> >> @@ -540,10 +540,8 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
> >>  static void
> >>  i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
> >>  {
> >> -	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> >> -
> >>  	if (obj->stolen) {
> >> -		i915_gem_stolen_remove_node(dev_priv, obj->stolen);
> >> +		i915_gem_stolen_remove_node(to_i915(obj), obj->stolen);
> >>  		kfree(obj->stolen);
> >>  		obj->stolen = NULL;
> >>  	}
> >> diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
> >> index 7410f6c962e7..bc4cb7f4fe80 100644
> >> --- a/drivers/gpu/drm/i915/i915_gem_tiling.c
> >> +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
> >> @@ -122,10 +122,10 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
> >>  	if (tiling_mode == I915_TILING_NONE)
> >>  		return true;
> >>  
> >> -	if (INTEL_INFO(obj->base.dev)->gen >= 4)
> >> +	if (INTEL_INFO(obj)->gen >= 4)
> >>  		return true;
> >>  
> >> -	if (INTEL_INFO(obj->base.dev)->gen == 3) {
> >> +	if (INTEL_INFO(obj)->gen == 3) {
> >>  		if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
> >>  			return false;
> >>  	} else {
> >> diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
> >> index 54088a4d6498..f0e3ade59177 100644
> >> --- a/drivers/gpu/drm/i915/i915_gem_userptr.c
> >> +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
> >> @@ -303,7 +303,7 @@ __i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
> >>  static int
> >>  i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
> >>  {
> >> -	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
> >> +	struct drm_i915_private *dev_priv = to_i915(obj);
> >>  	struct i915_mm_struct *mm;
> >>  	int ret = 0;
> >>  
> >> @@ -376,7 +376,7 @@ i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
> >>  
> >>  	kref_put_mutex(&obj->userptr.mm->kref,
> >>  		       __i915_mm_struct_free,
> >> -		       &to_i915(obj->base.dev)->mm_lock);
> >> +		       &to_i915(obj)->mm_lock);
> >>  	obj->userptr.mm = NULL;
> >>  }
> >>  
> >> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> >> index df0ef5bba8e5..f981bddc9bbf 100644
> >> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> >> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> >> @@ -2069,7 +2069,7 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
> >>  
> >>  void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
> >>  {
> >> -	if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
> >> +	if (HAS_LLC(ringbuf->obj) && !ringbuf->obj->stolen)
> >>  		vunmap(ringbuf->virtual_start);
> >>  	else
> >>  		iounmap(ringbuf->virtual_start);
> >> -- 
> >> 2.8.0.rc3
> >> 
> >> _______________________________________________
> >> Intel-gfx mailing list
> >> Intel-gfx@lists.freedesktop.org
> >> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
> 
> -- 
> Jani Nikula, Intel Open Source Technology Center

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

* Re: [PATCH 4/6] drm/i915: Use to_i915() instead of guc_to_i915()
  2016-03-18 21:16 ` [PATCH 4/6] drm/i915: Use to_i915() instead of guc_to_i915() Chris Wilson
@ 2016-03-22 10:55   ` Dave Gordon
  2016-03-22 11:04     ` Chris Wilson
  0 siblings, 1 reply; 16+ messages in thread
From: Dave Gordon @ 2016-03-22 10:55 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On 18/03/16 21:16, Chris Wilson wrote:
> The convenience of saving a few characters by using a consistent
> interface to obtain our drm_i915_private struct from intel_guc.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/i915_drv.h            |  4 +++-
>   drivers/gpu/drm/i915/i915_guc_submission.c | 23 ++++++++++-------------
>   2 files changed, 13 insertions(+), 14 deletions(-)

Generally: I don't mind this, though I don't think there's any huge 
advantage ...

> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 92365f047e53..d5fa42c96110 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -1982,7 +1982,7 @@ static inline struct drm_i915_private *dev_to_i915(struct device *dev)
>   	return __to_i915(dev_get_drvdata(dev));
>   }
>
> -static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
> +static inline struct drm_i915_private *__guc_to_i915(struct intel_guc *guc)
>   {
>   	return container_of(guc, struct drm_i915_private, guc);
>   }
> @@ -2463,6 +2463,8 @@ struct drm_i915_cmd_table {
>   		__p = __to_i915((struct drm_device *)p); \
>   	else if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_gem_object)) \
>   		__p = __obj_to_i915((struct drm_i915_gem_object *)p); \
> +	else if (__builtin_types_compatible_p(typeof(*p), struct intel_guc)) \
> +		__p = __guc_to_i915((struct intel_guc *)p); \

... so yes, this is OK ...

>   	else \
>   		BUILD_BUG(); \
>   	__p; \
> diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
> index ae1f58d073f2..850aee78c40f 100644
> --- a/drivers/gpu/drm/i915/i915_guc_submission.c
> +++ b/drivers/gpu/drm/i915/i915_guc_submission.c
> @@ -77,7 +77,7 @@ static inline bool host2guc_action_response(struct drm_i915_private *dev_priv,
>
>   static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
>   {
> -	struct drm_i915_private *dev_priv = guc_to_i915(guc);
> +	struct drm_i915_private *dev_priv = to_i915(guc);

... and these ...

>   	u32 status;
>   	int i;
>   	int ret;
> @@ -152,7 +152,7 @@ static int host2guc_release_doorbell(struct intel_guc *guc,
>   static int host2guc_sample_forcewake(struct intel_guc *guc,
>   				     struct i915_guc_client *client)
>   {
> -	struct drm_i915_private *dev_priv = guc_to_i915(guc);
> +	struct drm_i915_private *dev_priv = to_i915(guc);
>   	struct drm_device *dev = dev_priv->dev;
>   	u32 data[2];
>
> @@ -254,7 +254,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
>   static void guc_disable_doorbell(struct intel_guc *guc,
>   				 struct i915_guc_client *client)
>   {
> -	struct drm_i915_private *dev_priv = guc_to_i915(guc);
> +	struct drm_i915_private *dev_priv = to_i915(guc);
>   	struct guc_doorbell_info *doorbell;
>   	void *base;
>   	i915_reg_t drbreg = GEN8_DRBREGL(client->doorbell_id);
> @@ -376,7 +376,6 @@ static void guc_init_proc_desc(struct intel_guc *guc,
>   static void guc_init_ctx_desc(struct intel_guc *guc,
>   			      struct i915_guc_client *client)
>   {
> -	struct drm_i915_private *dev_priv = guc_to_i915(guc);
>   	struct intel_engine_cs *engine;
>   	struct intel_context *ctx = client->owner;
>   	struct guc_context_desc desc;
> @@ -390,7 +389,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
>   	desc.priority = client->priority;
>   	desc.db_id = client->doorbell_id;
>
> -	for_each_engine(engine, dev_priv, i) {
> +	for_each_engine(engine, guc, i) {

... but not this (see earlier mail), although the objection is less
here because the GuC is singular and associated with all engines, so
there isn't much else that we could expect to iterate over.

OTOH this may actually be less efficient, because the conversion of the 
"struct intel_guc" to the thing(s) actually needed for the iteration 
will (or at least may) occur on each iteration of the loop. Generally 
I'd prefer to pull all such conversions out to the head of the function, 
as the original code did.
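
E.g. something like this (sketch, not compile-tested):

    /* convert once, up front, instead of on every iteration */
    struct drm_i915_private *dev_priv = to_i915(guc);
    struct intel_engine_cs *engine;
    int i;

    for_each_engine(engine, dev_priv, i) {
        /* ... per-engine descriptor setup as below ... */
    }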

>   		struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id];
>   		struct drm_i915_gem_object *obj;
>   		uint64_t ctx_desc;
> @@ -772,7 +771,6 @@ err:
>
>   static void guc_create_log(struct intel_guc *guc)
>   {
> -	struct drm_i915_private *dev_priv = guc_to_i915(guc);
>   	struct drm_i915_gem_object *obj;
>   	unsigned long offset;
>   	uint32_t size, flags;
> @@ -791,7 +789,7 @@ static void guc_create_log(struct intel_guc *guc)
>
>   	obj = guc->log_obj;
>   	if (!obj) {
> -		obj = gem_allocate_guc_obj(dev_priv->dev, size);
> +		obj = gem_allocate_guc_obj(to_i915(guc)->dev, size);

Should we have to_dev(any) as well as to_i915()?
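
E.g. (purely hypothetical sketch; no such helper exists in this
series):

    #define to_dev(p) (to_i915(p)->dev)

    obj = gem_allocate_guc_obj(to_dev(guc), size);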

>   		if (!obj) {
>   			/* logging will be off */
>   			i915.guc_log_level = -1;
> @@ -835,7 +833,6 @@ static void init_guc_policies(struct guc_policies *policies)
>
>   static void guc_create_ads(struct intel_guc *guc)
>   {
> -	struct drm_i915_private *dev_priv = guc_to_i915(guc);

This dev_priv is used more than once (in fact, it's used in a 
for_each_engine() loop below), so I'd think it worth keeping -- and 
therefore none of the changes below would be applicable.

.Dave.

>   	struct drm_i915_gem_object *obj;
>   	struct guc_ads *ads;
>   	struct guc_policies *policies;
> @@ -851,7 +848,7 @@ static void guc_create_ads(struct intel_guc *guc)
>
>   	obj = guc->ads_obj;
>   	if (!obj) {
> -		obj = gem_allocate_guc_obj(dev_priv->dev, PAGE_ALIGN(size));
> +		obj = gem_allocate_guc_obj(to_i915(guc)->dev, PAGE_ALIGN(size));
>   		if (!obj)
>   			return;
>
> @@ -868,10 +865,10 @@ static void guc_create_ads(struct intel_guc *guc)
>   	 * so its address won't change after we've told the GuC where
>   	 * to find it.
>   	 */
> -	engine = &dev_priv->engine[RCS];
> -	ads->golden_context_lrca = engine->status_page.gfx_addr;
> +	ads->golden_context_lrca =
> +		to_i915(guc)->engine[RCS].status_page.gfx_addr;
>
> -	for_each_engine(engine, dev_priv, i)
> +	for_each_engine(engine, guc, i)
>   		ads->eng_state_size[engine->guc_id] = intel_lr_context_size(engine);
>
>   	/* GuC scheduling policies */
> @@ -884,7 +881,7 @@ static void guc_create_ads(struct intel_guc *guc)
>   	/* MMIO reg state */
>   	reg_state = (void *)policies + sizeof(struct guc_policies);
>
> -	for_each_engine(engine, dev_priv, i) {
> +	for_each_engine(engine, guc, i) {
>   		reg_state->mmio_white_list[engine->guc_id].mmio_start =
>   			engine->mmio_base + GUC_MMIO_WHITE_LIST_START;

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

* Re: [PATCH 4/6] drm/i915: Use to_i915() instead of guc_to_i915()
  2016-03-22 10:55   ` Dave Gordon
@ 2016-03-22 11:04     ` Chris Wilson
  0 siblings, 0 replies; 16+ messages in thread
From: Chris Wilson @ 2016-03-22 11:04 UTC (permalink / raw)
  To: Dave Gordon; +Cc: intel-gfx

On Tue, Mar 22, 2016 at 10:55:40AM +0000, Dave Gordon wrote:
> On 18/03/16 21:16, Chris Wilson wrote:
> >-	for_each_engine(engine, dev_priv, i) {
> >+	for_each_engine(engine, guc, i) {
> 
> ... but not this (see earlier mail), although the objection is less
> here because the GuC is singular and associated with all engines, so
> there isn't much else that we could expect to iterate over.
> 
> OTOH this may actually be less efficient, because the conversion of
> the "struct intel_guc" to the thing(s) actually needed for the
> iteration will (or at least may) occur on each iteration of the
> loop. Generally I'd prefer to pull all such conversions out to the
> head of the function, as the original code did.

It's an init func; the question is simply which is more readable.

> >  		struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id];
> >  		struct drm_i915_gem_object *obj;
> >  		uint64_t ctx_desc;
> >@@ -772,7 +771,6 @@ err:
> >
> >  static void guc_create_log(struct intel_guc *guc)
> >  {
> >-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
> >  	struct drm_i915_gem_object *obj;
> >  	unsigned long offset;
> >  	uint32_t size, flags;
> >@@ -791,7 +789,7 @@ static void guc_create_log(struct intel_guc *guc)
> >
> >  	obj = guc->log_obj;
> >  	if (!obj) {
> >-		obj = gem_allocate_guc_obj(dev_priv->dev, size);
> >+		obj = gem_allocate_guc_obj(to_i915(guc)->dev, size);
> 
> Should we have to_dev(any) as well as to_i915()?
> 
> >  		if (!obj) {
> >  			/* logging will be off */
> >  			i915.guc_log_level = -1;
> >@@ -835,7 +833,6 @@ static void init_guc_policies(struct guc_policies *policies)
> >
> >  static void guc_create_ads(struct intel_guc *guc)
> >  {
> >-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
> 
> This dev_priv is used more than once (in fact, it's used in a
> for_each_engine() loop below), so I'd think it worth keeping -- and
> therefore none of the changes below would be applicable.

There's a later change to fix that, since these functions are atrocious
in terms of layering, naming and parameter abuse.
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

* [PATCH 3/6] drm/i915: Extend magic to_i915() to work with drm_i915_gem_object
  2016-04-15 17:45 Polymorphic to_i915() Chris Wilson
@ 2016-04-15 17:46 ` Chris Wilson
  0 siblings, 0 replies; 16+ messages in thread
From: Chris Wilson @ 2016-04-15 17:46 UTC (permalink / raw)
  To: intel-gfx

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_debugfs.c        |  7 ++---
 drivers/gpu/drm/i915/i915_drv.h            | 18 ++++++-----
 drivers/gpu/drm/i915/i915_gem.c            | 48 +++++++++++-------------------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 21 +++++--------
 drivers/gpu/drm/i915/i915_gem_fence.c      |  8 ++---
 drivers/gpu/drm/i915/i915_gem_gtt.c        |  5 ++--
 drivers/gpu/drm/i915/i915_gem_shrinker.c   |  2 +-
 drivers/gpu/drm/i915/i915_gem_stolen.c     |  4 +--
 drivers/gpu/drm/i915/i915_gem_tiling.c     |  4 +--
 drivers/gpu/drm/i915/i915_gem_userptr.c    |  6 ++--
 drivers/gpu/drm/i915/intel_ringbuffer.c    |  2 +-
 11 files changed, 54 insertions(+), 71 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 931dc6086f3b..f5f9f48f8001 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -135,7 +135,6 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
 static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	struct intel_engine_cs *engine;
 	struct i915_vma *vma;
 	int pin_count = 0;
@@ -153,13 +152,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		   obj->base.size / 1024,
 		   obj->base.read_domains,
 		   obj->base.write_domain);
-	for_each_engine_id(engine, dev_priv, id)
+	for_each_engine_id(engine, obj, id)
 		seq_printf(m, "%x ",
 				i915_gem_request_get_seqno(obj->last_read_req[id]));
 	seq_printf(m, "] %x %x%s%s%s",
 		   i915_gem_request_get_seqno(obj->last_write_req),
 		   i915_gem_request_get_seqno(obj->last_fenced_req),
-		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
+		   i915_cache_level_str(to_i915(obj), obj->cache_level),
 		   obj->dirty ? " dirty" : "",
 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 	if (obj->base.name)
@@ -349,7 +348,7 @@ static int per_file_stats(int id, void *ptr, void *data)
 	if (obj->base.name || obj->base.dma_buf)
 		stats->shared += obj->base.size;
 
-	if (USES_FULL_PPGTT(obj->base.dev)) {
+	if (USES_FULL_PPGTT(obj)) {
 		list_for_each_entry(vma, &obj->vma_list, obj_link) {
 			struct i915_hw_ppgtt *ppgtt;
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0a62354ba53d..282d89d097ae 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2229,6 +2229,12 @@ struct drm_i915_gem_object {
 };
 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
 
+static inline struct drm_i915_private *
+__obj_to_i915(const struct drm_i915_gem_object *obj)
+{
+	return __to_i915(obj->base.dev);
+}
+
 void i915_gem_track_fb(struct drm_i915_gem_object *old,
 		       struct drm_i915_gem_object *new,
 		       unsigned frontbuffer_bits);
@@ -2498,6 +2504,8 @@ struct drm_i915_cmd_table {
 		__p = (struct drm_i915_private *)p; \
 	else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \
 		__p = __to_i915((struct drm_device *)p); \
+	else if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_gem_object)) \
+		__p = __obj_to_i915((struct drm_i915_gem_object *)p); \
 	else \
 		BUILD_BUG(); \
 	__p; \
@@ -3254,8 +3262,7 @@ static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
 static inline unsigned long
 i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct i915_ggtt *ggtt = &to_i915(obj)->ggtt;
 
 	return i915_gem_obj_size(obj, &ggtt->base);
 }
@@ -3265,8 +3272,7 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
 		      uint32_t alignment,
 		      unsigned flags)
 {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct i915_ggtt *ggtt = &to_i915(obj)->ggtt;
 
 	return i915_gem_object_pin(obj, &ggtt->base,
 				   alignment, flags | PIN_GLOBAL);
@@ -3392,9 +3398,7 @@ void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
 /* i915_gem_tiling.c */
 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-
-	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
+	return to_i915(obj)->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
 		obj->tiling_mode != I915_TILING_NONE;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2a0cb9b17ccd..bbd7bfadbaef 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -356,14 +356,12 @@ out:
 
 void *i915_gem_object_alloc(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
+	return kmem_cache_zalloc(to_i915(dev)->objects, GFP_KERNEL);
 }
 
 void i915_gem_object_free(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-	kmem_cache_free(dev_priv->objects, obj);
+	kmem_cache_free(to_i915(obj)->objects, obj);
 }
 
 static int
@@ -2019,7 +2017,7 @@ i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
 
 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(obj);
 	int ret;
 
 	if (drm_vma_node_has_offset(&obj->base.vma_node))
@@ -2240,7 +2238,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 static int
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(obj);
 	int page_count, i;
 	struct address_space *mapping;
 	struct sg_table *st;
@@ -2375,7 +2373,7 @@ err_pages:
 int
 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(obj);
 	const struct drm_i915_gem_object_ops *ops = obj->ops;
 	int ret;
 
@@ -2495,8 +2493,7 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
 	 * so that we don't steal from recently used but inactive objects
 	 * (unless we are forced to ofc!)
 	 */
-	list_move_tail(&obj->global_list,
-		       &to_i915(obj->base.dev)->mm.bound_list);
+	list_move_tail(&obj->global_list, &to_i915(obj)->mm.bound_list);
 
 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 		if (!list_empty(&vma->vm_link))
@@ -3218,7 +3215,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 		return 0;
 
 	if (!i915_semaphore_is_enabled(obj->base.dev)) {
-		struct drm_i915_private *i915 = to_i915(obj->base.dev);
+		struct drm_i915_private *i915 = to_i915(obj);
 		ret = __i915_wait_request(from_req,
 					  i915->mm.interruptible,
 					  NULL,
@@ -3354,7 +3351,7 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
 static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(obj);
 	int ret;
 
 	if (list_empty(&vma->obj_link))
@@ -3758,9 +3755,6 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
 int
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	uint32_t old_write_domain, old_read_domains;
 	struct i915_vma *vma;
 	int ret;
@@ -3815,7 +3809,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	vma = i915_gem_obj_to_ggtt(obj);
 	if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
 		list_move_tail(&vma->vm_link,
-			       &ggtt->base.inactive_list);
+			       &to_i915(obj)->ggtt.base.inactive_list);
 
 	return 0;
 }
@@ -3836,7 +3830,6 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 				    enum i915_cache_level cache_level)
 {
-	struct drm_device *dev = obj->base.dev;
 	struct i915_vma *vma, *next;
 	bool bound = false;
 	int ret = 0;
@@ -3882,7 +3875,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 		if (ret)
 			return ret;
 
-		if (!HAS_LLC(dev) && cache_level != I915_CACHE_NONE) {
+		if (!HAS_LLC(obj) && cache_level != I915_CACHE_NONE) {
 			/* Access to snoopable pages through the GTT is
 			 * incoherent and on some machines causes a hard
 			 * lockup. Relinquish the CPU mmaping to force
@@ -4053,7 +4046,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
 	 */
 	ret = i915_gem_object_set_cache_level(obj,
-					      HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
+					      HAS_WT(obj) ? I915_CACHE_WT : I915_CACHE_NONE);
 	if (ret)
 		goto err_unpin_display;
 
@@ -4250,7 +4243,7 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
 		     (vma->node.start & (fence_alignment - 1)) == 0);
 
 	mappable = (vma->node.start + fence_size <=
-		    to_i915(obj->base.dev)->ggtt.mappable_end);
+		    to_i915(obj)->ggtt.mappable_end);
 
 	obj->map_and_fenceable = mappable && fenceable;
 }
@@ -4262,7 +4255,7 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
 		       uint32_t alignment,
 		       uint64_t flags)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(obj);
 	struct i915_vma *vma;
 	unsigned bound;
 	int ret;
@@ -4344,9 +4337,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 			 uint32_t alignment,
 			 uint64_t flags)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct i915_ggtt *ggtt = &to_i915(obj)->ggtt;
 
 	BUG_ON(!view);
 
@@ -4497,7 +4488,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 	obj->fence_reg = I915_FENCE_REG_NONE;
 	obj->madv = I915_MADV_WILLNEED;
 
-	i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
+	i915_gem_info_add_obj(to_i915(obj), obj->base.size);
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
@@ -4590,8 +4581,7 @@ static bool discard_backing_storage(struct drm_i915_gem_object *obj)
 void i915_gem_free_object(struct drm_gem_object *gem_obj)
 {
 	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(obj);
 	struct i915_vma *vma, *next;
 
 	intel_runtime_pm_get(dev_priv);
@@ -4666,9 +4656,7 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
 					   const struct i915_ggtt_view *view)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct i915_ggtt *ggtt = &to_i915(obj)->ggtt;
 	struct i915_vma *vma;
 
 	BUG_ON(!view);
@@ -4693,7 +4681,7 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
 
 	list_del(&vma->obj_link);
 
-	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
+	kmem_cache_free(to_i915(vma->obj)->vmas, vma);
 }
 
 static void
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 6f4f2a6cdf93..86911dcafb6a 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -241,7 +241,7 @@ static void eb_destroy(struct eb_vmas *eb)
 
 static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
 {
-	return (HAS_LLC(obj->base.dev) ||
+	return (HAS_LLC(obj) ||
 		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
 		obj->cache_level != I915_CACHE_NONE);
 }
@@ -276,7 +276,6 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj,
 		   struct drm_i915_gem_relocation_entry *reloc,
 		   uint64_t target_offset)
 {
-	struct drm_device *dev = obj->base.dev;
 	uint32_t page_offset = offset_in_page(reloc->offset);
 	uint64_t delta = relocation_target(reloc, target_offset);
 	char *vaddr;
@@ -290,7 +289,7 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj,
 				reloc->offset >> PAGE_SHIFT));
 	*(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);
 
-	if (INTEL_INFO(dev)->gen >= 8) {
+	if (INTEL_INFO(obj)->gen >= 8) {
 		page_offset = offset_in_page(page_offset + sizeof(uint32_t));
 
 		if (page_offset == 0) {
@@ -312,9 +311,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
 		   struct drm_i915_gem_relocation_entry *reloc,
 		   uint64_t target_offset)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct i915_ggtt *ggtt = &to_i915(obj)->ggtt;
 	uint64_t delta = relocation_target(reloc, target_offset);
 	uint64_t offset;
 	void __iomem *reloc_page;
@@ -335,7 +332,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
 					      offset & PAGE_MASK);
 	iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
 
-	if (INTEL_INFO(dev)->gen >= 8) {
+	if (INTEL_INFO(obj)->gen >= 8) {
 		offset += sizeof(uint32_t);
 
 		if (offset_in_page(offset) == 0) {
@@ -368,7 +365,6 @@ relocate_entry_clflush(struct drm_i915_gem_object *obj,
 		       struct drm_i915_gem_relocation_entry *reloc,
 		       uint64_t target_offset)
 {
-	struct drm_device *dev = obj->base.dev;
 	uint32_t page_offset = offset_in_page(reloc->offset);
 	uint64_t delta = relocation_target(reloc, target_offset);
 	char *vaddr;
@@ -382,7 +378,7 @@ relocate_entry_clflush(struct drm_i915_gem_object *obj,
 				reloc->offset >> PAGE_SHIFT));
 	clflush_write32(vaddr + page_offset, lower_32_bits(delta));
 
-	if (INTEL_INFO(dev)->gen >= 8) {
+	if (INTEL_INFO(obj)->gen >= 8) {
 		page_offset = offset_in_page(page_offset + sizeof(uint32_t));
 
 		if (page_offset == 0) {
@@ -404,7 +400,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 				   struct eb_vmas *eb,
 				   struct drm_i915_gem_relocation_entry *reloc)
 {
-	struct drm_device *dev = obj->base.dev;
 	struct drm_gem_object *target_obj;
 	struct drm_i915_gem_object *target_i915_obj;
 	struct i915_vma *target_vma;
@@ -423,7 +418,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
 	 * pipe_control writes because the gpu doesn't properly redirect them
 	 * through the ppgtt for non_secure batchbuffers. */
-	if (unlikely(IS_GEN6(dev) &&
+	if (unlikely(IS_GEN6(obj) &&
 	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
 		ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
 				    PIN_GLOBAL);
@@ -465,7 +460,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 
 	/* Check that the relocation address is valid... */
 	if (unlikely(reloc->offset >
-		obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
+		obj->base.size - (INTEL_INFO(obj)->gen >= 8 ? 8 : 4))) {
 		DRM_DEBUG("Relocation beyond object bounds: "
 			  "obj %p target %d offset %d size %d.\n",
 			  obj, reloc->target_handle,
@@ -673,7 +668,7 @@ need_reloc_mappable(struct i915_vma *vma)
 		return false;
 
 	/* See also use_cpu_reloc() */
-	if (HAS_LLC(vma->obj->base.dev))
+	if (HAS_LLC(vma->obj))
 		return false;
 
 	if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index a2b938ec01a7..b1a3a5270bad 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -229,7 +229,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
 					 struct drm_i915_fence_reg *fence,
 					 bool enable)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(obj);
 	int reg = fence_number(dev_priv, fence);
 
 	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
@@ -286,7 +286,7 @@ i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
 int
 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(obj);
 	struct drm_i915_fence_reg *fence;
 	int ret;
 
@@ -433,7 +433,7 @@ bool
 i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
 {
 	if (obj->fence_reg != I915_FENCE_REG_NONE) {
-		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+		struct drm_i915_private *dev_priv = to_i915(obj);
 		struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
 
 		WARN_ON(!ggtt_vma ||
@@ -457,7 +457,7 @@ void
 i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
 {
 	if (obj->fence_reg != I915_FENCE_REG_NONE) {
-		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+		struct drm_i915_private *dev_priv = to_i915(obj);
 		WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
 		dev_priv->fence_regs[obj->fence_reg].pin_count--;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 9f165feb54ae..627a4accc6e3 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -3308,7 +3308,7 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
 	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
 		return ERR_PTR(-EINVAL);
 
-	vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
+	vma = kmem_cache_zalloc(to_i915(obj)->vmas, GFP_KERNEL);
 	if (vma == NULL)
 		return ERR_PTR(-ENOMEM);
 
@@ -3347,8 +3347,7 @@ struct i915_vma *
 i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
 				       const struct i915_ggtt_view *view)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(obj);
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index adde5d8c3c18..a261f60066f1 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -431,7 +431,7 @@ static bool can_migrate_page(struct drm_i915_gem_object *obj)
 
 static int do_migrate_page(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct drm_i915_private *dev_priv = to_i915(obj);
 	int ret = 0;
 
 	if (!can_migrate_page(obj))
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index ea06da012d32..51da16d9eee0 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -546,10 +546,8 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
 static void
 i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-
 	if (obj->stolen) {
-		i915_gem_stolen_remove_node(dev_priv, obj->stolen);
+		i915_gem_stolen_remove_node(to_i915(obj), obj->stolen);
 		kfree(obj->stolen);
 		obj->stolen = NULL;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 7410f6c962e7..bc4cb7f4fe80 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -122,10 +122,10 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
 	if (tiling_mode == I915_TILING_NONE)
 		return true;
 
-	if (INTEL_INFO(obj->base.dev)->gen >= 4)
+	if (INTEL_INFO(obj)->gen >= 4)
 		return true;
 
-	if (INTEL_INFO(obj->base.dev)->gen == 3) {
+	if (INTEL_INFO(obj)->gen == 3) {
 		if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
 			return false;
 	} else {
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 32d9726e38b1..3692bdb0feeb 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -344,7 +344,7 @@ __i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
 static int
 i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct drm_i915_private *dev_priv = to_i915(obj);
 	struct i915_mm_struct *mm;
 	int ret = 0;
 
@@ -368,7 +368,7 @@ i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
 		}
 
 		kref_init(&mm->kref);
-		mm->i915 = to_i915(obj->base.dev);
+		mm->i915 = to_i915(obj);
 
 		mm->mm = current->mm;
 		atomic_inc(&current->mm->mm_count);
@@ -417,7 +417,7 @@ i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
 
 	kref_put_mutex(&obj->userptr.mm->kref,
 		       __i915_mm_struct_free,
-		       &to_i915(obj->base.dev)->mm_lock);
+		       &to_i915(obj)->mm_lock);
 	obj->userptr.mm = NULL;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 0d24494904ef..89811fbe723e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2083,7 +2083,7 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
 
 void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 {
-	if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
+	if (HAS_LLC(ringbuf->obj) && !ringbuf->obj->stolen)
 		i915_gem_object_unpin_map(ringbuf->obj);
 	else
 		iounmap(ringbuf->virtual_start);
-- 
2.8.0.rc3

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

end of thread

Thread overview: 16+ messages
2016-03-18 21:16 [PATCH 1/6] drm/i915: Rename the magic polymorphic macro __I915__ Chris Wilson
2016-03-18 21:16 ` [PATCH 2/6] drm/i915: Allow passing any known pointer to for_each_engine() Chris Wilson
2016-03-21 15:44   ` Dave Gordon
2016-03-18 21:16 ` [PATCH 3/6] drm/i915: Extend magic to_i915() to work with drm_i915_gem_object Chris Wilson
2016-03-21  9:47   ` Daniel Vetter
2016-03-21 13:01     ` Jani Nikula
2016-03-21 17:44       ` Daniel Vetter
2016-03-21  9:55   ` Tvrtko Ursulin
2016-03-21 10:04     ` Chris Wilson
2016-03-18 21:16 ` [PATCH 4/6] drm/i915: Use to_i915() instead of guc_to_i915() Chris Wilson
2016-03-22 10:55   ` Dave Gordon
2016-03-22 11:04     ` Chris Wilson
2016-03-18 21:16 ` [PATCH 5/6] drm/i915: Teach to_i915() how to extract drm_i915_private from requests Chris Wilson
2016-03-18 21:16 ` [PATCH 6/6] drm/i915: Teach to_i915() how to extract drm_i915_private from engines Chris Wilson
2016-03-21 12:13 ` ✗ Fi.CI.BAT: warning for series starting with [1/6] drm/i915: Rename the magic polymorphic macro __I915__ Patchwork
2016-04-15 17:45 Polymorphic to_i915() Chris Wilson
2016-04-15 17:46 ` [PATCH 3/6] drm/i915: Extend magic to_i915() to work with drm_i915_gem_object Chris Wilson
