All of lore.kernel.org
 help / color / mirror / Atom feed
* [Intel-gfx] [PATCH] drm/i915/dgfx: Grab wakeref at i915_ttm_unmap_virtual
@ 2022-09-23 14:31 Anshuman Gupta
  2022-09-23 17:02 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for " Patchwork
                   ` (5 more replies)
  0 siblings, 6 replies; 15+ messages in thread
From: Anshuman Gupta @ 2022-09-23 14:31 UTC (permalink / raw)
  To: intel-gfx; +Cc: matthew.auld

We had already grabbed the rpm wakeref at the obj destruction path,
but it is also required to grab the wakeref when the object moves.
When i915_gem_object_release_mmap_offset() gets called by
i915_ttm_move_notify(), it will release the mmap offset without
grabbing the wakeref. We want to avoid that; therefore,
grab the wakeref at i915_ttm_unmap_virtual() accordingly.

While doing that, also change the lmem_userfault_lock from a
mutex to a spinlock, as a spinlock is commonly used to protect a list.

Also change if (obj->userfault_count) to
GEM_BUG_ON(!obj->userfault_count).

Fixes: ad74457a6b5a ("drm/i915/dgfx: Release mmap on rpm suspend")
Suggested-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_mman.c | 19 +++++-------
 drivers/gpu/drm/i915/gem/i915_gem_ttm.c  | 39 ++++++++++++++++--------
 drivers/gpu/drm/i915/gt/intel_gt.c       | 11 ++++++-
 drivers/gpu/drm/i915/gt/intel_gt_types.h |  2 +-
 4 files changed, 45 insertions(+), 26 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 73d9eda1d6b7..9da561c19a47 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -557,11 +557,13 @@ void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object *
 
 	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
 
-	if (obj->userfault_count) {
-		/* rpm wakeref provide exclusive access */
-		list_del(&obj->userfault_link);
-		obj->userfault_count = 0;
-	}
+	/*
+	 * We have exclusive access here via runtime suspend. All other callers
+	 * must first grab the rpm wakeref.
+	 */
+	GEM_BUG_ON(!obj->userfault_count);
+	list_del(&obj->userfault_link);
+	obj->userfault_count = 0;
 }
 
 void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
@@ -587,13 +589,6 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
 		spin_lock(&obj->mmo.lock);
 	}
 	spin_unlock(&obj->mmo.lock);
-
-	if (obj->userfault_count) {
-		mutex_lock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
-		list_del(&obj->userfault_link);
-		mutex_unlock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
-		obj->userfault_count = 0;
-	}
 }
 
 static struct i915_mmap_offset *
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index e3fc38dd5db0..0630eeca7316 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -509,18 +509,9 @@ static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
 static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
 {
 	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
-	intel_wakeref_t wakeref = 0;
 
 	if (bo->resource && likely(obj)) {
-		/* ttm_bo_release() already has dma_resv_lock */
-		if (i915_ttm_cpu_maps_iomem(bo->resource))
-			wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
-
 		__i915_gem_object_pages_fini(obj);
-
-		if (wakeref)
-			intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);
-
 		i915_ttm_free_cached_io_rsgt(obj);
 	}
 }
@@ -1052,12 +1043,15 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
 	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
 		goto out_rpm;
 
-	/* ttm_bo_vm_reserve() already has dma_resv_lock */
+	/*
+	 * ttm_bo_vm_reserve() already has dma_resv_lock.
+	 * userfault_count is protected by dma_resv lock and rpm wakeref.
+	 */
 	if (ret == VM_FAULT_NOPAGE && wakeref && !obj->userfault_count) {
 		obj->userfault_count = 1;
-		mutex_lock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
+		spin_lock(to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
 		list_add(&obj->userfault_link, &to_gt(to_i915(obj->base.dev))->lmem_userfault_list);
-		mutex_unlock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
+		spin_unlock(to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
 	}
 
 	if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
@@ -1123,7 +1117,28 @@ static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
 
 static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
 {
+	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
+	intel_wakeref_t wakeref = 0;
+
+	assert_object_held_shared(obj);
+
+	if (i915_ttm_cpu_maps_iomem(bo->resource)) {
+		wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
+
+		/* userfault_count is protected by obj lock and rpm wakeref. */
+		if (obj->userfault_count) {
+			spin_lock(to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
+			list_del(&obj->userfault_link);
+			spin_unlock(to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
+			obj->userfault_count = 0;
+		}
+
+	}
+
 	ttm_bo_unmap_virtual(i915_gem_to_ttm(obj));
+
+	if (wakeref)
+		intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index b367cfff48d5..1e63432d97bb 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -41,7 +41,7 @@ void intel_gt_common_init_early(struct intel_gt *gt)
 	spin_lock_init(gt->irq_lock);
 
 	INIT_LIST_HEAD(&gt->lmem_userfault_list);
-	mutex_init(&gt->lmem_userfault_lock);
+	spin_lock_init(gt->lmem_userfault_lock);
 	INIT_LIST_HEAD(&gt->closed_vma);
 	spin_lock_init(&gt->closed_lock);
 
@@ -71,6 +71,10 @@ int intel_root_gt_init_early(struct drm_i915_private *i915)
 	if (!gt->irq_lock)
 		return -ENOMEM;
 
+	gt->lmem_userfault_lock = drmm_kzalloc(&i915->drm, sizeof(*gt->lmem_userfault_lock), GFP_KERNEL);
+	if (!gt->lmem_userfault_lock)
+		return -ENOMEM;
+
 	intel_gt_common_init_early(gt);
 
 	return 0;
@@ -813,6 +817,11 @@ static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
 		gt->uncore = uncore;
 		gt->irq_lock = irq_lock;
 
+		gt->lmem_userfault_lock = drmm_kzalloc(&gt->i915->drm,
+						       sizeof(*gt->lmem_userfault_lock), GFP_KERNEL);
+		if (!gt->lmem_userfault_lock)
+			return -ENOMEM;
+
 		intel_gt_common_init_early(gt);
 	}
 
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index 30003d68fd51..925775310b1e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -153,7 +153,7 @@ struct intel_gt {
 	 *  but instead has exclusive access by virtue of all other accesses requiring
 	 *  holding the runtime pm wakeref.
 	 */
-	struct mutex lmem_userfault_lock;
+	spinlock_t *lmem_userfault_lock;
 	struct list_head lmem_userfault_list;
 
 	struct list_head closed_vma;
-- 
2.26.2


^ permalink raw reply related	[flat|nested] 15+ messages in thread

* [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for drm/i915/dgfx: Grab wakeref at i915_ttm_unmap_virtual
  2022-09-23 14:31 [Intel-gfx] [PATCH] drm/i915/dgfx: Grab wakeref at i915_ttm_unmap_virtual Anshuman Gupta
@ 2022-09-23 17:02 ` Patchwork
  2022-09-23 17:02 ` [Intel-gfx] ✗ Fi.CI.SPARSE: " Patchwork
                   ` (4 subsequent siblings)
  5 siblings, 0 replies; 15+ messages in thread
From: Patchwork @ 2022-09-23 17:02 UTC (permalink / raw)
  To: Anshuman Gupta; +Cc: intel-gfx

== Series Details ==

Series: drm/i915/dgfx: Grab wakeref at i915_ttm_unmap_virtual
URL   : https://patchwork.freedesktop.org/series/108972/
State : warning

== Summary ==

Error: dim checkpatch failed
09fac79aedc6 drm/i915/dgfx: Grab wakeref at i915_ttm_unmap_virtual
-:122: CHECK:BRACES: Blank lines aren't necessary before a close brace '}'
#122: FILE: drivers/gpu/drm/i915/gem/i915_gem_ttm.c:1136:
+
+	}

-:148: WARNING:LONG_LINE: line length of 105 exceeds 100 columns
#148: FILE: drivers/gpu/drm/i915/gt/intel_gt.c:74:
+	gt->lmem_userfault_lock = drmm_kzalloc(&i915->drm, sizeof(*gt->lmem_userfault_lock), GFP_KERNEL);

-:160: WARNING:LONG_LINE: line length of 101 exceeds 100 columns
#160: FILE: drivers/gpu/drm/i915/gt/intel_gt.c:821:
+						       sizeof(*gt->lmem_userfault_lock), GFP_KERNEL);

-:176: CHECK:UNCOMMENTED_DEFINITION: spinlock_t definition without comment
#176: FILE: drivers/gpu/drm/i915/gt/intel_gt_types.h:156:
+	spinlock_t *lmem_userfault_lock;

total: 0 errors, 2 warnings, 2 checks, 132 lines checked



^ permalink raw reply	[flat|nested] 15+ messages in thread

* [Intel-gfx] ✗ Fi.CI.SPARSE: warning for drm/i915/dgfx: Grab wakeref at i915_ttm_unmap_virtual
  2022-09-23 14:31 [Intel-gfx] [PATCH] drm/i915/dgfx: Grab wakeref at i915_ttm_unmap_virtual Anshuman Gupta
  2022-09-23 17:02 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for " Patchwork
@ 2022-09-23 17:02 ` Patchwork
  2022-09-23 17:21 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
                   ` (3 subsequent siblings)
  5 siblings, 0 replies; 15+ messages in thread
From: Patchwork @ 2022-09-23 17:02 UTC (permalink / raw)
  To: Anshuman Gupta; +Cc: intel-gfx

== Series Details ==

Series: drm/i915/dgfx: Grab wakeref at i915_ttm_unmap_virtual
URL   : https://patchwork.freedesktop.org/series/108972/
State : warning

== Summary ==

Error: dim sparse failed
Sparse version: v0.6.2
Fast mode used, each commit won't be checked separately.



^ permalink raw reply	[flat|nested] 15+ messages in thread

* [Intel-gfx] ✓ Fi.CI.BAT: success for drm/i915/dgfx: Grab wakeref at i915_ttm_unmap_virtual
  2022-09-23 14:31 [Intel-gfx] [PATCH] drm/i915/dgfx: Grab wakeref at i915_ttm_unmap_virtual Anshuman Gupta
  2022-09-23 17:02 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for " Patchwork
  2022-09-23 17:02 ` [Intel-gfx] ✗ Fi.CI.SPARSE: " Patchwork
@ 2022-09-23 17:21 ` Patchwork
  2022-09-24  7:32 ` [Intel-gfx] ✓ Fi.CI.IGT: " Patchwork
                   ` (2 subsequent siblings)
  5 siblings, 0 replies; 15+ messages in thread
From: Patchwork @ 2022-09-23 17:21 UTC (permalink / raw)
  To: Anshuman Gupta; +Cc: intel-gfx

[-- Attachment #1: Type: text/plain, Size: 3062 bytes --]

== Series Details ==

Series: drm/i915/dgfx: Grab wakeref at i915_ttm_unmap_virtual
URL   : https://patchwork.freedesktop.org/series/108972/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_12175 -> Patchwork_108972v1
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/index.html

Participating hosts (46 -> 45)
------------------------------

  Missing    (1): fi-tgl-dsi 

Known issues
------------

  Here are the changes found in Patchwork_108972v1 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@i915_suspend@basic-s3-without-i915:
    - fi-bdw-5557u:       [PASS][1] -> [INCOMPLETE][2] ([i915#146] / [i915#6712])
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/fi-bdw-5557u/igt@i915_suspend@basic-s3-without-i915.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/fi-bdw-5557u/igt@i915_suspend@basic-s3-without-i915.html

  
#### Possible fixes ####

  * igt@gem_exec_suspend@basic-s3@smem:
    - {bat-adlm-1}:       [DMESG-WARN][3] ([i915#2867]) -> [PASS][4]
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/bat-adlm-1/igt@gem_exec_suspend@basic-s3@smem.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/bat-adlm-1/igt@gem_exec_suspend@basic-s3@smem.html

  * igt@i915_selftest@live@gt_heartbeat:
    - fi-hsw-4770:        [DMESG-FAIL][5] -> [PASS][6]
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/fi-hsw-4770/igt@i915_selftest@live@gt_heartbeat.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/fi-hsw-4770/igt@i915_selftest@live@gt_heartbeat.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [i915#146]: https://gitlab.freedesktop.org/drm/intel/issues/146
  [i915#2867]: https://gitlab.freedesktop.org/drm/intel/issues/2867
  [i915#4983]: https://gitlab.freedesktop.org/drm/intel/issues/4983
  [i915#5886]: https://gitlab.freedesktop.org/drm/intel/issues/5886
  [i915#6257]: https://gitlab.freedesktop.org/drm/intel/issues/6257
  [i915#6380]: https://gitlab.freedesktop.org/drm/intel/issues/6380
  [i915#6712]: https://gitlab.freedesktop.org/drm/intel/issues/6712
  [i915#6818]: https://gitlab.freedesktop.org/drm/intel/issues/6818


Build changes
-------------

  * Linux: CI_DRM_12175 -> Patchwork_108972v1

  CI-20190529: 20190529
  CI_DRM_12175: a916f2c47c5965886be7a023eb78e67470237fe3 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_6662: dcb1d7a8822e62935f4fe3f2e6a04caaee669369 @ https://gitlab.freedesktop.org/drm/igt-gpu-tools.git
  Patchwork_108972v1: a916f2c47c5965886be7a023eb78e67470237fe3 @ git://anongit.freedesktop.org/gfx-ci/linux


### Linux commits

26360024b21d drm/i915/dgfx: Grab wakeref at i915_ttm_unmap_virtual

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/index.html

[-- Attachment #2: Type: text/html, Size: 3392 bytes --]

^ permalink raw reply	[flat|nested] 15+ messages in thread

* [Intel-gfx] ✓ Fi.CI.IGT: success for drm/i915/dgfx: Grab wakeref at i915_ttm_unmap_virtual
  2022-09-23 14:31 [Intel-gfx] [PATCH] drm/i915/dgfx: Grab wakeref at i915_ttm_unmap_virtual Anshuman Gupta
                   ` (2 preceding siblings ...)
  2022-09-23 17:21 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
@ 2022-09-24  7:32 ` Patchwork
  2022-09-26 16:22 ` [Intel-gfx] [PATCH] " Matthew Auld
  2022-10-21  9:55 ` [Intel-gfx] [PATCH v2 1/2] drm/i915: Encapsulate lmem rpm stuff in intel_runtime_pm Anshuman Gupta
  5 siblings, 0 replies; 15+ messages in thread
From: Patchwork @ 2022-09-24  7:32 UTC (permalink / raw)
  To: Anshuman Gupta; +Cc: intel-gfx

[-- Attachment #1: Type: text/plain, Size: 37834 bytes --]

== Series Details ==

Series: drm/i915/dgfx: Grab wakeref at i915_ttm_unmap_virtual
URL   : https://patchwork.freedesktop.org/series/108972/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_12175_full -> Patchwork_108972v1_full
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  

Participating hosts (9 -> 10)
------------------------------

  Additional (1): shard-tglu 

Known issues
------------

  Here are the changes found in Patchwork_108972v1_full that come from known issues:

### CI changes ###

#### Possible fixes ####

  * boot:
    - shard-glk:          ([PASS][1], [PASS][2], [PASS][3], [PASS][4], [FAIL][5], [PASS][6], [PASS][7], [PASS][8], [PASS][9], [PASS][10], [PASS][11], [PASS][12], [PASS][13], [PASS][14], [PASS][15], [PASS][16], [PASS][17], [PASS][18], [PASS][19], [PASS][20], [PASS][21], [PASS][22], [PASS][23], [FAIL][24], [PASS][25]) ([i915#4392]) -> ([PASS][26], [PASS][27], [PASS][28], [PASS][29], [PASS][30], [PASS][31], [PASS][32], [PASS][33], [PASS][34], [PASS][35], [PASS][36], [PASS][37], [PASS][38], [PASS][39], [PASS][40], [PASS][41], [PASS][42], [PASS][43], [PASS][44], [PASS][45], [PASS][46], [PASS][47], [PASS][48], [PASS][49], [PASS][50])
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-glk1/boot.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-glk1/boot.html
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-glk1/boot.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-glk2/boot.html
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-glk2/boot.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-glk2/boot.html
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-glk3/boot.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-glk3/boot.html
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-glk3/boot.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-glk5/boot.html
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-glk5/boot.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-glk5/boot.html
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-glk5/boot.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-glk6/boot.html
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-glk6/boot.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-glk6/boot.html
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-glk7/boot.html
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-glk7/boot.html
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-glk7/boot.html
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-glk8/boot.html
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-glk8/boot.html
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-glk8/boot.html
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-glk9/boot.html
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-glk9/boot.html
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-glk9/boot.html
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-glk1/boot.html
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-glk1/boot.html
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-glk1/boot.html
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-glk2/boot.html
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-glk2/boot.html
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-glk2/boot.html
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-glk3/boot.html
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-glk3/boot.html
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-glk3/boot.html
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-glk5/boot.html
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-glk5/boot.html
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-glk5/boot.html
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-glk6/boot.html
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-glk6/boot.html
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-glk6/boot.html
   [41]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-glk7/boot.html
   [42]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-glk7/boot.html
   [43]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-glk7/boot.html
   [44]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-glk8/boot.html
   [45]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-glk8/boot.html
   [46]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-glk8/boot.html
   [47]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-glk9/boot.html
   [48]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-glk9/boot.html
   [49]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-glk9/boot.html
   [50]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-glk9/boot.html

  

### IGT changes ###

#### Issues hit ####

  * igt@gem_ctx_persistence@legacy-engines-cleanup:
    - shard-snb:          NOTRUN -> [SKIP][51] ([fdo#109271] / [i915#1099])
   [51]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-snb4/igt@gem_ctx_persistence@legacy-engines-cleanup.html

  * igt@gem_exec_balancer@parallel-out-fence:
    - shard-iclb:         [PASS][52] -> [SKIP][53] ([i915#4525])
   [52]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-iclb4/igt@gem_exec_balancer@parallel-out-fence.html
   [53]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-iclb3/igt@gem_exec_balancer@parallel-out-fence.html

  * igt@gem_exec_fair@basic-none-rrul@rcs0:
    - shard-glk:          NOTRUN -> [FAIL][54] ([i915#2842])
   [54]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-glk5/igt@gem_exec_fair@basic-none-rrul@rcs0.html

  * igt@gem_exec_fair@basic-none@vcs1:
    - shard-iclb:         NOTRUN -> [FAIL][55] ([i915#2842]) +1 similar issue
   [55]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-iclb4/igt@gem_exec_fair@basic-none@vcs1.html

  * igt@gem_exec_fair@basic-throttle@rcs0:
    - shard-iclb:         [PASS][56] -> [FAIL][57] ([i915#2842]) +1 similar issue
   [56]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-iclb3/igt@gem_exec_fair@basic-throttle@rcs0.html
   [57]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-iclb8/igt@gem_exec_fair@basic-throttle@rcs0.html

  * igt@gem_exec_fence@syncobj-backward-timeline-chain-engines:
    - shard-snb:          NOTRUN -> [SKIP][58] ([fdo#109271]) +7 similar issues
   [58]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-snb4/igt@gem_exec_fence@syncobj-backward-timeline-chain-engines.html

  * igt@gem_exec_params@rsvd2-dirt:
    - shard-tglb:         NOTRUN -> [SKIP][59] ([fdo#109283])
   [59]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-tglb1/igt@gem_exec_params@rsvd2-dirt.html

  * igt@gem_lmem_swapping@heavy-verify-multi-ccs:
    - shard-tglb:         NOTRUN -> [SKIP][60] ([i915#4613]) +1 similar issue
   [60]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-tglb1/igt@gem_lmem_swapping@heavy-verify-multi-ccs.html
    - shard-glk:          NOTRUN -> [SKIP][61] ([fdo#109271] / [i915#4613]) +3 similar issues
   [61]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-glk1/igt@gem_lmem_swapping@heavy-verify-multi-ccs.html

  * igt@gem_lmem_swapping@verify-ccs:
    - shard-apl:          NOTRUN -> [SKIP][62] ([fdo#109271] / [i915#4613])
   [62]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-apl7/igt@gem_lmem_swapping@verify-ccs.html

  * igt@gem_mmap_gtt@coherency:
    - shard-iclb:         NOTRUN -> [SKIP][63] ([fdo#109292])
   [63]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-iclb4/igt@gem_mmap_gtt@coherency.html

  * igt@gem_pwrite@basic-exhaustion:
    - shard-tglb:         NOTRUN -> [WARN][64] ([i915#2658])
   [64]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-tglb1/igt@gem_pwrite@basic-exhaustion.html
    - shard-glk:          NOTRUN -> [WARN][65] ([i915#2658])
   [65]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-glk1/igt@gem_pwrite@basic-exhaustion.html

  * igt@gem_render_copy@x-tiled-to-vebox-yf-tiled:
    - shard-apl:          NOTRUN -> [SKIP][66] ([fdo#109271]) +99 similar issues
   [66]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-apl7/igt@gem_render_copy@x-tiled-to-vebox-yf-tiled.html

  * igt@gen9_exec_parse@bb-start-out:
    - shard-tglb:         NOTRUN -> [SKIP][67] ([i915#2527] / [i915#2856])
   [67]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-tglb1/igt@gen9_exec_parse@bb-start-out.html

  * igt@i915_pm_rc6_residency@rc6-fence:
    - shard-tglb:         NOTRUN -> [WARN][68] ([i915#2681])
   [68]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-tglb1/igt@i915_pm_rc6_residency@rc6-fence.html

  * igt@kms_async_flips@alternate-sync-async-flip@pipe-a-edp-1:
    - shard-tglb:         [PASS][69] -> [FAIL][70] ([i915#2521])
   [69]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-tglb2/igt@kms_async_flips@alternate-sync-async-flip@pipe-a-edp-1.html
   [70]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-tglb8/igt@kms_async_flips@alternate-sync-async-flip@pipe-a-edp-1.html

  * igt@kms_atomic@plane-primary-overlay-mutable-zpos:
    - shard-tglb:         NOTRUN -> [SKIP][71] ([i915#404])
   [71]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-tglb1/igt@kms_atomic@plane-primary-overlay-mutable-zpos.html

  * igt@kms_big_fb@4-tiled-16bpp-rotate-90:
    - shard-tglb:         NOTRUN -> [SKIP][72] ([i915#5286])
   [72]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-tglb1/igt@kms_big_fb@4-tiled-16bpp-rotate-90.html

  * igt@kms_big_fb@4-tiled-64bpp-rotate-90:
    - shard-iclb:         NOTRUN -> [SKIP][73] ([i915#5286]) +1 similar issue
   [73]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-iclb4/igt@kms_big_fb@4-tiled-64bpp-rotate-90.html

  * igt@kms_big_fb@linear-16bpp-rotate-180:
    - shard-glk:          NOTRUN -> [DMESG-FAIL][74] ([i915#118] / [i915#1888])
   [74]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-glk1/igt@kms_big_fb@linear-16bpp-rotate-180.html

  * igt@kms_big_fb@x-tiled-32bpp-rotate-90:
    - shard-iclb:         NOTRUN -> [SKIP][75] ([fdo#110725] / [fdo#111614])
   [75]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-iclb4/igt@kms_big_fb@x-tiled-32bpp-rotate-90.html

  * igt@kms_big_fb@yf-tiled-32bpp-rotate-180:
    - shard-tglb:         NOTRUN -> [SKIP][76] ([fdo#111615])
   [76]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-tglb1/igt@kms_big_fb@yf-tiled-32bpp-rotate-180.html

  * igt@kms_ccs@pipe-a-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc:
    - shard-glk:          NOTRUN -> [SKIP][77] ([fdo#109271] / [i915#3886]) +3 similar issues
   [77]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-glk5/igt@kms_ccs@pipe-a-crc-sprite-planes-basic-y_tiled_gen12_rc_ccs_cc.html

  * igt@kms_ccs@pipe-c-bad-rotation-90-y_tiled_gen12_mc_ccs:
    - shard-tglb:         NOTRUN -> [SKIP][78] ([i915#3689] / [i915#3886]) +1 similar issue
   [78]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-tglb1/igt@kms_ccs@pipe-c-bad-rotation-90-y_tiled_gen12_mc_ccs.html

  * igt@kms_ccs@pipe-c-crc-primary-basic-y_tiled_gen12_mc_ccs:
    - shard-apl:          NOTRUN -> [SKIP][79] ([fdo#109271] / [i915#3886]) +1 similar issue
   [79]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-apl8/igt@kms_ccs@pipe-c-crc-primary-basic-y_tiled_gen12_mc_ccs.html

  * igt@kms_ccs@pipe-d-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs:
    - shard-tglb:         NOTRUN -> [SKIP][80] ([i915#6095]) +1 similar issue
   [80]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-tglb1/igt@kms_ccs@pipe-d-crc-sprite-planes-basic-4_tiled_dg2_mc_ccs.html

  * igt@kms_chamelium@hdmi-crc-nonplanar-formats:
    - shard-apl:          NOTRUN -> [SKIP][81] ([fdo#109271] / [fdo#111827]) +4 similar issues
   [81]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-apl2/igt@kms_chamelium@hdmi-crc-nonplanar-formats.html

  * igt@kms_chamelium@hdmi-hpd-after-suspend:
    - shard-tglb:         NOTRUN -> [SKIP][82] ([fdo#109284] / [fdo#111827]) +2 similar issues
   [82]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-tglb1/igt@kms_chamelium@hdmi-hpd-after-suspend.html
    - shard-glk:          NOTRUN -> [SKIP][83] ([fdo#109271] / [fdo#111827]) +4 similar issues
   [83]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-glk1/igt@kms_chamelium@hdmi-hpd-after-suspend.html

  * igt@kms_content_protection@atomic-dpms:
    - shard-apl:          NOTRUN -> [TIMEOUT][84] ([i915#1319])
   [84]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-apl7/igt@kms_content_protection@atomic-dpms.html

  * igt@kms_content_protection@content_type_change:
    - shard-tglb:         NOTRUN -> [SKIP][85] ([i915#1063])
   [85]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-tglb1/igt@kms_content_protection@content_type_change.html

  * igt@kms_cursor_crc@cursor-random-512x170:
    - shard-tglb:         NOTRUN -> [SKIP][86] ([i915#3359])
   [86]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-tglb1/igt@kms_cursor_crc@cursor-random-512x170.html

  * igt@kms_cursor_crc@cursor-suspend@pipe-c-dp-1:
    - shard-apl:          [PASS][87] -> [DMESG-WARN][88] ([i915#180]) +2 similar issues
   [87]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-apl6/igt@kms_cursor_crc@cursor-suspend@pipe-c-dp-1.html
   [88]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-apl8/igt@kms_cursor_crc@cursor-suspend@pipe-c-dp-1.html

  * igt@kms_flip@2x-flip-vs-absolute-wf_vblank-interruptible:
    - shard-tglb:         NOTRUN -> [SKIP][89] ([fdo#109274] / [fdo#111825] / [i915#3637]) +2 similar issues
   [89]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-tglb1/igt@kms_flip@2x-flip-vs-absolute-wf_vblank-interruptible.html

  * igt@kms_flip@2x-flip-vs-absolute-wf_vblank@bc-hdmi-a1-hdmi-a2:
    - shard-glk:          [PASS][90] -> [FAIL][91] ([i915#2122])
   [90]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-glk5/igt@kms_flip@2x-flip-vs-absolute-wf_vblank@bc-hdmi-a1-hdmi-a2.html
   [91]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-glk9/igt@kms_flip@2x-flip-vs-absolute-wf_vblank@bc-hdmi-a1-hdmi-a2.html

  * igt@kms_flip_scaled_crc@flip-32bpp-4tile-to-32bpp-4tiledg2rcccs-downscaling@pipe-a-valid-mode:
    - shard-iclb:         NOTRUN -> [SKIP][92] ([i915#2587] / [i915#2672]) +3 similar issues
   [92]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-iclb1/igt@kms_flip_scaled_crc@flip-32bpp-4tile-to-32bpp-4tiledg2rcccs-downscaling@pipe-a-valid-mode.html

  * igt@kms_flip_scaled_crc@flip-32bpp-4tile-to-32bpp-4tiledg2rcccs-upscaling@pipe-a-valid-mode:
    - shard-tglb:         NOTRUN -> [SKIP][93] ([i915#2587] / [i915#2672])
   [93]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-tglb1/igt@kms_flip_scaled_crc@flip-32bpp-4tile-to-32bpp-4tiledg2rcccs-upscaling@pipe-a-valid-mode.html

  * igt@kms_flip_scaled_crc@flip-32bpp-ytile-to-32bpp-ytileccs-downscaling@pipe-a-default-mode:
    - shard-iclb:         NOTRUN -> [SKIP][94] ([i915#3555]) +4 similar issues
   [94]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-iclb2/igt@kms_flip_scaled_crc@flip-32bpp-ytile-to-32bpp-ytileccs-downscaling@pipe-a-default-mode.html

  * igt@kms_flip_scaled_crc@flip-64bpp-4tile-to-32bpp-4tiledg2rcccs-upscaling@pipe-a-default-mode:
    - shard-iclb:         NOTRUN -> [SKIP][95] ([i915#2672]) +2 similar issues
   [95]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-iclb2/igt@kms_flip_scaled_crc@flip-64bpp-4tile-to-32bpp-4tiledg2rcccs-upscaling@pipe-a-default-mode.html

  * igt@kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling@pipe-a-valid-mode:
    - shard-iclb:         NOTRUN -> [SKIP][96] ([i915#2672] / [i915#3555])
   [96]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-iclb1/igt@kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling@pipe-a-valid-mode.html

  * igt@kms_frontbuffer_tracking@fbc-2p-primscrn-cur-indfb-move:
    - shard-iclb:         NOTRUN -> [SKIP][97] ([fdo#109280])
   [97]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-iclb4/igt@kms_frontbuffer_tracking@fbc-2p-primscrn-cur-indfb-move.html

  * igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-shrfb-draw-mmap-wc:
    - shard-tglb:         NOTRUN -> [SKIP][98] ([i915#6497]) +3 similar issues
   [98]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-tglb1/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-shrfb-draw-mmap-wc.html

  * igt@kms_frontbuffer_tracking@fbcpsr-tiling-linear:
    - shard-glk:          NOTRUN -> [SKIP][99] ([fdo#109271]) +76 similar issues
   [99]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-glk1/igt@kms_frontbuffer_tracking@fbcpsr-tiling-linear.html

  * igt@kms_frontbuffer_tracking@psr-2p-scndscrn-spr-indfb-draw-mmap-gtt:
    - shard-tglb:         NOTRUN -> [SKIP][100] ([fdo#109280] / [fdo#111825]) +4 similar issues
   [100]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-tglb1/igt@kms_frontbuffer_tracking@psr-2p-scndscrn-spr-indfb-draw-mmap-gtt.html

  * igt@kms_hdr@bpc-switch:
    - shard-tglb:         NOTRUN -> [SKIP][101] ([i915#3555])
   [101]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-tglb1/igt@kms_hdr@bpc-switch.html

  * igt@kms_pipe_b_c_ivb@disable-pipe-b-enable-pipe-c:
    - shard-tglb:         NOTRUN -> [SKIP][102] ([fdo#109289])
   [102]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-tglb1/igt@kms_pipe_b_c_ivb@disable-pipe-b-enable-pipe-c.html

  * igt@kms_plane_alpha_blend@pipe-c-alpha-basic:
    - shard-glk:          NOTRUN -> [FAIL][103] ([fdo#108145] / [i915#265]) +1 similar issue
   [103]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-glk5/igt@kms_plane_alpha_blend@pipe-c-alpha-basic.html

  * igt@kms_plane_alpha_blend@pipe-c-alpha-opaque-fb:
    - shard-apl:          NOTRUN -> [FAIL][104] ([fdo#108145] / [i915#265])
   [104]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-apl7/igt@kms_plane_alpha_blend@pipe-c-alpha-opaque-fb.html

  * igt@kms_plane_lowres@tiling-x@pipe-c-edp-1:
    - shard-tglb:         NOTRUN -> [SKIP][105] ([i915#3536]) +3 similar issues
   [105]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-tglb1/igt@kms_plane_lowres@tiling-x@pipe-c-edp-1.html

  * igt@kms_plane_scaling@planes-downscale-factor-0-25@pipe-c-edp-1:
    - shard-tglb:         NOTRUN -> [SKIP][106] ([i915#5235]) +3 similar issues
   [106]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-tglb1/igt@kms_plane_scaling@planes-downscale-factor-0-25@pipe-c-edp-1.html

  * igt@kms_psr2_sf@overlay-plane-move-continuous-exceed-sf:
    - shard-apl:          NOTRUN -> [SKIP][107] ([fdo#109271] / [i915#658])
   [107]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-apl7/igt@kms_psr2_sf@overlay-plane-move-continuous-exceed-sf.html

  * igt@kms_psr2_sf@overlay-plane-update-sf-dmg-area:
    - shard-glk:          NOTRUN -> [SKIP][108] ([fdo#109271] / [i915#658]) +1 similar issue
   [108]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-glk5/igt@kms_psr2_sf@overlay-plane-update-sf-dmg-area.html

  * igt@kms_psr2_sf@primary-plane-update-sf-dmg-area:
    - shard-tglb:         NOTRUN -> [SKIP][109] ([i915#2920])
   [109]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-tglb1/igt@kms_psr2_sf@primary-plane-update-sf-dmg-area.html

  * igt@kms_psr@psr2_primary_mmap_cpu:
    - shard-iclb:         [PASS][110] -> [SKIP][111] ([fdo#109441]) +1 similar issue
   [110]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-iclb2/igt@kms_psr@psr2_primary_mmap_cpu.html
   [111]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-iclb1/igt@kms_psr@psr2_primary_mmap_cpu.html

  * igt@kms_psr@psr2_sprite_plane_move:
    - shard-tglb:         NOTRUN -> [FAIL][112] ([i915#132] / [i915#3467])
   [112]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-tglb1/igt@kms_psr@psr2_sprite_plane_move.html

  * igt@kms_vblank@pipe-d-query-busy:
    - shard-iclb:         NOTRUN -> [SKIP][113] ([fdo#109278]) +3 similar issues
   [113]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-iclb4/igt@kms_vblank@pipe-d-query-busy.html

  * igt@kms_vblank@pipe-d-wait-idle:
    - shard-apl:          NOTRUN -> [SKIP][114] ([fdo#109271] / [i915#533])
   [114]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-apl7/igt@kms_vblank@pipe-d-wait-idle.html

  * igt@kms_writeback@writeback-check-output:
    - shard-apl:          NOTRUN -> [SKIP][115] ([fdo#109271] / [i915#2437])
   [115]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-apl2/igt@kms_writeback@writeback-check-output.html

  * igt@prime_nv_test@i915_import_cpu_mmap:
    - shard-tglb:         NOTRUN -> [SKIP][116] ([fdo#109291])
   [116]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-tglb1/igt@prime_nv_test@i915_import_cpu_mmap.html

  * igt@sysfs_clients@fair-3:
    - shard-apl:          NOTRUN -> [SKIP][117] ([fdo#109271] / [i915#2994])
   [117]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-apl8/igt@sysfs_clients@fair-3.html

  
#### Possible fixes ####

  * igt@gem_exec_balancer@parallel-contexts:
    - shard-iclb:         [SKIP][118] ([i915#4525]) -> [PASS][119] +2 similar issues
   [118]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-iclb5/igt@gem_exec_balancer@parallel-contexts.html
   [119]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-iclb2/igt@gem_exec_balancer@parallel-contexts.html

  * igt@gem_exec_fair@basic-pace-share@rcs0:
    - shard-tglb:         [FAIL][120] ([i915#2842]) -> [PASS][121]
   [120]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-tglb2/igt@gem_exec_fair@basic-pace-share@rcs0.html
   [121]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-tglb2/igt@gem_exec_fair@basic-pace-share@rcs0.html

  * igt@gem_exec_fair@basic-pace-solo@rcs0:
    - shard-apl:          [FAIL][122] ([i915#2842]) -> [PASS][123]
   [122]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-apl6/igt@gem_exec_fair@basic-pace-solo@rcs0.html
   [123]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-apl8/igt@gem_exec_fair@basic-pace-solo@rcs0.html

  * igt@gem_exec_fair@basic-pace@rcs0:
    - shard-glk:          [FAIL][124] ([i915#2842]) -> [PASS][125]
   [124]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-glk5/igt@gem_exec_fair@basic-pace@rcs0.html
   [125]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-glk7/igt@gem_exec_fair@basic-pace@rcs0.html

  * igt@gem_huc_copy@huc-copy:
    - shard-tglb:         [SKIP][126] ([i915#2190]) -> [PASS][127]
   [126]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-tglb7/igt@gem_huc_copy@huc-copy.html
   [127]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-tglb5/igt@gem_huc_copy@huc-copy.html

  * igt@gem_workarounds@suspend-resume:
    - shard-apl:          [DMESG-WARN][128] ([i915#180]) -> [PASS][129] +3 similar issues
   [128]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-apl2/igt@gem_workarounds@suspend-resume.html
   [129]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-apl8/igt@gem_workarounds@suspend-resume.html

  * igt@i915_pm_rpm@basic-pci-d3-state:
    - shard-iclb:         [INCOMPLETE][130] -> [PASS][131]
   [130]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-iclb4/igt@i915_pm_rpm@basic-pci-d3-state.html
   [131]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-iclb4/igt@i915_pm_rpm@basic-pci-d3-state.html

  * igt@i915_selftest@live@gem:
    - shard-snb:          [INCOMPLETE][132] -> [PASS][133]
   [132]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-snb6/igt@i915_selftest@live@gem.html
   [133]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-snb4/igt@i915_selftest@live@gem.html

  * igt@i915_selftest@live@hangcheck:
    - shard-tglb:         [DMESG-WARN][134] ([i915#5591]) -> [PASS][135]
   [134]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-tglb5/igt@i915_selftest@live@hangcheck.html
   [135]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-tglb5/igt@i915_selftest@live@hangcheck.html

  * igt@kms_cursor_legacy@cursor-vs-flip@toggle:
    - shard-iclb:         [FAIL][136] ([i915#5072]) -> [PASS][137]
   [136]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-iclb7/igt@kms_cursor_legacy@cursor-vs-flip@toggle.html
   [137]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-iclb4/igt@kms_cursor_legacy@cursor-vs-flip@toggle.html

  * igt@kms_plane_scaling@planes-unity-scaling-downscale-factor-0-5@pipe-c-edp-1:
    - shard-iclb:         [SKIP][138] ([i915#5235]) -> [PASS][139] +2 similar issues
   [138]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-iclb2/igt@kms_plane_scaling@planes-unity-scaling-downscale-factor-0-5@pipe-c-edp-1.html
   [139]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-iclb1/igt@kms_plane_scaling@planes-unity-scaling-downscale-factor-0-5@pipe-c-edp-1.html

  * igt@kms_psr@psr2_cursor_mmap_gtt:
    - shard-iclb:         [SKIP][140] ([fdo#109441]) -> [PASS][141] +3 similar issues
   [140]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-iclb6/igt@kms_psr@psr2_cursor_mmap_gtt.html
   [141]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-iclb2/igt@kms_psr@psr2_cursor_mmap_gtt.html

  
#### Warnings ####

  * igt@gem_exec_balancer@parallel-ordering:
    - shard-iclb:         [FAIL][142] ([i915#6117]) -> [SKIP][143] ([i915#4525])
   [142]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-iclb1/igt@gem_exec_balancer@parallel-ordering.html
   [143]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-iclb8/igt@gem_exec_balancer@parallel-ordering.html

  * igt@i915_pm_rc6_residency@rc6-idle@rcs0:
    - shard-iclb:         [WARN][144] ([i915#2684]) -> [FAIL][145] ([i915#2684])
   [144]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-iclb2/igt@i915_pm_rc6_residency@rc6-idle@rcs0.html
   [145]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-iclb1/igt@i915_pm_rc6_residency@rc6-idle@rcs0.html

  * igt@kms_psr2_sf@cursor-plane-move-continuous-exceed-fully-sf:
    - shard-iclb:         [SKIP][146] ([i915#2920]) -> [SKIP][147] ([i915#658])
   [146]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-iclb2/igt@kms_psr2_sf@cursor-plane-move-continuous-exceed-fully-sf.html
   [147]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-iclb1/igt@kms_psr2_sf@cursor-plane-move-continuous-exceed-fully-sf.html

  * igt@kms_psr2_sf@overlay-primary-update-sf-dmg-area:
    - shard-iclb:         [SKIP][148] ([fdo#111068] / [i915#658]) -> [SKIP][149] ([i915#2920])
   [148]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-iclb5/igt@kms_psr2_sf@overlay-primary-update-sf-dmg-area.html
   [149]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-iclb2/igt@kms_psr2_sf@overlay-primary-update-sf-dmg-area.html

  * igt@kms_psr2_su@page_flip-p010:
    - shard-iclb:         [FAIL][150] ([i915#5939]) -> [SKIP][151] ([fdo#109642] / [fdo#111068] / [i915#658])
   [150]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_12175/shard-iclb2/igt@kms_psr2_su@page_flip-p010.html
   [151]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/shard-iclb4/igt@kms_psr2_su@page_flip-p010.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [fdo#108145]: https://bugs.freedesktop.org/show_bug.cgi?id=108145
  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [fdo#109274]: https://bugs.freedesktop.org/show_bug.cgi?id=109274
  [fdo#109278]: https://bugs.freedesktop.org/show_bug.cgi?id=109278
  [fdo#109279]: https://bugs.freedesktop.org/show_bug.cgi?id=109279
  [fdo#109280]: https://bugs.freedesktop.org/show_bug.cgi?id=109280
  [fdo#109283]: https://bugs.freedesktop.org/show_bug.cgi?id=109283
  [fdo#109284]: https://bugs.freedesktop.org/show_bug.cgi?id=109284
  [fdo#109285]: https://bugs.freedesktop.org/show_bug.cgi?id=109285
  [fdo#109289]: https://bugs.freedesktop.org/show_bug.cgi?id=109289
  [fdo#109291]: https://bugs.freedesktop.org/show_bug.cgi?id=109291
  [fdo#109292]: https://bugs.freedesktop.org/show_bug.cgi?id=109292
  [fdo#109295]: https://bugs.freedesktop.org/show_bug.cgi?id=109295
  [fdo#109302]: https://bugs.freedesktop.org/show_bug.cgi?id=109302
  [fdo#109303]: https://bugs.freedesktop.org/show_bug.cgi?id=109303
  [fdo#109307]: https://bugs.freedesktop.org/show_bug.cgi?id=109307
  [fdo#109312]: https://bugs.freedesktop.org/show_bug.cgi?id=109312
  [fdo#109313]: https://bugs.freedesktop.org/show_bug.cgi?id=109313
  [fdo#109314]: https://bugs.freedesktop.org/show_bug.cgi?id=109314
  [fdo#109441]: https://bugs.freedesktop.org/show_bug.cgi?id=109441
  [fdo#109506]: https://bugs.freedesktop.org/show_bug.cgi?id=109506
  [fdo#109642]: https://bugs.freedesktop.org/show_bug.cgi?id=109642
  [fdo#110189]: https://bugs.freedesktop.org/show_bug.cgi?id=110189
  [fdo#110542]: https://bugs.freedesktop.org/show_bug.cgi?id=110542
  [fdo#110725]: https://bugs.freedesktop.org/show_bug.cgi?id=110725
  [fdo#111068]: https://bugs.freedesktop.org/show_bug.cgi?id=111068
  [fdo#111614]: https://bugs.freedesktop.org/show_bug.cgi?id=111614
  [fdo#111615]: https://bugs.freedesktop.org/show_bug.cgi?id=111615
  [fdo#111644]: https://bugs.freedesktop.org/show_bug.cgi?id=111644
  [fdo#111656]: https://bugs.freedesktop.org/show_bug.cgi?id=111656
  [fdo#111825]: https://bugs.freedesktop.org/show_bug.cgi?id=111825
  [fdo#111827]: https://bugs.freedesktop.org/show_bug.cgi?id=111827
  [fdo#112054]: https://bugs.freedesktop.org/show_bug.cgi?id=112054
  [fdo#112283]: https://bugs.freedesktop.org/show_bug.cgi?id=112283
  [i915#1063]: https://gitlab.freedesktop.org/drm/intel/issues/1063
  [i915#1099]: https://gitlab.freedesktop.org/drm/intel/issues/1099
  [i915#1155]: https://gitlab.freedesktop.org/drm/intel/issues/1155
  [i915#118]: https://gitlab.freedesktop.org/drm/intel/issues/118
  [i915#1319]: https://gitlab.freedesktop.org/drm/intel/issues/1319
  [i915#132]: https://gitlab.freedesktop.org/drm/intel/issues/132
  [i915#1397]: https://gitlab.freedesktop.org/drm/intel/issues/1397
  [i915#180]: https://gitlab.freedesktop.org/drm/intel/issues/180
  [i915#1839]: https://gitlab.freedesktop.org/drm/intel/issues/1839
  [i915#1888]: https://gitlab.freedesktop.org/drm/intel/issues/1888
  [i915#1902]: https://gitlab.freedesktop.org/drm/intel/issues/1902
  [i915#2122]: https://gitlab.freedesktop.org/drm/intel/issues/2122
  [i915#2190]: https://gitlab.freedesktop.org/drm/intel/issues/2190
  [i915#2410]: https://gitlab.freedesktop.org/drm/intel/issues/2410
  [i915#2437]: https://gitlab.freedesktop.org/drm/intel/issues/2437
  [i915#2521]: https://gitlab.freedesktop.org/drm/intel/issues/2521
  [i915#2527]: https://gitlab.freedesktop.org/drm/intel/issues/2527
  [i915#2530]: https://gitlab.freedesktop.org/drm/intel/issues/2530
  [i915#2587]: https://gitlab.freedesktop.org/drm/intel/issues/2587
  [i915#265]: https://gitlab.freedesktop.org/drm/intel/issues/265
  [i915#2658]: https://gitlab.freedesktop.org/drm/intel/issues/2658
  [i915#2672]: https://gitlab.freedesktop.org/drm/intel/issues/2672
  [i915#2681]: https://gitlab.freedesktop.org/drm/intel/issues/2681
  [i915#2684]: https://gitlab.freedesktop.org/drm/intel/issues/2684
  [i915#2705]: https://gitlab.freedesktop.org/drm/intel/issues/2705
  [i915#280]: https://gitlab.freedesktop.org/drm/intel/issues/280
  [i915#284]: https://gitlab.freedesktop.org/drm/intel/issues/284
  [i915#2842]: https://gitlab.freedesktop.org/drm/intel/issues/2842
  [i915#2856]: https://gitlab.freedesktop.org/drm/intel/issues/2856
  [i915#2920]: https://gitlab.freedesktop.org/drm/intel/issues/2920
  [i915#2994]: https://gitlab.freedesktop.org/drm/intel/issues/2994
  [i915#3002]: https://gitlab.freedesktop.org/drm/intel/issues/3002
  [i915#3116]: https://gitlab.freedesktop.org/drm/intel/issues/3116
  [i915#315]: https://gitlab.freedesktop.org/drm/intel/issues/315
  [i915#3297]: https://gitlab.freedesktop.org/drm/intel/issues/3297
  [i915#3299]: https://gitlab.freedesktop.org/drm/intel/issues/3299
  [i915#3301]: https://gitlab.freedesktop.org/drm/intel/issues/3301
  [i915#3318]: https://gitlab.freedesktop.org/drm/intel/issues/3318
  [i915#3359]: https://gitlab.freedesktop.org/drm/intel/issues/3359
  [i915#3467]: https://gitlab.freedesktop.org/drm/intel/issues/3467
  [i915#3469]: https://gitlab.freedesktop.org/drm/intel/issues/3469
  [i915#3536]: https://gitlab.freedesktop.org/drm/intel/issues/3536
  [i915#3555]: https://gitlab.freedesktop.org/drm/intel/issues/3555
  [i915#3637]: https://gitlab.freedesktop.org/drm/intel/issues/3637
  [i915#3689]: https://gitlab.freedesktop.org/drm/intel/issues/3689
  [i915#3742]: https://gitlab.freedesktop.org/drm/intel/issues/3742
  [i915#3804]: https://gitlab.freedesktop.org/drm/intel/issues/3804
  [i915#3826]: https://gitlab.freedesktop.org/drm/intel/issues/3826
  [i915#3840]: https://gitlab.freedesktop.org/drm/intel/issues/3840
  [i915#3886]: https://gitlab.freedesktop.org/drm/intel/issues/3886
  [i915#404]: https://gitlab.freedesktop.org/drm/intel/issues/404
  [i915#4103]: https://gitlab.freedesktop.org/drm/intel/issues/4103
  [i915#426]: https://gitlab.freedesktop.org/drm/intel/issues/426
  [i915#4270]: https://gitlab.freedesktop.org/drm/intel/issues/4270
  [i915#4312]: https://gitlab.freedesktop.org/drm/intel/issues/4312
  [i915#4392]: https://gitlab.freedesktop.org/drm/intel/issues/4392
  [i915#4525]: https://gitlab.freedesktop.org/drm/intel/issues/4525
  [i915#4613]: https://gitlab.freedesktop.org/drm/intel/issues/4613
  [i915#4877]: https://gitlab.freedesktop.org/drm/intel/issues/4877
  [i915#4991]: https://gitlab.freedesktop.org/drm/intel/issues/4991
  [i915#5072]: https://gitlab.freedesktop.org/drm/intel/issues/5072
  [i915#5176]: https://gitlab.freedesktop.org/drm/intel/issues/5176
  [i915#5235]: https://gitlab.freedesktop.org/drm/intel/issues/5235
  [i915#5286]: https://gitlab.freedesktop.org/drm/intel/issues/5286
  [i915#5288]: https://gitlab.freedesktop.org/drm/intel/issues/5288
  [i915#5289]: https://gitlab.freedesktop.org/drm/intel/issues/5289
  [i915#5325]: https://gitlab.freedesktop.org/drm/intel/issues/5325
  [i915#533]: https://gitlab.freedesktop.org/drm/intel/issues/533
  [i915#5439]: https://gitlab.freedesktop.org/drm/intel/issues/5439
  [i915#5461]: https://gitlab.freedesktop.org/drm/intel/issues/5461
  [i915#5591]: https://gitlab.freedesktop.org/drm/intel/issues/5591
  [i915#5723]: https://gitlab.freedesktop.org/drm/intel/issues/5723
  [i915#5939]: https://gitlab.freedesktop.org/drm/intel/issues/5939
  [i915#6095]: https://gitlab.freedesktop.org/drm/intel/issues/6095
  [i915#6117]: https://gitlab.freedesktop.org/drm/intel/issues/6117
  [i915#6227]: https://gitlab.freedesktop.org/drm/intel/issues/6227
  [i915#6230]: https://gitlab.freedesktop.org/drm/intel/issues/6230
  [i915#6245]: https://gitlab.freedesktop.org/drm/intel/issues/6245
  [i915#6301]: https://gitlab.freedesktop.org/drm/intel/issues/6301
  [i915#6334]: https://gitlab.freedesktop.org/drm/intel/issues/6334
  [i915#6344]: https://gitlab.freedesktop.org/drm/intel/issues/6344
  [i915#6412]: https://gitlab.freedesktop.org/drm/intel/issues/6412
  [i915#6433]: https://gitlab.freedesktop.org/drm/intel/issues/6433
  [i915#6497]: https://gitlab.freedesktop.org/drm/intel/issues/6497
  [i915#6524]: https://gitlab.freedesktop.org/drm/intel/issues/6524
  [i915#658]: https://gitlab.freedesktop.org/drm/intel/issues/658
  [i915#6590]: https://gitlab.freedesktop.org/drm/intel/issues/6590


Build changes
-------------

  * Linux: CI_DRM_12175 -> Patchwork_108972v1

  CI-20190529: 20190529
  CI_DRM_12175: a916f2c47c5965886be7a023eb78e67470237fe3 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_6662: dcb1d7a8822e62935f4fe3f2e6a04caaee669369 @ https://gitlab.freedesktop.org/drm/igt-gpu-tools.git
  Patchwork_108972v1: a916f2c47c5965886be7a023eb78e67470237fe3 @ git://anongit.freedesktop.org/gfx-ci/linux
  piglit_4509: fdc5a4ca11124ab8413c7988896eec4c97336694 @ git://anongit.freedesktop.org/piglit

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_108972v1/index.html

[-- Attachment #2: Type: text/html, Size: 40017 bytes --]

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [Intel-gfx] [PATCH] drm/i915/dgfx: Grab wakeref at i915_ttm_unmap_virtual
  2022-09-23 14:31 [Intel-gfx] [PATCH] drm/i915/dgfx: Grab wakeref at i915_ttm_unmap_virtual Anshuman Gupta
                   ` (3 preceding siblings ...)
  2022-09-24  7:32 ` [Intel-gfx] ✓ Fi.CI.IGT: " Patchwork
@ 2022-09-26 16:22 ` Matthew Auld
  2022-09-27 12:30   ` Gupta, Anshuman
  2022-10-21  9:55 ` [Intel-gfx] [PATCH v2 1/2] drm/i915: Encapsulate lmem rpm stuff in intel_runtime_pm Anshuman Gupta
  5 siblings, 1 reply; 15+ messages in thread
From: Matthew Auld @ 2022-09-26 16:22 UTC (permalink / raw)
  To: Anshuman Gupta, intel-gfx

On 23/09/2022 15:31, Anshuman Gupta wrote:
> We had already grabbed the rpm wakeref at the obj destruction path,
> but it is also required to grab the wakeref when the object moves.
> When i915_gem_object_release_mmap_offset() gets called by
> i915_ttm_move_notify(), it will release the mmap offset without
> grabbing the wakeref. We want to avoid that; therefore,
> grab the wakeref at i915_ttm_unmap_virtual() accordingly.
> 
> While doing that, also changed the lmem_userfault_lock from a
> mutex to a spinlock, as a spinlock is widely used to protect a list.
> 
> Also changed if (obj->userfault_count) to
> GEM_BUG_ON(!obj->userfault_count).
> 
> Fixes: ad74457a6b5a ("drm/i915/dgfx: Release mmap on rpm suspend")
> Suggested-by: Matthew Auld <matthew.auld@intel.com>
> Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
> ---
>   drivers/gpu/drm/i915/gem/i915_gem_mman.c | 19 +++++-------
>   drivers/gpu/drm/i915/gem/i915_gem_ttm.c  | 39 ++++++++++++++++--------
>   drivers/gpu/drm/i915/gt/intel_gt.c       | 11 ++++++-
>   drivers/gpu/drm/i915/gt/intel_gt_types.h |  2 +-
>   4 files changed, 45 insertions(+), 26 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> index 73d9eda1d6b7..9da561c19a47 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> @@ -557,11 +557,13 @@ void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object *
>   
>   	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
>   
> -	if (obj->userfault_count) {
> -		/* rpm wakeref provide exclusive access */
> -		list_del(&obj->userfault_link);
> -		obj->userfault_count = 0;
> -	}
> +	/*
> +	 * We have exclusive access here via runtime suspend. All other callers
> +	 * must first grab the rpm wakeref.
> +	 */
> +	GEM_BUG_ON(!obj->userfault_count);
> +	list_del(&obj->userfault_link);
> +	obj->userfault_count = 0;
>   }
>   
>   void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
> @@ -587,13 +589,6 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
>   		spin_lock(&obj->mmo.lock);
>   	}
>   	spin_unlock(&obj->mmo.lock);
> -
> -	if (obj->userfault_count) {
> -		mutex_lock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
> -		list_del(&obj->userfault_link);
> -		mutex_unlock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
> -		obj->userfault_count = 0;
> -	}
>   }
>   
>   static struct i915_mmap_offset *
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> index e3fc38dd5db0..0630eeca7316 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> @@ -509,18 +509,9 @@ static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
>   static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
>   {
>   	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
> -	intel_wakeref_t wakeref = 0;
>   
>   	if (bo->resource && likely(obj)) {
> -		/* ttm_bo_release() already has dma_resv_lock */
> -		if (i915_ttm_cpu_maps_iomem(bo->resource))
> -			wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
> -
>   		__i915_gem_object_pages_fini(obj);
> -
> -		if (wakeref)
> -			intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);
> -
>   		i915_ttm_free_cached_io_rsgt(obj);
>   	}
>   }
> @@ -1052,12 +1043,15 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
>   	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
>   		goto out_rpm;
>   
> -	/* ttm_bo_vm_reserve() already has dma_resv_lock */
> +	/*
> +	 * ttm_bo_vm_reserve() already has dma_resv_lock.
> +	 * userfault_count is protected by dma_resv lock and rpm wakeref.
> +	 */
>   	if (ret == VM_FAULT_NOPAGE && wakeref && !obj->userfault_count) {
>   		obj->userfault_count = 1;
> -		mutex_lock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
> +		spin_lock(to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
>   		list_add(&obj->userfault_link, &to_gt(to_i915(obj->base.dev))->lmem_userfault_list);
> -		mutex_unlock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
> +		spin_unlock(to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
>   	}
>   
>   	if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
> @@ -1123,7 +1117,28 @@ static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
>   
>   static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
>   {
> +	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
> +	intel_wakeref_t wakeref = 0;
> +
> +	assert_object_held_shared(obj);
> +
> +	if (i915_ttm_cpu_maps_iomem(bo->resource)) {
> +		wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
> +
> +		/* userfault_count is protected by obj lock and rpm wakeref. */
> +		if (obj->userfault_count) {
> +			spin_lock(to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
> +			list_del(&obj->userfault_link);
> +			spin_unlock(to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
> +			obj->userfault_count = 0;
> +		}
> +
> +	}
> +
>   	ttm_bo_unmap_virtual(i915_gem_to_ttm(obj));
> +
> +	if (wakeref)
> +		intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);
>   }
>   
>   static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
> diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
> index b367cfff48d5..1e63432d97bb 100644
> --- a/drivers/gpu/drm/i915/gt/intel_gt.c
> +++ b/drivers/gpu/drm/i915/gt/intel_gt.c
> @@ -41,7 +41,7 @@ void intel_gt_common_init_early(struct intel_gt *gt)
>   	spin_lock_init(gt->irq_lock);
>   
>   	INIT_LIST_HEAD(&gt->lmem_userfault_list);
> -	mutex_init(&gt->lmem_userfault_lock);
> +	spin_lock_init(gt->lmem_userfault_lock);
>   	INIT_LIST_HEAD(&gt->closed_vma);
>   	spin_lock_init(&gt->closed_lock);
>   
> @@ -71,6 +71,10 @@ int intel_root_gt_init_early(struct drm_i915_private *i915)
>   	if (!gt->irq_lock)
>   		return -ENOMEM;
>   
> +	gt->lmem_userfault_lock = drmm_kzalloc(&i915->drm, sizeof(*gt->lmem_userfault_lock), GFP_KERNEL);
> +	if (!gt->lmem_userfault_lock)
> +		return -ENOMEM;
> +
>   	intel_gt_common_init_early(gt);
>   
>   	return 0;
> @@ -813,6 +817,11 @@ static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
>   		gt->uncore = uncore;
>   		gt->irq_lock = irq_lock;
>   
> +		gt->lmem_userfault_lock = drmm_kzalloc(&gt->i915->drm,
> +						       sizeof(*gt->lmem_userfault_lock), GFP_KERNEL);
> +		if (!gt->lmem_userfault_lock)
> +			return -ENOMEM;
> +
>   		intel_gt_common_init_early(gt);
>   	}
>   
> diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
> index 30003d68fd51..925775310b1e 100644
> --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
> +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
> @@ -153,7 +153,7 @@ struct intel_gt {
>   	 *  but instead has exclusive access by virtue of all other accesses requiring
>   	 *  holding the runtime pm wakeref.
>   	 */
> -	struct mutex lmem_userfault_lock;
> +	spinlock_t *lmem_userfault_lock;
>   	struct list_head lmem_userfault_list;

It looks like there were some comments off list about this. It doesn't 
look like runtime pm is really per gt, so maybe just stick all this in 
i915? Or was there some other reason for putting this in gt?

>   
>   	struct list_head closed_vma;

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [Intel-gfx] [PATCH] drm/i915/dgfx: Grab wakeref at i915_ttm_unmap_virtual
  2022-09-26 16:22 ` [Intel-gfx] [PATCH] " Matthew Auld
@ 2022-09-27 12:30   ` Gupta, Anshuman
  2022-09-27 12:45     ` Matthew Auld
  0 siblings, 1 reply; 15+ messages in thread
From: Gupta, Anshuman @ 2022-09-27 12:30 UTC (permalink / raw)
  To: Auld, Matthew, intel-gfx



> -----Original Message-----
> From: Auld, Matthew <matthew.auld@intel.com>
> Sent: Monday, September 26, 2022 9:52 PM
> To: Gupta, Anshuman <anshuman.gupta@intel.com>; intel-
> gfx@lists.freedesktop.org
> Cc: joonas.lahtinen@linux.intel.com; tvrtko.ursulin@linux.intel.com
> Subject: Re: [PATCH] drm/i915/dgfx: Grab wakeref at i915_ttm_unmap_virtual
> 
> On 23/09/2022 15:31, Anshuman Gupta wrote:
> > We had already grabbed the rpm wakeref at obj destruction path, but it
> > also required to grab the wakeref when object moves.
> > When i915_gem_object_release_mmap_offset() gets called by
> > i915_ttm_move_notify(), it will release the mmap offset without
> > grabbing the wakeref. We want to avoid that therefore, grab the
> > wakreref at i915_ttm_unmap_virtual() accordingly.
> >
> > While doing that also changed the lmem_userfault_lock from mutex to
> > spinlock, as spinlock widely used for list.
> >
> > Also changed if (obj->userfault_count) to
> > GEM_BUG_ON(!obj->userfault_count).
> >
> > Fixes: ad74457a6b5a ("drm/i915/dgfx: Release mmap on rpm suspend")
> > Suggested-by: Matthew Auld <matthew.auld@intel.com>
> > Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
> > ---
> >   drivers/gpu/drm/i915/gem/i915_gem_mman.c | 19 +++++-------
> >   drivers/gpu/drm/i915/gem/i915_gem_ttm.c  | 39 ++++++++++++++++--------
> >   drivers/gpu/drm/i915/gt/intel_gt.c       | 11 ++++++-
> >   drivers/gpu/drm/i915/gt/intel_gt_types.h |  2 +-
> >   4 files changed, 45 insertions(+), 26 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> > b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> > index 73d9eda1d6b7..9da561c19a47 100644
> > --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> > +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> > @@ -557,11 +557,13 @@ void
> > i915_gem_object_runtime_pm_release_mmap_offset(struct
> > drm_i915_gem_object *
> >
> >   	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
> >
> > -	if (obj->userfault_count) {
> > -		/* rpm wakeref provide exclusive access */
> > -		list_del(&obj->userfault_link);
> > -		obj->userfault_count = 0;
> > -	}
> > +	/*
> > +	 * We have exclusive access here via runtime suspend. All other callers
> > +	 * must first grab the rpm wakeref.
> > +	 */
> > +	GEM_BUG_ON(!obj->userfault_count);
> > +	list_del(&obj->userfault_link);
> > +	obj->userfault_count = 0;
> >   }
> >
> >   void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object
> > *obj) @@ -587,13 +589,6 @@ void
> i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
> >   		spin_lock(&obj->mmo.lock);
> >   	}
> >   	spin_unlock(&obj->mmo.lock);
> > -
> > -	if (obj->userfault_count) {
> > -		mutex_lock(&to_gt(to_i915(obj->base.dev))-
> >lmem_userfault_lock);
> > -		list_del(&obj->userfault_link);
> > -		mutex_unlock(&to_gt(to_i915(obj->base.dev))-
> >lmem_userfault_lock);
> > -		obj->userfault_count = 0;
> > -	}
> >   }
> >
> >   static struct i915_mmap_offset *
> > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> > b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> > index e3fc38dd5db0..0630eeca7316 100644
> > --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> > +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> > @@ -509,18 +509,9 @@ static int i915_ttm_shrink(struct
> drm_i915_gem_object *obj, unsigned int flags)
> >   static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
> >   {
> >   	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
> > -	intel_wakeref_t wakeref = 0;
> >
> >   	if (bo->resource && likely(obj)) {
> > -		/* ttm_bo_release() already has dma_resv_lock */
> > -		if (i915_ttm_cpu_maps_iomem(bo->resource))
> > -			wakeref = intel_runtime_pm_get(&to_i915(obj-
> >base.dev)->runtime_pm);
> > -
> >   		__i915_gem_object_pages_fini(obj);
> > -
> > -		if (wakeref)
> > -			intel_runtime_pm_put(&to_i915(obj->base.dev)-
> >runtime_pm, wakeref);
> > -
> >   		i915_ttm_free_cached_io_rsgt(obj);
> >   	}
> >   }
> > @@ -1052,12 +1043,15 @@ static vm_fault_t vm_fault_ttm(struct vm_fault
> *vmf)
> >   	if (ret == VM_FAULT_RETRY && !(vmf->flags &
> FAULT_FLAG_RETRY_NOWAIT))
> >   		goto out_rpm;
> >
> > -	/* ttm_bo_vm_reserve() already has dma_resv_lock */
> > +	/*
> > +	 * ttm_bo_vm_reserve() already has dma_resv_lock.
> > +	 * userfault_count is protected by dma_resv lock and rpm wakeref.
> > +	 */
> >   	if (ret == VM_FAULT_NOPAGE && wakeref && !obj->userfault_count) {
> >   		obj->userfault_count = 1;
> > -		mutex_lock(&to_gt(to_i915(obj->base.dev))-
> >lmem_userfault_lock);
> > +		spin_lock(to_gt(to_i915(obj->base.dev))-
> >lmem_userfault_lock);
> >   		list_add(&obj->userfault_link, &to_gt(to_i915(obj->base.dev))-
> >lmem_userfault_list);
> > -		mutex_unlock(&to_gt(to_i915(obj->base.dev))-
> >lmem_userfault_lock);
> > +		spin_unlock(to_gt(to_i915(obj->base.dev))-
> >lmem_userfault_lock);
> >   	}
> >
> >   	if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
> > @@ -1123,7 +1117,28 @@ static u64 i915_ttm_mmap_offset(struct
> > drm_i915_gem_object *obj)
> >
> >   static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
> >   {
> > +	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
> > +	intel_wakeref_t wakeref = 0;
> > +
> > +	assert_object_held_shared(obj);
> > +
> > +	if (i915_ttm_cpu_maps_iomem(bo->resource)) {
> > +		wakeref =
> > +intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
> > +
> > +		/* userfault_count is protected by obj lock and rpm wakeref. */
> > +		if (obj->userfault_count) {
> > +			spin_lock(to_gt(to_i915(obj->base.dev))-
> >lmem_userfault_lock);
> > +			list_del(&obj->userfault_link);
> > +			spin_unlock(to_gt(to_i915(obj->base.dev))-
> >lmem_userfault_lock);
> > +			obj->userfault_count = 0;
> > +		}
> > +
> > +	}
> > +
> >   	ttm_bo_unmap_virtual(i915_gem_to_ttm(obj));
> > +
> > +	if (wakeref)
> > +		intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm,
> wakeref);
> >   }
> >
> >   static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
> > diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c
> > b/drivers/gpu/drm/i915/gt/intel_gt.c
> > index b367cfff48d5..1e63432d97bb 100644
> > --- a/drivers/gpu/drm/i915/gt/intel_gt.c
> > +++ b/drivers/gpu/drm/i915/gt/intel_gt.c
> > @@ -41,7 +41,7 @@ void intel_gt_common_init_early(struct intel_gt *gt)
> >   	spin_lock_init(gt->irq_lock);
> >
> >   	INIT_LIST_HEAD(&gt->lmem_userfault_list);
> > -	mutex_init(&gt->lmem_userfault_lock);
> > +	spin_lock_init(gt->lmem_userfault_lock);
> >   	INIT_LIST_HEAD(&gt->closed_vma);
> >   	spin_lock_init(&gt->closed_lock);
> >
> > @@ -71,6 +71,10 @@ int intel_root_gt_init_early(struct drm_i915_private
> *i915)
> >   	if (!gt->irq_lock)
> >   		return -ENOMEM;
> >
> > +	gt->lmem_userfault_lock = drmm_kzalloc(&i915->drm, sizeof(*gt-
> >lmem_userfault_lock), GFP_KERNEL);
> > +	if (!gt->lmem_userfault_lock)
> > +		return -ENOMEM;
> > +
> >   	intel_gt_common_init_early(gt);
> >
> >   	return 0;
> > @@ -813,6 +817,11 @@ static int intel_gt_tile_setup(struct intel_gt *gt,
> phys_addr_t phys_addr)
> >   		gt->uncore = uncore;
> >   		gt->irq_lock = irq_lock;
> >
> > +		gt->lmem_userfault_lock = drmm_kzalloc(&gt->i915->drm,
> > +						       sizeof(*gt-
> >lmem_userfault_lock), GFP_KERNEL);
> > +		if (!gt->lmem_userfault_lock)
> > +			return -ENOMEM;
> > +
> >   		intel_gt_common_init_early(gt);
> >   	}
> >
> > diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h
> > b/drivers/gpu/drm/i915/gt/intel_gt_types.h
> > index 30003d68fd51..925775310b1e 100644
> > --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
> > +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
> > @@ -153,7 +153,7 @@ struct intel_gt {
> >   	 *  but instead has exclusive access by virtue of all other accesses
> requiring
> >   	 *  holding the runtime pm wakeref.
> >   	 */
> > -	struct mutex lmem_userfault_lock;
> > +	spinlock_t *lmem_userfault_lock;
> >   	struct list_head lmem_userfault_list;
> 
> It looks like there were some comments off list about this. It doesn't look like
> runtime pm is really per gt, so maybe just stick all this in i915? Or was there
> some other reason for putting this in gt?
Thanks for the review.
Yes, runtime pm is not per GT; I had kept it inside gt to align with the GTT mmap releasing implementation,
as there it was encapsulated inside gt->ggtt.
Also, userfault_wakeref is encapsulated within GT.
Shall I move all of userfault_wakeref, lmem_userfault_lock and lmem_userfault_list to i915 ?

Thanks ,
Anshuman Gupta.


 

> 
> >
> >   	struct list_head closed_vma;

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [Intel-gfx] [PATCH] drm/i915/dgfx: Grab wakeref at i915_ttm_unmap_virtual
  2022-09-27 12:30   ` Gupta, Anshuman
@ 2022-09-27 12:45     ` Matthew Auld
  2022-09-28 13:32       ` Gupta, Anshuman
  0 siblings, 1 reply; 15+ messages in thread
From: Matthew Auld @ 2022-09-27 12:45 UTC (permalink / raw)
  To: Gupta, Anshuman, intel-gfx

On 27/09/2022 13:30, Gupta, Anshuman wrote:
> 
> 
>> -----Original Message-----
>> From: Auld, Matthew <matthew.auld@intel.com>
>> Sent: Monday, September 26, 2022 9:52 PM
>> To: Gupta, Anshuman <anshuman.gupta@intel.com>; intel-
>> gfx@lists.freedesktop.org
>> Cc: joonas.lahtinen@linux.intel.com; tvrtko.ursulin@linux.intel.com
>> Subject: Re: [PATCH] drm/i915/dgfx: Grab wakeref at i915_ttm_unmap_virtual
>>
>> On 23/09/2022 15:31, Anshuman Gupta wrote:
>>> We had already grabbed the rpm wakeref at obj destruction path, but it
>>> also required to grab the wakeref when object moves.
>>> When i915_gem_object_release_mmap_offset() gets called by
>>> i915_ttm_move_notify(), it will release the mmap offset without
>>> grabbing the wakeref. We want to avoid that therefore, grab the
>>> wakreref at i915_ttm_unmap_virtual() accordingly.
>>>
>>> While doing that also changed the lmem_userfault_lock from mutex to
>>> spinlock, as spinlock widely used for list.
>>>
>>> Also changed if (obj->userfault_count) to
>>> GEM_BUG_ON(!obj->userfault_count).
>>>
>>> Fixes: ad74457a6b5a ("drm/i915/dgfx: Release mmap on rpm suspend")
>>> Suggested-by: Matthew Auld <matthew.auld@intel.com>
>>> Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
>>> ---
>>>    drivers/gpu/drm/i915/gem/i915_gem_mman.c | 19 +++++-------
>>>    drivers/gpu/drm/i915/gem/i915_gem_ttm.c  | 39 ++++++++++++++++--------
>>>    drivers/gpu/drm/i915/gt/intel_gt.c       | 11 ++++++-
>>>    drivers/gpu/drm/i915/gt/intel_gt_types.h |  2 +-
>>>    4 files changed, 45 insertions(+), 26 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
>>> b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
>>> index 73d9eda1d6b7..9da561c19a47 100644
>>> --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
>>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
>>> @@ -557,11 +557,13 @@ void
>>> i915_gem_object_runtime_pm_release_mmap_offset(struct
>>> drm_i915_gem_object *
>>>
>>>    	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
>>>
>>> -	if (obj->userfault_count) {
>>> -		/* rpm wakeref provide exclusive access */
>>> -		list_del(&obj->userfault_link);
>>> -		obj->userfault_count = 0;
>>> -	}
>>> +	/*
>>> +	 * We have exclusive access here via runtime suspend. All other callers
>>> +	 * must first grab the rpm wakeref.
>>> +	 */
>>> +	GEM_BUG_ON(!obj->userfault_count);
>>> +	list_del(&obj->userfault_link);
>>> +	obj->userfault_count = 0;
>>>    }
>>>
>>>    void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object
>>> *obj) @@ -587,13 +589,6 @@ void
>> i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
>>>    		spin_lock(&obj->mmo.lock);
>>>    	}
>>>    	spin_unlock(&obj->mmo.lock);
>>> -
>>> -	if (obj->userfault_count) {
>>> -		mutex_lock(&to_gt(to_i915(obj->base.dev))-
>>> lmem_userfault_lock);
>>> -		list_del(&obj->userfault_link);
>>> -		mutex_unlock(&to_gt(to_i915(obj->base.dev))-
>>> lmem_userfault_lock);
>>> -		obj->userfault_count = 0;
>>> -	}
>>>    }
>>>
>>>    static struct i915_mmap_offset *
>>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
>>> b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
>>> index e3fc38dd5db0..0630eeca7316 100644
>>> --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
>>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
>>> @@ -509,18 +509,9 @@ static int i915_ttm_shrink(struct
>> drm_i915_gem_object *obj, unsigned int flags)
>>>    static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
>>>    {
>>>    	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
>>> -	intel_wakeref_t wakeref = 0;
>>>
>>>    	if (bo->resource && likely(obj)) {
>>> -		/* ttm_bo_release() already has dma_resv_lock */
>>> -		if (i915_ttm_cpu_maps_iomem(bo->resource))
>>> -			wakeref = intel_runtime_pm_get(&to_i915(obj-
>>> base.dev)->runtime_pm);
>>> -
>>>    		__i915_gem_object_pages_fini(obj);
>>> -
>>> -		if (wakeref)
>>> -			intel_runtime_pm_put(&to_i915(obj->base.dev)-
>>> runtime_pm, wakeref);
>>> -
>>>    		i915_ttm_free_cached_io_rsgt(obj);
>>>    	}
>>>    }
>>> @@ -1052,12 +1043,15 @@ static vm_fault_t vm_fault_ttm(struct vm_fault
>> *vmf)
>>>    	if (ret == VM_FAULT_RETRY && !(vmf->flags &
>> FAULT_FLAG_RETRY_NOWAIT))
>>>    		goto out_rpm;
>>>
>>> -	/* ttm_bo_vm_reserve() already has dma_resv_lock */
>>> +	/*
>>> +	 * ttm_bo_vm_reserve() already has dma_resv_lock.
>>> +	 * userfault_count is protected by dma_resv lock and rpm wakeref.
>>> +	 */
>>>    	if (ret == VM_FAULT_NOPAGE && wakeref && !obj->userfault_count) {
>>>    		obj->userfault_count = 1;
>>> -		mutex_lock(&to_gt(to_i915(obj->base.dev))-
>>> lmem_userfault_lock);
>>> +		spin_lock(to_gt(to_i915(obj->base.dev))-
>>> lmem_userfault_lock);
>>>    		list_add(&obj->userfault_link, &to_gt(to_i915(obj->base.dev))-
>>> lmem_userfault_list);
>>> -		mutex_unlock(&to_gt(to_i915(obj->base.dev))-
>>> lmem_userfault_lock);
>>> +		spin_unlock(to_gt(to_i915(obj->base.dev))-
>>> lmem_userfault_lock);
>>>    	}
>>>
>>>    	if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
>>> @@ -1123,7 +1117,28 @@ static u64 i915_ttm_mmap_offset(struct
>>> drm_i915_gem_object *obj)
>>>
>>>    static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
>>>    {
>>> +	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
>>> +	intel_wakeref_t wakeref = 0;
>>> +
>>> +	assert_object_held_shared(obj);
>>> +
>>> +	if (i915_ttm_cpu_maps_iomem(bo->resource)) {
>>> +		wakeref =
>>> +intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
>>> +
>>> +		/* userfault_count is protected by obj lock and rpm wakeref. */
>>> +		if (obj->userfault_count) {
>>> +			spin_lock(to_gt(to_i915(obj->base.dev))-
>>> lmem_userfault_lock);
>>> +			list_del(&obj->userfault_link);
>>> +			spin_unlock(to_gt(to_i915(obj->base.dev))-
>>> lmem_userfault_lock);
>>> +			obj->userfault_count = 0;
>>> +		}
>>> +
>>> +	}
>>> +
>>>    	ttm_bo_unmap_virtual(i915_gem_to_ttm(obj));
>>> +
>>> +	if (wakeref)
>>> +		intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm,
>> wakeref);
>>>    }
>>>
>>>    static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
>>> diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c
>>> b/drivers/gpu/drm/i915/gt/intel_gt.c
>>> index b367cfff48d5..1e63432d97bb 100644
>>> --- a/drivers/gpu/drm/i915/gt/intel_gt.c
>>> +++ b/drivers/gpu/drm/i915/gt/intel_gt.c
>>> @@ -41,7 +41,7 @@ void intel_gt_common_init_early(struct intel_gt *gt)
>>>    	spin_lock_init(gt->irq_lock);
>>>
>>>    	INIT_LIST_HEAD(&gt->lmem_userfault_list);
>>> -	mutex_init(&gt->lmem_userfault_lock);
>>> +	spin_lock_init(gt->lmem_userfault_lock);
>>>    	INIT_LIST_HEAD(&gt->closed_vma);
>>>    	spin_lock_init(&gt->closed_lock);
>>>
>>> @@ -71,6 +71,10 @@ int intel_root_gt_init_early(struct drm_i915_private
>> *i915)
>>>    	if (!gt->irq_lock)
>>>    		return -ENOMEM;
>>>
>>> +	gt->lmem_userfault_lock = drmm_kzalloc(&i915->drm, sizeof(*gt-
>>> lmem_userfault_lock), GFP_KERNEL);
>>> +	if (!gt->lmem_userfault_lock)
>>> +		return -ENOMEM;
>>> +
>>>    	intel_gt_common_init_early(gt);
>>>
>>>    	return 0;
>>> @@ -813,6 +817,11 @@ static int intel_gt_tile_setup(struct intel_gt *gt,
>> phys_addr_t phys_addr)
>>>    		gt->uncore = uncore;
>>>    		gt->irq_lock = irq_lock;
>>>
>>> +		gt->lmem_userfault_lock = drmm_kzalloc(&gt->i915->drm,
>>> +						       sizeof(*gt-
>>> lmem_userfault_lock), GFP_KERNEL);
>>> +		if (!gt->lmem_userfault_lock)
>>> +			return -ENOMEM;
>>> +
>>>    		intel_gt_common_init_early(gt);
>>>    	}
>>>
>>> diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h
>>> b/drivers/gpu/drm/i915/gt/intel_gt_types.h
>>> index 30003d68fd51..925775310b1e 100644
>>> --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
>>> +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
>>> @@ -153,7 +153,7 @@ struct intel_gt {
>>>    	 *  but instead has exclusive access by virtue of all other accesses
>> requiring
>>>    	 *  holding the runtime pm wakeref.
>>>    	 */
>>> -	struct mutex lmem_userfault_lock;
>>> +	spinlock_t *lmem_userfault_lock;
>>>    	struct list_head lmem_userfault_list;
>>
>> It looks like there were some comments off list about this. It doesn't look like
>> runtime pm is really per gt, so maybe just stick all this in i915? Or was there
>> some other reason for putting this in gt?
> Thanks for review,
> Yes runtime pm is not per GT , i had kept inside gt to align with GTT mmap releasing implementation,
> As there it was encapsulated inside git->ggtt.
> Also userfault_wakeref is also encapsulated with in GT.
> Shall I move all userfault_wakeref, lmem_userfault_lock and lmem_userfault_list to i915 ?

I think so. Or, if this is actually desired, then we need to poke at the 
object's lmem->gt in places like i915_ttm_unmap_virtual(), instead of 
looking at the root gt.

> 
> Thanks ,
> Anshuman Gupta.
> 
> 
>   
> 
>>
>>>
>>>    	struct list_head closed_vma;

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [Intel-gfx] [PATCH] drm/i915/dgfx: Grab wakeref at i915_ttm_unmap_virtual
  2022-09-27 12:45     ` Matthew Auld
@ 2022-09-28 13:32       ` Gupta, Anshuman
  0 siblings, 0 replies; 15+ messages in thread
From: Gupta, Anshuman @ 2022-09-28 13:32 UTC (permalink / raw)
  To: Auld, Matthew, intel-gfx, Nikula,  Jani, tvrtko.ursulin



> -----Original Message-----
> From: Auld, Matthew <matthew.auld@intel.com>
> Sent: Tuesday, September 27, 2022 6:15 PM
> To: Gupta, Anshuman <anshuman.gupta@intel.com>; intel-
> gfx@lists.freedesktop.org
> Cc: joonas.lahtinen@linux.intel.com; tvrtko.ursulin@linux.intel.com
> Subject: Re: [PATCH] drm/i915/dgfx: Grab wakeref at i915_ttm_unmap_virtual
> 
> On 27/09/2022 13:30, Gupta, Anshuman wrote:
> >
> >
> >> -----Original Message-----
> >> From: Auld, Matthew <matthew.auld@intel.com>
> >> Sent: Monday, September 26, 2022 9:52 PM
> >> To: Gupta, Anshuman <anshuman.gupta@intel.com>; intel-
> >> gfx@lists.freedesktop.org
> >> Cc: joonas.lahtinen@linux.intel.com; tvrtko.ursulin@linux.intel.com
> >> Subject: Re: [PATCH] drm/i915/dgfx: Grab wakeref at
> >> i915_ttm_unmap_virtual
> >>
> >> On 23/09/2022 15:31, Anshuman Gupta wrote:
> >>> We had already grabbed the rpm wakeref at obj destruction path, but
> >>> it also required to grab the wakeref when object moves.
> >>> When i915_gem_object_release_mmap_offset() gets called by
> >>> i915_ttm_move_notify(), it will release the mmap offset without
> >>> grabbing the wakeref. We want to avoid that therefore, grab the
> >>> wakreref at i915_ttm_unmap_virtual() accordingly.
> >>>
> >>> While doing that also changed the lmem_userfault_lock from mutex to
> >>> spinlock, as spinlock widely used for list.
> >>>
> >>> Also changed if (obj->userfault_count) to
> >>> GEM_BUG_ON(!obj->userfault_count).
> >>>
> >>> Fixes: ad74457a6b5a ("drm/i915/dgfx: Release mmap on rpm suspend")
> >>> Suggested-by: Matthew Auld <matthew.auld@intel.com>
> >>> Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
> >>> ---
> >>>    drivers/gpu/drm/i915/gem/i915_gem_mman.c | 19 +++++-------
> >>>    drivers/gpu/drm/i915/gem/i915_gem_ttm.c  | 39 ++++++++++++++++------
> --
> >>>    drivers/gpu/drm/i915/gt/intel_gt.c       | 11 ++++++-
> >>>    drivers/gpu/drm/i915/gt/intel_gt_types.h |  2 +-
> >>>    4 files changed, 45 insertions(+), 26 deletions(-)
> >>>
> >>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> >>> b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> >>> index 73d9eda1d6b7..9da561c19a47 100644
> >>> --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> >>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> >>> @@ -557,11 +557,13 @@ void
> >>> i915_gem_object_runtime_pm_release_mmap_offset(struct
> >>> drm_i915_gem_object *
> >>>
> >>>    	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
> >>>
> >>> -	if (obj->userfault_count) {
> >>> -		/* rpm wakeref provide exclusive access */
> >>> -		list_del(&obj->userfault_link);
> >>> -		obj->userfault_count = 0;
> >>> -	}
> >>> +	/*
> >>> +	 * We have exclusive access here via runtime suspend. All other callers
> >>> +	 * must first grab the rpm wakeref.
> >>> +	 */
> >>> +	GEM_BUG_ON(!obj->userfault_count);
> >>> +	list_del(&obj->userfault_link);
> >>> +	obj->userfault_count = 0;
> >>>    }
> >>>
> >>>    void i915_gem_object_release_mmap_offset(struct
> >>> drm_i915_gem_object
> >>> *obj) @@ -587,13 +589,6 @@ void
> >> i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
> >>>    		spin_lock(&obj->mmo.lock);
> >>>    	}
> >>>    	spin_unlock(&obj->mmo.lock);
> >>> -
> >>> -	if (obj->userfault_count) {
> >>> -		mutex_lock(&to_gt(to_i915(obj->base.dev))-
> >>> lmem_userfault_lock);
> >>> -		list_del(&obj->userfault_link);
> >>> -		mutex_unlock(&to_gt(to_i915(obj->base.dev))-
> >>> lmem_userfault_lock);
> >>> -		obj->userfault_count = 0;
> >>> -	}
> >>>    }
> >>>
> >>>    static struct i915_mmap_offset *
> >>> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> >>> b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> >>> index e3fc38dd5db0..0630eeca7316 100644
> >>> --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> >>> +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> >>> @@ -509,18 +509,9 @@ static int i915_ttm_shrink(struct
> >> drm_i915_gem_object *obj, unsigned int flags)
> >>>    static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
> >>>    {
> >>>    	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
> >>> -	intel_wakeref_t wakeref = 0;
> >>>
> >>>    	if (bo->resource && likely(obj)) {
> >>> -		/* ttm_bo_release() already has dma_resv_lock */
> >>> -		if (i915_ttm_cpu_maps_iomem(bo->resource))
> >>> -			wakeref = intel_runtime_pm_get(&to_i915(obj-
> >>> base.dev)->runtime_pm);
> >>> -
> >>>    		__i915_gem_object_pages_fini(obj);
> >>> -
> >>> -		if (wakeref)
> >>> -			intel_runtime_pm_put(&to_i915(obj->base.dev)-
> >>> runtime_pm, wakeref);
> >>> -
> >>>    		i915_ttm_free_cached_io_rsgt(obj);
> >>>    	}
> >>>    }
> >>> @@ -1052,12 +1043,15 @@ static vm_fault_t vm_fault_ttm(struct
> >>> vm_fault
> >> *vmf)
> >>>    	if (ret == VM_FAULT_RETRY && !(vmf->flags &
> >> FAULT_FLAG_RETRY_NOWAIT))
> >>>    		goto out_rpm;
> >>>
> >>> -	/* ttm_bo_vm_reserve() already has dma_resv_lock */
> >>> +	/*
> >>> +	 * ttm_bo_vm_reserve() already has dma_resv_lock.
> >>> +	 * userfault_count is protected by dma_resv lock and rpm wakeref.
> >>> +	 */
> >>>    	if (ret == VM_FAULT_NOPAGE && wakeref && !obj->userfault_count) {
> >>>    		obj->userfault_count = 1;
> >>> -		mutex_lock(&to_gt(to_i915(obj->base.dev))-
> >>> lmem_userfault_lock);
> >>> +		spin_lock(to_gt(to_i915(obj->base.dev))-
> >>> lmem_userfault_lock);
> >>>    		list_add(&obj->userfault_link, &to_gt(to_i915(obj->base.dev))-
> >>> lmem_userfault_list);
> >>> -		mutex_unlock(&to_gt(to_i915(obj->base.dev))-
> >>> lmem_userfault_lock);
> >>> +		spin_unlock(to_gt(to_i915(obj->base.dev))-
> >>> lmem_userfault_lock);
> >>>    	}
> >>>
> >>>    	if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
> >>> @@ -1123,7 +1117,28 @@ static u64 i915_ttm_mmap_offset(struct
> >>> drm_i915_gem_object *obj)
> >>>
> >>>    static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
> >>>    {
> >>> +	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
> >>> +	intel_wakeref_t wakeref = 0;
> >>> +
> >>> +	assert_object_held_shared(obj);
> >>> +
> >>> +	if (i915_ttm_cpu_maps_iomem(bo->resource)) {
> >>> +		wakeref =
> >>> +intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
> >>> +
> >>> +		/* userfault_count is protected by obj lock and rpm wakeref. */
> >>> +		if (obj->userfault_count) {
> >>> +			spin_lock(to_gt(to_i915(obj->base.dev))-
> >>> lmem_userfault_lock);
> >>> +			list_del(&obj->userfault_link);
> >>> +			spin_unlock(to_gt(to_i915(obj->base.dev))-
> >>> lmem_userfault_lock);
> >>> +			obj->userfault_count = 0;
> >>> +		}
> >>> +
> >>> +	}
> >>> +
> >>>    	ttm_bo_unmap_virtual(i915_gem_to_ttm(obj));
> >>> +
> >>> +	if (wakeref)
> >>> +		intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm,
> >> wakeref);
> >>>    }
> >>>
> >>>    static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops
> >>> = { diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c
> >>> b/drivers/gpu/drm/i915/gt/intel_gt.c
> >>> index b367cfff48d5..1e63432d97bb 100644
> >>> --- a/drivers/gpu/drm/i915/gt/intel_gt.c
> >>> +++ b/drivers/gpu/drm/i915/gt/intel_gt.c
> >>> @@ -41,7 +41,7 @@ void intel_gt_common_init_early(struct intel_gt *gt)
> >>>    	spin_lock_init(gt->irq_lock);
> >>>
> >>>    	INIT_LIST_HEAD(&gt->lmem_userfault_list);
> >>> -	mutex_init(&gt->lmem_userfault_lock);
> >>> +	spin_lock_init(gt->lmem_userfault_lock);
> >>>    	INIT_LIST_HEAD(&gt->closed_vma);
> >>>    	spin_lock_init(&gt->closed_lock);
> >>>
> >>> @@ -71,6 +71,10 @@ int intel_root_gt_init_early(struct
> >>> drm_i915_private
> >> *i915)
> >>>    	if (!gt->irq_lock)
> >>>    		return -ENOMEM;
> >>>
> >>> +	gt->lmem_userfault_lock = drmm_kzalloc(&i915->drm, sizeof(*gt-
> >>> lmem_userfault_lock), GFP_KERNEL);
> >>> +	if (!gt->lmem_userfault_lock)
> >>> +		return -ENOMEM;
> >>> +
> >>>    	intel_gt_common_init_early(gt);
> >>>
> >>>    	return 0;
> >>> @@ -813,6 +817,11 @@ static int intel_gt_tile_setup(struct intel_gt
> >>> *gt,
> >> phys_addr_t phys_addr)
> >>>    		gt->uncore = uncore;
> >>>    		gt->irq_lock = irq_lock;
> >>>
> >>> +		gt->lmem_userfault_lock = drmm_kzalloc(&gt->i915->drm,
> >>> +						       sizeof(*gt-
> >>> lmem_userfault_lock), GFP_KERNEL);
> >>> +		if (!gt->lmem_userfault_lock)
> >>> +			return -ENOMEM;
> >>> +
> >>>    		intel_gt_common_init_early(gt);
> >>>    	}
> >>>
> >>> diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h
> >>> b/drivers/gpu/drm/i915/gt/intel_gt_types.h
> >>> index 30003d68fd51..925775310b1e 100644
> >>> --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
> >>> +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
> >>> @@ -153,7 +153,7 @@ struct intel_gt {
> >>>    	 *  but instead has exclusive access by virtue of all other
> >>> accesses
> >> requiring
> >>>    	 *  holding the runtime pm wakeref.
> >>>    	 */
> >>> -	struct mutex lmem_userfault_lock;
> >>> +	spinlock_t *lmem_userfault_lock;
Hi Jani & Tvrtko,
Could you please provide your opinion on whether to keep these lmem_userfault_lock, lmem_userfault_list
fields in drm_i915_private or intel_gt? They are required to handle runtime PM with lmem access.
What would be the best place to keep these fields ?
Thanks,
Anshuman Gupta.
> >>>    	struct list_head lmem_userfault_list;
> >>
> >> It looks like there were some comments off list about this. It
> >> doesn't look like runtime pm is really per gt, so maybe just stick
> >> all this in i915? Or was there some other reason for putting this in gt?
> > Thanks for review,
> > Yes runtime pm is not per GT , i had kept inside gt to align with GTT
> > mmap releasing implementation, As there it was encapsulated inside git->ggtt.
> > Also userfault_wakeref is also encapsulated with in GT.
> > Shall I move all userfault_wakeref, lmem_userfault_lock and
> lmem_userfault_list to i915 ?
> 
> I think so. Or if this actually desired then we need to poke at the object lmem-
> >gt in places like i915_ttm_unmap_virtual(), instead of looking at the root gt.
> 
> >
> > Thanks ,
> > Anshuman Gupta.
> >
> >
> >
> >
> >>
> >>>
> >>>    	struct list_head closed_vma;

^ permalink raw reply	[flat|nested] 15+ messages in thread

* [Intel-gfx] [PATCH v2 1/2] drm/i915: Encapsulate lmem rpm stuff in intel_runtime_pm
  2022-09-23 14:31 [Intel-gfx] [PATCH] drm/i915/dgfx: Grab wakeref at i915_ttm_unmap_virtual Anshuman Gupta
                   ` (4 preceding siblings ...)
  2022-09-26 16:22 ` [Intel-gfx] [PATCH] " Matthew Auld
@ 2022-10-21  9:55 ` Anshuman Gupta
  2022-10-21  9:55   ` [Intel-gfx] [PATCH v2 2/2] drm/i915/dgfx: Grab wakeref at i915_ttm_unmap_virtual Anshuman Gupta
  2022-10-24 14:00   ` [Intel-gfx] [PATCH v2 1/2] drm/i915: Encapsulate lmem rpm stuff in intel_runtime_pm Matthew Auld
  5 siblings, 2 replies; 15+ messages in thread
From: Anshuman Gupta @ 2022-10-21  9:55 UTC (permalink / raw)
  To: intel-gfx; +Cc: matthew.auld, rodrigo.vivi

Runtime pm is not really per GT, therefore it makes sense to
move lmem_userfault_list, lmem_userfault_lock and
userfault_wakeref from intel_gt to the intel_runtime_pm structure,
which is embedded in i915.

No functional change.

Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_mman.c |  6 +++---
 drivers/gpu/drm/i915/gem/i915_gem_pm.c   |  2 +-
 drivers/gpu/drm/i915/gem/i915_gem_ttm.c  |  8 ++++----
 drivers/gpu/drm/i915/gt/intel_gt.c       |  3 ---
 drivers/gpu/drm/i915/gt/intel_gt_types.h | 17 -----------------
 drivers/gpu/drm/i915/i915_gem.c          |  4 +---
 drivers/gpu/drm/i915/intel_runtime_pm.c  |  5 +++++
 drivers/gpu/drm/i915/intel_runtime_pm.h  | 22 ++++++++++++++++++++++
 8 files changed, 36 insertions(+), 31 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 73d9eda1d6b7..fd29a9053582 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -413,7 +413,7 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
 	vma->mmo = mmo;
 
 	if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
-		intel_wakeref_auto(&to_gt(i915)->userfault_wakeref,
+		intel_wakeref_auto(&i915->runtime_pm.userfault_wakeref,
 				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
 
 	if (write) {
@@ -589,9 +589,9 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
 	spin_unlock(&obj->mmo.lock);
 
 	if (obj->userfault_count) {
-		mutex_lock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
+		mutex_lock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
 		list_del(&obj->userfault_link);
-		mutex_unlock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
+		mutex_unlock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
 		obj->userfault_count = 0;
 	}
 }
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index e5bfb6be9f7a..0d812f4d787d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -27,7 +27,7 @@ void i915_gem_suspend(struct drm_i915_private *i915)
 
 	GEM_TRACE("%s\n", dev_name(i915->drm.dev));
 
-	intel_wakeref_auto(&to_gt(i915)->userfault_wakeref, 0);
+	intel_wakeref_auto(&i915->runtime_pm.userfault_wakeref, 0);
 	flush_workqueue(i915->wq);
 
 	/*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 84c91a4228a1..50c70796ca38 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -1101,13 +1101,13 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
 	/* ttm_bo_vm_reserve() already has dma_resv_lock */
 	if (ret == VM_FAULT_NOPAGE && wakeref && !obj->userfault_count) {
 		obj->userfault_count = 1;
-		mutex_lock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
-		list_add(&obj->userfault_link, &to_gt(to_i915(obj->base.dev))->lmem_userfault_list);
-		mutex_unlock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
+		mutex_lock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
+		list_add(&obj->userfault_link, &to_i915(obj->base.dev)->runtime_pm.lmem_userfault_list);
+		mutex_unlock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
 	}
 
 	if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
-		intel_wakeref_auto(&to_gt(to_i915(obj->base.dev))->userfault_wakeref,
+		intel_wakeref_auto(&to_i915(obj->base.dev)->runtime_pm.userfault_wakeref,
 				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
 
 	i915_ttm_adjust_lru(obj);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 27dbb9e4bd6c..0ad8118ccbee 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -40,8 +40,6 @@ void intel_gt_common_init_early(struct intel_gt *gt)
 {
 	spin_lock_init(gt->irq_lock);
 
-	INIT_LIST_HEAD(&gt->lmem_userfault_list);
-	mutex_init(&gt->lmem_userfault_lock);
 	INIT_LIST_HEAD(&gt->closed_vma);
 	spin_lock_init(&gt->closed_lock);
 
@@ -859,7 +857,6 @@ static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
 	}
 
 	intel_uncore_init_early(gt->uncore, gt);
-	intel_wakeref_auto_init(&gt->userfault_wakeref, gt->uncore->rpm);
 
 	ret = intel_uncore_setup_mmio(gt->uncore, phys_addr);
 	if (ret)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index 64aa2ba624fc..ff9251807f5c 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -144,20 +144,6 @@ struct intel_gt {
 	struct intel_wakeref wakeref;
 	atomic_t user_wakeref;
 
-	/**
-	 *  Protects access to lmem usefault list.
-	 *  It is required, if we are outside of the runtime suspend path,
-	 *  access to @lmem_userfault_list requires always first grabbing the
-	 *  runtime pm, to ensure we can't race against runtime suspend.
-	 *  Once we have that we also need to grab @lmem_userfault_lock,
-	 *  at which point we have exclusive access.
-	 *  The runtime suspend path is special since it doesn't really hold any locks,
-	 *  but instead has exclusive access by virtue of all other accesses requiring
-	 *  holding the runtime pm wakeref.
-	 */
-	struct mutex lmem_userfault_lock;
-	struct list_head lmem_userfault_list;
-
 	struct list_head closed_vma;
 	spinlock_t closed_lock; /* guards the list of closed_vma */
 
@@ -173,9 +159,6 @@ struct intel_gt {
 	 */
 	intel_wakeref_t awake;
 
-	/* Manual runtime pm autosuspend delay for user GGTT/lmem mmaps */
-	struct intel_wakeref_auto userfault_wakeref;
-
 	u32 clock_frequency;
 	u32 clock_period_ns;
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 55d605c0c55d..299f94a9fb87 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -843,7 +843,7 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
 		__i915_gem_object_release_mmap_gtt(obj);
 
 	list_for_each_entry_safe(obj, on,
-				 &to_gt(i915)->lmem_userfault_list, userfault_link)
+				 &i915->runtime_pm.lmem_userfault_list, userfault_link)
 		i915_gem_object_runtime_pm_release_mmap_offset(obj);
 
 	/*
@@ -1227,8 +1227,6 @@ void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
 	struct intel_gt *gt;
 	unsigned int i;
 
-	intel_wakeref_auto_fini(&to_gt(dev_priv)->userfault_wakeref);
-
 	i915_gem_suspend_late(dev_priv);
 	for_each_gt(gt, dev_priv, i)
 		intel_gt_driver_remove(gt);
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 744cca507946..bb74d4975cc8 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -633,6 +633,8 @@ void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm)
 						     runtime_pm);
 	int count = atomic_read(&rpm->wakeref_count);
 
+	intel_wakeref_auto_fini(&rpm->userfault_wakeref);
+
 	drm_WARN(&i915->drm, count,
 		 "i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
 		 intel_rpm_raw_wakeref_count(count),
@@ -652,4 +654,7 @@ void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm)
 	rpm->available = HAS_RUNTIME_PM(i915);
 
 	init_intel_runtime_pm_wakeref(rpm);
+	INIT_LIST_HEAD(&rpm->lmem_userfault_list);
+	mutex_init(&rpm->lmem_userfault_lock);
+	intel_wakeref_auto_init(&rpm->userfault_wakeref, rpm);
 }
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h
index d9160e3ff4af..d0c04af2a6f3 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.h
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.h
@@ -53,6 +53,28 @@ struct intel_runtime_pm {
 	bool irqs_enabled;
 	bool no_wakeref_tracking;
 
+	/**
+	 *  Protects access to lmem usefault list.
+	 *  It is required, if we are outside of the runtime suspend path,
+	 *  access to @lmem_userfault_list requires always first grabbing the
+	 *  runtime pm, to ensure we can't race against runtime suspend.
+	 *  Once we have that we also need to grab @lmem_userfault_lock,
+	 *  at which point we have exclusive access.
+	 *  The runtime suspend path is special since it doesn't really hold any locks,
+	 *  but instead has exclusive access by virtue of all other accesses requiring
+	 *  holding the runtime pm wakeref.
+	 */
+	struct mutex lmem_userfault_lock;
+
+	/*
+	 *  Keep list of userfaulted gem obj, which require to release their
+	 *  mmap mappings at runtime suspend path.
+	 */
+	struct list_head lmem_userfault_list;
+
+	/* Manual runtime pm autosuspend delay for user GGTT/lmem mmaps */
+	struct intel_wakeref_auto userfault_wakeref;
+
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
 	/*
 	 * To aide detection of wakeref leaks and general misuse, we
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 15+ messages in thread

* [Intel-gfx] [PATCH v2 2/2] drm/i915/dgfx: Grab wakeref at i915_ttm_unmap_virtual
  2022-10-21  9:55 ` [Intel-gfx] [PATCH v2 1/2] drm/i915: Encapsulate lmem rpm stuff in intel_runtime_pm Anshuman Gupta
@ 2022-10-21  9:55   ` Anshuman Gupta
  2022-10-24 14:03     ` Matthew Auld
  2022-10-24 14:00   ` [Intel-gfx] [PATCH v2 1/2] drm/i915: Encapsulate lmem rpm stuff in intel_runtime_pm Matthew Auld
  1 sibling, 1 reply; 15+ messages in thread
From: Anshuman Gupta @ 2022-10-21  9:55 UTC (permalink / raw)
  To: intel-gfx; +Cc: matthew.auld, rodrigo.vivi

We had already grabbed the rpm wakeref at the obj destruction path,
but it is also required to grab the wakeref when the object moves.
When i915_gem_object_release_mmap_offset() gets called by
i915_ttm_move_notify(), it will release the mmap offset without
grabbing the wakeref. We want to avoid that; therefore,
grab the wakeref at i915_ttm_unmap_virtual() accordingly.

While doing that, also change the lmem_userfault_lock from a
mutex to a spinlock, as spinlocks are widely used to protect lists.

Also changed if (obj->userfault_count) to
GEM_BUG_ON(!obj->userfault_count).

v2:
- Removed lmem_userfault_{list,lock} from intel_gt. [Matt Auld]

Fixes: ad74457a6b5a ("drm/i915/dgfx: Release mmap on rpm suspend")
Suggested-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_mman.c | 19 +++++-------
 drivers/gpu/drm/i915/gem/i915_gem_ttm.c  | 38 ++++++++++++++++--------
 drivers/gpu/drm/i915/intel_runtime_pm.c  |  2 +-
 drivers/gpu/drm/i915/intel_runtime_pm.h  |  2 +-
 4 files changed, 35 insertions(+), 26 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index fd29a9053582..e63329bc8065 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -557,11 +557,13 @@ void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object *
 
 	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
 
-	if (obj->userfault_count) {
-		/* rpm wakeref provide exclusive access */
-		list_del(&obj->userfault_link);
-		obj->userfault_count = 0;
-	}
+	/*
+	 * We have exclusive access here via runtime suspend. All other callers
+	 * must first grab the rpm wakeref.
+	 */
+	GEM_BUG_ON(!obj->userfault_count);
+	list_del(&obj->userfault_link);
+	obj->userfault_count = 0;
 }
 
 void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
@@ -587,13 +589,6 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
 		spin_lock(&obj->mmo.lock);
 	}
 	spin_unlock(&obj->mmo.lock);
-
-	if (obj->userfault_count) {
-		mutex_lock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
-		list_del(&obj->userfault_link);
-		mutex_unlock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
-		obj->userfault_count = 0;
-	}
 }
 
 static struct i915_mmap_offset *
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 50c70796ca38..93639b2dd04f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -509,18 +509,9 @@ static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
 static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
 {
 	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
-	intel_wakeref_t wakeref = 0;
 
 	if (bo->resource && !i915_ttm_is_ghost_object(bo)) {
-		/* ttm_bo_release() already has dma_resv_lock */
-		if (i915_ttm_cpu_maps_iomem(bo->resource))
-			wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
-
 		__i915_gem_object_pages_fini(obj);
-
-		if (wakeref)
-			intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);
-
 		i915_ttm_free_cached_io_rsgt(obj);
 	}
 }
@@ -1098,12 +1089,15 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
 	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
 		goto out_rpm;
 
-	/* ttm_bo_vm_reserve() already has dma_resv_lock */
+	/*
+	 * ttm_bo_vm_reserve() already has dma_resv_lock.
+	 * userfault_count is protected by dma_resv lock and rpm wakeref.
+	 */
 	if (ret == VM_FAULT_NOPAGE && wakeref && !obj->userfault_count) {
 		obj->userfault_count = 1;
-		mutex_lock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
+		spin_lock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
 		list_add(&obj->userfault_link, &to_i915(obj->base.dev)->runtime_pm.lmem_userfault_list);
-		mutex_unlock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
+		spin_unlock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
 	}
 
 	if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
@@ -1169,7 +1163,27 @@ static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
 
 static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
 {
+	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
+	intel_wakeref_t wakeref = 0;
+
+	assert_object_held_shared(obj);
+
+	if (i915_ttm_cpu_maps_iomem(bo->resource)) {
+		wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
+
+		/* userfault_count is protected by obj lock and rpm wakeref. */
+		if (obj->userfault_count) {
+			spin_lock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
+			list_del(&obj->userfault_link);
+			spin_unlock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
+			obj->userfault_count = 0;
+		}
+	}
+
 	ttm_bo_unmap_virtual(i915_gem_to_ttm(obj));
+
+	if (wakeref)
+		intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index bb74d4975cc8..129746713d07 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -655,6 +655,6 @@ void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm)
 
 	init_intel_runtime_pm_wakeref(rpm);
 	INIT_LIST_HEAD(&rpm->lmem_userfault_list);
-	mutex_init(&rpm->lmem_userfault_lock);
+	spin_lock_init(&rpm->lmem_userfault_lock);
 	intel_wakeref_auto_init(&rpm->userfault_wakeref, rpm);
 }
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h
index d0c04af2a6f3..39b832ec1fa0 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.h
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.h
@@ -64,7 +64,7 @@ struct intel_runtime_pm {
 	 *  but instead has exclusive access by virtue of all other accesses requiring
 	 *  holding the runtime pm wakeref.
 	 */
-	struct mutex lmem_userfault_lock;
+	spinlock_t lmem_userfault_lock;
 
 	/*
 	 *  Keep list of userfaulted gem obj, which require to release their
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 15+ messages in thread

* Re: [Intel-gfx] [PATCH v2 1/2] drm/i915: Encapsulate lmem rpm stuff in intel_runtime_pm
  2022-10-21  9:55 ` [Intel-gfx] [PATCH v2 1/2] drm/i915: Encapsulate lmem rpm stuff in intel_runtime_pm Anshuman Gupta
  2022-10-21  9:55   ` [Intel-gfx] [PATCH v2 2/2] drm/i915/dgfx: Grab wakeref at i915_ttm_unmap_virtual Anshuman Gupta
@ 2022-10-24 14:00   ` Matthew Auld
  2022-10-31  6:42     ` Gupta, Anshuman
  1 sibling, 1 reply; 15+ messages in thread
From: Matthew Auld @ 2022-10-24 14:00 UTC (permalink / raw)
  To: Anshuman Gupta, intel-gfx; +Cc: rodrigo.vivi

On 21/10/2022 10:55, Anshuman Gupta wrote:
> Runtime pm is not really per GT, therefore it make sense to
> move lmem_userfault_list, lmem_userfault_lock and
> userfault_wakeref from intel_gt to intel_runtime_pm structure,
> which is embedded to i915.
> 
> No functional change.
> 
> Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
> ---
>   drivers/gpu/drm/i915/gem/i915_gem_mman.c |  6 +++---
>   drivers/gpu/drm/i915/gem/i915_gem_pm.c   |  2 +-
>   drivers/gpu/drm/i915/gem/i915_gem_ttm.c  |  8 ++++----
>   drivers/gpu/drm/i915/gt/intel_gt.c       |  3 ---
>   drivers/gpu/drm/i915/gt/intel_gt_types.h | 17 -----------------
>   drivers/gpu/drm/i915/i915_gem.c          |  4 +---
>   drivers/gpu/drm/i915/intel_runtime_pm.c  |  5 +++++
>   drivers/gpu/drm/i915/intel_runtime_pm.h  | 22 ++++++++++++++++++++++
>   8 files changed, 36 insertions(+), 31 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> index 73d9eda1d6b7..fd29a9053582 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> @@ -413,7 +413,7 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
>   	vma->mmo = mmo;
>   
>   	if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
> -		intel_wakeref_auto(&to_gt(i915)->userfault_wakeref,
> +		intel_wakeref_auto(&i915->runtime_pm.userfault_wakeref,
>   				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
>   
>   	if (write) {
> @@ -589,9 +589,9 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
>   	spin_unlock(&obj->mmo.lock);
>   
>   	if (obj->userfault_count) {
> -		mutex_lock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
> +		mutex_lock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
>   		list_del(&obj->userfault_link);
> -		mutex_unlock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
> +		mutex_unlock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
>   		obj->userfault_count = 0;
>   	}
>   }
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
> index e5bfb6be9f7a..0d812f4d787d 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
> @@ -27,7 +27,7 @@ void i915_gem_suspend(struct drm_i915_private *i915)
>   
>   	GEM_TRACE("%s\n", dev_name(i915->drm.dev));
>   
> -	intel_wakeref_auto(&to_gt(i915)->userfault_wakeref, 0);
> +	intel_wakeref_auto(&i915->runtime_pm.userfault_wakeref, 0);
>   	flush_workqueue(i915->wq);
>   
>   	/*
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> index 84c91a4228a1..50c70796ca38 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> @@ -1101,13 +1101,13 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
>   	/* ttm_bo_vm_reserve() already has dma_resv_lock */
>   	if (ret == VM_FAULT_NOPAGE && wakeref && !obj->userfault_count) {
>   		obj->userfault_count = 1;
> -		mutex_lock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
> -		list_add(&obj->userfault_link, &to_gt(to_i915(obj->base.dev))->lmem_userfault_list);
> -		mutex_unlock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
> +		mutex_lock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
> +		list_add(&obj->userfault_link, &to_i915(obj->base.dev)->runtime_pm.lmem_userfault_list);
> +		mutex_unlock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
>   	}
>   
>   	if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
> -		intel_wakeref_auto(&to_gt(to_i915(obj->base.dev))->userfault_wakeref,
> +		intel_wakeref_auto(&to_i915(obj->base.dev)->runtime_pm.userfault_wakeref,
>   				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
>   
>   	i915_ttm_adjust_lru(obj);
> diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
> index 27dbb9e4bd6c..0ad8118ccbee 100644
> --- a/drivers/gpu/drm/i915/gt/intel_gt.c
> +++ b/drivers/gpu/drm/i915/gt/intel_gt.c
> @@ -40,8 +40,6 @@ void intel_gt_common_init_early(struct intel_gt *gt)
>   {
>   	spin_lock_init(gt->irq_lock);
>   
> -	INIT_LIST_HEAD(&gt->lmem_userfault_list);
> -	mutex_init(&gt->lmem_userfault_lock);
>   	INIT_LIST_HEAD(&gt->closed_vma);
>   	spin_lock_init(&gt->closed_lock);
>   
> @@ -859,7 +857,6 @@ static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
>   	}
>   
>   	intel_uncore_init_early(gt->uncore, gt);
> -	intel_wakeref_auto_init(&gt->userfault_wakeref, gt->uncore->rpm);
>   
>   	ret = intel_uncore_setup_mmio(gt->uncore, phys_addr);
>   	if (ret)
> diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
> index 64aa2ba624fc..ff9251807f5c 100644
> --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
> +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
> @@ -144,20 +144,6 @@ struct intel_gt {
>   	struct intel_wakeref wakeref;
>   	atomic_t user_wakeref;
>   
> -	/**
> -	 *  Protects access to lmem usefault list.
> -	 *  It is required, if we are outside of the runtime suspend path,
> -	 *  access to @lmem_userfault_list requires always first grabbing the
> -	 *  runtime pm, to ensure we can't race against runtime suspend.
> -	 *  Once we have that we also need to grab @lmem_userfault_lock,
> -	 *  at which point we have exclusive access.
> -	 *  The runtime suspend path is special since it doesn't really hold any locks,
> -	 *  but instead has exclusive access by virtue of all other accesses requiring
> -	 *  holding the runtime pm wakeref.
> -	 */
> -	struct mutex lmem_userfault_lock;
> -	struct list_head lmem_userfault_list;
> -
>   	struct list_head closed_vma;
>   	spinlock_t closed_lock; /* guards the list of closed_vma */
>   
> @@ -173,9 +159,6 @@ struct intel_gt {
>   	 */
>   	intel_wakeref_t awake;
>   
> -	/* Manual runtime pm autosuspend delay for user GGTT/lmem mmaps */
> -	struct intel_wakeref_auto userfault_wakeref;
> -
>   	u32 clock_frequency;
>   	u32 clock_period_ns;
>   
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 55d605c0c55d..299f94a9fb87 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -843,7 +843,7 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
>   		__i915_gem_object_release_mmap_gtt(obj);
>   
>   	list_for_each_entry_safe(obj, on,
> -				 &to_gt(i915)->lmem_userfault_list, userfault_link)
> +				 &i915->runtime_pm.lmem_userfault_list, userfault_link)
>   		i915_gem_object_runtime_pm_release_mmap_offset(obj);
>   
>   	/*
> @@ -1227,8 +1227,6 @@ void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
>   	struct intel_gt *gt;
>   	unsigned int i;
>   
> -	intel_wakeref_auto_fini(&to_gt(dev_priv)->userfault_wakeref);
> -
>   	i915_gem_suspend_late(dev_priv);
>   	for_each_gt(gt, dev_priv, i)
>   		intel_gt_driver_remove(gt);
> diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
> index 744cca507946..bb74d4975cc8 100644
> --- a/drivers/gpu/drm/i915/intel_runtime_pm.c
> +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
> @@ -633,6 +633,8 @@ void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm)
>   						     runtime_pm);
>   	int count = atomic_read(&rpm->wakeref_count);
>   
> +	intel_wakeref_auto_fini(&rpm->userfault_wakeref);
> +
>   	drm_WARN(&i915->drm, count,
>   		 "i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
>   		 intel_rpm_raw_wakeref_count(count),
> @@ -652,4 +654,7 @@ void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm)
>   	rpm->available = HAS_RUNTIME_PM(i915);
>   
>   	init_intel_runtime_pm_wakeref(rpm);
> +	INIT_LIST_HEAD(&rpm->lmem_userfault_list);
> +	mutex_init(&rpm->lmem_userfault_lock);
> +	intel_wakeref_auto_init(&rpm->userfault_wakeref, rpm);
>   }
> diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h
> index d9160e3ff4af..d0c04af2a6f3 100644
> --- a/drivers/gpu/drm/i915/intel_runtime_pm.h
> +++ b/drivers/gpu/drm/i915/intel_runtime_pm.h
> @@ -53,6 +53,28 @@ struct intel_runtime_pm {
>   	bool irqs_enabled;
>   	bool no_wakeref_tracking;
>   
> +	/**

Nit: that's not properly formatted kernel-doc.

Reviewed-by: Matthew Auld <matthew.auld@intel.com>

> +	 *  Protects access to lmem usefault list.
> +	 *  It is required, if we are outside of the runtime suspend path,
> +	 *  access to @lmem_userfault_list requires always first grabbing the
> +	 *  runtime pm, to ensure we can't race against runtime suspend.
> +	 *  Once we have that we also need to grab @lmem_userfault_lock,
> +	 *  at which point we have exclusive access.
> +	 *  The runtime suspend path is special since it doesn't really hold any locks,
> +	 *  but instead has exclusive access by virtue of all other accesses requiring
> +	 *  holding the runtime pm wakeref.
> +	 */
> +	struct mutex lmem_userfault_lock;
> +
> +	/*
> +	 *  Keep list of userfaulted gem obj, which require to release their
> +	 *  mmap mappings at runtime suspend path.
> +	 */
> +	struct list_head lmem_userfault_list;
> +
> +	/* Manual runtime pm autosuspend delay for user GGTT/lmem mmaps */
> +	struct intel_wakeref_auto userfault_wakeref;
> +
>   #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
>   	/*
>   	 * To aide detection of wakeref leaks and general misuse, we

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [Intel-gfx] [PATCH v2 2/2] drm/i915/dgfx: Grab wakeref at i915_ttm_unmap_virtual
  2022-10-21  9:55   ` [Intel-gfx] [PATCH v2 2/2] drm/i915/dgfx: Grab wakeref at i915_ttm_unmap_virtual Anshuman Gupta
@ 2022-10-24 14:03     ` Matthew Auld
  0 siblings, 0 replies; 15+ messages in thread
From: Matthew Auld @ 2022-10-24 14:03 UTC (permalink / raw)
  To: Anshuman Gupta, intel-gfx; +Cc: rodrigo.vivi

On 21/10/2022 10:55, Anshuman Gupta wrote:
> We had already grabbed the rpm wakeref at obj destruction path,
> but it also required to grab the wakeref when object moves.
> When i915_gem_object_release_mmap_offset() gets called by
> i915_ttm_move_notify(), it will release the mmap offset without
> grabbing the wakeref. We want to avoid that therefore,
> grab the wakeref at i915_ttm_unmap_virtual() accordingly.
> 
> While doing that also changed the lmem_userfault_lock from
> mutex to spinlock, as spinlock widely used for list.
> 
> Also changed if (obj->userfault_count) to
> GEM_BUG_ON(!obj->userfault_count).
> 
> v2:
> - Removed lmem_userfault_{list,lock} from intel_gt. [Matt Auld]
> 
> Fixes: ad74457a6b5a ("drm/i915/dgfx: Release mmap on rpm suspend")
> Suggested-by: Matthew Auld <matthew.auld@intel.com>
> Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [Intel-gfx] [PATCH v2 1/2] drm/i915: Encapsulate lmem rpm stuff in intel_runtime_pm
  2022-10-24 14:00   ` [Intel-gfx] [PATCH v2 1/2] drm/i915: Encapsulate lmem rpm stuff in intel_runtime_pm Matthew Auld
@ 2022-10-31  6:42     ` Gupta, Anshuman
  2022-10-31  9:34       ` Rodrigo Vivi
  0 siblings, 1 reply; 15+ messages in thread
From: Gupta, Anshuman @ 2022-10-31  6:42 UTC (permalink / raw)
  To: Auld, Matthew, intel-gfx, Vivi, Rodrigo



> -----Original Message-----
> From: Auld, Matthew <matthew.auld@intel.com>
> Sent: Monday, October 24, 2022 7:30 PM
> To: Gupta, Anshuman <anshuman.gupta@intel.com>; intel-
> gfx@lists.freedesktop.org
> Cc: tvrtko.ursulin@linux.intel.com; Vivi, Rodrigo <rodrigo.vivi@intel.com>
> Subject: Re: [PATCH v2 1/2] drm/i915: Encapsulate lmem rpm stuff in
> intel_runtime_pm
> 
> On 21/10/2022 10:55, Anshuman Gupta wrote:
> > Runtime pm is not really per GT, therefore it make sense to move
> > lmem_userfault_list, lmem_userfault_lock and userfault_wakeref from
> > intel_gt to intel_runtime_pm structure, which is embedded to i915.
> >
> > No functional change.
@rodrigo could you please provide the Ack on the patch.
Thanks,
Anshuman.
> >
> > Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
> > ---
> >   drivers/gpu/drm/i915/gem/i915_gem_mman.c |  6 +++---
> >   drivers/gpu/drm/i915/gem/i915_gem_pm.c   |  2 +-
> >   drivers/gpu/drm/i915/gem/i915_gem_ttm.c  |  8 ++++----
> >   drivers/gpu/drm/i915/gt/intel_gt.c       |  3 ---
> >   drivers/gpu/drm/i915/gt/intel_gt_types.h | 17 -----------------
> >   drivers/gpu/drm/i915/i915_gem.c          |  4 +---
> >   drivers/gpu/drm/i915/intel_runtime_pm.c  |  5 +++++
> >   drivers/gpu/drm/i915/intel_runtime_pm.h  | 22
> ++++++++++++++++++++++
> >   8 files changed, 36 insertions(+), 31 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> > b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> > index 73d9eda1d6b7..fd29a9053582 100644
> > --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> > +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> > @@ -413,7 +413,7 @@ static vm_fault_t vm_fault_gtt(struct vm_fault
> *vmf)
> >   	vma->mmo = mmo;
> >
> >   	if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
> > -		intel_wakeref_auto(&to_gt(i915)->userfault_wakeref,
> > +		intel_wakeref_auto(&i915->runtime_pm.userfault_wakeref,
> >
> >
> msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
> );
> >
> >   	if (write) {
> > @@ -589,9 +589,9 @@ void
> i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
> >   	spin_unlock(&obj->mmo.lock);
> >
> >   	if (obj->userfault_count) {
> > -		mutex_lock(&to_gt(to_i915(obj->base.dev))-
> >lmem_userfault_lock);
> > +
> > +mutex_lock(&to_i915(obj->base.dev)-
> >runtime_pm.lmem_userfault_lock);
> >   		list_del(&obj->userfault_link);
> > -		mutex_unlock(&to_gt(to_i915(obj->base.dev))-
> >lmem_userfault_lock);
> > +
> > +mutex_unlock(&to_i915(obj->base.dev)-
> >runtime_pm.lmem_userfault_lock)
> > +;
> >   		obj->userfault_count = 0;
> >   	}
> >   }
> > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
> > b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
> > index e5bfb6be9f7a..0d812f4d787d 100644
> > --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
> > +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
> > @@ -27,7 +27,7 @@ void i915_gem_suspend(struct drm_i915_private
> *i915)
> >
> >   	GEM_TRACE("%s\n", dev_name(i915->drm.dev));
> >
> > -	intel_wakeref_auto(&to_gt(i915)->userfault_wakeref, 0);
> > +	intel_wakeref_auto(&i915->runtime_pm.userfault_wakeref, 0);
> >   	flush_workqueue(i915->wq);
> >
> >   	/*
> > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> > b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> > index 84c91a4228a1..50c70796ca38 100644
> > --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> > +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> > @@ -1101,13 +1101,13 @@ static vm_fault_t vm_fault_ttm(struct vm_fault
> *vmf)
> >   	/* ttm_bo_vm_reserve() already has dma_resv_lock */
> >   	if (ret == VM_FAULT_NOPAGE && wakeref && !obj-
> >userfault_count) {
> >   		obj->userfault_count = 1;
> > -		mutex_lock(&to_gt(to_i915(obj->base.dev))-
> >lmem_userfault_lock);
> > -		list_add(&obj->userfault_link, &to_gt(to_i915(obj-
> >base.dev))->lmem_userfault_list);
> > -		mutex_unlock(&to_gt(to_i915(obj->base.dev))-
> >lmem_userfault_lock);
> > +		mutex_lock(&to_i915(obj->base.dev)-
> >runtime_pm.lmem_userfault_lock);
> > +		list_add(&obj->userfault_link, &to_i915(obj->base.dev)-
> >runtime_pm.lmem_userfault_list);
> > +
> > +mutex_unlock(&to_i915(obj->base.dev)-
> >runtime_pm.lmem_userfault_lock)
> > +;
> >   	}
> >
> >   	if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
> > -		intel_wakeref_auto(&to_gt(to_i915(obj->base.dev))-
> >userfault_wakeref,
> > +
> > +intel_wakeref_auto(&to_i915(obj->base.dev)-
> >runtime_pm.userfault_wake
> > +ref,
> >
> >
> msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
> );
> >
> >   	i915_ttm_adjust_lru(obj);
> > diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c
> > b/drivers/gpu/drm/i915/gt/intel_gt.c
> > index 27dbb9e4bd6c..0ad8118ccbee 100644
> > --- a/drivers/gpu/drm/i915/gt/intel_gt.c
> > +++ b/drivers/gpu/drm/i915/gt/intel_gt.c
> > @@ -40,8 +40,6 @@ void intel_gt_common_init_early(struct intel_gt *gt)
> >   {
> >   	spin_lock_init(gt->irq_lock);
> >
> > -	INIT_LIST_HEAD(&gt->lmem_userfault_list);
> > -	mutex_init(&gt->lmem_userfault_lock);
> >   	INIT_LIST_HEAD(&gt->closed_vma);
> >   	spin_lock_init(&gt->closed_lock);
> >
> > @@ -859,7 +857,6 @@ static int intel_gt_tile_setup(struct intel_gt *gt,
> phys_addr_t phys_addr)
> >   	}
> >
> >   	intel_uncore_init_early(gt->uncore, gt);
> > -	intel_wakeref_auto_init(&gt->userfault_wakeref, gt->uncore-
> >rpm);
> >
> >   	ret = intel_uncore_setup_mmio(gt->uncore, phys_addr);
> >   	if (ret)
> > diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h
> > b/drivers/gpu/drm/i915/gt/intel_gt_types.h
> > index 64aa2ba624fc..ff9251807f5c 100644
> > --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
> > +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
> > @@ -144,20 +144,6 @@ struct intel_gt {
> >   	struct intel_wakeref wakeref;
> >   	atomic_t user_wakeref;
> >
> > -	/**
> > -	 *  Protects access to lmem usefault list.
> > -	 *  It is required, if we are outside of the runtime suspend path,
> > -	 *  access to @lmem_userfault_list requires always first grabbing the
> > -	 *  runtime pm, to ensure we can't race against runtime suspend.
> > -	 *  Once we have that we also need to grab @lmem_userfault_lock,
> > -	 *  at which point we have exclusive access.
> > -	 *  The runtime suspend path is special since it doesn't really hold any
> locks,
> > -	 *  but instead has exclusive access by virtue of all other accesses
> requiring
> > -	 *  holding the runtime pm wakeref.
> > -	 */
> > -	struct mutex lmem_userfault_lock;
> > -	struct list_head lmem_userfault_list;
> > -
> >   	struct list_head closed_vma;
> >   	spinlock_t closed_lock; /* guards the list of closed_vma */
> >
> > @@ -173,9 +159,6 @@ struct intel_gt {
> >   	 */
> >   	intel_wakeref_t awake;
> >
> > -	/* Manual runtime pm autosuspend delay for user GGTT/lmem
> mmaps */
> > -	struct intel_wakeref_auto userfault_wakeref;
> > -
> >   	u32 clock_frequency;
> >   	u32 clock_period_ns;
> >
> > diff --git a/drivers/gpu/drm/i915/i915_gem.c
> > b/drivers/gpu/drm/i915/i915_gem.c index 55d605c0c55d..299f94a9fb87
> > 100644
> > --- a/drivers/gpu/drm/i915/i915_gem.c
> > +++ b/drivers/gpu/drm/i915/i915_gem.c
> > @@ -843,7 +843,7 @@ void i915_gem_runtime_suspend(struct
> drm_i915_private *i915)
> >   		__i915_gem_object_release_mmap_gtt(obj);
> >
> >   	list_for_each_entry_safe(obj, on,
> > -				 &to_gt(i915)->lmem_userfault_list,
> userfault_link)
> > +				 &i915->runtime_pm.lmem_userfault_list,
> userfault_link)
> >   		i915_gem_object_runtime_pm_release_mmap_offset(obj);
> >
> >   	/*
> > @@ -1227,8 +1227,6 @@ void i915_gem_driver_remove(struct
> drm_i915_private *dev_priv)
> >   	struct intel_gt *gt;
> >   	unsigned int i;
> >
> > -	intel_wakeref_auto_fini(&to_gt(dev_priv)->userfault_wakeref);
> > -
> >   	i915_gem_suspend_late(dev_priv);
> >   	for_each_gt(gt, dev_priv, i)
> >   		intel_gt_driver_remove(gt);
> > diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c
> > b/drivers/gpu/drm/i915/intel_runtime_pm.c
> > index 744cca507946..bb74d4975cc8 100644
> > --- a/drivers/gpu/drm/i915/intel_runtime_pm.c
> > +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
> > @@ -633,6 +633,8 @@ void intel_runtime_pm_driver_release(struct
> intel_runtime_pm *rpm)
> >   						     runtime_pm);
> >   	int count = atomic_read(&rpm->wakeref_count);
> >
> > +	intel_wakeref_auto_fini(&rpm->userfault_wakeref);
> > +
> >   	drm_WARN(&i915->drm, count,
> >   		 "i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
> >   		 intel_rpm_raw_wakeref_count(count),
> > @@ -652,4 +654,7 @@ void intel_runtime_pm_init_early(struct
> intel_runtime_pm *rpm)
> >   	rpm->available = HAS_RUNTIME_PM(i915);
> >
> >   	init_intel_runtime_pm_wakeref(rpm);
> > +	INIT_LIST_HEAD(&rpm->lmem_userfault_list);
> > +	mutex_init(&rpm->lmem_userfault_lock);
> > +	intel_wakeref_auto_init(&rpm->userfault_wakeref, rpm);
> >   }
> > diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h
> > b/drivers/gpu/drm/i915/intel_runtime_pm.h
> > index d9160e3ff4af..d0c04af2a6f3 100644
> > --- a/drivers/gpu/drm/i915/intel_runtime_pm.h
> > +++ b/drivers/gpu/drm/i915/intel_runtime_pm.h
> > @@ -53,6 +53,28 @@ struct intel_runtime_pm {
> >   	bool irqs_enabled;
> >   	bool no_wakeref_tracking;
> >
> > +	/**
> 
> Nit: that's not properly formatted kernel-doc.
> 
> Reviewed-by: Matthew Auld <matthew.auld@intel.com>
> 
> > +	 *  Protects access to lmem usefault list.
> > +	 *  It is required, if we are outside of the runtime suspend path,
> > +	 *  access to @lmem_userfault_list requires always first grabbing the
> > +	 *  runtime pm, to ensure we can't race against runtime suspend.
> > +	 *  Once we have that we also need to grab @lmem_userfault_lock,
> > +	 *  at which point we have exclusive access.
> > +	 *  The runtime suspend path is special since it doesn't really hold any
> locks,
> > +	 *  but instead has exclusive access by virtue of all other accesses
> requiring
> > +	 *  holding the runtime pm wakeref.
> > +	 */
> > +	struct mutex lmem_userfault_lock;
> > +
> > +	/*
> > +	 *  Keep list of userfaulted gem obj, which require to release their
> > +	 *  mmap mappings at runtime suspend path.
> > +	 */
> > +	struct list_head lmem_userfault_list;
> > +
> > +	/* Manual runtime pm autosuspend delay for user GGTT/lmem
> mmaps */
> > +	struct intel_wakeref_auto userfault_wakeref;
> > +
> >   #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
> >   	/*
> >   	 * To aide detection of wakeref leaks and general misuse, we

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [Intel-gfx] [PATCH v2 1/2] drm/i915: Encapsulate lmem rpm stuff in intel_runtime_pm
  2022-10-31  6:42     ` Gupta, Anshuman
@ 2022-10-31  9:34       ` Rodrigo Vivi
  0 siblings, 0 replies; 15+ messages in thread
From: Rodrigo Vivi @ 2022-10-31  9:34 UTC (permalink / raw)
  To: Gupta, Anshuman; +Cc: intel-gfx, Auld, Matthew

On Mon, Oct 31, 2022 at 02:42:09AM -0400, Gupta, Anshuman wrote:
> 
> 
> > -----Original Message-----
> > From: Auld, Matthew <matthew.auld@intel.com>
> > Sent: Monday, October 24, 2022 7:30 PM
> > To: Gupta, Anshuman <anshuman.gupta@intel.com>; intel-
> > gfx@lists.freedesktop.org
> > Cc: tvrtko.ursulin@linux.intel.com; Vivi, Rodrigo <rodrigo.vivi@intel.com>
> > Subject: Re: [PATCH v2 1/2] drm/i915: Encapsulate lmem rpm stuff in
> > intel_runtime_pm
> > 
> > On 21/10/2022 10:55, Anshuman Gupta wrote:
> > > Runtime pm is not really per GT, therefore it make sense to move
> > > lmem_userfault_list, lmem_userfault_lock and userfault_wakeref from
> > > intel_gt to intel_runtime_pm structure, which is embedded to i915.
> > >
> > > No functional change.
> @rodrigo could you please provide the Ack on the patch.

not needed :)

I had seen the patch and it does make sense to me, but I have
not comment because it already had the rv-b from Matt.


Acked-by: Rodrigo Vivi <rodrigo.vivi@intel.com>


> Thanks,
> Anshuman.
> > >
> > > Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
> > > ---
> > >   drivers/gpu/drm/i915/gem/i915_gem_mman.c |  6 +++---
> > >   drivers/gpu/drm/i915/gem/i915_gem_pm.c   |  2 +-
> > >   drivers/gpu/drm/i915/gem/i915_gem_ttm.c  |  8 ++++----
> > >   drivers/gpu/drm/i915/gt/intel_gt.c       |  3 ---
> > >   drivers/gpu/drm/i915/gt/intel_gt_types.h | 17 -----------------
> > >   drivers/gpu/drm/i915/i915_gem.c          |  4 +---
> > >   drivers/gpu/drm/i915/intel_runtime_pm.c  |  5 +++++
> > >   drivers/gpu/drm/i915/intel_runtime_pm.h  | 22
> > ++++++++++++++++++++++
> > >   8 files changed, 36 insertions(+), 31 deletions(-)
> > >
> > > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> > > b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> > > index 73d9eda1d6b7..fd29a9053582 100644
> > > --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> > > +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> > > @@ -413,7 +413,7 @@ static vm_fault_t vm_fault_gtt(struct vm_fault
> > *vmf)
> > >   	vma->mmo = mmo;
> > >
> > >   	if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
> > > -		intel_wakeref_auto(&to_gt(i915)->userfault_wakeref,
> > > +		intel_wakeref_auto(&i915->runtime_pm.userfault_wakeref,
> > >
> > >
> > msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
> > );
> > >
> > >   	if (write) {
> > > @@ -589,9 +589,9 @@ void
> > i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
> > >   	spin_unlock(&obj->mmo.lock);
> > >
> > >   	if (obj->userfault_count) {
> > > -		mutex_lock(&to_gt(to_i915(obj->base.dev))-
> > >lmem_userfault_lock);
> > > +
> > > +mutex_lock(&to_i915(obj->base.dev)-
> > >runtime_pm.lmem_userfault_lock);
> > >   		list_del(&obj->userfault_link);
> > > -		mutex_unlock(&to_gt(to_i915(obj->base.dev))-
> > >lmem_userfault_lock);
> > > +
> > > +mutex_unlock(&to_i915(obj->base.dev)-
> > >runtime_pm.lmem_userfault_lock)
> > > +;
> > >   		obj->userfault_count = 0;
> > >   	}
> > >   }
> > > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
> > > b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
> > > index e5bfb6be9f7a..0d812f4d787d 100644
> > > --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
> > > +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
> > > @@ -27,7 +27,7 @@ void i915_gem_suspend(struct drm_i915_private
> > *i915)
> > >
> > >   	GEM_TRACE("%s\n", dev_name(i915->drm.dev));
> > >
> > > -	intel_wakeref_auto(&to_gt(i915)->userfault_wakeref, 0);
> > > +	intel_wakeref_auto(&i915->runtime_pm.userfault_wakeref, 0);
> > >   	flush_workqueue(i915->wq);
> > >
> > >   	/*
> > > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> > > b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> > > index 84c91a4228a1..50c70796ca38 100644
> > > --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> > > +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> > > @@ -1101,13 +1101,13 @@ static vm_fault_t vm_fault_ttm(struct vm_fault
> > *vmf)
> > >   	/* ttm_bo_vm_reserve() already has dma_resv_lock */
> > >   	if (ret == VM_FAULT_NOPAGE && wakeref && !obj-
> > >userfault_count) {
> > >   		obj->userfault_count = 1;
> > > -		mutex_lock(&to_gt(to_i915(obj->base.dev))-
> > >lmem_userfault_lock);
> > > -		list_add(&obj->userfault_link, &to_gt(to_i915(obj-
> > >base.dev))->lmem_userfault_list);
> > > -		mutex_unlock(&to_gt(to_i915(obj->base.dev))-
> > >lmem_userfault_lock);
> > > +		mutex_lock(&to_i915(obj->base.dev)-
> > >runtime_pm.lmem_userfault_lock);
> > > +		list_add(&obj->userfault_link, &to_i915(obj->base.dev)-
> > >runtime_pm.lmem_userfault_list);
> > > +
> > > +mutex_unlock(&to_i915(obj->base.dev)-
> > >runtime_pm.lmem_userfault_lock)
> > > +;
> > >   	}
> > >
> > >   	if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
> > > -		intel_wakeref_auto(&to_gt(to_i915(obj->base.dev))-
> > >userfault_wakeref,
> > > +
> > > +intel_wakeref_auto(&to_i915(obj->base.dev)-
> > >runtime_pm.userfault_wake
> > > +ref,
> > >
> > >
> > msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
> > );
> > >
> > >   	i915_ttm_adjust_lru(obj);
> > > diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c
> > > b/drivers/gpu/drm/i915/gt/intel_gt.c
> > > index 27dbb9e4bd6c..0ad8118ccbee 100644
> > > --- a/drivers/gpu/drm/i915/gt/intel_gt.c
> > > +++ b/drivers/gpu/drm/i915/gt/intel_gt.c
> > > @@ -40,8 +40,6 @@ void intel_gt_common_init_early(struct intel_gt *gt)
> > >   {
> > >   	spin_lock_init(gt->irq_lock);
> > >
> > > -	INIT_LIST_HEAD(&gt->lmem_userfault_list);
> > > -	mutex_init(&gt->lmem_userfault_lock);
> > >   	INIT_LIST_HEAD(&gt->closed_vma);
> > >   	spin_lock_init(&gt->closed_lock);
> > >
> > > @@ -859,7 +857,6 @@ static int intel_gt_tile_setup(struct intel_gt *gt,
> > phys_addr_t phys_addr)
> > >   	}
> > >
> > >   	intel_uncore_init_early(gt->uncore, gt);
> > > -	intel_wakeref_auto_init(&gt->userfault_wakeref, gt->uncore-
> > >rpm);
> > >
> > >   	ret = intel_uncore_setup_mmio(gt->uncore, phys_addr);
> > >   	if (ret)
> > > diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h
> > > b/drivers/gpu/drm/i915/gt/intel_gt_types.h
> > > index 64aa2ba624fc..ff9251807f5c 100644
> > > --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
> > > +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
> > > @@ -144,20 +144,6 @@ struct intel_gt {
> > >   	struct intel_wakeref wakeref;
> > >   	atomic_t user_wakeref;
> > >
> > > -	/**
> > > -	 *  Protects access to lmem usefault list.
> > > -	 *  It is required, if we are outside of the runtime suspend path,
> > > -	 *  access to @lmem_userfault_list requires always first grabbing the
> > > -	 *  runtime pm, to ensure we can't race against runtime suspend.
> > > -	 *  Once we have that we also need to grab @lmem_userfault_lock,
> > > -	 *  at which point we have exclusive access.
> > > -	 *  The runtime suspend path is special since it doesn't really hold any
> > locks,
> > > -	 *  but instead has exclusive access by virtue of all other accesses
> > requiring
> > > -	 *  holding the runtime pm wakeref.
> > > -	 */
> > > -	struct mutex lmem_userfault_lock;
> > > -	struct list_head lmem_userfault_list;
> > > -
> > >   	struct list_head closed_vma;
> > >   	spinlock_t closed_lock; /* guards the list of closed_vma */
> > >
> > > @@ -173,9 +159,6 @@ struct intel_gt {
> > >   	 */
> > >   	intel_wakeref_t awake;
> > >
> > > -	/* Manual runtime pm autosuspend delay for user GGTT/lmem
> > mmaps */
> > > -	struct intel_wakeref_auto userfault_wakeref;
> > > -
> > >   	u32 clock_frequency;
> > >   	u32 clock_period_ns;
> > >
> > > diff --git a/drivers/gpu/drm/i915/i915_gem.c
> > > b/drivers/gpu/drm/i915/i915_gem.c index 55d605c0c55d..299f94a9fb87
> > > 100644
> > > --- a/drivers/gpu/drm/i915/i915_gem.c
> > > +++ b/drivers/gpu/drm/i915/i915_gem.c
> > > @@ -843,7 +843,7 @@ void i915_gem_runtime_suspend(struct
> > drm_i915_private *i915)
> > >   		__i915_gem_object_release_mmap_gtt(obj);
> > >
> > >   	list_for_each_entry_safe(obj, on,
> > > -				 &to_gt(i915)->lmem_userfault_list,
> > userfault_link)
> > > +				 &i915->runtime_pm.lmem_userfault_list,
> > userfault_link)
> > >   		i915_gem_object_runtime_pm_release_mmap_offset(obj);
> > >
> > >   	/*
> > > @@ -1227,8 +1227,6 @@ void i915_gem_driver_remove(struct
> > drm_i915_private *dev_priv)
> > >   	struct intel_gt *gt;
> > >   	unsigned int i;
> > >
> > > -	intel_wakeref_auto_fini(&to_gt(dev_priv)->userfault_wakeref);
> > > -
> > >   	i915_gem_suspend_late(dev_priv);
> > >   	for_each_gt(gt, dev_priv, i)
> > >   		intel_gt_driver_remove(gt);
> > > diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c
> > > b/drivers/gpu/drm/i915/intel_runtime_pm.c
> > > index 744cca507946..bb74d4975cc8 100644
> > > --- a/drivers/gpu/drm/i915/intel_runtime_pm.c
> > > +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
> > > @@ -633,6 +633,8 @@ void intel_runtime_pm_driver_release(struct
> > intel_runtime_pm *rpm)
> > >   						     runtime_pm);
> > >   	int count = atomic_read(&rpm->wakeref_count);
> > >
> > > +	intel_wakeref_auto_fini(&rpm->userfault_wakeref);
> > > +
> > >   	drm_WARN(&i915->drm, count,
> > >   		 "i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
> > >   		 intel_rpm_raw_wakeref_count(count),
> > > @@ -652,4 +654,7 @@ void intel_runtime_pm_init_early(struct
> > intel_runtime_pm *rpm)
> > >   	rpm->available = HAS_RUNTIME_PM(i915);
> > >
> > >   	init_intel_runtime_pm_wakeref(rpm);
> > > +	INIT_LIST_HEAD(&rpm->lmem_userfault_list);
> > > +	mutex_init(&rpm->lmem_userfault_lock);
> > > +	intel_wakeref_auto_init(&rpm->userfault_wakeref, rpm);
> > >   }
> > > diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h
> > > b/drivers/gpu/drm/i915/intel_runtime_pm.h
> > > index d9160e3ff4af..d0c04af2a6f3 100644
> > > --- a/drivers/gpu/drm/i915/intel_runtime_pm.h
> > > +++ b/drivers/gpu/drm/i915/intel_runtime_pm.h
> > > @@ -53,6 +53,28 @@ struct intel_runtime_pm {
> > >   	bool irqs_enabled;
> > >   	bool no_wakeref_tracking;
> > >
> > > +	/**
> > 
> > Nit: that's not properly formatted kernel-doc.
> > 
> > Reviewed-by: Matthew Auld <matthew.auld@intel.com>
> > 
> > > > +	 *  Protects access to lmem userfault list.
> > > +	 *  It is required, if we are outside of the runtime suspend path,
> > > +	 *  access to @lmem_userfault_list requires always first grabbing the
> > > +	 *  runtime pm, to ensure we can't race against runtime suspend.
> > > +	 *  Once we have that we also need to grab @lmem_userfault_lock,
> > > +	 *  at which point we have exclusive access.
> > > +	 *  The runtime suspend path is special since it doesn't really hold any
> > locks,
> > > +	 *  but instead has exclusive access by virtue of all other accesses
> > requiring
> > > +	 *  holding the runtime pm wakeref.
> > > +	 */
> > > +	struct mutex lmem_userfault_lock;
> > > +
> > > +	/*
> > > > +	 *  Keep a list of userfaulted gem objects, which require their
> > > > +	 *  mmap mappings to be released on the runtime suspend path.
> > > +	 */
> > > +	struct list_head lmem_userfault_list;
> > > +
> > > +	/* Manual runtime pm autosuspend delay for user GGTT/lmem
> > mmaps */
> > > +	struct intel_wakeref_auto userfault_wakeref;
> > > +
> > >   #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
> > >   	/*
> > >   	 * To aid detection of wakeref leaks and general misuse, we

^ permalink raw reply	[flat|nested] 15+ messages in thread

end of thread, other threads:[~2022-10-31  9:34 UTC | newest]

Thread overview: 15+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-09-23 14:31 [Intel-gfx] [PATCH] drm/i915/dgfx: Grab wakeref at i915_ttm_unmap_virtual Anshuman Gupta
2022-09-23 17:02 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for " Patchwork
2022-09-23 17:02 ` [Intel-gfx] ✗ Fi.CI.SPARSE: " Patchwork
2022-09-23 17:21 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
2022-09-24  7:32 ` [Intel-gfx] ✓ Fi.CI.IGT: " Patchwork
2022-09-26 16:22 ` [Intel-gfx] [PATCH] " Matthew Auld
2022-09-27 12:30   ` Gupta, Anshuman
2022-09-27 12:45     ` Matthew Auld
2022-09-28 13:32       ` Gupta, Anshuman
2022-10-21  9:55 ` [Intel-gfx] [PATCH v2 1/2] drm/i915: Encapsulate lmem rpm stuff in intel_runtime_pm Anshuman Gupta
2022-10-21  9:55   ` [Intel-gfx] [PATCH v2 2/2] drm/i915/dgfx: Grab wakeref at i915_ttm_unmap_virtual Anshuman Gupta
2022-10-24 14:03     ` Matthew Auld
2022-10-24 14:00   ` [Intel-gfx] [PATCH v2 1/2] drm/i915: Encapsulate lmem rpm stuff in intel_runtime_pm Matthew Auld
2022-10-31  6:42     ` Gupta, Anshuman
2022-10-31  9:34       ` Rodrigo Vivi

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.