* [Intel-gfx] [PATCH] drm/i915: Flip guc_id allocation partition
@ 2022-01-11 16:30 ` Matthew Brost
0 siblings, 0 replies; 18+ messages in thread
From: Matthew Brost @ 2022-01-11 16:30 UTC (permalink / raw)
To: intel-gfx, dri-devel
Move the multi-lrc guc_id range from the lower allocation partition (0 to
the number of multi-lrc guc_ids) to the upper allocation partition (from
the number of single-lrc guc_ids up to the maximum number of guc_ids).
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
.../gpu/drm/i915/gt/uc/intel_guc_submission.c | 57 ++++++++++++++-----
1 file changed, 42 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 9989d121127df..1bacc9621cea8 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -147,6 +147,8 @@ guc_create_parallel(struct intel_engine_cs **engines,
*/
#define NUMBER_MULTI_LRC_GUC_ID(guc) \
((guc)->submission_state.num_guc_ids / 16)
+#define NUMBER_SINGLE_LRC_GUC_ID(guc) \
+ ((guc)->submission_state.num_guc_ids - NUMBER_MULTI_LRC_GUC_ID(guc))
/*
* Below is a set of functions which control the GuC scheduling state which
@@ -1776,11 +1778,6 @@ int intel_guc_submission_init(struct intel_guc *guc)
INIT_WORK(&guc->submission_state.destroyed_worker,
destroyed_worker_func);
- guc->submission_state.guc_ids_bitmap =
- bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
- if (!guc->submission_state.guc_ids_bitmap)
- return -ENOMEM;
-
spin_lock_init(&guc->timestamp.lock);
INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
@@ -1796,7 +1793,8 @@ void intel_guc_submission_fini(struct intel_guc *guc)
guc_flush_destroyed_contexts(guc);
guc_lrc_desc_pool_destroy(guc);
i915_sched_engine_put(guc->sched_engine);
- bitmap_free(guc->submission_state.guc_ids_bitmap);
+ if (guc->submission_state.guc_ids_bitmap)
+ bitmap_free(guc->submission_state.guc_ids_bitmap);
}
static inline void queue_request(struct i915_sched_engine *sched_engine,
@@ -1863,6 +1861,33 @@ static void guc_submit_request(struct i915_request *rq)
spin_unlock_irqrestore(&sched_engine->lock, flags);
}
+static int new_mlrc_guc_id(struct intel_guc *guc, struct intel_context *ce)
+{
+ int ret;
+
+ GEM_BUG_ON(!intel_context_is_parent(ce));
+ GEM_BUG_ON(!guc->submission_state.guc_ids_bitmap);
+
+ ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
+ NUMBER_MULTI_LRC_GUC_ID(guc),
+ order_base_2(ce->parallel.number_children
+ + 1));
+ if (likely(!(ret < 0)))
+ ret += NUMBER_SINGLE_LRC_GUC_ID(guc);
+
+ return ret;
+}
+
+static int new_slrc_guc_id(struct intel_guc *guc, struct intel_context *ce)
+{
+ GEM_BUG_ON(intel_context_is_parent(ce));
+
+ return ida_simple_get(&guc->submission_state.guc_ids,
+ 0, NUMBER_SINGLE_LRC_GUC_ID(guc),
+ GFP_KERNEL | __GFP_RETRY_MAYFAIL |
+ __GFP_NOWARN);
+}
+
static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
{
int ret;
@@ -1870,16 +1895,10 @@ static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
GEM_BUG_ON(intel_context_is_child(ce));
if (intel_context_is_parent(ce))
- ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
- NUMBER_MULTI_LRC_GUC_ID(guc),
- order_base_2(ce->parallel.number_children
- + 1));
+ ret = new_mlrc_guc_id(guc, ce);
else
- ret = ida_simple_get(&guc->submission_state.guc_ids,
- NUMBER_MULTI_LRC_GUC_ID(guc),
- guc->submission_state.num_guc_ids,
- GFP_KERNEL | __GFP_RETRY_MAYFAIL |
- __GFP_NOWARN);
+ ret = new_slrc_guc_id(guc, ce);
+
if (unlikely(ret < 0))
return ret;
@@ -1989,6 +2008,14 @@ static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
GEM_BUG_ON(atomic_read(&ce->guc_id.ref));
+ if (unlikely(intel_context_is_parent(ce) &&
+ !guc->submission_state.guc_ids_bitmap)) {
+ guc->submission_state.guc_ids_bitmap =
+ bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
+ if (!guc->submission_state.guc_ids_bitmap)
+ return -ENOMEM;
+ }
+
try_again:
spin_lock_irqsave(&guc->submission_state.lock, flags);
--
2.34.1
^ permalink raw reply related [flat|nested] 18+ messages in thread
* [PATCH] drm/i915: Flip guc_id allocation partition
@ 2022-01-11 16:30 ` Matthew Brost
0 siblings, 0 replies; 18+ messages in thread
From: Matthew Brost @ 2022-01-11 16:30 UTC (permalink / raw)
To: intel-gfx, dri-devel
Cc: daniele.ceraolospurio, john.c.harrison, michal.wajdeczko
Move the multi-lrc guc_id range from the lower allocation partition (0 to
the number of multi-lrc guc_ids) to the upper allocation partition (from
the number of single-lrc guc_ids up to the maximum number of guc_ids).
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
.../gpu/drm/i915/gt/uc/intel_guc_submission.c | 57 ++++++++++++++-----
1 file changed, 42 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 9989d121127df..1bacc9621cea8 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -147,6 +147,8 @@ guc_create_parallel(struct intel_engine_cs **engines,
*/
#define NUMBER_MULTI_LRC_GUC_ID(guc) \
((guc)->submission_state.num_guc_ids / 16)
+#define NUMBER_SINGLE_LRC_GUC_ID(guc) \
+ ((guc)->submission_state.num_guc_ids - NUMBER_MULTI_LRC_GUC_ID(guc))
/*
* Below is a set of functions which control the GuC scheduling state which
@@ -1776,11 +1778,6 @@ int intel_guc_submission_init(struct intel_guc *guc)
INIT_WORK(&guc->submission_state.destroyed_worker,
destroyed_worker_func);
- guc->submission_state.guc_ids_bitmap =
- bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
- if (!guc->submission_state.guc_ids_bitmap)
- return -ENOMEM;
-
spin_lock_init(&guc->timestamp.lock);
INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
@@ -1796,7 +1793,8 @@ void intel_guc_submission_fini(struct intel_guc *guc)
guc_flush_destroyed_contexts(guc);
guc_lrc_desc_pool_destroy(guc);
i915_sched_engine_put(guc->sched_engine);
- bitmap_free(guc->submission_state.guc_ids_bitmap);
+ if (guc->submission_state.guc_ids_bitmap)
+ bitmap_free(guc->submission_state.guc_ids_bitmap);
}
static inline void queue_request(struct i915_sched_engine *sched_engine,
@@ -1863,6 +1861,33 @@ static void guc_submit_request(struct i915_request *rq)
spin_unlock_irqrestore(&sched_engine->lock, flags);
}
+static int new_mlrc_guc_id(struct intel_guc *guc, struct intel_context *ce)
+{
+ int ret;
+
+ GEM_BUG_ON(!intel_context_is_parent(ce));
+ GEM_BUG_ON(!guc->submission_state.guc_ids_bitmap);
+
+ ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
+ NUMBER_MULTI_LRC_GUC_ID(guc),
+ order_base_2(ce->parallel.number_children
+ + 1));
+ if (likely(!(ret < 0)))
+ ret += NUMBER_SINGLE_LRC_GUC_ID(guc);
+
+ return ret;
+}
+
+static int new_slrc_guc_id(struct intel_guc *guc, struct intel_context *ce)
+{
+ GEM_BUG_ON(intel_context_is_parent(ce));
+
+ return ida_simple_get(&guc->submission_state.guc_ids,
+ 0, NUMBER_SINGLE_LRC_GUC_ID(guc),
+ GFP_KERNEL | __GFP_RETRY_MAYFAIL |
+ __GFP_NOWARN);
+}
+
static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
{
int ret;
@@ -1870,16 +1895,10 @@ static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
GEM_BUG_ON(intel_context_is_child(ce));
if (intel_context_is_parent(ce))
- ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
- NUMBER_MULTI_LRC_GUC_ID(guc),
- order_base_2(ce->parallel.number_children
- + 1));
+ ret = new_mlrc_guc_id(guc, ce);
else
- ret = ida_simple_get(&guc->submission_state.guc_ids,
- NUMBER_MULTI_LRC_GUC_ID(guc),
- guc->submission_state.num_guc_ids,
- GFP_KERNEL | __GFP_RETRY_MAYFAIL |
- __GFP_NOWARN);
+ ret = new_slrc_guc_id(guc, ce);
+
if (unlikely(ret < 0))
return ret;
@@ -1989,6 +2008,14 @@ static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
GEM_BUG_ON(atomic_read(&ce->guc_id.ref));
+ if (unlikely(intel_context_is_parent(ce) &&
+ !guc->submission_state.guc_ids_bitmap)) {
+ guc->submission_state.guc_ids_bitmap =
+ bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
+ if (!guc->submission_state.guc_ids_bitmap)
+ return -ENOMEM;
+ }
+
try_again:
spin_lock_irqsave(&guc->submission_state.lock, flags);
--
2.34.1
^ permalink raw reply related [flat|nested] 18+ messages in thread
* [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for drm/i915: Flip guc_id allocation partition
2022-01-11 16:30 ` Matthew Brost
(?)
@ 2022-01-11 19:17 ` Patchwork
-1 siblings, 0 replies; 18+ messages in thread
From: Patchwork @ 2022-01-11 19:17 UTC (permalink / raw)
To: Matthew Brost; +Cc: intel-gfx
== Series Details ==
Series: drm/i915: Flip guc_id allocation partition
URL : https://patchwork.freedesktop.org/series/98751/
State : warning
== Summary ==
$ dim checkpatch origin/drm-tip
7c34b8acae82 drm/i915: Flip guc_id allocation partition
-:20: CHECK:MACRO_ARG_REUSE: Macro argument reuse 'guc' - possible side-effects?
#20: FILE: drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:150:
+#define NUMBER_SINGLE_LRC_GUC_ID(guc) \
+ ((guc)->submission_state.num_guc_ids - NUMBER_MULTI_LRC_GUC_ID(guc))
total: 0 errors, 0 warnings, 1 checks, 94 lines checked
^ permalink raw reply [flat|nested] 18+ messages in thread
* [Intel-gfx] ✓ Fi.CI.BAT: success for drm/i915: Flip guc_id allocation partition
2022-01-11 16:30 ` Matthew Brost
(?)
(?)
@ 2022-01-11 19:33 ` Patchwork
-1 siblings, 0 replies; 18+ messages in thread
From: Patchwork @ 2022-01-11 19:33 UTC (permalink / raw)
To: Matthew Brost; +Cc: intel-gfx
[-- Attachment #1: Type: text/plain, Size: 5868 bytes --]
== Series Details ==
Series: drm/i915: Flip guc_id allocation partition
URL : https://patchwork.freedesktop.org/series/98751/
State : success
== Summary ==
CI Bug Log - changes from CI_DRM_11066 -> Patchwork_21970
====================================================
Summary
-------
**SUCCESS**
No regressions found.
External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/index.html
Participating hosts (43 -> 42)
------------------------------
Additional (1): bat-adlp-4
Missing (2): fi-bsw-cyan fi-bdw-samus
Possible new issues
-------------------
Here are the unknown changes that may have been introduced in Patchwork_21970:
### IGT changes ###
#### Suppressed ####
The following results come from untrusted machines, tests, or statuses.
They do not affect the overall result.
* igt@i915_selftest@live@late_gt_pm:
- {bat-adlp-6}: [PASS][1] -> [FAIL][2]
[1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/bat-adlp-6/igt@i915_selftest@live@late_gt_pm.html
[2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/bat-adlp-6/igt@i915_selftest@live@late_gt_pm.html
Known issues
------------
Here are the changes found in Patchwork_21970 that come from known issues:
### IGT changes ###
#### Issues hit ####
* igt@gem_flink_basic@bad-flink:
- fi-skl-6600u: NOTRUN -> [INCOMPLETE][3] ([i915#4547])
[3]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/fi-skl-6600u/igt@gem_flink_basic@bad-flink.html
* igt@gem_lmem_swapping@basic:
- bat-adlp-4: NOTRUN -> [SKIP][4] ([i915#4613]) +3 similar issues
[4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/bat-adlp-4/igt@gem_lmem_swapping@basic.html
* igt@gem_tiled_pread_basic:
- bat-adlp-4: NOTRUN -> [SKIP][5] ([i915#3282])
[5]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/bat-adlp-4/igt@gem_tiled_pread_basic.html
* igt@kms_chamelium@dp-crc-fast:
- bat-adlp-4: NOTRUN -> [SKIP][6] ([fdo#111827]) +8 similar issues
[6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/bat-adlp-4/igt@kms_chamelium@dp-crc-fast.html
* igt@kms_cursor_legacy@basic-busy-flip-before-cursor-legacy:
- bat-adlp-4: NOTRUN -> [SKIP][7] ([i915#4103]) +1 similar issue
[7]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/bat-adlp-4/igt@kms_cursor_legacy@basic-busy-flip-before-cursor-legacy.html
* igt@kms_force_connector_basic@force-load-detect:
- bat-adlp-4: NOTRUN -> [SKIP][8] ([fdo#109285])
[8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/bat-adlp-4/igt@kms_force_connector_basic@force-load-detect.html
* igt@prime_vgem@basic-fence-read:
- bat-adlp-4: NOTRUN -> [SKIP][9] ([i915#3291] / [i915#3708]) +2 similar issues
[9]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/bat-adlp-4/igt@prime_vgem@basic-fence-read.html
* igt@prime_vgem@basic-userptr:
- bat-adlp-4: NOTRUN -> [SKIP][10] ([i915#3301] / [i915#3708])
[10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/bat-adlp-4/igt@prime_vgem@basic-userptr.html
#### Possible fixes ####
* igt@gem_exec_suspend@basic-s0@smem:
- fi-tgl-1115g4: [FAIL][11] ([i915#1888]) -> [PASS][12]
[11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/fi-tgl-1115g4/igt@gem_exec_suspend@basic-s0@smem.html
[12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/fi-tgl-1115g4/igt@gem_exec_suspend@basic-s0@smem.html
* igt@kms_flip@basic-flip-vs-wf_vblank@a-vga1:
- fi-bwr-2160: [FAIL][13] ([i915#2122]) -> [PASS][14]
[13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/fi-bwr-2160/igt@kms_flip@basic-flip-vs-wf_vblank@a-vga1.html
[14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/fi-bwr-2160/igt@kms_flip@basic-flip-vs-wf_vblank@a-vga1.html
#### Warnings ####
* igt@runner@aborted:
- fi-skl-6600u: [FAIL][15] ([i915#4312]) -> [FAIL][16] ([i915#2722] / [i915#4312])
[15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/fi-skl-6600u/igt@runner@aborted.html
[16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/fi-skl-6600u/igt@runner@aborted.html
{name}: This element is suppressed. This means it is ignored when computing
the status of the difference (SUCCESS, WARNING, or FAILURE).
[fdo#109285]: https://bugs.freedesktop.org/show_bug.cgi?id=109285
[fdo#111827]: https://bugs.freedesktop.org/show_bug.cgi?id=111827
[i915#1888]: https://gitlab.freedesktop.org/drm/intel/issues/1888
[i915#2122]: https://gitlab.freedesktop.org/drm/intel/issues/2122
[i915#2722]: https://gitlab.freedesktop.org/drm/intel/issues/2722
[i915#3282]: https://gitlab.freedesktop.org/drm/intel/issues/3282
[i915#3291]: https://gitlab.freedesktop.org/drm/intel/issues/3291
[i915#3301]: https://gitlab.freedesktop.org/drm/intel/issues/3301
[i915#3708]: https://gitlab.freedesktop.org/drm/intel/issues/3708
[i915#4103]: https://gitlab.freedesktop.org/drm/intel/issues/4103
[i915#4312]: https://gitlab.freedesktop.org/drm/intel/issues/4312
[i915#4547]: https://gitlab.freedesktop.org/drm/intel/issues/4547
[i915#4613]: https://gitlab.freedesktop.org/drm/intel/issues/4613
Build changes
-------------
* Linux: CI_DRM_11066 -> Patchwork_21970
CI-20190529: 20190529
CI_DRM_11066: fc076e8fc52ed40fee33f416a4475a57219011a5 @ git://anongit.freedesktop.org/gfx-ci/linux
IGT_6326: ec75f64fcbcf4aac58fbf1bf629e8f59b19db4ce @ https://gitlab.freedesktop.org/drm/igt-gpu-tools.git
Patchwork_21970: 7c34b8acae82a4c45c1b408cf97912dfa07bf3a2 @ git://anongit.freedesktop.org/gfx-ci/linux
== Linux commits ==
7c34b8acae82 drm/i915: Flip guc_id allocation partition
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/index.html
[-- Attachment #2: Type: text/html, Size: 6910 bytes --]
^ permalink raw reply [flat|nested] 18+ messages in thread
* [Intel-gfx] ✓ Fi.CI.IGT: success for drm/i915: Flip guc_id allocation partition
2022-01-11 16:30 ` Matthew Brost
` (2 preceding siblings ...)
(?)
@ 2022-01-12 1:14 ` Patchwork
-1 siblings, 0 replies; 18+ messages in thread
From: Patchwork @ 2022-01-12 1:14 UTC (permalink / raw)
To: Matthew Brost; +Cc: intel-gfx
[-- Attachment #1: Type: text/plain, Size: 30265 bytes --]
== Series Details ==
Series: drm/i915: Flip guc_id allocation partition
URL : https://patchwork.freedesktop.org/series/98751/
State : success
== Summary ==
CI Bug Log - changes from CI_DRM_11066_full -> Patchwork_21970_full
====================================================
Summary
-------
**SUCCESS**
No regressions found.
Participating hosts (10 -> 10)
------------------------------
No changes in participating hosts
Known issues
------------
Here are the changes found in Patchwork_21970_full that come from known issues:
### IGT changes ###
#### Issues hit ####
* igt@feature_discovery@psr2:
- shard-iclb: [PASS][1] -> [SKIP][2] ([i915#658])
[1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-iclb2/igt@feature_discovery@psr2.html
[2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-iclb5/igt@feature_discovery@psr2.html
* igt@gem_create@create-massive:
- shard-apl: NOTRUN -> [DMESG-WARN][3] ([i915#3002])
[3]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-apl2/igt@gem_create@create-massive.html
* igt@gem_ctx_isolation@preservation-s3@vcs0:
- shard-tglb: [PASS][4] -> [DMESG-WARN][5] ([i915#2411] / [i915#2867]) +2 similar issues
[4]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-tglb6/igt@gem_ctx_isolation@preservation-s3@vcs0.html
[5]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-tglb5/igt@gem_ctx_isolation@preservation-s3@vcs0.html
* igt@gem_eio@unwedge-stress:
- shard-tglb: [PASS][6] -> [FAIL][7] ([i915#232])
[6]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-tglb6/igt@gem_eio@unwedge-stress.html
[7]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-tglb5/igt@gem_eio@unwedge-stress.html
* igt@gem_exec_balancer@parallel-out-fence:
- shard-iclb: [PASS][8] -> [SKIP][9] ([i915#4525]) +1 similar issue
[8]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-iclb4/igt@gem_exec_balancer@parallel-out-fence.html
[9]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-iclb6/igt@gem_exec_balancer@parallel-out-fence.html
* igt@gem_exec_capture@pi@vcs0:
- shard-skl: NOTRUN -> [INCOMPLETE][10] ([i915#4547])
[10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-skl9/igt@gem_exec_capture@pi@vcs0.html
* igt@gem_exec_fair@basic-none-solo@rcs0:
- shard-glk: NOTRUN -> [FAIL][11] ([i915#2842])
[11]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-glk7/igt@gem_exec_fair@basic-none-solo@rcs0.html
* igt@gem_exec_fair@basic-none-vip@rcs0:
- shard-kbl: [PASS][12] -> [FAIL][13] ([i915#2842]) +3 similar issues
[12]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-kbl6/igt@gem_exec_fair@basic-none-vip@rcs0.html
[13]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-kbl7/igt@gem_exec_fair@basic-none-vip@rcs0.html
* igt@gem_exec_whisper@basic-forked:
- shard-glk: [PASS][14] -> [DMESG-WARN][15] ([i915#118]) +1 similar issue
[14]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-glk2/igt@gem_exec_whisper@basic-forked.html
[15]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-glk2/igt@gem_exec_whisper@basic-forked.html
* igt@gem_lmem_swapping@heavy-multi:
- shard-skl: NOTRUN -> [SKIP][16] ([fdo#109271] / [i915#4613]) +1 similar issue
[16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-skl4/igt@gem_lmem_swapping@heavy-multi.html
* igt@gem_lmem_swapping@random:
- shard-apl: NOTRUN -> [SKIP][17] ([fdo#109271] / [i915#4613])
[17]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-apl2/igt@gem_lmem_swapping@random.html
* igt@gem_pread@exhaustion:
- shard-glk: NOTRUN -> [WARN][18] ([i915#2658])
[18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-glk7/igt@gem_pread@exhaustion.html
* igt@gem_pxp@verify-pxp-execution-after-suspend-resume:
- shard-tglb: NOTRUN -> [SKIP][19] ([i915#4270]) +1 similar issue
[19]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-tglb7/igt@gem_pxp@verify-pxp-execution-after-suspend-resume.html
* igt@gem_render_copy@linear-to-vebox-y-tiled:
- shard-apl: NOTRUN -> [SKIP][20] ([fdo#109271]) +35 similar issues
[20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-apl2/igt@gem_render_copy@linear-to-vebox-y-tiled.html
* igt@gem_render_copy@y-tiled-to-vebox-x-tiled:
- shard-glk: NOTRUN -> [SKIP][21] ([fdo#109271]) +37 similar issues
[21]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-glk7/igt@gem_render_copy@y-tiled-to-vebox-x-tiled.html
* igt@gem_userptr_blits@dmabuf-sync:
- shard-iclb: NOTRUN -> [SKIP][22] ([i915#3323])
[22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-iclb5/igt@gem_userptr_blits@dmabuf-sync.html
- shard-tglb: NOTRUN -> [SKIP][23] ([i915#3323])
[23]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-tglb6/igt@gem_userptr_blits@dmabuf-sync.html
* igt@gem_userptr_blits@unsync-overlap:
- shard-tglb: NOTRUN -> [SKIP][24] ([i915#3297])
[24]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-tglb7/igt@gem_userptr_blits@unsync-overlap.html
* igt@gen9_exec_parse@shadow-peek:
- shard-tglb: NOTRUN -> [SKIP][25] ([i915#2527] / [i915#2856]) +1 similar issue
[25]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-tglb6/igt@gen9_exec_parse@shadow-peek.html
- shard-iclb: NOTRUN -> [SKIP][26] ([i915#2856])
[26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-iclb5/igt@gen9_exec_parse@shadow-peek.html
* igt@i915_pm_lpsp@kms-lpsp@kms-lpsp-dp:
- shard-kbl: NOTRUN -> [SKIP][27] ([fdo#109271] / [i915#1937])
[27]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-kbl1/igt@i915_pm_lpsp@kms-lpsp@kms-lpsp-dp.html
* igt@i915_pm_rc6_residency@media-rc6-accuracy:
- shard-tglb: NOTRUN -> [SKIP][28] ([fdo#109289] / [fdo#111719])
[28]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-tglb7/igt@i915_pm_rc6_residency@media-rc6-accuracy.html
* igt@kms_big_fb@y-tiled-8bpp-rotate-90:
- shard-tglb: NOTRUN -> [SKIP][29] ([fdo#111614])
[29]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-tglb7/igt@kms_big_fb@y-tiled-8bpp-rotate-90.html
* igt@kms_big_fb@y-tiled-max-hw-stride-32bpp-rotate-180-async-flip:
- shard-skl: NOTRUN -> [FAIL][30] ([i915#3743])
[30]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-skl7/igt@kms_big_fb@y-tiled-max-hw-stride-32bpp-rotate-180-async-flip.html
* igt@kms_big_fb@y-tiled-max-hw-stride-32bpp-rotate-180-hflip:
- shard-skl: NOTRUN -> [SKIP][31] ([fdo#109271] / [i915#3777])
[31]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-skl4/igt@kms_big_fb@y-tiled-max-hw-stride-32bpp-rotate-180-hflip.html
* igt@kms_big_fb@y-tiled-max-hw-stride-64bpp-rotate-180-hflip:
- shard-apl: NOTRUN -> [SKIP][32] ([fdo#109271] / [i915#3777])
[32]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-apl2/igt@kms_big_fb@y-tiled-max-hw-stride-64bpp-rotate-180-hflip.html
* igt@kms_big_fb@yf-tiled-64bpp-rotate-90:
- shard-tglb: NOTRUN -> [SKIP][33] ([fdo#111615])
[33]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-tglb7/igt@kms_big_fb@yf-tiled-64bpp-rotate-90.html
* igt@kms_ccs@pipe-b-bad-aux-stride-y_tiled_gen12_rc_ccs:
- shard-iclb: NOTRUN -> [SKIP][34] ([fdo#109278]) +1 similar issue
[34]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-iclb5/igt@kms_ccs@pipe-b-bad-aux-stride-y_tiled_gen12_rc_ccs.html
* igt@kms_ccs@pipe-b-crc-primary-basic-y_tiled_gen12_mc_ccs:
- shard-kbl: NOTRUN -> [SKIP][35] ([fdo#109271] / [i915#3886]) +1 similar issue
[35]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-kbl1/igt@kms_ccs@pipe-b-crc-primary-basic-y_tiled_gen12_mc_ccs.html
* igt@kms_ccs@pipe-b-crc-primary-basic-y_tiled_gen12_rc_ccs_cc:
- shard-glk: NOTRUN -> [SKIP][36] ([fdo#109271] / [i915#3886])
[36]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-glk7/igt@kms_ccs@pipe-b-crc-primary-basic-y_tiled_gen12_rc_ccs_cc.html
* igt@kms_ccs@pipe-b-crc-primary-rotation-180-y_tiled_gen12_mc_ccs:
- shard-apl: NOTRUN -> [SKIP][37] ([fdo#109271] / [i915#3886])
[37]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-apl2/igt@kms_ccs@pipe-b-crc-primary-rotation-180-y_tiled_gen12_mc_ccs.html
* igt@kms_ccs@pipe-b-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc:
- shard-skl: NOTRUN -> [SKIP][38] ([fdo#109271] / [i915#3886]) +5 similar issues
[38]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-skl4/igt@kms_ccs@pipe-b-crc-primary-rotation-180-y_tiled_gen12_rc_ccs_cc.html
* igt@kms_ccs@pipe-c-crc-primary-basic-y_tiled_gen12_mc_ccs:
- shard-tglb: NOTRUN -> [SKIP][39] ([i915#3689] / [i915#3886])
[39]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-tglb7/igt@kms_ccs@pipe-c-crc-primary-basic-y_tiled_gen12_mc_ccs.html
* igt@kms_ccs@pipe-d-bad-aux-stride-y_tiled_gen12_mc_ccs:
- shard-tglb: NOTRUN -> [SKIP][40] ([i915#3689]) +1 similar issue
[40]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-tglb7/igt@kms_ccs@pipe-d-bad-aux-stride-y_tiled_gen12_mc_ccs.html
* igt@kms_ccs@pipe-d-bad-pixel-format-yf_tiled_ccs:
- shard-kbl: NOTRUN -> [SKIP][41] ([fdo#109271]) +20 similar issues
[41]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-kbl1/igt@kms_ccs@pipe-d-bad-pixel-format-yf_tiled_ccs.html
* igt@kms_ccs@pipe-d-crc-primary-rotation-180-yf_tiled_ccs:
- shard-tglb: NOTRUN -> [SKIP][42] ([fdo#111615] / [i915#3689]) +2 similar issues
[42]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-tglb7/igt@kms_ccs@pipe-d-crc-primary-rotation-180-yf_tiled_ccs.html
* igt@kms_chamelium@common-hpd-after-suspend:
- shard-apl: NOTRUN -> [SKIP][43] ([fdo#109271] / [fdo#111827]) +3 similar issues
[43]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-apl4/igt@kms_chamelium@common-hpd-after-suspend.html
* igt@kms_chamelium@vga-hpd-after-suspend:
- shard-glk: NOTRUN -> [SKIP][44] ([fdo#109271] / [fdo#111827]) +2 similar issues
[44]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-glk7/igt@kms_chamelium@vga-hpd-after-suspend.html
* igt@kms_color_chamelium@pipe-a-ctm-0-75:
- shard-kbl: NOTRUN -> [SKIP][45] ([fdo#109271] / [fdo#111827])
[45]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-kbl1/igt@kms_color_chamelium@pipe-a-ctm-0-75.html
* igt@kms_color_chamelium@pipe-d-ctm-red-to-blue:
- shard-tglb: NOTRUN -> [SKIP][46] ([fdo#109284] / [fdo#111827]) +3 similar issues
[46]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-tglb6/igt@kms_color_chamelium@pipe-d-ctm-red-to-blue.html
- shard-skl: NOTRUN -> [SKIP][47] ([fdo#109271] / [fdo#111827]) +7 similar issues
[47]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-skl10/igt@kms_color_chamelium@pipe-d-ctm-red-to-blue.html
- shard-iclb: NOTRUN -> [SKIP][48] ([fdo#109278] / [fdo#109284] / [fdo#111827])
[48]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-iclb5/igt@kms_color_chamelium@pipe-d-ctm-red-to-blue.html
* igt@kms_content_protection@srm:
- shard-tglb: NOTRUN -> [SKIP][49] ([i915#1063])
[49]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-tglb7/igt@kms_content_protection@srm.html
* igt@kms_cursor_crc@pipe-b-cursor-32x10-sliding:
- shard-tglb: NOTRUN -> [SKIP][50] ([i915#3359]) +2 similar issues
[50]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-tglb1/igt@kms_cursor_crc@pipe-b-cursor-32x10-sliding.html
* igt@kms_cursor_crc@pipe-b-cursor-512x170-random:
- shard-tglb: NOTRUN -> [SKIP][51] ([fdo#109279] / [i915#3359])
[51]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-tglb7/igt@kms_cursor_crc@pipe-b-cursor-512x170-random.html
* igt@kms_cursor_crc@pipe-c-cursor-32x32-sliding:
- shard-tglb: NOTRUN -> [SKIP][52] ([i915#3319])
[52]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-tglb7/igt@kms_cursor_crc@pipe-c-cursor-32x32-sliding.html
* igt@kms_cursor_legacy@basic-busy-flip-before-cursor-legacy:
- shard-tglb: NOTRUN -> [SKIP][53] ([i915#4103])
[53]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-tglb7/igt@kms_cursor_legacy@basic-busy-flip-before-cursor-legacy.html
* igt@kms_cursor_legacy@flip-vs-cursor-atomic-transitions-varying-size:
- shard-skl: [PASS][54] -> [FAIL][55] ([i915#2346] / [i915#533])
[54]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-skl7/igt@kms_cursor_legacy@flip-vs-cursor-atomic-transitions-varying-size.html
[55]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-skl6/igt@kms_cursor_legacy@flip-vs-cursor-atomic-transitions-varying-size.html
* igt@kms_flip@2x-flip-vs-expired-vblank@ab-hdmi-a1-hdmi-a2:
- shard-glk: [PASS][56] -> [FAIL][57] ([i915#79])
[56]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-glk5/igt@kms_flip@2x-flip-vs-expired-vblank@ab-hdmi-a1-hdmi-a2.html
[57]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-glk8/igt@kms_flip@2x-flip-vs-expired-vblank@ab-hdmi-a1-hdmi-a2.html
* igt@kms_flip@2x-flip-vs-panning-interruptible:
- shard-iclb: NOTRUN -> [SKIP][58] ([fdo#109274])
[58]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-iclb5/igt@kms_flip@2x-flip-vs-panning-interruptible.html
* igt@kms_flip@2x-plain-flip-fb-recreate:
- shard-tglb: NOTRUN -> [SKIP][59] ([fdo#109274] / [fdo#111825]) +3 similar issues
[59]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-tglb7/igt@kms_flip@2x-plain-flip-fb-recreate.html
* igt@kms_flip@flip-vs-suspend@b-dp1:
- shard-apl: [PASS][60] -> [DMESG-WARN][61] ([i915#180])
[60]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-apl6/igt@kms_flip@flip-vs-suspend@b-dp1.html
[61]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-apl1/igt@kms_flip@flip-vs-suspend@b-dp1.html
* igt@kms_flip@plain-flip-ts-check-interruptible@a-edp1:
- shard-skl: [PASS][62] -> [FAIL][63] ([i915#2122])
[62]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-skl10/igt@kms_flip@plain-flip-ts-check-interruptible@a-edp1.html
[63]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-skl2/igt@kms_flip@plain-flip-ts-check-interruptible@a-edp1.html
* igt@kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling:
- shard-tglb: NOTRUN -> [SKIP][64] ([i915#2587])
[64]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-tglb1/igt@kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling.html
* igt@kms_frontbuffer_tracking@fbcpsr-2p-primscrn-cur-indfb-draw-mmap-gtt:
- shard-tglb: NOTRUN -> [SKIP][65] ([fdo#109280] / [fdo#111825]) +8 similar issues
[65]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-tglb7/igt@kms_frontbuffer_tracking@fbcpsr-2p-primscrn-cur-indfb-draw-mmap-gtt.html
* igt@kms_hdr@static-toggle-dpms:
- shard-skl: NOTRUN -> [SKIP][66] ([fdo#109271]) +90 similar issues
[66]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-skl4/igt@kms_hdr@static-toggle-dpms.html
* igt@kms_pipe_crc_basic@compare-crc-sanitycheck-pipe-d:
- shard-glk: NOTRUN -> [SKIP][67] ([fdo#109271] / [i915#533])
[67]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-glk7/igt@kms_pipe_crc_basic@compare-crc-sanitycheck-pipe-d.html
* igt@kms_pipe_crc_basic@suspend-read-crc-pipe-c:
- shard-kbl: [PASS][68] -> [INCOMPLETE][69] ([i915#2828])
[68]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-kbl7/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-c.html
[69]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-kbl4/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-c.html
* igt@kms_plane@plane-panning-bottom-right-suspend@pipe-b-planes:
- shard-kbl: [PASS][70] -> [DMESG-WARN][71] ([i915#180]) +1 similar issue
[70]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-kbl4/igt@kms_plane@plane-panning-bottom-right-suspend@pipe-b-planes.html
[71]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-kbl4/igt@kms_plane@plane-panning-bottom-right-suspend@pipe-b-planes.html
* igt@kms_plane_alpha_blend@pipe-c-alpha-opaque-fb:
- shard-apl: NOTRUN -> [FAIL][72] ([fdo#108145] / [i915#265])
[72]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-apl4/igt@kms_plane_alpha_blend@pipe-c-alpha-opaque-fb.html
* igt@kms_plane_lowres@pipe-b-tiling-none:
- shard-tglb: NOTRUN -> [SKIP][73] ([i915#3536])
[73]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-tglb7/igt@kms_plane_lowres@pipe-b-tiling-none.html
* igt@kms_plane_lowres@pipe-c-tiling-yf:
- shard-tglb: NOTRUN -> [SKIP][74] ([fdo#111615] / [fdo#112054])
[74]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-tglb7/igt@kms_plane_lowres@pipe-c-tiling-yf.html
* igt@kms_psr2_sf@overlay-plane-update-continuous-sf:
- shard-tglb: NOTRUN -> [SKIP][75] ([i915#2920])
[75]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-tglb7/igt@kms_psr2_sf@overlay-plane-update-continuous-sf.html
* igt@kms_psr2_su@page_flip-p010:
- shard-skl: NOTRUN -> [SKIP][76] ([fdo#109271] / [i915#658]) +1 similar issue
[76]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-skl9/igt@kms_psr2_su@page_flip-p010.html
* igt@kms_psr@psr2_cursor_blt:
- shard-iclb: [PASS][77] -> [SKIP][78] ([fdo#109441]) +1 similar issue
[77]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-iclb2/igt@kms_psr@psr2_cursor_blt.html
[78]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-iclb5/igt@kms_psr@psr2_cursor_blt.html
* igt@kms_psr@psr2_primary_render:
- shard-tglb: NOTRUN -> [FAIL][79] ([i915#132] / [i915#3467])
[79]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-tglb7/igt@kms_psr@psr2_primary_render.html
* igt@kms_setmode@basic:
- shard-glk: [PASS][80] -> [FAIL][81] ([i915#31])
[80]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-glk3/igt@kms_setmode@basic.html
[81]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-glk2/igt@kms_setmode@basic.html
* igt@kms_sysfs_edid_timing:
- shard-apl: NOTRUN -> [FAIL][82] ([IGT#2])
[82]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-apl4/igt@kms_sysfs_edid_timing.html
- shard-skl: NOTRUN -> [FAIL][83] ([IGT#2])
[83]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-skl9/igt@kms_sysfs_edid_timing.html
* igt@nouveau_crc@pipe-a-source-outp-complete:
- shard-tglb: NOTRUN -> [SKIP][84] ([i915#2530])
[84]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-tglb7/igt@nouveau_crc@pipe-a-source-outp-complete.html
* igt@perf@gen8-unprivileged-single-ctx-counters:
- shard-tglb: NOTRUN -> [SKIP][85] ([fdo#109289])
[85]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-tglb7/igt@perf@gen8-unprivileged-single-ctx-counters.html
* igt@perf@polling-parameterized:
- shard-skl: [PASS][86] -> [FAIL][87] ([i915#1542])
[86]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-skl10/igt@perf@polling-parameterized.html
[87]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-skl2/igt@perf@polling-parameterized.html
* igt@prime_nv_test@i915_nv_sharing:
- shard-tglb: NOTRUN -> [SKIP][88] ([fdo#109291]) +2 similar issues
[88]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-tglb7/igt@prime_nv_test@i915_nv_sharing.html
* igt@sysfs_clients@fair-0:
- shard-skl: NOTRUN -> [SKIP][89] ([fdo#109271] / [i915#2994]) +1 similar issue
[89]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-skl7/igt@sysfs_clients@fair-0.html
* igt@sysfs_clients@fair-1:
- shard-glk: NOTRUN -> [SKIP][90] ([fdo#109271] / [i915#2994])
[90]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-glk7/igt@sysfs_clients@fair-1.html
* igt@sysfs_clients@sema-10:
- shard-tglb: NOTRUN -> [SKIP][91] ([i915#2994])
[91]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-tglb7/igt@sysfs_clients@sema-10.html
#### Possible fixes ####
* igt@gem_ctx_persistence@many-contexts:
- shard-tglb: [FAIL][92] ([i915#2410]) -> [PASS][93]
[92]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-tglb3/igt@gem_ctx_persistence@many-contexts.html
[93]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-tglb7/igt@gem_ctx_persistence@many-contexts.html
* igt@gem_exec_balancer@parallel-balancer:
- shard-iclb: [SKIP][94] ([i915#4525]) -> [PASS][95] +1 similar issue
[94]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-iclb8/igt@gem_exec_balancer@parallel-balancer.html
[95]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-iclb2/igt@gem_exec_balancer@parallel-balancer.html
* igt@gem_exec_fair@basic-flow@rcs0:
- shard-tglb: [FAIL][96] ([i915#2842]) -> [PASS][97]
[96]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-tglb2/igt@gem_exec_fair@basic-flow@rcs0.html
[97]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-tglb6/igt@gem_exec_fair@basic-flow@rcs0.html
* igt@gem_exec_fair@basic-none@vcs0:
- shard-kbl: [FAIL][98] ([i915#2842]) -> [PASS][99] +1 similar issue
[98]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-kbl6/igt@gem_exec_fair@basic-none@vcs0.html
[99]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-kbl1/igt@gem_exec_fair@basic-none@vcs0.html
* igt@gem_exec_fair@basic-pace@vcs0:
- shard-glk: [FAIL][100] ([i915#2842]) -> [PASS][101] +1 similar issue
[100]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-glk7/igt@gem_exec_fair@basic-pace@vcs0.html
[101]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-glk9/igt@gem_exec_fair@basic-pace@vcs0.html
* igt@gem_exec_fair@basic-throttle@rcs0:
- shard-iclb: [FAIL][102] ([i915#2849]) -> [PASS][103]
[102]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-iclb3/igt@gem_exec_fair@basic-throttle@rcs0.html
[103]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-iclb3/igt@gem_exec_fair@basic-throttle@rcs0.html
* igt@gem_exec_parallel@engines@basic:
- shard-glk: [DMESG-WARN][104] ([i915#118]) -> [PASS][105] +1 similar issue
[104]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-glk5/igt@gem_exec_parallel@engines@basic.html
[105]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-glk8/igt@gem_exec_parallel@engines@basic.html
* igt@kms_cursor_legacy@flip-vs-cursor-toggle:
- shard-iclb: [FAIL][106] ([i915#2346]) -> [PASS][107]
[106]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-iclb7/igt@kms_cursor_legacy@flip-vs-cursor-toggle.html
[107]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-iclb7/igt@kms_cursor_legacy@flip-vs-cursor-toggle.html
* igt@kms_flip@flip-vs-suspend-interruptible@b-dp1:
- shard-apl: [DMESG-WARN][108] ([i915#180]) -> [PASS][109] +1 similar issue
[108]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-apl4/igt@kms_flip@flip-vs-suspend-interruptible@b-dp1.html
[109]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-apl2/igt@kms_flip@flip-vs-suspend-interruptible@b-dp1.html
* igt@kms_flip@flip-vs-suspend@c-dp1:
- shard-kbl: [DMESG-WARN][110] ([i915#180]) -> [PASS][111] +3 similar issues
[110]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-kbl4/igt@kms_flip@flip-vs-suspend@c-dp1.html
[111]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-kbl1/igt@kms_flip@flip-vs-suspend@c-dp1.html
* igt@kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling:
- shard-glk: [FAIL][112] ([i915#4911]) -> [PASS][113]
[112]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-glk8/igt@kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling.html
[113]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-glk1/igt@kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling.html
* igt@kms_psr@psr2_no_drrs:
- shard-iclb: [SKIP][114] ([fdo#109441]) -> [PASS][115]
[114]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-iclb8/igt@kms_psr@psr2_no_drrs.html
[115]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-iclb2/igt@kms_psr@psr2_no_drrs.html
#### Warnings ####
* igt@gem_exec_balancer@parallel-ordering:
- shard-iclb: [SKIP][116] ([i915#4525]) -> [FAIL][117] ([i915#4916])
[116]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-iclb8/igt@gem_exec_balancer@parallel-ordering.html
[117]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-iclb4/igt@gem_exec_balancer@parallel-ordering.html
* igt@gem_exec_fair@basic-pace@rcs0:
- shard-kbl: [FAIL][118] ([i915#2851]) -> [FAIL][119] ([i915#2842])
[118]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-kbl6/igt@gem_exec_fair@basic-pace@rcs0.html
[119]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-kbl7/igt@gem_exec_fair@basic-pace@rcs0.html
* igt@i915_pm_rc6_residency@rc6-idle:
- shard-iclb: [WARN][120] ([i915#1804] / [i915#2684]) -> [WARN][121] ([i915#2684])
[120]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-iclb4/igt@i915_pm_rc6_residency@rc6-idle.html
[121]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-iclb8/igt@i915_pm_rc6_residency@rc6-idle.html
* igt@kms_psr2_sf@plane-move-sf-dmg-area:
- shard-iclb: [SKIP][122] ([fdo#111068] / [i915#658]) -> [SKIP][123] ([i915#2920])
[122]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-iclb6/igt@kms_psr2_sf@plane-move-sf-dmg-area.html
[123]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-iclb2/igt@kms_psr2_sf@plane-move-sf-dmg-area.html
* igt@runner@aborted:
- shard-kbl: ([FAIL][124], [FAIL][125], [FAIL][126], [FAIL][127], [FAIL][128]) ([i915#180] / [i915#1814] / [i915#3002] / [i915#4312]) -> ([FAIL][129], [FAIL][130], [FAIL][131], [FAIL][132], [FAIL][133]) ([i915#1436] / [i915#180] / [i915#1814] / [i915#3002] / [i915#4312])
[124]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-kbl4/igt@runner@aborted.html
[125]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-kbl3/igt@runner@aborted.html
[126]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-kbl4/igt@runner@aborted.html
[127]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-kbl4/igt@runner@aborted.html
[128]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-kbl1/igt@runner@aborted.html
[129]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-kbl6/igt@runner@aborted.html
[130]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-kbl4/igt@runner@aborted.html
[131]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-kbl4/igt@runner@aborted.html
[132]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-kbl1/igt@runner@aborted.html
[133]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-kbl4/igt@runner@aborted.html
- shard-apl: ([FAIL][134], [FAIL][135], [FAIL][136]) ([i915#180] / [i915#1814] / [i915#3002] / [i915#4312]) -> ([FAIL][137], [FAIL][138], [FAIL][139]) ([i915#180] / [i915#3002] / [i915#4312])
[134]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-apl4/igt@runner@aborted.html
[135]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-apl6/igt@runner@aborted.html
[136]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-apl4/igt@runner@aborted.html
[137]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-apl7/igt@runner@aborted.html
[138]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-apl2/igt@runner@aborted.html
[139]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-apl1/igt@runner@aborted.html
- shard-skl: ([FAIL][140], [FAIL][141], [FAIL][142], [FAIL][143], [FAIL][144]) ([i915#1814] / [i915#2029] / [i915#3002] / [i915#4312]) -> ([FAIL][145], [FAIL][146]) ([i915#3002] / [i915#4312])
[140]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-skl1/igt@runner@aborted.html
[141]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-skl4/igt@runner@aborted.html
[142]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-skl2/igt@runner@aborted.html
[143]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-skl3/igt@runner@aborted.html
[144]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11066/shard-skl3/igt@runner@aborted.html
[145]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-skl8/igt@runner@aborted.html
[146]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/shard-skl9/igt@runner@aborted.html
[IGT#2]: https://gitlab.freedesktop.org/drm/igt-gpu-tools/issues/2
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_21970/index.html
[-- Attachment #2: Type: text/html, Size: 34012 bytes --]
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [Intel-gfx] [PATCH] drm/i915: Flip guc_id allocation partition
2022-01-11 16:30 ` Matthew Brost
` (3 preceding siblings ...)
(?)
@ 2022-01-12 8:54 ` Tvrtko Ursulin
2022-01-12 17:09 ` Piotr Piórkowski
-1 siblings, 1 reply; 18+ messages in thread
From: Tvrtko Ursulin @ 2022-01-12 8:54 UTC (permalink / raw)
To: Matthew Brost, intel-gfx, dri-devel
On 11/01/2022 16:30, Matthew Brost wrote:
> Move the multi-lrc guc_id from the lower allocation partition (0 to
> number of multi-lrc guc_ids) to upper allocation partition (number of
> single-lrc to max guc_ids).
Just a reminder that best practice for commit messages is to include the
"why" as well.
Regards,
Tvrtko
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> ---
> .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 57 ++++++++++++++-----
> 1 file changed, 42 insertions(+), 15 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> index 9989d121127df..1bacc9621cea8 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> @@ -147,6 +147,8 @@ guc_create_parallel(struct intel_engine_cs **engines,
> */
> #define NUMBER_MULTI_LRC_GUC_ID(guc) \
> ((guc)->submission_state.num_guc_ids / 16)
> +#define NUMBER_SINGLE_LRC_GUC_ID(guc) \
> + ((guc)->submission_state.num_guc_ids - NUMBER_MULTI_LRC_GUC_ID(guc))
>
> /*
> * Below is a set of functions which control the GuC scheduling state which
> @@ -1776,11 +1778,6 @@ int intel_guc_submission_init(struct intel_guc *guc)
> INIT_WORK(&guc->submission_state.destroyed_worker,
> destroyed_worker_func);
>
> - guc->submission_state.guc_ids_bitmap =
> - bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
> - if (!guc->submission_state.guc_ids_bitmap)
> - return -ENOMEM;
> -
> spin_lock_init(&guc->timestamp.lock);
> INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
> guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
> @@ -1796,7 +1793,8 @@ void intel_guc_submission_fini(struct intel_guc *guc)
> guc_flush_destroyed_contexts(guc);
> guc_lrc_desc_pool_destroy(guc);
> i915_sched_engine_put(guc->sched_engine);
> - bitmap_free(guc->submission_state.guc_ids_bitmap);
> + if (guc->submission_state.guc_ids_bitmap)
> + bitmap_free(guc->submission_state.guc_ids_bitmap);
> }
>
> static inline void queue_request(struct i915_sched_engine *sched_engine,
> @@ -1863,6 +1861,33 @@ static void guc_submit_request(struct i915_request *rq)
> spin_unlock_irqrestore(&sched_engine->lock, flags);
> }
>
> +static int new_mlrc_guc_id(struct intel_guc *guc, struct intel_context *ce)
> +{
> + int ret;
> +
> + GEM_BUG_ON(!intel_context_is_parent(ce));
> + GEM_BUG_ON(!guc->submission_state.guc_ids_bitmap);
> +
> + ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
> + NUMBER_MULTI_LRC_GUC_ID(guc),
> + order_base_2(ce->parallel.number_children
> + + 1));
> + if (likely(!(ret < 0)))
> + ret += NUMBER_SINGLE_LRC_GUC_ID(guc);
> +
> + return ret;
> +}
> +
> +static int new_slrc_guc_id(struct intel_guc *guc, struct intel_context *ce)
> +{
> + GEM_BUG_ON(intel_context_is_parent(ce));
> +
> + return ida_simple_get(&guc->submission_state.guc_ids,
> + 0, NUMBER_SINGLE_LRC_GUC_ID(guc),
> + GFP_KERNEL | __GFP_RETRY_MAYFAIL |
> + __GFP_NOWARN);
> +}
> +
> static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
> {
> int ret;
> @@ -1870,16 +1895,10 @@ static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
> GEM_BUG_ON(intel_context_is_child(ce));
>
> if (intel_context_is_parent(ce))
> - ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
> - NUMBER_MULTI_LRC_GUC_ID(guc),
> - order_base_2(ce->parallel.number_children
> - + 1));
> + ret = new_mlrc_guc_id(guc, ce);
> else
> - ret = ida_simple_get(&guc->submission_state.guc_ids,
> - NUMBER_MULTI_LRC_GUC_ID(guc),
> - guc->submission_state.num_guc_ids,
> - GFP_KERNEL | __GFP_RETRY_MAYFAIL |
> - __GFP_NOWARN);
> + ret = new_slrc_guc_id(guc, ce);
> +
> if (unlikely(ret < 0))
> return ret;
>
> @@ -1989,6 +2008,14 @@ static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
>
> GEM_BUG_ON(atomic_read(&ce->guc_id.ref));
>
> + if (unlikely(intel_context_is_parent(ce) &&
> + !guc->submission_state.guc_ids_bitmap)) {
> + guc->submission_state.guc_ids_bitmap =
> + bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
> + if (!guc->submission_state.guc_ids_bitmap)
> + return -ENOMEM;
> + }
> +
> try_again:
> spin_lock_irqsave(&guc->submission_state.lock, flags);
>
>
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [Intel-gfx] [PATCH] drm/i915: Flip guc_id allocation partition
2022-01-12 8:54 ` [Intel-gfx] [PATCH] " Tvrtko Ursulin
@ 2022-01-12 17:09 ` Piotr Piórkowski
0 siblings, 0 replies; 18+ messages in thread
From: Piotr Piórkowski @ 2022-01-12 17:09 UTC (permalink / raw)
To: Matthew Brost; +Cc: Tvrtko Ursulin, intel-gfx, dri-devel
Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com> wrote on śro [2022-sty-12 08:54:19 +0000]:
>
> On 11/01/2022 16:30, Matthew Brost wrote:
> > Move the multi-lrc guc_id from the lower allocation partition (0 to
> > number of multi-lrc guc_ids) to upper allocation partition (number of
> > single-lrc to max guc_ids).
>
> Just a reminder that best practice for commit messages is to include the
> "why" as well.
>
> Regards,
>
> Tvrtko
>
In my opinion this patch is a good step forward.
Lazy allocation of the bitmap for MLRC and moving the MLRC pool to the
end will allow easier development of contexts for SR-IOV.
Introduction of two new helpers (new_mlrc_guc_id and new_slrc_guc_id) cleans up the code.
I agree with Tvrtko's comment that you should expand your commit
message.
One thing I personally don't like is this NUMBER_SINGLE_LRC_GUC_ID definition (same for MLRC)
In my opinion it should be an inline function and this value 1/16 defined as a constant
- Piotr
> > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> > ---
> > .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 57 ++++++++++++++-----
> > 1 file changed, 42 insertions(+), 15 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> > index 9989d121127df..1bacc9621cea8 100644
> > --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> > +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> > @@ -147,6 +147,8 @@ guc_create_parallel(struct intel_engine_cs **engines,
> > */
> > #define NUMBER_MULTI_LRC_GUC_ID(guc) \
> > ((guc)->submission_state.num_guc_ids / 16)
> > +#define NUMBER_SINGLE_LRC_GUC_ID(guc) \
> > + ((guc)->submission_state.num_guc_ids - NUMBER_MULTI_LRC_GUC_ID(guc))
> > /*
> > * Below is a set of functions which control the GuC scheduling state which
> > @@ -1776,11 +1778,6 @@ int intel_guc_submission_init(struct intel_guc *guc)
> > INIT_WORK(&guc->submission_state.destroyed_worker,
> > destroyed_worker_func);
> > - guc->submission_state.guc_ids_bitmap =
> > - bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
> > - if (!guc->submission_state.guc_ids_bitmap)
> > - return -ENOMEM;
> > -
> > spin_lock_init(&guc->timestamp.lock);
> > INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
> > guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
> > @@ -1796,7 +1793,8 @@ void intel_guc_submission_fini(struct intel_guc *guc)
> > guc_flush_destroyed_contexts(guc);
> > guc_lrc_desc_pool_destroy(guc);
> > i915_sched_engine_put(guc->sched_engine);
> > - bitmap_free(guc->submission_state.guc_ids_bitmap);
> > + if (guc->submission_state.guc_ids_bitmap)
> > + bitmap_free(guc->submission_state.guc_ids_bitmap);
> > }
> > static inline void queue_request(struct i915_sched_engine *sched_engine,
> > @@ -1863,6 +1861,33 @@ static void guc_submit_request(struct i915_request *rq)
> > spin_unlock_irqrestore(&sched_engine->lock, flags);
> > }
> > +static int new_mlrc_guc_id(struct intel_guc *guc, struct intel_context *ce)
> > +{
> > + int ret;
> > +
> > + GEM_BUG_ON(!intel_context_is_parent(ce));
> > + GEM_BUG_ON(!guc->submission_state.guc_ids_bitmap);
> > +
> > + ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
> > + NUMBER_MULTI_LRC_GUC_ID(guc),
> > + order_base_2(ce->parallel.number_children
> > + + 1));
> > + if (likely(!(ret < 0)))
> > + ret += NUMBER_SINGLE_LRC_GUC_ID(guc);
> > +
> > + return ret;
> > +}
> > +
> > +static int new_slrc_guc_id(struct intel_guc *guc, struct intel_context *ce)
> > +{
> > + GEM_BUG_ON(intel_context_is_parent(ce));
> > +
> > + return ida_simple_get(&guc->submission_state.guc_ids,
> > + 0, NUMBER_SINGLE_LRC_GUC_ID(guc),
> > + GFP_KERNEL | __GFP_RETRY_MAYFAIL |
> > + __GFP_NOWARN);
> > +}
> > +
> > static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
> > {
> > int ret;
> > @@ -1870,16 +1895,10 @@ static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
> > GEM_BUG_ON(intel_context_is_child(ce));
> > if (intel_context_is_parent(ce))
> > - ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
> > - NUMBER_MULTI_LRC_GUC_ID(guc),
> > - order_base_2(ce->parallel.number_children
> > - + 1));
> > + ret = new_mlrc_guc_id(guc, ce);
> > else
> > - ret = ida_simple_get(&guc->submission_state.guc_ids,
> > - NUMBER_MULTI_LRC_GUC_ID(guc),
> > - guc->submission_state.num_guc_ids,
> > - GFP_KERNEL | __GFP_RETRY_MAYFAIL |
> > - __GFP_NOWARN);
> > + ret = new_slrc_guc_id(guc, ce);
> > +
> > if (unlikely(ret < 0))
> > return ret;
> > @@ -1989,6 +2008,14 @@ static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
> > GEM_BUG_ON(atomic_read(&ce->guc_id.ref));
> > + if (unlikely(intel_context_is_parent(ce) &&
> > + !guc->submission_state.guc_ids_bitmap)) {
> > + guc->submission_state.guc_ids_bitmap =
> > + bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
> > + if (!guc->submission_state.guc_ids_bitmap)
> > + return -ENOMEM;
> > + }
> > +
> > try_again:
> > spin_lock_irqsave(&guc->submission_state.lock, flags);
> >
--
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [Intel-gfx] [PATCH] drm/i915: Flip guc_id allocation partition
@ 2022-01-12 17:09 ` Piotr Piórkowski
0 siblings, 0 replies; 18+ messages in thread
From: Piotr Piórkowski @ 2022-01-12 17:09 UTC (permalink / raw)
To: Matthew Brost; +Cc: intel-gfx, dri-devel
Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com> wrote on śro [2022-sty-12 08:54:19 +0000]:
>
> On 11/01/2022 16:30, Matthew Brost wrote:
> > Move the multi-lrc guc_id from the lower allocation partition (0 to
> > number of multi-lrc guc_ids) to upper allocation partition (number of
> > single-lrc to max guc_ids).
>
> Just a reminder that best practice for commit messages is to include the
> "why" as well.
>
> Regards,
>
> Tvrtko
>
In my opinion this patch is a good step forward.
Lazy allocation of the bitmap for MLRC and moving the MLRC pool to the
end will allow easier development of contexts for SR-IOV.
Introduction of two new helpers (new_mlrc_guc_id and new_slrc_guc_id) cleans up the code.
I agree with Tvrtko's comment that you should expand your commit
message.
One thing I personally don't like is this NUMBER_SINGLE_LRC_GUC_ID definition (same for MLRC)
In my opinion it should be an inline function and this value 1/16 defined as a constant
- Piotr
> > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> > ---
> > .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 57 ++++++++++++++-----
> > 1 file changed, 42 insertions(+), 15 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> > index 9989d121127df..1bacc9621cea8 100644
> > --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> > +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> > @@ -147,6 +147,8 @@ guc_create_parallel(struct intel_engine_cs **engines,
> > */
> > #define NUMBER_MULTI_LRC_GUC_ID(guc) \
> > ((guc)->submission_state.num_guc_ids / 16)
> > +#define NUMBER_SINGLE_LRC_GUC_ID(guc) \
> > + ((guc)->submission_state.num_guc_ids - NUMBER_MULTI_LRC_GUC_ID(guc))
> > /*
> > * Below is a set of functions which control the GuC scheduling state which
> > @@ -1776,11 +1778,6 @@ int intel_guc_submission_init(struct intel_guc *guc)
> > INIT_WORK(&guc->submission_state.destroyed_worker,
> > destroyed_worker_func);
> > - guc->submission_state.guc_ids_bitmap =
> > - bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
> > - if (!guc->submission_state.guc_ids_bitmap)
> > - return -ENOMEM;
> > -
> > spin_lock_init(&guc->timestamp.lock);
> > INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
> > guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
> > @@ -1796,7 +1793,8 @@ void intel_guc_submission_fini(struct intel_guc *guc)
> > guc_flush_destroyed_contexts(guc);
> > guc_lrc_desc_pool_destroy(guc);
> > i915_sched_engine_put(guc->sched_engine);
> > - bitmap_free(guc->submission_state.guc_ids_bitmap);
> > + if (guc->submission_state.guc_ids_bitmap)
> > + bitmap_free(guc->submission_state.guc_ids_bitmap);
> > }
> > static inline void queue_request(struct i915_sched_engine *sched_engine,
> > @@ -1863,6 +1861,33 @@ static void guc_submit_request(struct i915_request *rq)
> > spin_unlock_irqrestore(&sched_engine->lock, flags);
> > }
> > +static int new_mlrc_guc_id(struct intel_guc *guc, struct intel_context *ce)
> > +{
> > + int ret;
> > +
> > + GEM_BUG_ON(!intel_context_is_parent(ce));
> > + GEM_BUG_ON(!guc->submission_state.guc_ids_bitmap);
> > +
> > + ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
> > + NUMBER_MULTI_LRC_GUC_ID(guc),
> > + order_base_2(ce->parallel.number_children
> > + + 1));
> > + if (likely(!(ret < 0)))
> > + ret += NUMBER_SINGLE_LRC_GUC_ID(guc);
> > +
> > + return ret;
> > +}
> > +
> > +static int new_slrc_guc_id(struct intel_guc *guc, struct intel_context *ce)
> > +{
> > + GEM_BUG_ON(intel_context_is_parent(ce));
> > +
> > + return ida_simple_get(&guc->submission_state.guc_ids,
> > + 0, NUMBER_SINGLE_LRC_GUC_ID(guc),
> > + GFP_KERNEL | __GFP_RETRY_MAYFAIL |
> > + __GFP_NOWARN);
> > +}
> > +
> > static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
> > {
> > int ret;
> > @@ -1870,16 +1895,10 @@ static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
> > GEM_BUG_ON(intel_context_is_child(ce));
> > if (intel_context_is_parent(ce))
> > - ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
> > - NUMBER_MULTI_LRC_GUC_ID(guc),
> > - order_base_2(ce->parallel.number_children
> > - + 1));
> > + ret = new_mlrc_guc_id(guc, ce);
> > else
> > - ret = ida_simple_get(&guc->submission_state.guc_ids,
> > - NUMBER_MULTI_LRC_GUC_ID(guc),
> > - guc->submission_state.num_guc_ids,
> > - GFP_KERNEL | __GFP_RETRY_MAYFAIL |
> > - __GFP_NOWARN);
> > + ret = new_slrc_guc_id(guc, ce);
> > +
> > if (unlikely(ret < 0))
> > return ret;
> > @@ -1989,6 +2008,14 @@ static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
> > GEM_BUG_ON(atomic_read(&ce->guc_id.ref));
> > + if (unlikely(intel_context_is_parent(ce) &&
> > + !guc->submission_state.guc_ids_bitmap)) {
> > + guc->submission_state.guc_ids_bitmap =
> > + bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
> > + if (!guc->submission_state.guc_ids_bitmap)
> > + return -ENOMEM;
> > + }
> > +
> > try_again:
> > spin_lock_irqsave(&guc->submission_state.lock, flags);
> >
--
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [Intel-gfx] [PATCH] drm/i915: Flip guc_id allocation partition
2022-01-12 17:09 ` Piotr Piórkowski
@ 2022-01-12 17:23 ` Matthew Brost
-1 siblings, 0 replies; 18+ messages in thread
From: Matthew Brost @ 2022-01-12 17:23 UTC (permalink / raw)
To: Piotr Piórkowski; +Cc: Tvrtko Ursulin, intel-gfx, dri-devel
On Wed, Jan 12, 2022 at 06:09:06PM +0100, Piotr Piórkowski wrote:
> Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com> wrote on śro [2022-sty-12 08:54:19 +0000]:
> >
> > On 11/01/2022 16:30, Matthew Brost wrote:
> > > Move the multi-lrc guc_id from the lower allocation partition (0 to
> > > number of multi-lrc guc_ids) to upper allocation partition (number of
> > > single-lrc to max guc_ids).
> >
> > Just a reminder that best practice for commit messages is to include the
> > "why" as well.
> >
> > Regards,
> >
> > Tvrtko
> >
>
> In my opinion this patch is good step forward.
> Lazy allocation of the bitmap for MLRC and moving the MLRC pool to the
> end will allow easier development contexts for SR-IOV.
> Introduction of two new helpers (new_mlrc_guc_id and new_slrc_guc_id) cleans up the code.
>
> I agree with Tvrtko's comment that you should expand your commit
> message.
>
Agree. Didn't know if I could talk about SR-IOV publicly, but clearly
can, so I will add an explanation in the next rev.
> One thing I personally don't like is this NUMBER_SINGLE_LRC_GUC_ID definition (same for MLRC)
> In my opinion it should be inline function and this value 1/16 defined as constant
Agree. I'll move these to functions in next rev.
Matt
>
> - Piotr
> > > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> > > ---
> > > .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 57 ++++++++++++++-----
> > > 1 file changed, 42 insertions(+), 15 deletions(-)
> > >
> > > diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> > > index 9989d121127df..1bacc9621cea8 100644
> > > --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> > > +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> > > @@ -147,6 +147,8 @@ guc_create_parallel(struct intel_engine_cs **engines,
> > > */
> > > #define NUMBER_MULTI_LRC_GUC_ID(guc) \
> > > ((guc)->submission_state.num_guc_ids / 16)
> > > +#define NUMBER_SINGLE_LRC_GUC_ID(guc) \
> > > + ((guc)->submission_state.num_guc_ids - NUMBER_MULTI_LRC_GUC_ID(guc))
> > > /*
> > > * Below is a set of functions which control the GuC scheduling state which
> > > @@ -1776,11 +1778,6 @@ int intel_guc_submission_init(struct intel_guc *guc)
> > > INIT_WORK(&guc->submission_state.destroyed_worker,
> > > destroyed_worker_func);
> > > - guc->submission_state.guc_ids_bitmap =
> > > - bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
> > > - if (!guc->submission_state.guc_ids_bitmap)
> > > - return -ENOMEM;
> > > -
> > > spin_lock_init(&guc->timestamp.lock);
> > > INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
> > > guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
> > > @@ -1796,7 +1793,8 @@ void intel_guc_submission_fini(struct intel_guc *guc)
> > > guc_flush_destroyed_contexts(guc);
> > > guc_lrc_desc_pool_destroy(guc);
> > > i915_sched_engine_put(guc->sched_engine);
> > > - bitmap_free(guc->submission_state.guc_ids_bitmap);
> > > + if (guc->submission_state.guc_ids_bitmap)
> > > + bitmap_free(guc->submission_state.guc_ids_bitmap);
> > > }
> > > static inline void queue_request(struct i915_sched_engine *sched_engine,
> > > @@ -1863,6 +1861,33 @@ static void guc_submit_request(struct i915_request *rq)
> > > spin_unlock_irqrestore(&sched_engine->lock, flags);
> > > }
> > > +static int new_mlrc_guc_id(struct intel_guc *guc, struct intel_context *ce)
> > > +{
> > > + int ret;
> > > +
> > > + GEM_BUG_ON(!intel_context_is_parent(ce));
> > > + GEM_BUG_ON(!guc->submission_state.guc_ids_bitmap);
> > > +
> > > + ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
> > > + NUMBER_MULTI_LRC_GUC_ID(guc),
> > > + order_base_2(ce->parallel.number_children
> > > + + 1));
> > > + if (likely(!(ret < 0)))
> > > + ret += NUMBER_SINGLE_LRC_GUC_ID(guc);
> > > +
> > > + return ret;
> > > +}
> > > +
> > > +static int new_slrc_guc_id(struct intel_guc *guc, struct intel_context *ce)
> > > +{
> > > + GEM_BUG_ON(intel_context_is_parent(ce));
> > > +
> > > + return ida_simple_get(&guc->submission_state.guc_ids,
> > > + 0, NUMBER_SINGLE_LRC_GUC_ID(guc),
> > > + GFP_KERNEL | __GFP_RETRY_MAYFAIL |
> > > + __GFP_NOWARN);
> > > +}
> > > +
> > > static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
> > > {
> > > int ret;
> > > @@ -1870,16 +1895,10 @@ static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
> > > GEM_BUG_ON(intel_context_is_child(ce));
> > > if (intel_context_is_parent(ce))
> > > - ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
> > > - NUMBER_MULTI_LRC_GUC_ID(guc),
> > > - order_base_2(ce->parallel.number_children
> > > - + 1));
> > > + ret = new_mlrc_guc_id(guc, ce);
> > > else
> > > - ret = ida_simple_get(&guc->submission_state.guc_ids,
> > > - NUMBER_MULTI_LRC_GUC_ID(guc),
> > > - guc->submission_state.num_guc_ids,
> > > - GFP_KERNEL | __GFP_RETRY_MAYFAIL |
> > > - __GFP_NOWARN);
> > > + ret = new_slrc_guc_id(guc, ce);
> > > +
> > > if (unlikely(ret < 0))
> > > return ret;
> > > @@ -1989,6 +2008,14 @@ static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
> > > GEM_BUG_ON(atomic_read(&ce->guc_id.ref));
> > > + if (unlikely(intel_context_is_parent(ce) &&
> > > + !guc->submission_state.guc_ids_bitmap)) {
> > > + guc->submission_state.guc_ids_bitmap =
> > > + bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
> > > + if (!guc->submission_state.guc_ids_bitmap)
> > > + return -ENOMEM;
> > > + }
> > > +
> > > try_again:
> > > spin_lock_irqsave(&guc->submission_state.lock, flags);
> > >
>
> --
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [Intel-gfx] [PATCH] drm/i915: Flip guc_id allocation partition
@ 2022-01-12 17:23 ` Matthew Brost
0 siblings, 0 replies; 18+ messages in thread
From: Matthew Brost @ 2022-01-12 17:23 UTC (permalink / raw)
To: Piotr Piórkowski; +Cc: intel-gfx, dri-devel
On Wed, Jan 12, 2022 at 06:09:06PM +0100, Piotr Piórkowski wrote:
> Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com> wrote on śro [2022-sty-12 08:54:19 +0000]:
> >
> > On 11/01/2022 16:30, Matthew Brost wrote:
> > > Move the multi-lrc guc_id from the lower allocation partition (0 to
> > > number of multi-lrc guc_ids) to upper allocation partition (number of
> > > single-lrc to max guc_ids).
> >
> > Just a reminder that best practice for commit messages is to include the
> > "why" as well.
> >
> > Regards,
> >
> > Tvrtko
> >
>
> In my opinion this patch is good step forward.
> Lazy allocation of the bitmap for MLRC and moving the MLRC pool to the
> end will allow easier development contexts for SR-IOV.
> Introduction of two new helpers (new_mlrc_guc_id and new_slrc_guc_id) cleans up the code.
>
> I agree with Tvrtko's comment that you should expand your commit
> message.
>
Agree. Didn't know if I could talk about SR-IOV publicly but clearly
can, so I'll add an explanation in the next rev.
> One thing I personally don't like is this NUMBER_SINGLE_LRC_GUC_ID definition (same for MLRC)
> In my opinion it should be inline function and this value 1/16 defined as constant
Agree. I'll move these to functions in next rev.
Matt
>
> - Piotr
> > > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> > > ---
> > > .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 57 ++++++++++++++-----
> > > 1 file changed, 42 insertions(+), 15 deletions(-)
> > >
> > > diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> > > index 9989d121127df..1bacc9621cea8 100644
> > > --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> > > +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> > > @@ -147,6 +147,8 @@ guc_create_parallel(struct intel_engine_cs **engines,
> > > */
> > > #define NUMBER_MULTI_LRC_GUC_ID(guc) \
> > > ((guc)->submission_state.num_guc_ids / 16)
> > > +#define NUMBER_SINGLE_LRC_GUC_ID(guc) \
> > > + ((guc)->submission_state.num_guc_ids - NUMBER_MULTI_LRC_GUC_ID(guc))
> > > /*
> > > * Below is a set of functions which control the GuC scheduling state which
> > > @@ -1776,11 +1778,6 @@ int intel_guc_submission_init(struct intel_guc *guc)
> > > INIT_WORK(&guc->submission_state.destroyed_worker,
> > > destroyed_worker_func);
> > > - guc->submission_state.guc_ids_bitmap =
> > > - bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
> > > - if (!guc->submission_state.guc_ids_bitmap)
> > > - return -ENOMEM;
> > > -
> > > spin_lock_init(&guc->timestamp.lock);
> > > INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
> > > guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
> > > @@ -1796,7 +1793,8 @@ void intel_guc_submission_fini(struct intel_guc *guc)
> > > guc_flush_destroyed_contexts(guc);
> > > guc_lrc_desc_pool_destroy(guc);
> > > i915_sched_engine_put(guc->sched_engine);
> > > - bitmap_free(guc->submission_state.guc_ids_bitmap);
> > > + if (guc->submission_state.guc_ids_bitmap)
> > > + bitmap_free(guc->submission_state.guc_ids_bitmap);
> > > }
> > > static inline void queue_request(struct i915_sched_engine *sched_engine,
> > > @@ -1863,6 +1861,33 @@ static void guc_submit_request(struct i915_request *rq)
> > > spin_unlock_irqrestore(&sched_engine->lock, flags);
> > > }
> > > +static int new_mlrc_guc_id(struct intel_guc *guc, struct intel_context *ce)
> > > +{
> > > + int ret;
> > > +
> > > + GEM_BUG_ON(!intel_context_is_parent(ce));
> > > + GEM_BUG_ON(!guc->submission_state.guc_ids_bitmap);
> > > +
> > > + ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
> > > + NUMBER_MULTI_LRC_GUC_ID(guc),
> > > + order_base_2(ce->parallel.number_children
> > > + + 1));
> > > + if (likely(!(ret < 0)))
> > > + ret += NUMBER_SINGLE_LRC_GUC_ID(guc);
> > > +
> > > + return ret;
> > > +}
> > > +
> > > +static int new_slrc_guc_id(struct intel_guc *guc, struct intel_context *ce)
> > > +{
> > > + GEM_BUG_ON(intel_context_is_parent(ce));
> > > +
> > > + return ida_simple_get(&guc->submission_state.guc_ids,
> > > + 0, NUMBER_SINGLE_LRC_GUC_ID(guc),
> > > + GFP_KERNEL | __GFP_RETRY_MAYFAIL |
> > > + __GFP_NOWARN);
> > > +}
> > > +
> > > static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
> > > {
> > > int ret;
> > > @@ -1870,16 +1895,10 @@ static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
> > > GEM_BUG_ON(intel_context_is_child(ce));
> > > if (intel_context_is_parent(ce))
> > > - ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
> > > - NUMBER_MULTI_LRC_GUC_ID(guc),
> > > - order_base_2(ce->parallel.number_children
> > > - + 1));
> > > + ret = new_mlrc_guc_id(guc, ce);
> > > else
> > > - ret = ida_simple_get(&guc->submission_state.guc_ids,
> > > - NUMBER_MULTI_LRC_GUC_ID(guc),
> > > - guc->submission_state.num_guc_ids,
> > > - GFP_KERNEL | __GFP_RETRY_MAYFAIL |
> > > - __GFP_NOWARN);
> > > + ret = new_slrc_guc_id(guc, ce);
> > > +
> > > if (unlikely(ret < 0))
> > > return ret;
> > > @@ -1989,6 +2008,14 @@ static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
> > > GEM_BUG_ON(atomic_read(&ce->guc_id.ref));
> > > + if (unlikely(intel_context_is_parent(ce) &&
> > > + !guc->submission_state.guc_ids_bitmap)) {
> > > + guc->submission_state.guc_ids_bitmap =
> > > + bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
> > > + if (!guc->submission_state.guc_ids_bitmap)
> > > + return -ENOMEM;
> > > + }
> > > +
> > > try_again:
> > > spin_lock_irqsave(&guc->submission_state.lock, flags);
> > >
>
> --
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [PATCH] drm/i915: Flip guc_id allocation partition
2022-01-11 16:30 ` Matthew Brost
@ 2022-01-12 23:21 ` Michal Wajdeczko
-1 siblings, 0 replies; 18+ messages in thread
From: Michal Wajdeczko @ 2022-01-12 23:21 UTC (permalink / raw)
To: Matthew Brost, intel-gfx, dri-devel
Cc: daniele.ceraolospurio, john.c.harrison
On 11.01.2022 17:30, Matthew Brost wrote:
> Move the multi-lrc guc_id from the lower allocation partition (0 to
> number of multi-lrc guc_ids) to upper allocation partition (number of
> single-lrc to max guc_ids).
>
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> ---
> .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 57 ++++++++++++++-----
> 1 file changed, 42 insertions(+), 15 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> index 9989d121127df..1bacc9621cea8 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> @@ -147,6 +147,8 @@ guc_create_parallel(struct intel_engine_cs **engines,
> */
> #define NUMBER_MULTI_LRC_GUC_ID(guc) \
> ((guc)->submission_state.num_guc_ids / 16)
> +#define NUMBER_SINGLE_LRC_GUC_ID(guc) \
> + ((guc)->submission_state.num_guc_ids - NUMBER_MULTI_LRC_GUC_ID(guc))
above two will likely look better if converted into inline functions, or
even better if we explicitly store slrc/mlrc upper/lower id limits under
guc submission state
>
> /*
> * Below is a set of functions which control the GuC scheduling state which
> @@ -1776,11 +1778,6 @@ int intel_guc_submission_init(struct intel_guc *guc)
> INIT_WORK(&guc->submission_state.destroyed_worker,
> destroyed_worker_func);
>
> - guc->submission_state.guc_ids_bitmap =
> - bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
> - if (!guc->submission_state.guc_ids_bitmap)
> - return -ENOMEM;
> -
> spin_lock_init(&guc->timestamp.lock);
> INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
> guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
> @@ -1796,7 +1793,8 @@ void intel_guc_submission_fini(struct intel_guc *guc)
> guc_flush_destroyed_contexts(guc);
> guc_lrc_desc_pool_destroy(guc);
> i915_sched_engine_put(guc->sched_engine);
> - bitmap_free(guc->submission_state.guc_ids_bitmap);
> + if (guc->submission_state.guc_ids_bitmap)
> + bitmap_free(guc->submission_state.guc_ids_bitmap);
it should be fine to pass NULL to bitmap_free, no?
> }
>
> static inline void queue_request(struct i915_sched_engine *sched_engine,
> @@ -1863,6 +1861,33 @@ static void guc_submit_request(struct i915_request *rq)
> spin_unlock_irqrestore(&sched_engine->lock, flags);
> }
>
> +static int new_mlrc_guc_id(struct intel_guc *guc, struct intel_context *ce)
> +{
> + int ret;
> +
> + GEM_BUG_ON(!intel_context_is_parent(ce));
> + GEM_BUG_ON(!guc->submission_state.guc_ids_bitmap);
> +
> + ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
> + NUMBER_MULTI_LRC_GUC_ID(guc),
> + order_base_2(ce->parallel.number_children
> + + 1));
btw, is there any requirement (GuC ABI ?) that allocated ids need
to be allocated with power of 2 alignment ? I don't think that we
must optimize that hard and in some cases waste extra ids (as we might
be limited on some configs)
> + if (likely(!(ret < 0)))
> + ret += NUMBER_SINGLE_LRC_GUC_ID(guc);
nit: more readable would be
if (unlikely(ret < 0))
return ret;
return ret + guc->submission_state.mlrc_base;
> +
> + return ret;
> +}
> +
> +static int new_slrc_guc_id(struct intel_guc *guc, struct intel_context *ce)
> +{
> + GEM_BUG_ON(intel_context_is_parent(ce));
do we really need ce here ?
> +
> + return ida_simple_get(&guc->submission_state.guc_ids,
> + 0, NUMBER_SINGLE_LRC_GUC_ID(guc),
if we change the logic of NUMBER_SINGLE/MULTI_LRC_GUC_ID macros from
static split into more dynamic, then we could likely implement lazy
increase of available slrc/mlrc id limits on demand, within available
range, without deciding upfront of the hardcoded split 15 : 1
but this can be done next time ;)
> + GFP_KERNEL | __GFP_RETRY_MAYFAIL |
> + __GFP_NOWARN);
> +}
> +
> static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
> {
> int ret;
> @@ -1870,16 +1895,10 @@ static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
> GEM_BUG_ON(intel_context_is_child(ce));
>
> if (intel_context_is_parent(ce))
> - ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
> - NUMBER_MULTI_LRC_GUC_ID(guc),
> - order_base_2(ce->parallel.number_children
> - + 1));
> + ret = new_mlrc_guc_id(guc, ce);
> else
> - ret = ida_simple_get(&guc->submission_state.guc_ids,
> - NUMBER_MULTI_LRC_GUC_ID(guc),
> - guc->submission_state.num_guc_ids,
> - GFP_KERNEL | __GFP_RETRY_MAYFAIL |
> - __GFP_NOWARN);
> + ret = new_slrc_guc_id(guc, ce);
> +
with above helpers introduced, shouldn't we move code from new_guc_id()
to assign_guc_id() ?
> if (unlikely(ret < 0))
> return ret;
>
> @@ -1989,6 +2008,14 @@ static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
>
> GEM_BUG_ON(atomic_read(&ce->guc_id.ref));
>
> + if (unlikely(intel_context_is_parent(ce) &&
> + !guc->submission_state.guc_ids_bitmap)) {
> + guc->submission_state.guc_ids_bitmap =
> + bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
> + if (!guc->submission_state.guc_ids_bitmap)
> + return -ENOMEM;
> + }
maybe move this chunk to new_mlrc_guc_id() ?
or we can't due to the spin_lock below ?
but then how do you protect guc_ids_bitmap pointer itself ?
-Michal
> +
> try_again:
> spin_lock_irqsave(&guc->submission_state.lock, flags);
>
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [Intel-gfx] [PATCH] drm/i915: Flip guc_id allocation partition
@ 2022-01-12 23:21 ` Michal Wajdeczko
0 siblings, 0 replies; 18+ messages in thread
From: Michal Wajdeczko @ 2022-01-12 23:21 UTC (permalink / raw)
To: Matthew Brost, intel-gfx, dri-devel
On 11.01.2022 17:30, Matthew Brost wrote:
> Move the multi-lrc guc_id from the lower allocation partition (0 to
> number of multi-lrc guc_ids) to upper allocation partition (number of
> single-lrc to max guc_ids).
>
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> ---
> .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 57 ++++++++++++++-----
> 1 file changed, 42 insertions(+), 15 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> index 9989d121127df..1bacc9621cea8 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> @@ -147,6 +147,8 @@ guc_create_parallel(struct intel_engine_cs **engines,
> */
> #define NUMBER_MULTI_LRC_GUC_ID(guc) \
> ((guc)->submission_state.num_guc_ids / 16)
> +#define NUMBER_SINGLE_LRC_GUC_ID(guc) \
> + ((guc)->submission_state.num_guc_ids - NUMBER_MULTI_LRC_GUC_ID(guc))
above two will likely look better if converted into inline functions, or
even better if we explicitly store slrc/mlrc upper/lower id limits under
guc submission state
>
> /*
> * Below is a set of functions which control the GuC scheduling state which
> @@ -1776,11 +1778,6 @@ int intel_guc_submission_init(struct intel_guc *guc)
> INIT_WORK(&guc->submission_state.destroyed_worker,
> destroyed_worker_func);
>
> - guc->submission_state.guc_ids_bitmap =
> - bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
> - if (!guc->submission_state.guc_ids_bitmap)
> - return -ENOMEM;
> -
> spin_lock_init(&guc->timestamp.lock);
> INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
> guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
> @@ -1796,7 +1793,8 @@ void intel_guc_submission_fini(struct intel_guc *guc)
> guc_flush_destroyed_contexts(guc);
> guc_lrc_desc_pool_destroy(guc);
> i915_sched_engine_put(guc->sched_engine);
> - bitmap_free(guc->submission_state.guc_ids_bitmap);
> + if (guc->submission_state.guc_ids_bitmap)
> + bitmap_free(guc->submission_state.guc_ids_bitmap);
it should be fine to pass NULL to bitmap_free, no?
> }
>
> static inline void queue_request(struct i915_sched_engine *sched_engine,
> @@ -1863,6 +1861,33 @@ static void guc_submit_request(struct i915_request *rq)
> spin_unlock_irqrestore(&sched_engine->lock, flags);
> }
>
> +static int new_mlrc_guc_id(struct intel_guc *guc, struct intel_context *ce)
> +{
> + int ret;
> +
> + GEM_BUG_ON(!intel_context_is_parent(ce));
> + GEM_BUG_ON(!guc->submission_state.guc_ids_bitmap);
> +
> + ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
> + NUMBER_MULTI_LRC_GUC_ID(guc),
> + order_base_2(ce->parallel.number_children
> + + 1));
btw, is there any requirement (GuC ABI ?) that allocated ids need
to be allocated with power of 2 alignment ? I don't think that we
must optimize that hard and in some cases waste extra ids (as we might
be limited on some configs)
> + if (likely(!(ret < 0)))
> + ret += NUMBER_SINGLE_LRC_GUC_ID(guc);
nit: more readable would be
if (unlikely(ret < 0))
return ret;
return ret + guc->submission_state.mlrc_base;
> +
> + return ret;
> +}
> +
> +static int new_slrc_guc_id(struct intel_guc *guc, struct intel_context *ce)
> +{
> + GEM_BUG_ON(intel_context_is_parent(ce));
do we really need ce here ?
> +
> + return ida_simple_get(&guc->submission_state.guc_ids,
> + 0, NUMBER_SINGLE_LRC_GUC_ID(guc),
if we change the logic of NUMBER_SINGLE/MULTI_LRC_GUC_ID macros from
static split into more dynamic, then we could likely implement lazy
increase of available slrc/mlrc id limits on demand, within available
range, without deciding upfront of the hardcoded split 15 : 1
but this can be done next time ;)
> + GFP_KERNEL | __GFP_RETRY_MAYFAIL |
> + __GFP_NOWARN);
> +}
> +
> static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
> {
> int ret;
> @@ -1870,16 +1895,10 @@ static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
> GEM_BUG_ON(intel_context_is_child(ce));
>
> if (intel_context_is_parent(ce))
> - ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
> - NUMBER_MULTI_LRC_GUC_ID(guc),
> - order_base_2(ce->parallel.number_children
> - + 1));
> + ret = new_mlrc_guc_id(guc, ce);
> else
> - ret = ida_simple_get(&guc->submission_state.guc_ids,
> - NUMBER_MULTI_LRC_GUC_ID(guc),
> - guc->submission_state.num_guc_ids,
> - GFP_KERNEL | __GFP_RETRY_MAYFAIL |
> - __GFP_NOWARN);
> + ret = new_slrc_guc_id(guc, ce);
> +
with above helpers introduced, shouldn't we move code from new_guc_id()
to assign_guc_id() ?
> if (unlikely(ret < 0))
> return ret;
>
> @@ -1989,6 +2008,14 @@ static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
>
> GEM_BUG_ON(atomic_read(&ce->guc_id.ref));
>
> + if (unlikely(intel_context_is_parent(ce) &&
> + !guc->submission_state.guc_ids_bitmap)) {
> + guc->submission_state.guc_ids_bitmap =
> + bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
> + if (!guc->submission_state.guc_ids_bitmap)
> + return -ENOMEM;
> + }
maybe move this chunk to new_mlrc_guc_id() ?
or we can't due to the spin_lock below ?
but then how do you protect guc_ids_bitmap pointer itself ?
-Michal
> +
> try_again:
> spin_lock_irqsave(&guc->submission_state.lock, flags);
>
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [PATCH] drm/i915: Flip guc_id allocation partition
2022-01-12 23:21 ` [Intel-gfx] " Michal Wajdeczko
@ 2022-01-12 23:26 ` Matthew Brost
-1 siblings, 0 replies; 18+ messages in thread
From: Matthew Brost @ 2022-01-12 23:26 UTC (permalink / raw)
To: Michal Wajdeczko
Cc: intel-gfx, daniele.ceraolospurio, john.c.harrison, dri-devel
On Thu, Jan 13, 2022 at 12:21:17AM +0100, Michal Wajdeczko wrote:
>
>
> On 11.01.2022 17:30, Matthew Brost wrote:
> > Move the multi-lrc guc_id from the lower allocation partition (0 to
> > number of multi-lrc guc_ids) to upper allocation partition (number of
> > single-lrc to max guc_ids).
> >
> > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> > ---
> > .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 57 ++++++++++++++-----
> > 1 file changed, 42 insertions(+), 15 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> > index 9989d121127df..1bacc9621cea8 100644
> > --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> > +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> > @@ -147,6 +147,8 @@ guc_create_parallel(struct intel_engine_cs **engines,
> > */
> > #define NUMBER_MULTI_LRC_GUC_ID(guc) \
> > ((guc)->submission_state.num_guc_ids / 16)
> > +#define NUMBER_SINGLE_LRC_GUC_ID(guc) \
> > + ((guc)->submission_state.num_guc_ids - NUMBER_MULTI_LRC_GUC_ID(guc))
>
> above two will likely look better if converted into inline functions, or
> even better if we explicitly store slrc/mlrc upper/lower id limits under
> guc submission state
>
Definitely inline functions, or I guess variables work too but that
might be overkill. Let me play around with this and see how it looks.
> >
> > /*
> > * Below is a set of functions which control the GuC scheduling state which
> > @@ -1776,11 +1778,6 @@ int intel_guc_submission_init(struct intel_guc *guc)
> > INIT_WORK(&guc->submission_state.destroyed_worker,
> > destroyed_worker_func);
> >
> > - guc->submission_state.guc_ids_bitmap =
> > - bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
> > - if (!guc->submission_state.guc_ids_bitmap)
> > - return -ENOMEM;
> > -
> > spin_lock_init(&guc->timestamp.lock);
> > INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
> > guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
> > @@ -1796,7 +1793,8 @@ void intel_guc_submission_fini(struct intel_guc *guc)
> > guc_flush_destroyed_contexts(guc);
> > guc_lrc_desc_pool_destroy(guc);
> > i915_sched_engine_put(guc->sched_engine);
> > - bitmap_free(guc->submission_state.guc_ids_bitmap);
> > + if (guc->submission_state.guc_ids_bitmap)
> > + bitmap_free(guc->submission_state.guc_ids_bitmap);
>
> it should be fine to pass NULL to bitmap_free, no?
>
Probably? I'll double check on this.
> > }
> >
> > static inline void queue_request(struct i915_sched_engine *sched_engine,
> > @@ -1863,6 +1861,33 @@ static void guc_submit_request(struct i915_request *rq)
> > spin_unlock_irqrestore(&sched_engine->lock, flags);
> > }
> >
> > +static int new_mlrc_guc_id(struct intel_guc *guc, struct intel_context *ce)
> > +{
> > + int ret;
> > +
> > + GEM_BUG_ON(!intel_context_is_parent(ce));
> > + GEM_BUG_ON(!guc->submission_state.guc_ids_bitmap);
> > +
> > + ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
> > + NUMBER_MULTI_LRC_GUC_ID(guc),
> > + order_base_2(ce->parallel.number_children
> > + + 1));
>
> btw, is there any requirement (GuC ABI ?) that allocated ids need
> to be allocated with power of 2 alignment ? I don't think that we
> must optimize that hard and in some cases waste extra ids (as we might
> be limited on some configs)
>
No pow2 requirement in GuC ABI, bitmaps only work on pow2 alignment and
didn't optimize this.
> > + if (likely(!(ret < 0)))
> > + ret += NUMBER_SINGLE_LRC_GUC_ID(guc);
>
> nit: more readable would be
>
> if (unlikely(ret < 0))
> return ret;
>
> return ret + guc->submission_state.mlrc_base;
>
Sure.
> > +
> > + return ret;
> > +}
> > +
> > +static int new_slrc_guc_id(struct intel_guc *guc, struct intel_context *ce)
> > +{
> > + GEM_BUG_ON(intel_context_is_parent(ce));
>
> do we really need ce here ?
>
Just for the GEM_BUG_ON... Can remove if it is a big deal.
> > +
> > + return ida_simple_get(&guc->submission_state.guc_ids,
> > + 0, NUMBER_SINGLE_LRC_GUC_ID(guc),
>
> if we change the logic of NUMBER_SINGLE/MULTI_LRC_GUC_ID macros from
> static split into more dynamic, then we could likely implement lazy
> increase of available slrc/mlrc id limits on demand, within available
> range, without deciding upfront of the hardcoded split 15 : 1
>
> but this can be done next time ;)
>
Yea I guess. Doubt we need anything beyond a static split tho.
> > + GFP_KERNEL | __GFP_RETRY_MAYFAIL |
> > + __GFP_NOWARN);
> > +}
> > +
> > static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
> > {
> > int ret;
> > @@ -1870,16 +1895,10 @@ static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
> > GEM_BUG_ON(intel_context_is_child(ce));
> >
> > if (intel_context_is_parent(ce))
> > - ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
> > - NUMBER_MULTI_LRC_GUC_ID(guc),
> > - order_base_2(ce->parallel.number_children
> > - + 1));
> > + ret = new_mlrc_guc_id(guc, ce);
> > else
> > - ret = ida_simple_get(&guc->submission_state.guc_ids,
> > - NUMBER_MULTI_LRC_GUC_ID(guc),
> > - guc->submission_state.num_guc_ids,
> > - GFP_KERNEL | __GFP_RETRY_MAYFAIL |
> > - __GFP_NOWARN);
> > + ret = new_slrc_guc_id(guc, ce);
> > +
>
> with above helpers introduced, shouldn't we move code from new_guc_id()
> to assign_guc_id() ?
>
Why add inline code to assign_guc_id?
> > if (unlikely(ret < 0))
> > return ret;
> >
> > @@ -1989,6 +2008,14 @@ static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
> >
> > GEM_BUG_ON(atomic_read(&ce->guc_id.ref));
> >
> > + if (unlikely(intel_context_is_parent(ce) &&
> > + !guc->submission_state.guc_ids_bitmap)) {
> > + guc->submission_state.guc_ids_bitmap =
> > + bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
> > + if (!guc->submission_state.guc_ids_bitmap)
> > + return -ENOMEM;
> > + }
>
> maybe move this chunk to new_mlrc_guc_id() ?
> or we can't due to the spin_lock below ?
> but then how do you protect guc_ids_bitmap pointer itself ?
>
Can't use GFP_KERNEL inside a spin lock...
Matt
> -Michal
>
> > +
> > try_again:
> > spin_lock_irqsave(&guc->submission_state.lock, flags);
> >
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [Intel-gfx] [PATCH] drm/i915: Flip guc_id allocation partition
@ 2022-01-12 23:26 ` Matthew Brost
0 siblings, 0 replies; 18+ messages in thread
From: Matthew Brost @ 2022-01-12 23:26 UTC (permalink / raw)
To: Michal Wajdeczko; +Cc: intel-gfx, dri-devel
On Thu, Jan 13, 2022 at 12:21:17AM +0100, Michal Wajdeczko wrote:
>
>
> On 11.01.2022 17:30, Matthew Brost wrote:
> > Move the multi-lrc guc_id from the lower allocation partition (0 to
> > number of multi-lrc guc_ids) to upper allocation partition (number of
> > single-lrc to max guc_ids).
> >
> > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> > ---
> > .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 57 ++++++++++++++-----
> > 1 file changed, 42 insertions(+), 15 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> > index 9989d121127df..1bacc9621cea8 100644
> > --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> > +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> > @@ -147,6 +147,8 @@ guc_create_parallel(struct intel_engine_cs **engines,
> > */
> > #define NUMBER_MULTI_LRC_GUC_ID(guc) \
> > ((guc)->submission_state.num_guc_ids / 16)
> > +#define NUMBER_SINGLE_LRC_GUC_ID(guc) \
> > + ((guc)->submission_state.num_guc_ids - NUMBER_MULTI_LRC_GUC_ID(guc))
>
> above two will likely look better if converted into inline functions, or
> even better if we explicitly store slrc/mlrc upper/lower id limits under
> guc submission state
>
Definitely inline functions, or I guess variables work too but that
might be overkill. Let me play around with this and see how it looks.
> >
> > /*
> > * Below is a set of functions which control the GuC scheduling state which
> > @@ -1776,11 +1778,6 @@ int intel_guc_submission_init(struct intel_guc *guc)
> > INIT_WORK(&guc->submission_state.destroyed_worker,
> > destroyed_worker_func);
> >
> > - guc->submission_state.guc_ids_bitmap =
> > - bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
> > - if (!guc->submission_state.guc_ids_bitmap)
> > - return -ENOMEM;
> > -
> > spin_lock_init(&guc->timestamp.lock);
> > INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
> > guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
> > @@ -1796,7 +1793,8 @@ void intel_guc_submission_fini(struct intel_guc *guc)
> > guc_flush_destroyed_contexts(guc);
> > guc_lrc_desc_pool_destroy(guc);
> > i915_sched_engine_put(guc->sched_engine);
> > - bitmap_free(guc->submission_state.guc_ids_bitmap);
> > + if (guc->submission_state.guc_ids_bitmap)
> > + bitmap_free(guc->submission_state.guc_ids_bitmap);
>
> it should be fine to pass NULL to bitmap_free, no?
>
Probably? I'll double check on this.
> > }
> >
> > static inline void queue_request(struct i915_sched_engine *sched_engine,
> > @@ -1863,6 +1861,33 @@ static void guc_submit_request(struct i915_request *rq)
> > spin_unlock_irqrestore(&sched_engine->lock, flags);
> > }
> >
> > +static int new_mlrc_guc_id(struct intel_guc *guc, struct intel_context *ce)
> > +{
> > + int ret;
> > +
> > + GEM_BUG_ON(!intel_context_is_parent(ce));
> > + GEM_BUG_ON(!guc->submission_state.guc_ids_bitmap);
> > +
> > + ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
> > + NUMBER_MULTI_LRC_GUC_ID(guc),
> > + order_base_2(ce->parallel.number_children
> > + + 1));
>
> btw, is there any requirement (GuC ABI ?) that allocated ids need
> to be allocated with power of 2 alignment ? I don't think that we
> must optimize that hard and in some cases waste extra ids (as we might
> be limited on some configs)
>
No pow2 requirement in GuC ABI, bitmaps only work on pow2 alignment and
didn't optimize this.
> > + if (likely(!(ret < 0)))
> > + ret += NUMBER_SINGLE_LRC_GUC_ID(guc);
>
> nit: more readable would be
>
> if (unlikely(ret < 0))
> return ret;
>
> return ret + guc->submission_state.mlrc_base;
>
Sure.
> > +
> > + return ret;
> > +}
> > +
> > +static int new_slrc_guc_id(struct intel_guc *guc, struct intel_context *ce)
> > +{
> > + GEM_BUG_ON(intel_context_is_parent(ce));
>
> do we really need ce here ?
>
Just for the GEM_BUG_ON... Can remove if it is a big deal.
> > +
> > + return ida_simple_get(&guc->submission_state.guc_ids,
> > + 0, NUMBER_SINGLE_LRC_GUC_ID(guc),
>
> if we change the logic of NUMBER_SINGLE/MULTI_LRC_GUC_ID macros from
> static split into more dynamic, then we could likely implement lazy
> increase of available slrc/mlrc id limits on demand, within available
> range, without deciding upfront of the hardcoded split 15 : 1
>
> but this can be done next time ;)
>
Yea I guess. Doubt we need anything beyond a static split tho.
> > + GFP_KERNEL | __GFP_RETRY_MAYFAIL |
> > + __GFP_NOWARN);
> > +}
> > +
> > static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
> > {
> > int ret;
> > @@ -1870,16 +1895,10 @@ static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
> > GEM_BUG_ON(intel_context_is_child(ce));
> >
> > if (intel_context_is_parent(ce))
> > - ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
> > - NUMBER_MULTI_LRC_GUC_ID(guc),
> > - order_base_2(ce->parallel.number_children
> > - + 1));
> > + ret = new_mlrc_guc_id(guc, ce);
> > else
> > - ret = ida_simple_get(&guc->submission_state.guc_ids,
> > - NUMBER_MULTI_LRC_GUC_ID(guc),
> > - guc->submission_state.num_guc_ids,
> > - GFP_KERNEL | __GFP_RETRY_MAYFAIL |
> > - __GFP_NOWARN);
> > + ret = new_slrc_guc_id(guc, ce);
> > +
>
> with above helpers introduced, shouldn't we move code from new_guc_id()
> to assign_guc_id() ?
>
Why add inline code to assign_guc_id?
> > if (unlikely(ret < 0))
> > return ret;
> >
> > @@ -1989,6 +2008,14 @@ static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
> >
> > GEM_BUG_ON(atomic_read(&ce->guc_id.ref));
> >
> > + if (unlikely(intel_context_is_parent(ce) &&
> > + !guc->submission_state.guc_ids_bitmap)) {
> > + guc->submission_state.guc_ids_bitmap =
> > + bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
> > + if (!guc->submission_state.guc_ids_bitmap)
> > + return -ENOMEM;
> > + }
>
> maybe move this chunk to new_mlrc_guc_id() ?
> or we can't due to the spin_lock below ?
> but then how do you protect guc_ids_bitmap pointer itself ?
>
Can't use GFP_KERNEL inside a spin lock...
Matt
> -Michal
>
> > +
> > try_again:
> > spin_lock_irqsave(&guc->submission_state.lock, flags);
> >
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [PATCH] drm/i915: Flip guc_id allocation partition
2022-01-12 23:26 ` [Intel-gfx] " Matthew Brost
@ 2022-01-13 14:18 ` Michal Wajdeczko
-1 siblings, 0 replies; 18+ messages in thread
From: Michal Wajdeczko @ 2022-01-13 14:18 UTC (permalink / raw)
To: Matthew Brost
Cc: intel-gfx, daniele.ceraolospurio, john.c.harrison, dri-devel
On 13.01.2022 00:26, Matthew Brost wrote:
> On Thu, Jan 13, 2022 at 12:21:17AM +0100, Michal Wajdeczko wrote:
>> On 11.01.2022 17:30, Matthew Brost wrote:
...
>>> @@ -1863,6 +1861,33 @@ static void guc_submit_request(struct i915_request *rq)
>>> spin_unlock_irqrestore(&sched_engine->lock, flags);
>>> }
>>>
>>> +static int new_mlrc_guc_id(struct intel_guc *guc, struct intel_context *ce)
>>> +{
>>> + int ret;
>>> +
>>> + GEM_BUG_ON(!intel_context_is_parent(ce));
>>> + GEM_BUG_ON(!guc->submission_state.guc_ids_bitmap);
>>> +
>>> + ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
>>> + NUMBER_MULTI_LRC_GUC_ID(guc),
>>> + order_base_2(ce->parallel.number_children
>>> + + 1));
>>
>> btw, is there any requirement (GuC ABI ?) that allocated ids need
>> to be allocated with power of 2 alignment ? I don't think that we
>> must optimize that hard and in some cases waste extra ids (as we might
>> be limited on some configs)
>>
>
> No pow2 requirement in GuC ABI, bitmaps only work on pow2 alignment and
> didn't optimize this.
>
there is a slower variant of "find" function:
bitmap_find_next_zero_area - find a contiguous aligned zero area
that does not have this limitation
..
>>> @@ -1989,6 +2008,14 @@ static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
>>>
>>> GEM_BUG_ON(atomic_read(&ce->guc_id.ref));
>>>
>>> + if (unlikely(intel_context_is_parent(ce) &&
>>> + !guc->submission_state.guc_ids_bitmap)) {
>>> + guc->submission_state.guc_ids_bitmap =
>>> + bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
>>> + if (!guc->submission_state.guc_ids_bitmap)
>>> + return -ENOMEM;
>>> + }
>>
>> maybe move this chunk to new_mlrc_guc_id() ?
>> or we can't due to the spin_lock below ?
>> but then how do you protect guc_ids_bitmap pointer itself ?
>>
>
> Can't use GFP_KERNEL inside a spin lock...
>
ok, but what if there will be two or more parallel calls to pin_guc_id()
with all being first parent context? each will see NULL guc_ids_bitmap..
or there is another layer of synchronization?
-Michal
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [Intel-gfx] [PATCH] drm/i915: Flip guc_id allocation partition
@ 2022-01-13 14:18 ` Michal Wajdeczko
0 siblings, 0 replies; 18+ messages in thread
From: Michal Wajdeczko @ 2022-01-13 14:18 UTC (permalink / raw)
To: Matthew Brost; +Cc: intel-gfx, dri-devel
On 13.01.2022 00:26, Matthew Brost wrote:
> On Thu, Jan 13, 2022 at 12:21:17AM +0100, Michal Wajdeczko wrote:
>> On 11.01.2022 17:30, Matthew Brost wrote:
...
>>> @@ -1863,6 +1861,33 @@ static void guc_submit_request(struct i915_request *rq)
>>> spin_unlock_irqrestore(&sched_engine->lock, flags);
>>> }
>>>
>>> +static int new_mlrc_guc_id(struct intel_guc *guc, struct intel_context *ce)
>>> +{
>>> + int ret;
>>> +
>>> + GEM_BUG_ON(!intel_context_is_parent(ce));
>>> + GEM_BUG_ON(!guc->submission_state.guc_ids_bitmap);
>>> +
>>> + ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
>>> + NUMBER_MULTI_LRC_GUC_ID(guc),
>>> + order_base_2(ce->parallel.number_children
>>> + + 1));
>>
>> btw, is there any requirement (GuC ABI ?) that allocated ids need
>> to be allocated with power of 2 alignment ? I don't think that we
>> must optimize that hard and in some cases waste extra ids (as we might
>> be limited on some configs)
>>
>
> No pow2 requirement in GuC ABI, bitmaps only work on pow2 alignment and
> didn't optimize this.
>
there is a slower variant of "find" function:
bitmap_find_next_zero_area - find a contiguous aligned zero area
that does not have this limitation
..
>>> @@ -1989,6 +2008,14 @@ static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
>>>
>>> GEM_BUG_ON(atomic_read(&ce->guc_id.ref));
>>>
>>> + if (unlikely(intel_context_is_parent(ce) &&
>>> + !guc->submission_state.guc_ids_bitmap)) {
>>> + guc->submission_state.guc_ids_bitmap =
>>> + bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
>>> + if (!guc->submission_state.guc_ids_bitmap)
>>> + return -ENOMEM;
>>> + }
>>
>> maybe move this chunk to new_mlrc_guc_id() ?
>> or we can't due to the spin_lock below ?
>> but then how do you protect guc_ids_bitmap pointer itself ?
>>
>
> Can't use GFP_KERNEL inside a spin lock...
>
ok, but what if there will be two or more parallel calls to pin_guc_id()
with all being first parent context? each will see NULL guc_ids_bitmap..
or there is another layer of synchronization?
-Michal
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [PATCH] drm/i915: Flip guc_id allocation partition
2022-01-13 14:18 ` [Intel-gfx] " Michal Wajdeczko
@ 2022-01-13 16:00 ` Matthew Brost
-1 siblings, 0 replies; 18+ messages in thread
From: Matthew Brost @ 2022-01-13 16:00 UTC (permalink / raw)
To: Michal Wajdeczko
Cc: intel-gfx, daniele.ceraolospurio, john.c.harrison, dri-devel
On Thu, Jan 13, 2022 at 03:18:14PM +0100, Michal Wajdeczko wrote:
>
>
> On 13.01.2022 00:26, Matthew Brost wrote:
> > On Thu, Jan 13, 2022 at 12:21:17AM +0100, Michal Wajdeczko wrote:
> >> On 11.01.2022 17:30, Matthew Brost wrote:
>
> ...
>
> >>> @@ -1863,6 +1861,33 @@ static void guc_submit_request(struct i915_request *rq)
> >>> spin_unlock_irqrestore(&sched_engine->lock, flags);
> >>> }
> >>>
> >>> +static int new_mlrc_guc_id(struct intel_guc *guc, struct intel_context *ce)
> >>> +{
> >>> + int ret;
> >>> +
> >>> + GEM_BUG_ON(!intel_context_is_parent(ce));
> >>> + GEM_BUG_ON(!guc->submission_state.guc_ids_bitmap);
> >>> +
> >>> + ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
> >>> + NUMBER_MULTI_LRC_GUC_ID(guc),
> >>> + order_base_2(ce->parallel.number_children
> >>> + + 1));
> >>
> >> btw, is there any requirement (GuC ABI ?) that allocated ids need
> >> to be allocated with power of 2 alignment ? I don't think that we
> >> must optimize that hard and in some cases waste extra ids (as we might
> >> be limited on some configs)
> >>
> >
> > No pow2 requirement in GuC ABI, bitmaps only work on pow2 alignment and
> > didn't optimize this.
> >
>
> there is a slower variant of "find" function:
>
> bitmap_find_next_zero_area - find a contiguous aligned zero area
>
> that does not have this limitation
>
Ah, wasn't aware of this. If this becomes an issue (running out of multi-lrc
ids) for customers I suppose this is the first thing we can do to try to
address this. For now, I think we leave it as is.
> ..
>
>
> >>> @@ -1989,6 +2008,14 @@ static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
> >>>
> >>> GEM_BUG_ON(atomic_read(&ce->guc_id.ref));
> >>>
> >>> + if (unlikely(intel_context_is_parent(ce) &&
> >>> + !guc->submission_state.guc_ids_bitmap)) {
> >>> + guc->submission_state.guc_ids_bitmap =
> >>> + bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
> >>> + if (!guc->submission_state.guc_ids_bitmap)
> >>> + return -ENOMEM;
> >>> + }
> >>
> >> maybe move this chunk to new_mlrc_guc_id() ?
> >> or we can't due to the spin_lock below ?
> >> but then how do you protect guc_ids_bitmap pointer itself ?
> >>
> >
> > Can't use GFP_KERNEL inside a spin lock...
> >
>
> ok, but what if there will be two or more parallel calls to pin_guc_id()
> with all being first parent context? each will see NULL guc_ids_bitmap..
> or there is another layer of synchronization?
>
Good catch. Yes, it is technically possible for two multi-lrc contexts to try
to allocate at the same time. I guess I should just do this at driver
load time + allocate the maximum number of multi-lrc ids and possibly
waste a bit of memory on a PF or VF.
Matt
> -Michal
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [Intel-gfx] [PATCH] drm/i915: Flip guc_id allocation partition
@ 2022-01-13 16:00 ` Matthew Brost
0 siblings, 0 replies; 18+ messages in thread
From: Matthew Brost @ 2022-01-13 16:00 UTC (permalink / raw)
To: Michal Wajdeczko; +Cc: intel-gfx, dri-devel
On Thu, Jan 13, 2022 at 03:18:14PM +0100, Michal Wajdeczko wrote:
>
>
> On 13.01.2022 00:26, Matthew Brost wrote:
> > On Thu, Jan 13, 2022 at 12:21:17AM +0100, Michal Wajdeczko wrote:
> >> On 11.01.2022 17:30, Matthew Brost wrote:
>
> ...
>
> >>> @@ -1863,6 +1861,33 @@ static void guc_submit_request(struct i915_request *rq)
> >>> spin_unlock_irqrestore(&sched_engine->lock, flags);
> >>> }
> >>>
> >>> +static int new_mlrc_guc_id(struct intel_guc *guc, struct intel_context *ce)
> >>> +{
> >>> + int ret;
> >>> +
> >>> + GEM_BUG_ON(!intel_context_is_parent(ce));
> >>> + GEM_BUG_ON(!guc->submission_state.guc_ids_bitmap);
> >>> +
> >>> + ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
> >>> + NUMBER_MULTI_LRC_GUC_ID(guc),
> >>> + order_base_2(ce->parallel.number_children
> >>> + + 1));
> >>
> >> btw, is there any requirement (GuC ABI ?) that allocated ids need
> >> to be allocated with power of 2 alignment ? I don't think that we
> >> must optimize that hard and in some cases waste extra ids (as we might
> >> be limited on some configs)
> >>
> >
> > No pow2 requirement in GuC ABI, bitmaps only work on pow2 alignment and
> > didn't optimize this.
> >
>
> there is a slower variant of "find" function:
>
> bitmap_find_next_zero_area - find a contiguous aligned zero area
>
> that does not have this limitation
>
Ah, wasn't aware of this. If this becomes an issue (running out of multi-lrc
ids) for customers I suppose this is the first thing we can do to try to
address this. For now, I think we leave it as is.
> ..
>
>
> >>> @@ -1989,6 +2008,14 @@ static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
> >>>
> >>> GEM_BUG_ON(atomic_read(&ce->guc_id.ref));
> >>>
> >>> + if (unlikely(intel_context_is_parent(ce) &&
> >>> + !guc->submission_state.guc_ids_bitmap)) {
> >>> + guc->submission_state.guc_ids_bitmap =
> >>> + bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
> >>> + if (!guc->submission_state.guc_ids_bitmap)
> >>> + return -ENOMEM;
> >>> + }
> >>
> >> maybe move this chunk to new_mlrc_guc_id() ?
> >> or we can't due to the spin_lock below ?
> >> but then how do you protect guc_ids_bitmap pointer itself ?
> >>
> >
> > Can't use GFP_KERNEL inside a spin lock...
> >
>
> ok, but what if there will be two or more parallel calls to pin_guc_id()
> with all being first parent context? each will see NULL guc_ids_bitmap..
> or there is another layer of synchronization?
>
Good catch. Yes, it is technically possible for two multi-lrc contexts to try
to allocate at the same time. I guess I should just do this at driver
load time + allocate the maximum number of multi-lrc ids and possibly
waste a bit of memory on a PF or VF.
Matt
> -Michal
^ permalink raw reply [flat|nested] 18+ messages in thread
end of thread, other threads:[~2022-01-13 16:06 UTC | newest]
Thread overview: 18+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-01-11 16:30 [Intel-gfx] [PATCH] drm/i915: Flip guc_id allocation partition Matthew Brost
2022-01-11 16:30 ` Matthew Brost
2022-01-11 19:17 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for " Patchwork
2022-01-11 19:33 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
2022-01-12 1:14 ` [Intel-gfx] ✓ Fi.CI.IGT: " Patchwork
2022-01-12 8:54 ` [Intel-gfx] [PATCH] " Tvrtko Ursulin
2022-01-12 17:09 ` Piotr Piórkowski
2022-01-12 17:09 ` Piotr Piórkowski
2022-01-12 17:23 ` Matthew Brost
2022-01-12 17:23 ` Matthew Brost
2022-01-12 23:21 ` Michal Wajdeczko
2022-01-12 23:21 ` [Intel-gfx] " Michal Wajdeczko
2022-01-12 23:26 ` Matthew Brost
2022-01-12 23:26 ` [Intel-gfx] " Matthew Brost
2022-01-13 14:18 ` Michal Wajdeczko
2022-01-13 14:18 ` [Intel-gfx] " Michal Wajdeczko
2022-01-13 16:00 ` Matthew Brost
2022-01-13 16:00 ` [Intel-gfx] " Matthew Brost
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.