All of lore.kernel.org
 help / color / mirror / Atom feed
* [Intel-gfx] [PATCH 1/5] drm/i915: Mark up unlocked update of i915_request.hwsp_seqno
@ 2020-03-09 11:09 Chris Wilson
  2020-03-09 11:09 ` [Intel-gfx] [PATCH 2/5] drm/i915/gt: Mark up racy check of last list element Chris Wilson
                   ` (6 more replies)
  0 siblings, 7 replies; 14+ messages in thread
From: Chris Wilson @ 2020-03-09 11:09 UTC (permalink / raw)
  To: intel-gfx

During i915_request_retire() we decouple the i915_request.hwsp_seqno
from the intel_timeline so that it may be freed before the request is
released. However, we need to warn the compiler that the pointer may
update under its nose.

[  171.438899] BUG: KCSAN: data-race in i915_request_await_dma_fence [i915] / i915_request_retire [i915]
[  171.438920]
[  171.438932] write to 0xffff8881e7e28ce0 of 8 bytes by task 148 on cpu 2:
[  171.439174]  i915_request_retire+0x1ea/0x660 [i915]
[  171.439408]  retire_requests+0x7a/0xd0 [i915]
[  171.439640]  engine_retire+0xa1/0xe0 [i915]
[  171.439657]  process_one_work+0x3b1/0x690
[  171.439671]  worker_thread+0x80/0x670
[  171.439685]  kthread+0x19a/0x1e0
[  171.439701]  ret_from_fork+0x1f/0x30
[  171.439721]
[  171.439739] read to 0xffff8881e7e28ce0 of 8 bytes by task 696 on cpu 1:
[  171.439990]  i915_request_await_dma_fence+0x162/0x520 [i915]
[  171.440230]  i915_request_await_object+0x2fe/0x470 [i915]
[  171.440467]  i915_gem_do_execbuffer+0x45dc/0x4c20 [i915]
[  171.440704]  i915_gem_execbuffer2_ioctl+0x2c3/0x580 [i915]
[  171.440722]  drm_ioctl_kernel+0xe4/0x120
[  171.440736]  drm_ioctl+0x297/0x4c7
[  171.440750]  ksys_ioctl+0x89/0xb0
[  171.440766]  __x64_sys_ioctl+0x42/0x60
[  171.440788]  do_syscall_64+0x6e/0x2c0
[  171.440802]  entry_SYSCALL_64_after_hwframe+0x44/0xa9

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_request.h | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index d4bae16b4785..6020d5b2a3df 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -396,7 +396,9 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
 
 static inline u32 __hwsp_seqno(const struct i915_request *rq)
 {
-	return READ_ONCE(*rq->hwsp_seqno);
+	const u32 *hwsp = READ_ONCE(rq->hwsp_seqno);
+
+	return READ_ONCE(*hwsp);
 }
 
 /**
@@ -510,7 +512,8 @@ static inline bool i915_request_completed(const struct i915_request *rq)
 
 static inline void i915_request_mark_complete(struct i915_request *rq)
 {
-	rq->hwsp_seqno = (u32 *)&rq->fence.seqno; /* decouple from HWSP */
+	WRITE_ONCE(rq->hwsp_seqno, /* decouple from HWSP */
+		   (u32 *)&rq->fence.seqno);
 }
 
 static inline bool i915_request_has_waitboost(const struct i915_request *rq)
-- 
2.20.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 14+ messages in thread

* [Intel-gfx] [PATCH 2/5] drm/i915/gt: Mark up racy check of last list element
  2020-03-09 11:09 [Intel-gfx] [PATCH 1/5] drm/i915: Mark up unlocked update of i915_request.hwsp_seqno Chris Wilson
@ 2020-03-09 11:09 ` Chris Wilson
  2020-03-09 16:09   ` Mika Kuoppala
  2020-03-09 11:09 ` [Intel-gfx] [PATCH 3/5] drm/i915/execlists: Track active elements during dequeue Chris Wilson
                   ` (5 subsequent siblings)
  6 siblings, 1 reply; 14+ messages in thread
From: Chris Wilson @ 2020-03-09 11:09 UTC (permalink / raw)
  To: intel-gfx

[   25.025543] BUG: KCSAN: data-race in __i915_request_create [i915] / process_csb [i915]
[   25.025561]
[   25.025573] write (marked) to 0xffff8881e85c1620 of 8 bytes by task 696 on cpu 1:
[   25.025789]  __i915_request_create+0x54b/0x5d0 [i915]
[   25.026001]  i915_request_create+0xcc/0x150 [i915]
[   25.026218]  i915_gem_do_execbuffer+0x2f70/0x4c20 [i915]
[   25.026428]  i915_gem_execbuffer2_ioctl+0x2c3/0x580 [i915]
[   25.026445]  drm_ioctl_kernel+0xe4/0x120
[   25.026459]  drm_ioctl+0x297/0x4c7
[   25.026472]  ksys_ioctl+0x89/0xb0
[   25.026484]  __x64_sys_ioctl+0x42/0x60
[   25.026497]  do_syscall_64+0x6e/0x2c0
[   25.026510]  entry_SYSCALL_64_after_hwframe+0x44/0xa9
[   25.026522]
[   25.026532] read to 0xffff8881e85c1620 of 8 bytes by interrupt on cpu 2:
[   25.026742]  process_csb+0x8d6/0x1070 [i915]
[   25.026949]  execlists_submission_tasklet+0x30/0x170 [i915]
[   25.026969]  tasklet_action_common.isra.0+0x42/0xa0
[   25.026984]  __do_softirq+0xd7/0x2cd
[   25.026997]  irq_exit+0xbe/0xe0
[   25.027009]  do_IRQ+0x51/0x100
[   25.027021]  ret_from_intr+0x0/0x1c
[   25.027033]  poll_idle+0x3e/0x13b
[   25.027047]  cpuidle_enter_state+0x189/0x5d0
[   25.027060]  cpuidle_enter+0x50/0x90
[   25.027074]  do_idle+0x1a1/0x1f0
[   25.027086]  cpu_startup_entry+0x14/0x16
[   25.027100]  start_secondary+0x120/0x180
[   25.027116]  secondary_startup_64+0xa4/0xb0

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/intel_lrc.c | 2 +-
 drivers/gpu/drm/i915/i915_utils.h   | 6 ++++++
 2 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index a1d268880cfe..6266ef2ae6a0 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1316,7 +1316,7 @@ __execlists_schedule_out(struct i915_request *rq,
 	 * If we have just completed this context, the engine may now be
 	 * idle and we want to re-enter powersaving.
 	 */
-	if (list_is_last(&rq->link, &ce->timeline->requests) &&
+	if (list_is_last_rcu(&rq->link, &ce->timeline->requests) &&
 	    i915_request_completed(rq))
 		intel_engine_add_retire(engine, ce->timeline);
 
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 26f3a4a50b40..03a73d2bd50d 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -260,6 +260,12 @@ static inline void __list_del_many(struct list_head *head,
 	WRITE_ONCE(head->next, first);
 }
 
+static inline int list_is_last_rcu(const struct list_head *list,
+				   const struct list_head *head)
+{
+	return READ_ONCE(list->next) == head;
+}
+
 /*
  * Wait until the work is finally complete, even if it tries to postpone
  * by requeueing itself. Note, that if the worker never cancels itself,
-- 
2.20.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 14+ messages in thread

* [Intel-gfx] [PATCH 3/5] drm/i915/execlists: Track active elements during dequeue
  2020-03-09 11:09 [Intel-gfx] [PATCH 1/5] drm/i915: Mark up unlocked update of i915_request.hwsp_seqno Chris Wilson
  2020-03-09 11:09 ` [Intel-gfx] [PATCH 2/5] drm/i915/gt: Mark up racy check of last list element Chris Wilson
@ 2020-03-09 11:09 ` Chris Wilson
  2020-03-09 11:09 ` [Intel-gfx] [PATCH 4/5] drm/i915/execlists: Mark up read of i915_request.fence.flags Chris Wilson
                   ` (4 subsequent siblings)
  6 siblings, 0 replies; 14+ messages in thread
From: Chris Wilson @ 2020-03-09 11:09 UTC (permalink / raw)
  To: intel-gfx

Record the initial active element we use when building the next ELSP
submission, so that we can compare against it later to see if there's
no change.

Fixes: 44d0a9c05bc0 ("drm/i915/execlists: Skip redundant resubmission")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/intel_lrc.c | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 6266ef2ae6a0..a9d77b0e4e27 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1674,16 +1674,21 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
 }
 
 static struct i915_request *
-last_active(const struct intel_engine_execlists *execlists)
+__last_active(const struct intel_engine_execlists *execlists,
+	      struct i915_request * const *last)
 {
-	struct i915_request * const *last = READ_ONCE(execlists->active);
-
 	while (*last && i915_request_completed(*last))
 		last++;
 
 	return *last;
 }
 
+static struct i915_request *
+last_active(const struct intel_engine_execlists *execlists)
+{
+	return __last_active(execlists, READ_ONCE(execlists->active));
+}
+
 #define for_each_waiter(p__, rq__) \
 	list_for_each_entry_lockless(p__, \
 				     &(rq__)->sched.waiters_list, \
@@ -1852,6 +1857,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct i915_request **port = execlists->pending;
 	struct i915_request ** const last_port = port + execlists->port_mask;
+	struct i915_request * const *active;
 	struct i915_request *last;
 	struct rb_node *rb;
 	bool submit = false;
@@ -1906,7 +1912,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	 * i.e. we will retrigger preemption following the ack in case
 	 * of trouble.
 	 */
-	last = last_active(execlists);
+	active = READ_ONCE(execlists->active);
+	last = __last_active(execlists, active);
 	if (last) {
 		if (need_preempt(engine, last, rb)) {
 			ENGINE_TRACE(engine,
@@ -2191,7 +2198,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		 * Skip if we ended up with exactly the same set of requests,
 		 * e.g. trying to timeslice a pair of ordered contexts
 		 */
-		if (!memcmp(execlists->active, execlists->pending,
+		if (!memcmp(active, execlists->pending,
 			    (port - execlists->pending + 1) * sizeof(*port))) {
 			do
 				execlists_schedule_out(fetch_and_zero(port));
-- 
2.20.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 14+ messages in thread

* [Intel-gfx] [PATCH 4/5] drm/i915/execlists: Mark up read of i915_request.fence.flags
  2020-03-09 11:09 [Intel-gfx] [PATCH 1/5] drm/i915: Mark up unlocked update of i915_request.hwsp_seqno Chris Wilson
  2020-03-09 11:09 ` [Intel-gfx] [PATCH 2/5] drm/i915/gt: Mark up racy check of last list element Chris Wilson
  2020-03-09 11:09 ` [Intel-gfx] [PATCH 3/5] drm/i915/execlists: Track active elements during dequeue Chris Wilson
@ 2020-03-09 11:09 ` Chris Wilson
  2020-03-09 16:49   ` Mika Kuoppala
  2020-03-09 11:09 ` [Intel-gfx] [PATCH 5/5] drm/i915/execlsts: Mark up racy inspection of current i915_request priority Chris Wilson
                   ` (3 subsequent siblings)
  6 siblings, 1 reply; 14+ messages in thread
From: Chris Wilson @ 2020-03-09 11:09 UTC (permalink / raw)
  To: intel-gfx

[  145.927961] BUG: KCSAN: data-race in can_merge_rq [i915] / signal_irq_work [i915]
[  145.927980]
[  145.927992] write (marked) to 0xffff8881e513fab0 of 8 bytes by interrupt on cpu 2:
[  145.928250]  signal_irq_work+0x134/0x640 [i915]
[  145.928268]  irq_work_run_list+0xd7/0x120
[  145.928283]  irq_work_run+0x1d/0x50
[  145.928300]  smp_irq_work_interrupt+0x21/0x30
[  145.928328]  irq_work_interrupt+0xf/0x20
[  145.928356]  _raw_spin_unlock_irqrestore+0x34/0x40
[  145.928596]  execlists_submission_tasklet+0xde/0x170 [i915]
[  145.928616]  tasklet_action_common.isra.0+0x42/0xa0
[  145.928632]  __do_softirq+0xd7/0x2cd
[  145.928646]  irq_exit+0xbe/0xe0
[  145.928665]  do_IRQ+0x51/0x100
[  145.928684]  ret_from_intr+0x0/0x1c
[  145.928699]  schedule+0x0/0xb0
[  145.928719]  worker_thread+0x194/0x670
[  145.928743]  kthread+0x19a/0x1e0
[  145.928765]  ret_from_fork+0x1f/0x30
[  145.928784]
[  145.928796] read to 0xffff8881e513fab0 of 8 bytes by task 738 on cpu 1:
[  145.929046]  can_merge_rq+0xb1/0x100 [i915]
[  145.929282]  __execlists_submission_tasklet+0x866/0x25a0 [i915]
[  145.929518]  execlists_submit_request+0x2a4/0x2b0 [i915]
[  145.929758]  submit_notify+0x8f/0xc0 [i915]
[  145.929989]  __i915_sw_fence_complete+0x5d/0x3e0 [i915]
[  145.930221]  i915_sw_fence_complete+0x58/0x80 [i915]
[  145.930453]  i915_sw_fence_commit+0x16/0x20 [i915]
[  145.930698]  __i915_request_queue+0x60/0x70 [i915]
[  145.930935]  i915_gem_do_execbuffer+0x3997/0x4c20 [i915]
[  145.931175]  i915_gem_execbuffer2_ioctl+0x2c3/0x580 [i915]
[  145.931194]  drm_ioctl_kernel+0xe4/0x120
[  145.931208]  drm_ioctl+0x297/0x4c7
[  145.931222]  ksys_ioctl+0x89/0xb0
[  145.931238]  __x64_sys_ioctl+0x42/0x60
[  145.931260]  do_syscall_64+0x6e/0x2c0
[  145.931275]  entry_SYSCALL_64_after_hwframe+0x44/0xa9

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/intel_lrc.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index a9d77b0e4e27..20dd3c2cfa2f 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1597,6 +1597,11 @@ static bool can_merge_ctx(const struct intel_context *prev,
 	return true;
 }
 
+static unsigned long i915_request_flags(const struct i915_request *rq)
+{
+	return READ_ONCE(rq->fence.flags);
+}
+
 static bool can_merge_rq(const struct i915_request *prev,
 			 const struct i915_request *next)
 {
@@ -1614,7 +1619,7 @@ static bool can_merge_rq(const struct i915_request *prev,
 	if (i915_request_completed(next))
 		return true;
 
-	if (unlikely((prev->fence.flags ^ next->fence.flags) &
+	if (unlikely((i915_request_flags(prev) ^ i915_request_flags(next)) &
 		     (BIT(I915_FENCE_FLAG_NOPREEMPT) |
 		      BIT(I915_FENCE_FLAG_SENTINEL))))
 		return false;
-- 
2.20.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 14+ messages in thread

* [Intel-gfx] [PATCH 5/5] drm/i915/execlsts: Mark up racy inspection of current i915_request priority
  2020-03-09 11:09 [Intel-gfx] [PATCH 1/5] drm/i915: Mark up unlocked update of i915_request.hwsp_seqno Chris Wilson
                   ` (2 preceding siblings ...)
  2020-03-09 11:09 ` [Intel-gfx] [PATCH 4/5] drm/i915/execlists: Mark up read of i915_request.fence.flags Chris Wilson
@ 2020-03-09 11:09 ` Chris Wilson
  2020-03-09 17:02   ` Mika Kuoppala
  2020-03-09 12:28 ` [Intel-gfx] ✓ Fi.CI.BAT: success for series starting with [1/5] drm/i915: Mark up unlocked update of i915_request.hwsp_seqno Patchwork
                   ` (2 subsequent siblings)
  6 siblings, 1 reply; 14+ messages in thread
From: Chris Wilson @ 2020-03-09 11:09 UTC (permalink / raw)
  To: intel-gfx

[  120.176548] BUG: KCSAN: data-race in __i915_schedule [i915] / effective_prio [i915]
[  120.176566]
[  120.176577] write to 0xffff8881e35e6540 of 4 bytes by task 730 on cpu 3:
[  120.176792]  __i915_schedule+0x63e/0x920 [i915]
[  120.177007]  __bump_priority+0x63/0x80 [i915]
[  120.177220]  __i915_sched_node_add_dependency+0x258/0x300 [i915]
[  120.177438]  i915_sched_node_add_dependency+0x50/0xa0 [i915]
[  120.177654]  i915_request_await_dma_fence+0x1da/0x530 [i915]
[  120.177867]  i915_request_await_object+0x2fe/0x470 [i915]
[  120.178081]  i915_gem_do_execbuffer+0x45dc/0x4c20 [i915]
[  120.178292]  i915_gem_execbuffer2_ioctl+0x2c3/0x580 [i915]
[  120.178309]  drm_ioctl_kernel+0xe4/0x120
[  120.178322]  drm_ioctl+0x297/0x4c7
[  120.178335]  ksys_ioctl+0x89/0xb0
[  120.178348]  __x64_sys_ioctl+0x42/0x60
[  120.178361]  do_syscall_64+0x6e/0x2c0
[  120.178375]  entry_SYSCALL_64_after_hwframe+0x44/0xa9
[  120.178387]
[  120.178397] read to 0xffff8881e35e6540 of 4 bytes by interrupt on cpu 2:
[  120.178606]  effective_prio+0x25/0xc0 [i915]
[  120.178812]  process_csb+0xe8b/0x10a0 [i915]
[  120.179021]  execlists_submission_tasklet+0x30/0x170 [i915]
[  120.179038]  tasklet_action_common.isra.0+0x42/0xa0
[  120.179053]  __do_softirq+0xd7/0x2cd
[  120.179066]  irq_exit+0xbe/0xe0
[  120.179078]  do_IRQ+0x51/0x100
[  120.179090]  ret_from_intr+0x0/0x1c
[  120.179104]  cpuidle_enter_state+0x1b8/0x5d0
[  120.179117]  cpuidle_enter+0x50/0x90
[  120.179131]  do_idle+0x1a1/0x1f0
[  120.179145]  cpu_startup_entry+0x14/0x16
[  120.179158]  start_secondary+0x120/0x180
[  120.179172]  secondary_startup_64+0xa4/0xb0

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/intel_lrc.c   | 2 +-
 drivers/gpu/drm/i915/i915_scheduler.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 20dd3c2cfa2f..3eb7adc4e057 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -293,7 +293,7 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb)
 
 static inline int rq_prio(const struct i915_request *rq)
 {
-	return rq->sched.attr.priority;
+	return READ_ONCE(rq->sched.attr.priority);
 }
 
 static int effective_prio(const struct i915_request *rq)
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 52f71e83e088..af51810dc78c 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -321,7 +321,7 @@ static void __i915_schedule(struct i915_sched_node *node,
 
 		GEM_BUG_ON(node_to_request(node)->engine != engine);
 
-		node->attr.priority = prio;
+		WRITE_ONCE(node->attr.priority, prio);
 
 		/*
 		 * Once the request is ready, it will be placed into the
-- 
2.20.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 14+ messages in thread

* [Intel-gfx] ✓ Fi.CI.BAT: success for series starting with [1/5] drm/i915: Mark up unlocked update of i915_request.hwsp_seqno
  2020-03-09 11:09 [Intel-gfx] [PATCH 1/5] drm/i915: Mark up unlocked update of i915_request.hwsp_seqno Chris Wilson
                   ` (3 preceding siblings ...)
  2020-03-09 11:09 ` [Intel-gfx] [PATCH 5/5] drm/i915/execlsts: Mark up racy inspection of current i915_request priority Chris Wilson
@ 2020-03-09 12:28 ` Patchwork
  2020-03-09 14:03 ` [Intel-gfx] [PATCH 1/5] " Mika Kuoppala
  2020-03-09 15:57 ` [Intel-gfx] ✓ Fi.CI.IGT: success for series starting with [1/5] " Patchwork
  6 siblings, 0 replies; 14+ messages in thread
From: Patchwork @ 2020-03-09 12:28 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [1/5] drm/i915: Mark up unlocked update of i915_request.hwsp_seqno
URL   : https://patchwork.freedesktop.org/series/74445/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_8097 -> Patchwork_16877
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/index.html

Known issues
------------

  Here are the changes found in Patchwork_16877 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@i915_selftest@live@workarounds:
    - fi-icl-u2:          [PASS][1] -> [DMESG-FAIL][2] ([i915#922])
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8097/fi-icl-u2/igt@i915_selftest@live@workarounds.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/fi-icl-u2/igt@i915_selftest@live@workarounds.html

  
#### Possible fixes ####

  * igt@i915_selftest@live@gem_contexts:
    - fi-skl-lmem:        [INCOMPLETE][3] ([i915#424]) -> [PASS][4]
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8097/fi-skl-lmem/igt@i915_selftest@live@gem_contexts.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/fi-skl-lmem/igt@i915_selftest@live@gem_contexts.html

  * igt@kms_chamelium@hdmi-hpd-fast:
    - fi-kbl-7500u:       [FAIL][5] ([fdo#111407]) -> [PASS][6]
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8097/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [fdo#111407]: https://bugs.freedesktop.org/show_bug.cgi?id=111407
  [i915#424]: https://gitlab.freedesktop.org/drm/intel/issues/424
  [i915#470]: https://gitlab.freedesktop.org/drm/intel/issues/470
  [i915#922]: https://gitlab.freedesktop.org/drm/intel/issues/922


Participating hosts (46 -> 38)
------------------------------

  Additional (3): fi-bsw-kefka fi-glk-dsi fi-elk-e7500 
  Missing    (11): fi-ilk-m540 fi-hsw-4200u fi-byt-j1900 fi-byt-squawks fi-bsw-cyan fi-bwr-2160 fi-ctg-p8600 fi-ivb-3770 fi-bdw-samus fi-byt-clapper fi-skl-6600u 


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_8097 -> Patchwork_16877

  CI-20190529: 20190529
  CI_DRM_8097: 2e46e269a2843c5d0b6c72bfb7fa9d9913c15415 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5499: 2e23cf6f63fc6ba1d9543f8327698d6f21813cec @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_16877: fa62f674cf3804822c045b02f37954e7d0a03f99 @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

fa62f674cf38 drm/i915/execlsts: Mark up racy inspection of current i915_request priority
156d45902ae0 drm/i915/execlists: Mark up read of i915_request.fence.flags
a2e7499658cf drm/i915/execlists: Track active elements during dequeue
05e6813b151b drm/i915/gt: Mark up racy check of last list element
213680e5d5f8 drm/i915: Mark up unlocked update of i915_request.hwsp_seqno

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/index.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [Intel-gfx] [PATCH 1/5] drm/i915: Mark up unlocked update of i915_request.hwsp_seqno
  2020-03-09 11:09 [Intel-gfx] [PATCH 1/5] drm/i915: Mark up unlocked update of i915_request.hwsp_seqno Chris Wilson
                   ` (4 preceding siblings ...)
  2020-03-09 12:28 ` [Intel-gfx] ✓ Fi.CI.BAT: success for series starting with [1/5] drm/i915: Mark up unlocked update of i915_request.hwsp_seqno Patchwork
@ 2020-03-09 14:03 ` Mika Kuoppala
  2020-03-09 14:10   ` Chris Wilson
  2020-03-09 15:57 ` [Intel-gfx] ✓ Fi.CI.IGT: success for series starting with [1/5] " Patchwork
  6 siblings, 1 reply; 14+ messages in thread
From: Mika Kuoppala @ 2020-03-09 14:03 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

Chris Wilson <chris@chris-wilson.co.uk> writes:

> During i915_request_retire() we decouple the i915_request.hwsp_seqno
> from the intel_timeline so that it may be freed before the request is
> released. However, we need to warn the compiler that the pointer may
> update under its nose.
>
> [  171.438899] BUG: KCSAN: data-race in i915_request_await_dma_fence [i915] / i915_request_retire [i915]
> [  171.438920]
> [  171.438932] write to 0xffff8881e7e28ce0 of 8 bytes by task 148 on cpu 2:
> [  171.439174]  i915_request_retire+0x1ea/0x660 [i915]
> [  171.439408]  retire_requests+0x7a/0xd0 [i915]
> [  171.439640]  engine_retire+0xa1/0xe0 [i915]
> [  171.439657]  process_one_work+0x3b1/0x690
> [  171.439671]  worker_thread+0x80/0x670
> [  171.439685]  kthread+0x19a/0x1e0
> [  171.439701]  ret_from_fork+0x1f/0x30
> [  171.439721]
> [  171.439739] read to 0xffff8881e7e28ce0 of 8 bytes by task 696 on cpu 1:
> [  171.439990]  i915_request_await_dma_fence+0x162/0x520 [i915]
> [  171.440230]  i915_request_await_object+0x2fe/0x470 [i915]
> [  171.440467]  i915_gem_do_execbuffer+0x45dc/0x4c20 [i915]
> [  171.440704]  i915_gem_execbuffer2_ioctl+0x2c3/0x580 [i915]
> [  171.440722]  drm_ioctl_kernel+0xe4/0x120
> [  171.440736]  drm_ioctl+0x297/0x4c7
> [  171.440750]  ksys_ioctl+0x89/0xb0
> [  171.440766]  __x64_sys_ioctl+0x42/0x60
> [  171.440788]  do_syscall_64+0x6e/0x2c0
> [  171.440802]  entry_SYSCALL_64_after_hwframe+0x44/0xa9
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
> ---
>  drivers/gpu/drm/i915/i915_request.h | 7 +++++--
>  1 file changed, 5 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
> index d4bae16b4785..6020d5b2a3df 100644
> --- a/drivers/gpu/drm/i915/i915_request.h
> +++ b/drivers/gpu/drm/i915/i915_request.h
> @@ -396,7 +396,9 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
>  
>  static inline u32 __hwsp_seqno(const struct i915_request *rq)
>  {
> -	return READ_ONCE(*rq->hwsp_seqno);
> +	const u32 *hwsp = READ_ONCE(rq->hwsp_seqno);
> +
> +	return READ_ONCE(*hwsp);

This is good enough for the decoupling. But good enough for hardware
might be a different thing.

I am paranoid enough to wanting an rmb(), before the final
read once.

and clflush after.

If the hardware can't guarantee coherency in csb, why
would it in the different region in hwsp.

But the patch does the what the commit message says,
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>

>  }
>  
>  /**
> @@ -510,7 +512,8 @@ static inline bool i915_request_completed(const struct i915_request *rq)
>  
>  static inline void i915_request_mark_complete(struct i915_request *rq)
>  {
> -	rq->hwsp_seqno = (u32 *)&rq->fence.seqno; /* decouple from HWSP */
> +	WRITE_ONCE(rq->hwsp_seqno, /* decouple from HWSP */
> +		   (u32 *)&rq->fence.seqno);
>  }
>  
>  static inline bool i915_request_has_waitboost(const struct i915_request *rq)
> -- 
> 2.20.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [Intel-gfx] [PATCH 1/5] drm/i915: Mark up unlocked update of i915_request.hwsp_seqno
  2020-03-09 14:03 ` [Intel-gfx] [PATCH 1/5] " Mika Kuoppala
@ 2020-03-09 14:10   ` Chris Wilson
  2020-03-09 15:21     ` Mika Kuoppala
  0 siblings, 1 reply; 14+ messages in thread
From: Chris Wilson @ 2020-03-09 14:10 UTC (permalink / raw)
  To: Mika Kuoppala, intel-gfx

Quoting Mika Kuoppala (2020-03-09 14:03:01)
> Chris Wilson <chris@chris-wilson.co.uk> writes:
> 
> > During i915_request_retire() we decouple the i915_request.hwsp_seqno
> > from the intel_timeline so that it may be freed before the request is
> > released. However, we need to warn the compiler that the pointer may
> > update under its nose.
> >
> > [  171.438899] BUG: KCSAN: data-race in i915_request_await_dma_fence [i915] / i915_request_retire [i915]
> > [  171.438920]
> > [  171.438932] write to 0xffff8881e7e28ce0 of 8 bytes by task 148 on cpu 2:
> > [  171.439174]  i915_request_retire+0x1ea/0x660 [i915]
> > [  171.439408]  retire_requests+0x7a/0xd0 [i915]
> > [  171.439640]  engine_retire+0xa1/0xe0 [i915]
> > [  171.439657]  process_one_work+0x3b1/0x690
> > [  171.439671]  worker_thread+0x80/0x670
> > [  171.439685]  kthread+0x19a/0x1e0
> > [  171.439701]  ret_from_fork+0x1f/0x30
> > [  171.439721]
> > [  171.439739] read to 0xffff8881e7e28ce0 of 8 bytes by task 696 on cpu 1:
> > [  171.439990]  i915_request_await_dma_fence+0x162/0x520 [i915]
> > [  171.440230]  i915_request_await_object+0x2fe/0x470 [i915]
> > [  171.440467]  i915_gem_do_execbuffer+0x45dc/0x4c20 [i915]
> > [  171.440704]  i915_gem_execbuffer2_ioctl+0x2c3/0x580 [i915]
> > [  171.440722]  drm_ioctl_kernel+0xe4/0x120
> > [  171.440736]  drm_ioctl+0x297/0x4c7
> > [  171.440750]  ksys_ioctl+0x89/0xb0
> > [  171.440766]  __x64_sys_ioctl+0x42/0x60
> > [  171.440788]  do_syscall_64+0x6e/0x2c0
> > [  171.440802]  entry_SYSCALL_64_after_hwframe+0x44/0xa9
> >
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
> > ---
> >  drivers/gpu/drm/i915/i915_request.h | 7 +++++--
> >  1 file changed, 5 insertions(+), 2 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
> > index d4bae16b4785..6020d5b2a3df 100644
> > --- a/drivers/gpu/drm/i915/i915_request.h
> > +++ b/drivers/gpu/drm/i915/i915_request.h
> > @@ -396,7 +396,9 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
> >  
> >  static inline u32 __hwsp_seqno(const struct i915_request *rq)
> >  {
> > -     return READ_ONCE(*rq->hwsp_seqno);
> > +     const u32 *hwsp = READ_ONCE(rq->hwsp_seqno);
> > +
> > +     return READ_ONCE(*hwsp);
> 
> This is good enough for decouple. But good enough for hardware
> might be different thing.
> 
> I am paranoid enough to wanting an rmb(), before the final
> read once.

What? [That pointer is nothing to do with HW; it's a pointer to a
pointer to HW.]
 
> and clflush after.

No. We want to keep the cached read around. If you are paranoid, you
would put the clflush very carefully in the interrupt signalling.

> If the hardware can't guarantee coherency in csb, why
> would it in the different region in hwsp.

It's the order of the writes that's the problem in icl. There's no such
sequence here.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [Intel-gfx] [PATCH 1/5] drm/i915: Mark up unlocked update of i915_request.hwsp_seqno
  2020-03-09 14:10   ` Chris Wilson
@ 2020-03-09 15:21     ` Mika Kuoppala
  2020-03-09 16:04       ` Chris Wilson
  0 siblings, 1 reply; 14+ messages in thread
From: Mika Kuoppala @ 2020-03-09 15:21 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

Chris Wilson <chris@chris-wilson.co.uk> writes:

> Quoting Mika Kuoppala (2020-03-09 14:03:01)
>> Chris Wilson <chris@chris-wilson.co.uk> writes:
>> 
>> > During i915_request_retire() we decouple the i915_request.hwsp_seqno
>> > from the intel_timeline so that it may be freed before the request is
>> > released. However, we need to warn the compiler that the pointer may
>> > update under its nose.
>> >
>> > [  171.438899] BUG: KCSAN: data-race in i915_request_await_dma_fence [i915] / i915_request_retire [i915]
>> > [  171.438920]
>> > [  171.438932] write to 0xffff8881e7e28ce0 of 8 bytes by task 148 on cpu 2:
>> > [  171.439174]  i915_request_retire+0x1ea/0x660 [i915]
>> > [  171.439408]  retire_requests+0x7a/0xd0 [i915]
>> > [  171.439640]  engine_retire+0xa1/0xe0 [i915]
>> > [  171.439657]  process_one_work+0x3b1/0x690
>> > [  171.439671]  worker_thread+0x80/0x670
>> > [  171.439685]  kthread+0x19a/0x1e0
>> > [  171.439701]  ret_from_fork+0x1f/0x30
>> > [  171.439721]
>> > [  171.439739] read to 0xffff8881e7e28ce0 of 8 bytes by task 696 on cpu 1:
>> > [  171.439990]  i915_request_await_dma_fence+0x162/0x520 [i915]
>> > [  171.440230]  i915_request_await_object+0x2fe/0x470 [i915]
>> > [  171.440467]  i915_gem_do_execbuffer+0x45dc/0x4c20 [i915]
>> > [  171.440704]  i915_gem_execbuffer2_ioctl+0x2c3/0x580 [i915]
>> > [  171.440722]  drm_ioctl_kernel+0xe4/0x120
>> > [  171.440736]  drm_ioctl+0x297/0x4c7
>> > [  171.440750]  ksys_ioctl+0x89/0xb0
>> > [  171.440766]  __x64_sys_ioctl+0x42/0x60
>> > [  171.440788]  do_syscall_64+0x6e/0x2c0
>> > [  171.440802]  entry_SYSCALL_64_after_hwframe+0x44/0xa9
>> >
>> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
>> > Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
>> > ---
>> >  drivers/gpu/drm/i915/i915_request.h | 7 +++++--
>> >  1 file changed, 5 insertions(+), 2 deletions(-)
>> >
>> > diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
>> > index d4bae16b4785..6020d5b2a3df 100644
>> > --- a/drivers/gpu/drm/i915/i915_request.h
>> > +++ b/drivers/gpu/drm/i915/i915_request.h
>> > @@ -396,7 +396,9 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
>> >  
>> >  static inline u32 __hwsp_seqno(const struct i915_request *rq)
>> >  {
>> > -     return READ_ONCE(*rq->hwsp_seqno);
>> > +     const u32 *hwsp = READ_ONCE(rq->hwsp_seqno);
>> > +
>> > +     return READ_ONCE(*hwsp);
>> 
>> This is good enough for the decouple. But good enough for the hardware
>> might be a different thing.
>> 
>> I am paranoid enough to want an rmb() before the final
>> read once.
>
> What? [That pointer is nothing to do with HW; it's a pointer to a
> pointer to HW.]

But you do read the value through the pointer to hardware.

CPU:
rmb(); READ_ONCE(*hwsp);

GPU:
WRITE_ONCE(*hwsp, seqno), wmb(), interrupt -> cpu.

Thus on waking up, you would be guaranteed to see the
value the GPU intended.

But as you say below, you want a cached value. And if
there is no reason to suspect the seqno vs int ordering,
I am fine with that.
-Mika

>  
>> and clflush after.
>
> No. We want to keep the cached read around. If you are paranoid, you
> would put the clflush very carefully in the interrupt signalling.
>
>> If the hardware can't guarantee coherency in csb, why
>> would it in the different region in hwsp.
>
> It's the order of the writes that's the problem in icl. There's no such
> sequence here.
> -Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 14+ messages in thread

* [Intel-gfx] ✓ Fi.CI.IGT: success for series starting with [1/5] drm/i915: Mark up unlocked update of i915_request.hwsp_seqno
  2020-03-09 11:09 [Intel-gfx] [PATCH 1/5] drm/i915: Mark up unlocked update of i915_request.hwsp_seqno Chris Wilson
                   ` (5 preceding siblings ...)
  2020-03-09 14:03 ` [Intel-gfx] [PATCH 1/5] " Mika Kuoppala
@ 2020-03-09 15:57 ` Patchwork
  6 siblings, 0 replies; 14+ messages in thread
From: Patchwork @ 2020-03-09 15:57 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [1/5] drm/i915: Mark up unlocked update of i915_request.hwsp_seqno
URL   : https://patchwork.freedesktop.org/series/74445/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_8097_full -> Patchwork_16877_full
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  

Known issues
------------

  Here are the changes found in Patchwork_16877_full that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_ctx_isolation@rcs0-s3:
    - shard-apl:          [PASS][1] -> [DMESG-WARN][2] ([i915#180]) +3 similar issues
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8097/shard-apl4/igt@gem_ctx_isolation@rcs0-s3.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/shard-apl4/igt@gem_ctx_isolation@rcs0-s3.html

  * igt@gem_ctx_isolation@vcs1-clean:
    - shard-iclb:         [PASS][3] -> [SKIP][4] ([fdo#112080]) +5 similar issues
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8097/shard-iclb4/igt@gem_ctx_isolation@vcs1-clean.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/shard-iclb5/igt@gem_ctx_isolation@vcs1-clean.html

  * igt@gem_ctx_persistence@close-replace-race:
    - shard-skl:          [PASS][5] -> [INCOMPLETE][6] ([i915#1402])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8097/shard-skl10/igt@gem_ctx_persistence@close-replace-race.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/shard-skl1/igt@gem_ctx_persistence@close-replace-race.html

  * igt@gem_exec_schedule@implicit-read-write-bsd2:
    - shard-iclb:         [PASS][7] -> [SKIP][8] ([fdo#109276] / [i915#677])
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8097/shard-iclb2/igt@gem_exec_schedule@implicit-read-write-bsd2.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/shard-iclb7/igt@gem_exec_schedule@implicit-read-write-bsd2.html

  * igt@gem_exec_schedule@pi-common-bsd:
    - shard-iclb:         [PASS][9] -> [SKIP][10] ([i915#677])
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8097/shard-iclb6/igt@gem_exec_schedule@pi-common-bsd.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/shard-iclb1/igt@gem_exec_schedule@pi-common-bsd.html

  * igt@gem_exec_schedule@reorder-wide-bsd:
    - shard-iclb:         [PASS][11] -> [SKIP][12] ([fdo#112146]) +3 similar issues
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8097/shard-iclb7/igt@gem_exec_schedule@reorder-wide-bsd.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/shard-iclb2/igt@gem_exec_schedule@reorder-wide-bsd.html

  * igt@gem_exec_whisper@basic-fds-all:
    - shard-glk:          [PASS][13] -> [DMESG-WARN][14] ([i915#118] / [i915#95])
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8097/shard-glk7/igt@gem_exec_whisper@basic-fds-all.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/shard-glk2/igt@gem_exec_whisper@basic-fds-all.html

  * igt@gem_ppgtt@flink-and-close-vma-leak:
    - shard-glk:          [PASS][15] -> [FAIL][16] ([i915#644])
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8097/shard-glk4/igt@gem_ppgtt@flink-and-close-vma-leak.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/shard-glk4/igt@gem_ppgtt@flink-and-close-vma-leak.html
    - shard-tglb:         [PASS][17] -> [FAIL][18] ([i915#644])
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8097/shard-tglb6/igt@gem_ppgtt@flink-and-close-vma-leak.html
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/shard-tglb2/igt@gem_ppgtt@flink-and-close-vma-leak.html

  * igt@i915_pm_rps@waitboost:
    - shard-iclb:         [PASS][19] -> [FAIL][20] ([i915#413])
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8097/shard-iclb8/igt@i915_pm_rps@waitboost.html
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/shard-iclb7/igt@i915_pm_rps@waitboost.html

  * igt@kms_cursor_crc@pipe-a-cursor-suspend:
    - shard-kbl:          [PASS][21] -> [DMESG-WARN][22] ([i915#180]) +4 similar issues
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8097/shard-kbl2/igt@kms_cursor_crc@pipe-a-cursor-suspend.html
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/shard-kbl7/igt@kms_cursor_crc@pipe-a-cursor-suspend.html

  * igt@kms_cursor_crc@pipe-b-cursor-suspend:
    - shard-skl:          [PASS][23] -> [FAIL][24] ([i915#54])
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8097/shard-skl7/igt@kms_cursor_crc@pipe-b-cursor-suspend.html
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/shard-skl9/igt@kms_cursor_crc@pipe-b-cursor-suspend.html

  * igt@kms_hdr@bpc-switch-suspend:
    - shard-skl:          [PASS][25] -> [FAIL][26] ([i915#1188])
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8097/shard-skl1/igt@kms_hdr@bpc-switch-suspend.html
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/shard-skl9/igt@kms_hdr@bpc-switch-suspend.html

  * igt@kms_psr@psr2_sprite_render:
    - shard-iclb:         [PASS][27] -> [SKIP][28] ([fdo#109441])
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8097/shard-iclb2/igt@kms_psr@psr2_sprite_render.html
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/shard-iclb7/igt@kms_psr@psr2_sprite_render.html

  * igt@prime_busy@hang-bsd2:
    - shard-iclb:         [PASS][29] -> [SKIP][30] ([fdo#109276]) +11 similar issues
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8097/shard-iclb1/igt@prime_busy@hang-bsd2.html
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/shard-iclb3/igt@prime_busy@hang-bsd2.html

  
#### Possible fixes ####

  * igt@gem_busy@busy-vcs1:
    - shard-iclb:         [SKIP][31] ([fdo#112080]) -> [PASS][32] +12 similar issues
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8097/shard-iclb8/igt@gem_busy@busy-vcs1.html
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/shard-iclb1/igt@gem_busy@busy-vcs1.html

  * igt@gem_exec_schedule@implicit-write-read-bsd1:
    - shard-iclb:         [SKIP][33] ([fdo#109276] / [i915#677]) -> [PASS][34] +2 similar issues
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8097/shard-iclb8/igt@gem_exec_schedule@implicit-write-read-bsd1.html
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/shard-iclb1/igt@gem_exec_schedule@implicit-write-read-bsd1.html

  * igt@gem_exec_schedule@pi-distinct-iova-bsd:
    - shard-iclb:         [SKIP][35] ([i915#677]) -> [PASS][36]
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8097/shard-iclb2/igt@gem_exec_schedule@pi-distinct-iova-bsd.html
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/shard-iclb7/igt@gem_exec_schedule@pi-distinct-iova-bsd.html

  * igt@gem_exec_schedule@preempt-other-chain-bsd:
    - shard-iclb:         [SKIP][37] ([fdo#112146]) -> [PASS][38] +6 similar issues
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8097/shard-iclb4/igt@gem_exec_schedule@preempt-other-chain-bsd.html
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/shard-iclb5/igt@gem_exec_schedule@preempt-other-chain-bsd.html

  * igt@gem_exec_schedule@preempt-queue-bsd1:
    - shard-iclb:         [SKIP][39] ([fdo#109276]) -> [PASS][40] +20 similar issues
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8097/shard-iclb6/igt@gem_exec_schedule@preempt-queue-bsd1.html
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/shard-iclb1/igt@gem_exec_schedule@preempt-queue-bsd1.html

  * igt@i915_pm_dc@dc6-psr:
    - shard-iclb:         [FAIL][41] ([i915#454]) -> [PASS][42]
   [41]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8097/shard-iclb6/igt@i915_pm_dc@dc6-psr.html
   [42]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/shard-iclb1/igt@i915_pm_dc@dc6-psr.html

  * igt@i915_pm_rps@reset:
    - shard-iclb:         [FAIL][43] ([i915#413]) -> [PASS][44]
   [43]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8097/shard-iclb6/igt@i915_pm_rps@reset.html
   [44]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/shard-iclb5/igt@i915_pm_rps@reset.html

  * igt@kms_cursor_crc@pipe-b-cursor-suspend:
    - shard-apl:          [DMESG-WARN][45] ([i915#180]) -> [PASS][46] +1 similar issue
   [45]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8097/shard-apl6/igt@kms_cursor_crc@pipe-b-cursor-suspend.html
   [46]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/shard-apl1/igt@kms_cursor_crc@pipe-b-cursor-suspend.html

  * igt@kms_cursor_crc@pipe-c-cursor-suspend:
    - shard-skl:          [INCOMPLETE][47] ([i915#300]) -> [PASS][48]
   [47]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8097/shard-skl6/igt@kms_cursor_crc@pipe-c-cursor-suspend.html
   [48]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/shard-skl6/igt@kms_cursor_crc@pipe-c-cursor-suspend.html

  * igt@kms_cursor_legacy@cursor-vs-flip-toggle:
    - shard-hsw:          [FAIL][49] ([i915#57]) -> [PASS][50]
   [49]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8097/shard-hsw6/igt@kms_cursor_legacy@cursor-vs-flip-toggle.html
   [50]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/shard-hsw5/igt@kms_cursor_legacy@cursor-vs-flip-toggle.html

  * igt@kms_draw_crc@draw-method-xrgb2101010-blt-xtiled:
    - shard-skl:          [FAIL][51] ([i915#52] / [i915#54]) -> [PASS][52]
   [51]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8097/shard-skl8/igt@kms_draw_crc@draw-method-xrgb2101010-blt-xtiled.html
   [52]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/shard-skl7/igt@kms_draw_crc@draw-method-xrgb2101010-blt-xtiled.html

  * igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a:
    - shard-kbl:          [DMESG-WARN][53] ([i915#180]) -> [PASS][54] +4 similar issues
   [53]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8097/shard-kbl3/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a.html
   [54]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/shard-kbl1/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a.html

  * igt@kms_plane_lowres@pipe-a-tiling-y:
    - shard-glk:          [FAIL][55] ([i915#899]) -> [PASS][56]
   [55]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8097/shard-glk5/igt@kms_plane_lowres@pipe-a-tiling-y.html
   [56]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/shard-glk6/igt@kms_plane_lowres@pipe-a-tiling-y.html

  * igt@kms_psr@psr2_cursor_plane_onoff:
    - shard-iclb:         [SKIP][57] ([fdo#109441]) -> [PASS][58]
   [57]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8097/shard-iclb7/igt@kms_psr@psr2_cursor_plane_onoff.html
   [58]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/shard-iclb2/igt@kms_psr@psr2_cursor_plane_onoff.html

  * igt@kms_psr@suspend:
    - shard-iclb:         [INCOMPLETE][59] ([i915#1185]) -> [PASS][60]
   [59]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8097/shard-iclb3/igt@kms_psr@suspend.html
   [60]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/shard-iclb7/igt@kms_psr@suspend.html

  
#### Warnings ####

  * igt@gem_ctx_persistence@close-replace-race:
    - shard-apl:          [TIMEOUT][61] ([i915#1340]) -> [INCOMPLETE][62] ([fdo#103927] / [i915#1402])
   [61]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8097/shard-apl8/igt@gem_ctx_persistence@close-replace-race.html
   [62]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/shard-apl8/igt@gem_ctx_persistence@close-replace-race.html

  * igt@gem_linear_blits@normal:
    - shard-apl:          [TIMEOUT][63] ([fdo#111732]) -> [TIMEOUT][64] ([fdo#111732] / [i915#1322])
   [63]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8097/shard-apl1/igt@gem_linear_blits@normal.html
   [64]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/shard-apl4/igt@gem_linear_blits@normal.html

  * igt@i915_pm_dc@dc6-psr:
    - shard-tglb:         [SKIP][65] ([i915#468]) -> [FAIL][66] ([i915#454])
   [65]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8097/shard-tglb2/igt@i915_pm_dc@dc6-psr.html
   [66]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/shard-tglb6/igt@i915_pm_dc@dc6-psr.html

  * igt@runner@aborted:
    - shard-apl:          [FAIL][67] ([fdo#103927]) -> ([FAIL][68], [FAIL][69]) ([fdo#103927] / [i915#1402])
   [67]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8097/shard-apl1/igt@runner@aborted.html
   [68]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/shard-apl1/igt@runner@aborted.html
   [69]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/shard-apl8/igt@runner@aborted.html

  
  [fdo#103927]: https://bugs.freedesktop.org/show_bug.cgi?id=103927
  [fdo#109276]: https://bugs.freedesktop.org/show_bug.cgi?id=109276
  [fdo#109441]: https://bugs.freedesktop.org/show_bug.cgi?id=109441
  [fdo#111732]: https://bugs.freedesktop.org/show_bug.cgi?id=111732
  [fdo#112080]: https://bugs.freedesktop.org/show_bug.cgi?id=112080
  [fdo#112146]: https://bugs.freedesktop.org/show_bug.cgi?id=112146
  [i915#118]: https://gitlab.freedesktop.org/drm/intel/issues/118
  [i915#1185]: https://gitlab.freedesktop.org/drm/intel/issues/1185
  [i915#1188]: https://gitlab.freedesktop.org/drm/intel/issues/1188
  [i915#1322]: https://gitlab.freedesktop.org/drm/intel/issues/1322
  [i915#1340]: https://gitlab.freedesktop.org/drm/intel/issues/1340
  [i915#1402]: https://gitlab.freedesktop.org/drm/intel/issues/1402
  [i915#180]: https://gitlab.freedesktop.org/drm/intel/issues/180
  [i915#300]: https://gitlab.freedesktop.org/drm/intel/issues/300
  [i915#413]: https://gitlab.freedesktop.org/drm/intel/issues/413
  [i915#454]: https://gitlab.freedesktop.org/drm/intel/issues/454
  [i915#468]: https://gitlab.freedesktop.org/drm/intel/issues/468
  [i915#52]: https://gitlab.freedesktop.org/drm/intel/issues/52
  [i915#54]: https://gitlab.freedesktop.org/drm/intel/issues/54
  [i915#57]: https://gitlab.freedesktop.org/drm/intel/issues/57
  [i915#644]: https://gitlab.freedesktop.org/drm/intel/issues/644
  [i915#677]: https://gitlab.freedesktop.org/drm/intel/issues/677
  [i915#899]: https://gitlab.freedesktop.org/drm/intel/issues/899
  [i915#95]: https://gitlab.freedesktop.org/drm/intel/issues/95


Participating hosts (10 -> 10)
------------------------------

  No changes in participating hosts


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_8097 -> Patchwork_16877

  CI-20190529: 20190529
  CI_DRM_8097: 2e46e269a2843c5d0b6c72bfb7fa9d9913c15415 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5499: 2e23cf6f63fc6ba1d9543f8327698d6f21813cec @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_16877: fa62f674cf3804822c045b02f37954e7d0a03f99 @ git://anongit.freedesktop.org/gfx-ci/linux
  piglit_4509: fdc5a4ca11124ab8413c7988896eec4c97336694 @ git://anongit.freedesktop.org/piglit

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16877/index.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [Intel-gfx] [PATCH 1/5] drm/i915: Mark up unlocked update of i915_request.hwsp_seqno
  2020-03-09 15:21     ` Mika Kuoppala
@ 2020-03-09 16:04       ` Chris Wilson
  0 siblings, 0 replies; 14+ messages in thread
From: Chris Wilson @ 2020-03-09 16:04 UTC (permalink / raw)
  To: Mika Kuoppala, intel-gfx

Quoting Mika Kuoppala (2020-03-09 15:21:31)
> Chris Wilson <chris@chris-wilson.co.uk> writes:
> 
> > Quoting Mika Kuoppala (2020-03-09 14:03:01)
> >> Chris Wilson <chris@chris-wilson.co.uk> writes:
> >> 
> >> > During i915_request_retire() we decouple the i915_request.hwsp_seqno
> >> > from the intel_timeline so that it may be freed before the request is
> >> > released. However, we need to warn the compiler that the pointer may
> >> > update under its nose.
> >> >
> >> > [  171.438899] BUG: KCSAN: data-race in i915_request_await_dma_fence [i915] / i915_request_retire [i915]
> >> > [  171.438920]
> >> > [  171.438932] write to 0xffff8881e7e28ce0 of 8 bytes by task 148 on cpu 2:
> >> > [  171.439174]  i915_request_retire+0x1ea/0x660 [i915]
> >> > [  171.439408]  retire_requests+0x7a/0xd0 [i915]
> >> > [  171.439640]  engine_retire+0xa1/0xe0 [i915]
> >> > [  171.439657]  process_one_work+0x3b1/0x690
> >> > [  171.439671]  worker_thread+0x80/0x670
> >> > [  171.439685]  kthread+0x19a/0x1e0
> >> > [  171.439701]  ret_from_fork+0x1f/0x30
> >> > [  171.439721]
> >> > [  171.439739] read to 0xffff8881e7e28ce0 of 8 bytes by task 696 on cpu 1:
> >> > [  171.439990]  i915_request_await_dma_fence+0x162/0x520 [i915]
> >> > [  171.440230]  i915_request_await_object+0x2fe/0x470 [i915]
> >> > [  171.440467]  i915_gem_do_execbuffer+0x45dc/0x4c20 [i915]
> >> > [  171.440704]  i915_gem_execbuffer2_ioctl+0x2c3/0x580 [i915]
> >> > [  171.440722]  drm_ioctl_kernel+0xe4/0x120
> >> > [  171.440736]  drm_ioctl+0x297/0x4c7
> >> > [  171.440750]  ksys_ioctl+0x89/0xb0
> >> > [  171.440766]  __x64_sys_ioctl+0x42/0x60
> >> > [  171.440788]  do_syscall_64+0x6e/0x2c0
> >> > [  171.440802]  entry_SYSCALL_64_after_hwframe+0x44/0xa9
> >> >
> >> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> >> > Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
> >> > ---
> >> >  drivers/gpu/drm/i915/i915_request.h | 7 +++++--
> >> >  1 file changed, 5 insertions(+), 2 deletions(-)
> >> >
> >> > diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
> >> > index d4bae16b4785..6020d5b2a3df 100644
> >> > --- a/drivers/gpu/drm/i915/i915_request.h
> >> > +++ b/drivers/gpu/drm/i915/i915_request.h
> >> > @@ -396,7 +396,9 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
> >> >  
> >> >  static inline u32 __hwsp_seqno(const struct i915_request *rq)
> >> >  {
> >> > -     return READ_ONCE(*rq->hwsp_seqno);
> >> > +     const u32 *hwsp = READ_ONCE(rq->hwsp_seqno);
> >> > +
> >> > +     return READ_ONCE(*hwsp);
> >> 
> >> This is good enough for the decouple. But good enough for the hardware
> >> might be a different thing.
> >> 
> >> I am paranoid enough to want an rmb() before the final
> >> read once.
> >
> > What? [That pointer is nothing to do with HW; it's a pointer to a
> > pointer to HW.]
> 
> But you do read the value through the pointer to hardware.
> 
> CPU:
> rmb(); READ_ONCE(*hwsp);
> 
> GPU:
> WRITE_ONCE(*hwsp, seqno), wmb(), interrupt -> cpu.
> 
> Thus on waking up, you would be guaranteed to see the
> value the GPU intended.

The bspec gives us the guarantee that we see the correct value as the
GPU takes care of the cacheline invalidation on writing. We haven't had
reason not to believe that yet, all our issues so far have been the
arrival of the interrupt vs update of the seqno. (Well the whole design
of the request is that we don't really care how long it takes, just that
once a request is complete it stays completed.)
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [Intel-gfx] [PATCH 2/5] drm/i915/gt: Mark up racy check of last list element
  2020-03-09 11:09 ` [Intel-gfx] [PATCH 2/5] drm/i915/gt: Mark up racy check of last list element Chris Wilson
@ 2020-03-09 16:09   ` Mika Kuoppala
  0 siblings, 0 replies; 14+ messages in thread
From: Mika Kuoppala @ 2020-03-09 16:09 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

Chris Wilson <chris@chris-wilson.co.uk> writes:

> [   25.025543] BUG: KCSAN: data-race in __i915_request_create [i915] / process_csb [i915]
> [   25.025561]
> [   25.025573] write (marked) to 0xffff8881e85c1620 of 8 bytes by task 696 on cpu 1:
> [   25.025789]  __i915_request_create+0x54b/0x5d0 [i915]
> [   25.026001]  i915_request_create+0xcc/0x150 [i915]
> [   25.026218]  i915_gem_do_execbuffer+0x2f70/0x4c20 [i915]
> [   25.026428]  i915_gem_execbuffer2_ioctl+0x2c3/0x580 [i915]
> [   25.026445]  drm_ioctl_kernel+0xe4/0x120
> [   25.026459]  drm_ioctl+0x297/0x4c7
> [   25.026472]  ksys_ioctl+0x89/0xb0
> [   25.026484]  __x64_sys_ioctl+0x42/0x60
> [   25.026497]  do_syscall_64+0x6e/0x2c0
> [   25.026510]  entry_SYSCALL_64_after_hwframe+0x44/0xa9
> [   25.026522]
> [   25.026532] read to 0xffff8881e85c1620 of 8 bytes by interrupt on cpu 2:
> [   25.026742]  process_csb+0x8d6/0x1070 [i915]
> [   25.026949]  execlists_submission_tasklet+0x30/0x170 [i915]
> [   25.026969]  tasklet_action_common.isra.0+0x42/0xa0
> [   25.026984]  __do_softirq+0xd7/0x2cd
> [   25.026997]  irq_exit+0xbe/0xe0
> [   25.027009]  do_IRQ+0x51/0x100
> [   25.027021]  ret_from_intr+0x0/0x1c
> [   25.027033]  poll_idle+0x3e/0x13b
> [   25.027047]  cpuidle_enter_state+0x189/0x5d0
> [   25.027060]  cpuidle_enter+0x50/0x90
> [   25.027074]  do_idle+0x1a1/0x1f0
> [   25.027086]  cpu_startup_entry+0x14/0x16
> [   25.027100]  start_secondary+0x120/0x180
> [   25.027116]  secondary_startup_64+0xa4/0xb0
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>

Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>

> ---
>  drivers/gpu/drm/i915/gt/intel_lrc.c | 2 +-
>  drivers/gpu/drm/i915/i915_utils.h   | 6 ++++++
>  2 files changed, 7 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
> index a1d268880cfe..6266ef2ae6a0 100644
> --- a/drivers/gpu/drm/i915/gt/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
> @@ -1316,7 +1316,7 @@ __execlists_schedule_out(struct i915_request *rq,
>  	 * If we have just completed this context, the engine may now be
>  	 * idle and we want to re-enter powersaving.
>  	 */
> -	if (list_is_last(&rq->link, &ce->timeline->requests) &&
> +	if (list_is_last_rcu(&rq->link, &ce->timeline->requests) &&
>  	    i915_request_completed(rq))
>  		intel_engine_add_retire(engine, ce->timeline);
>  
> diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
> index 26f3a4a50b40..03a73d2bd50d 100644
> --- a/drivers/gpu/drm/i915/i915_utils.h
> +++ b/drivers/gpu/drm/i915/i915_utils.h
> @@ -260,6 +260,12 @@ static inline void __list_del_many(struct list_head *head,
>  	WRITE_ONCE(head->next, first);
>  }
>  
> +static inline int list_is_last_rcu(const struct list_head *list,
> +				   const struct list_head *head)
> +{
> +	return READ_ONCE(list->next) == head;
> +}
> +
>  /*
>   * Wait until the work is finally complete, even if it tries to postpone
>   * by requeueing itself. Note, that if the worker never cancels itself,
> -- 
> 2.20.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [Intel-gfx] [PATCH 4/5] drm/i915/execlists: Mark up read of i915_request.fence.flags
  2020-03-09 11:09 ` [Intel-gfx] [PATCH 4/5] drm/i915/execlists: Mark up read of i915_request.fence.flags Chris Wilson
@ 2020-03-09 16:49   ` Mika Kuoppala
  0 siblings, 0 replies; 14+ messages in thread
From: Mika Kuoppala @ 2020-03-09 16:49 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

Chris Wilson <chris@chris-wilson.co.uk> writes:

> [  145.927961] BUG: KCSAN: data-race in can_merge_rq [i915] / signal_irq_work [i915]
> [  145.927980]
> [  145.927992] write (marked) to 0xffff8881e513fab0 of 8 bytes by interrupt on cpu 2:
> [  145.928250]  signal_irq_work+0x134/0x640 [i915]
> [  145.928268]  irq_work_run_list+0xd7/0x120
> [  145.928283]  irq_work_run+0x1d/0x50
> [  145.928300]  smp_irq_work_interrupt+0x21/0x30
> [  145.928328]  irq_work_interrupt+0xf/0x20
> [  145.928356]  _raw_spin_unlock_irqrestore+0x34/0x40
> [  145.928596]  execlists_submission_tasklet+0xde/0x170 [i915]
> [  145.928616]  tasklet_action_common.isra.0+0x42/0xa0
> [  145.928632]  __do_softirq+0xd7/0x2cd
> [  145.928646]  irq_exit+0xbe/0xe0
> [  145.928665]  do_IRQ+0x51/0x100
> [  145.928684]  ret_from_intr+0x0/0x1c
> [  145.928699]  schedule+0x0/0xb0
> [  145.928719]  worker_thread+0x194/0x670
> [  145.928743]  kthread+0x19a/0x1e0
> [  145.928765]  ret_from_fork+0x1f/0x30
> [  145.928784]
> [  145.928796] read to 0xffff8881e513fab0 of 8 bytes by task 738 on cpu 1:
> [  145.929046]  can_merge_rq+0xb1/0x100 [i915]
> [  145.929282]  __execlists_submission_tasklet+0x866/0x25a0 [i915]
> [  145.929518]  execlists_submit_request+0x2a4/0x2b0 [i915]
> [  145.929758]  submit_notify+0x8f/0xc0 [i915]
> [  145.929989]  __i915_sw_fence_complete+0x5d/0x3e0 [i915]
> [  145.930221]  i915_sw_fence_complete+0x58/0x80 [i915]
> [  145.930453]  i915_sw_fence_commit+0x16/0x20 [i915]
> [  145.930698]  __i915_request_queue+0x60/0x70 [i915]
> [  145.930935]  i915_gem_do_execbuffer+0x3997/0x4c20 [i915]
> [  145.931175]  i915_gem_execbuffer2_ioctl+0x2c3/0x580 [i915]
> [  145.931194]  drm_ioctl_kernel+0xe4/0x120
> [  145.931208]  drm_ioctl+0x297/0x4c7
> [  145.931222]  ksys_ioctl+0x89/0xb0
> [  145.931238]  __x64_sys_ioctl+0x42/0x60
> [  145.931260]  do_syscall_64+0x6e/0x2c0
> [  145.931275]  entry_SYSCALL_64_after_hwframe+0x44/0xa9
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>  drivers/gpu/drm/i915/gt/intel_lrc.c | 7 ++++++-
>  1 file changed, 6 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
> index a9d77b0e4e27..20dd3c2cfa2f 100644
> --- a/drivers/gpu/drm/i915/gt/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
> @@ -1597,6 +1597,11 @@ static bool can_merge_ctx(const struct intel_context *prev,
>  	return true;
>  }
>  
> +static unsigned long i915_request_flags(const struct i915_request *rq)
> +{
> +	return READ_ONCE(rq->fence.flags);

Bitmasks and atomicity through read/write once are a bad idea.
But the write side was done with atomic bitops.

The race between comparing two requests is still there, though.
The flags compared against, though, are well established
a priori to request queueing.

Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>

> +}
> +
>  static bool can_merge_rq(const struct i915_request *prev,
>  			 const struct i915_request *next)
>  {
> @@ -1614,7 +1619,7 @@ static bool can_merge_rq(const struct i915_request *prev,
>  	if (i915_request_completed(next))
>  		return true;
>  
> -	if (unlikely((prev->fence.flags ^ next->fence.flags) &
> +	if (unlikely((i915_request_flags(prev) ^ i915_request_flags(next)) &
>  		     (BIT(I915_FENCE_FLAG_NOPREEMPT) |
>  		      BIT(I915_FENCE_FLAG_SENTINEL))))
>  		return false;
> -- 
> 2.20.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [Intel-gfx] [PATCH 5/5] drm/i915/execlists: Mark up racy inspection of current i915_request priority
  2020-03-09 11:09 ` [Intel-gfx] [PATCH 5/5] drm/i915/execlists: Mark up racy inspection of current i915_request priority Chris Wilson
@ 2020-03-09 17:02   ` Mika Kuoppala
  0 siblings, 0 replies; 14+ messages in thread
From: Mika Kuoppala @ 2020-03-09 17:02 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

Chris Wilson <chris@chris-wilson.co.uk> writes:

> [  120.176548] BUG: KCSAN: data-race in __i915_schedule [i915] / effective_prio [i915]
> [  120.176566]
> [  120.176577] write to 0xffff8881e35e6540 of 4 bytes by task 730 on cpu 3:
> [  120.176792]  __i915_schedule+0x63e/0x920 [i915]
> [  120.177007]  __bump_priority+0x63/0x80 [i915]
> [  120.177220]  __i915_sched_node_add_dependency+0x258/0x300 [i915]
> [  120.177438]  i915_sched_node_add_dependency+0x50/0xa0 [i915]
> [  120.177654]  i915_request_await_dma_fence+0x1da/0x530 [i915]
> [  120.177867]  i915_request_await_object+0x2fe/0x470 [i915]
> [  120.178081]  i915_gem_do_execbuffer+0x45dc/0x4c20 [i915]
> [  120.178292]  i915_gem_execbuffer2_ioctl+0x2c3/0x580 [i915]
> [  120.178309]  drm_ioctl_kernel+0xe4/0x120
> [  120.178322]  drm_ioctl+0x297/0x4c7
> [  120.178335]  ksys_ioctl+0x89/0xb0
> [  120.178348]  __x64_sys_ioctl+0x42/0x60
> [  120.178361]  do_syscall_64+0x6e/0x2c0
> [  120.178375]  entry_SYSCALL_64_after_hwframe+0x44/0xa9
> [  120.178387]
> [  120.178397] read to 0xffff8881e35e6540 of 4 bytes by interrupt on cpu 2:
> [  120.178606]  effective_prio+0x25/0xc0 [i915]
> [  120.178812]  process_csb+0xe8b/0x10a0 [i915]
> [  120.179021]  execlists_submission_tasklet+0x30/0x170 [i915]
> [  120.179038]  tasklet_action_common.isra.0+0x42/0xa0
> [  120.179053]  __do_softirq+0xd7/0x2cd
> [  120.179066]  irq_exit+0xbe/0xe0
> [  120.179078]  do_IRQ+0x51/0x100
> [  120.179090]  ret_from_intr+0x0/0x1c
> [  120.179104]  cpuidle_enter_state+0x1b8/0x5d0
> [  120.179117]  cpuidle_enter+0x50/0x90
> [  120.179131]  do_idle+0x1a1/0x1f0
> [  120.179145]  cpu_startup_entry+0x14/0x16
> [  120.179158]  start_secondary+0x120/0x180
> [  120.179172]  secondary_startup_64+0xa4/0xb0
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>  drivers/gpu/drm/i915/gt/intel_lrc.c   | 2 +-
>  drivers/gpu/drm/i915/i915_scheduler.c | 2 +-
>  2 files changed, 2 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
> index 20dd3c2cfa2f..3eb7adc4e057 100644
> --- a/drivers/gpu/drm/i915/gt/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
> @@ -293,7 +293,7 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb)
>  
>  static inline int rq_prio(const struct i915_request *rq)
>  {
> -	return rq->sched.attr.priority;
> +	return READ_ONCE(rq->sched.attr.priority);
>  }
>  
>  static int effective_prio(const struct i915_request *rq)
> diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
> index 52f71e83e088..af51810dc78c 100644
> --- a/drivers/gpu/drm/i915/i915_scheduler.c
> +++ b/drivers/gpu/drm/i915/i915_scheduler.c
> @@ -321,7 +321,7 @@ static void __i915_schedule(struct i915_sched_node *node,
>  
>  		GEM_BUG_ON(node_to_request(node)->engine != engine);
>  
> -		node->attr.priority = prio;
> +		WRITE_ONCE(node->attr.priority, prio);

Ah the symmetry,

Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>

>  
>  		/*
>  		 * Once the request is ready, it will be placed into the
> -- 
> 2.20.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 14+ messages in thread

end of thread, other threads:[~2020-03-09 17:04 UTC | newest]

Thread overview: 14+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-03-09 11:09 [Intel-gfx] [PATCH 1/5] drm/i915: Mark up unlocked update of i915_request.hwsp_seqno Chris Wilson
2020-03-09 11:09 ` [Intel-gfx] [PATCH 2/5] drm/i915/gt: Mark up racy check of last list element Chris Wilson
2020-03-09 16:09   ` Mika Kuoppala
2020-03-09 11:09 ` [Intel-gfx] [PATCH 3/5] drm/i915/execlists: Track active elements during dequeue Chris Wilson
2020-03-09 11:09 ` [Intel-gfx] [PATCH 4/5] drm/i915/execlists: Mark up read of i915_request.fence.flags Chris Wilson
2020-03-09 16:49   ` Mika Kuoppala
2020-03-09 11:09 ` [Intel-gfx] [PATCH 5/5] drm/i915/execlists: Mark up racy inspection of current i915_request priority Chris Wilson
2020-03-09 17:02   ` Mika Kuoppala
2020-03-09 12:28 ` [Intel-gfx] ✓ Fi.CI.BAT: success for series starting with [1/5] drm/i915: Mark up unlocked update of i915_request.hwsp_seqno Patchwork
2020-03-09 14:03 ` [Intel-gfx] [PATCH 1/5] " Mika Kuoppala
2020-03-09 14:10   ` Chris Wilson
2020-03-09 15:21     ` Mika Kuoppala
2020-03-09 16:04       ` Chris Wilson
2020-03-09 15:57 ` [Intel-gfx] ✓ Fi.CI.IGT: success for series starting with [1/5] " Patchwork

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.