All of lore.kernel.org
 help / color / mirror / Atom feed
* [RFC] drm/i915: Check if we hold a wakeref during ioread32/iowrite32
@ 2016-02-23 14:47 Imre Deak
  2016-02-23 14:55 ` Chris Wilson
  2016-02-23 15:50 ` ✗ Fi.CI.BAT: failure for " Patchwork
  0 siblings, 2 replies; 6+ messages in thread
From: Imre Deak @ 2016-02-23 14:47 UTC (permalink / raw)
  To: intel-gfx

The device needs to be in D0 state whenever we call these functions, so
add the corresponding assert checks.

I had to move around some helpers due to dependencies added by nested
includes.

Signed-off-by: Imre Deak <imre.deak@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h            | 117 +++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |   4 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c        |   8 +-
 drivers/gpu/drm/i915/i915_irq.c            |   8 +-
 drivers/gpu/drm/i915/intel_bios.c          |   7 +-
 drivers/gpu/drm/i915/intel_drv.h           |  75 ------------------
 drivers/gpu/drm/i915/intel_lrc.c           |   3 +-
 drivers/gpu/drm/i915/intel_lrc.h           |  17 -----
 drivers/gpu/drm/i915/intel_overlay.c       |  44 ++++++-----
 drivers/gpu/drm/i915/intel_ringbuffer.c    |   3 +-
 drivers/gpu/drm/i915/intel_ringbuffer.h    |  13 +---
 11 files changed, 161 insertions(+), 138 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9e76bfc..c794664 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3509,6 +3509,123 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
 #define I915_READ_NOTRACE(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
 #define I915_WRITE_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
 
+static inline void
+assert_rpm_device_not_suspended(struct drm_i915_private *dev_priv)
+{
+	WARN_ONCE(dev_priv->pm.suspended,
+		  "Device suspended during HW access\n");
+}
+
+static inline void
+assert_rpm_wakelock_held(struct drm_i915_private *dev_priv)
+{
+	assert_rpm_device_not_suspended(dev_priv);
+	/* FIXME: Needs to be converted back to WARN_ONCE, but currently causes
+	 * too much noise. */
+	if (!atomic_read(&dev_priv->pm.wakeref_count))
+		DRM_DEBUG_DRIVER("RPM wakelock ref not held during HW access");
+}
+
+static inline int
+assert_rpm_atomic_begin(struct drm_i915_private *dev_priv)
+{
+	int seq = atomic_read(&dev_priv->pm.atomic_seq);
+
+	assert_rpm_wakelock_held(dev_priv);
+
+	return seq;
+}
+
+static inline void
+assert_rpm_atomic_end(struct drm_i915_private *dev_priv, int begin_seq)
+{
+	WARN_ONCE(atomic_read(&dev_priv->pm.atomic_seq) != begin_seq,
+		  "HW access outside of RPM atomic section\n");
+}
+
+/**
+ * disable_rpm_wakeref_asserts - disable the RPM assert checks
+ * @dev_priv: i915 device instance
+ *
+ * This function disables asserts that check if we hold an RPM wakelock
+ * reference, while keeping the device-not-suspended checks still enabled.
+ * It's meant to be used only in special circumstances where our rule about
+ * the wakelock refcount wrt. the device power state doesn't hold. According
+ * to this rule at any point where we access the HW or want to keep the HW in
+ * an active state we must hold an RPM wakelock reference acquired via one of
+ * the intel_runtime_pm_get() helpers. Currently there are a few special spots
+ * where this rule doesn't hold: the IRQ and suspend/resume handlers, the
+ * forcewake release timer, and the GPU RPS and hangcheck works. All other
+ * users should avoid using this function.
+ *
+ * Any calls to this function must have a symmetric call to
+ * enable_rpm_wakeref_asserts().
+ */
+static inline void
+disable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
+{
+	atomic_inc(&dev_priv->pm.wakeref_count);
+}
+
+/**
+ * enable_rpm_wakeref_asserts - re-enable the RPM assert checks
+ * @dev_priv: i915 device instance
+ *
+ * This function re-enables the RPM assert checks after disabling them with
+ * disable_rpm_wakeref_asserts. It's meant to be used only in special
+ * circumstances otherwise its use should be avoided.
+ *
+ * Any calls to this function must have a symmetric call to
+ * disable_rpm_wakeref_asserts().
+ */
+static inline void
+enable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
+{
+	atomic_dec(&dev_priv->pm.wakeref_count);
+}
+
+
+#define I915_IOWRITE32(val, addr) ({			\
+	assert_rpm_wakelock_held(dev_priv);		\
+	iowrite32(val, addr); })
+
+#define I915_IOREAD32(addr) ({				\
+	assert_rpm_wakelock_held(dev_priv);		\
+	ioread32(addr); })
+
+static inline void intel_ring_emit(struct intel_engine_cs *ring,
+				   u32 data)
+{
+	struct drm_i915_private *dev_priv = to_i915(ring->dev);
+	struct intel_ringbuffer *ringbuf = ring->buffer;
+
+	I915_IOWRITE32(data, ringbuf->virtual_start + ringbuf->tail);
+	ringbuf->tail += 4;
+}
+static inline void intel_ring_emit_reg(struct intel_engine_cs *ring,
+				       i915_reg_t reg)
+{
+	intel_ring_emit(ring, i915_mmio_reg_offset(reg));
+}
+/**
+ * intel_logical_ring_emit() - write a DWORD to the ringbuffer.
+ * @ringbuf: Ringbuffer to write to.
+ * @data: DWORD to write.
+ */
+static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
+					   u32 data)
+{
+	struct drm_i915_private *dev_priv = to_i915(ringbuf->ring->dev);
+
+	I915_IOWRITE32(data, ringbuf->virtual_start + ringbuf->tail);
+	ringbuf->tail += 4;
+}
+static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
+					       i915_reg_t reg)
+{
+	intel_logical_ring_emit(ringbuf, i915_mmio_reg_offset(reg));
+}
+
 /* Be very careful with read/write 64-bit values. On 32-bit machines, they
  * will be implemented using 2 32-bit writes in an arbitrary order with
  * an arbitrary delay between them. This can cause the hardware to
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 8fd00d2..1ad589c 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -332,7 +332,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
 	offset += reloc->offset;
 	reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
 					      offset & PAGE_MASK);
-	iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
+	I915_IOWRITE32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
 
 	if (INTEL_INFO(dev)->gen >= 8) {
 		offset += sizeof(uint32_t);
@@ -344,7 +344,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
 							 offset);
 		}
 
-		iowrite32(upper_32_bits(delta),
+		I915_IOWRITE32(upper_32_bits(delta),
 			  reloc_page + offset_in_page(offset));
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 9127f8f..66b03dd 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2357,8 +2357,8 @@ static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
 #ifdef writeq
 	writeq(pte, addr);
 #else
-	iowrite32((u32)pte, addr);
-	iowrite32(pte >> 32, addr + 4);
+	I915_IOWRITE32((u32)pte, addr);
+	I915_IOWRITE32(pte >> 32, addr + 4);
 #endif
 }
 
@@ -2457,7 +2457,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 
 	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
 		addr = sg_page_iter_dma_address(&sg_iter);
-		iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]);
+		I915_IOWRITE32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]);
 		i++;
 	}
 
@@ -2538,7 +2538,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
 				     I915_CACHE_LLC, use_scratch, 0);
 
 	for (i = 0; i < num_entries; i++)
-		iowrite32(scratch_pte, &gtt_base[i]);
+		I915_IOWRITE32(scratch_pte, &gtt_base[i]);
 	readl(gtt_base);
 
 	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index d1a46ef..a188f01 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2908,7 +2908,7 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
 		head &= ring->buffer->size - 1;
 
 		/* This here seems to blow up */
-		cmd = ioread32(ring->buffer->virtual_start + head);
+		cmd = I915_IOREAD32(ring->buffer->virtual_start + head);
 		if (cmd == ipehr)
 			break;
 
@@ -2918,11 +2918,11 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
 	if (!i)
 		return NULL;
 
-	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
+	*seqno = I915_IOREAD32(ring->buffer->virtual_start + head + 4) + 1;
 	if (INTEL_INFO(ring->dev)->gen >= 8) {
-		offset = ioread32(ring->buffer->virtual_start + head + 12);
+		offset = I915_IOREAD32(ring->buffer->virtual_start + head + 12);
 		offset <<= 32;
-		offset = ioread32(ring->buffer->virtual_start + head + 8);
+		offset = I915_IOREAD32(ring->buffer->virtual_start + head + 8);
 	}
 	return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
 }
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index bf62a19..652bc40 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1343,7 +1343,8 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size)
 	return vbt;
 }
 
-static const struct vbt_header *find_vbt(void __iomem *bios, size_t size)
+static const struct vbt_header *find_vbt(struct drm_i915_private *dev_priv,
+					 void __iomem *bios, size_t size)
 {
 	size_t i;
 
@@ -1351,7 +1352,7 @@ static const struct vbt_header *find_vbt(void __iomem *bios, size_t size)
 	for (i = 0; i + 4 < size; i++) {
 		void *vbt;
 
-		if (ioread32(bios + i) != *((const u32 *) "$VBT"))
+		if (I915_IOREAD32(bios + i) != *((const u32 *) "$VBT"))
 			continue;
 
 		/*
@@ -1397,7 +1398,7 @@ intel_bios_init(struct drm_i915_private *dev_priv)
 		if (!bios)
 			return -1;
 
-		vbt = find_vbt(bios, size);
+		vbt = find_vbt(dev_priv, bios, size);
 		if (!vbt) {
 			pci_unmap_rom(pdev, bios);
 			return -1;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 4852049..35a0ac8 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1441,81 +1441,6 @@ bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
 void intel_display_power_put(struct drm_i915_private *dev_priv,
 			     enum intel_display_power_domain domain);
 
-static inline void
-assert_rpm_device_not_suspended(struct drm_i915_private *dev_priv)
-{
-	WARN_ONCE(dev_priv->pm.suspended,
-		  "Device suspended during HW access\n");
-}
-
-static inline void
-assert_rpm_wakelock_held(struct drm_i915_private *dev_priv)
-{
-	assert_rpm_device_not_suspended(dev_priv);
-	/* FIXME: Needs to be converted back to WARN_ONCE, but currently causes
-	 * too much noise. */
-	if (!atomic_read(&dev_priv->pm.wakeref_count))
-		DRM_DEBUG_DRIVER("RPM wakelock ref not held during HW access");
-}
-
-static inline int
-assert_rpm_atomic_begin(struct drm_i915_private *dev_priv)
-{
-	int seq = atomic_read(&dev_priv->pm.atomic_seq);
-
-	assert_rpm_wakelock_held(dev_priv);
-
-	return seq;
-}
-
-static inline void
-assert_rpm_atomic_end(struct drm_i915_private *dev_priv, int begin_seq)
-{
-	WARN_ONCE(atomic_read(&dev_priv->pm.atomic_seq) != begin_seq,
-		  "HW access outside of RPM atomic section\n");
-}
-
-/**
- * disable_rpm_wakeref_asserts - disable the RPM assert checks
- * @dev_priv: i915 device instance
- *
- * This function disable asserts that check if we hold an RPM wakelock
- * reference, while keeping the device-not-suspended checks still enabled.
- * It's meant to be used only in special circumstances where our rule about
- * the wakelock refcount wrt. the device power state doesn't hold. According
- * to this rule at any point where we access the HW or want to keep the HW in
- * an active state we must hold an RPM wakelock reference acquired via one of
- * the intel_runtime_pm_get() helpers. Currently there are a few special spots
- * where this rule doesn't hold: the IRQ and suspend/resume handlers, the
- * forcewake release timer, and the GPU RPS and hangcheck works. All other
- * users should avoid using this function.
- *
- * Any calls to this function must have a symmetric call to
- * enable_rpm_wakeref_asserts().
- */
-static inline void
-disable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
-{
-	atomic_inc(&dev_priv->pm.wakeref_count);
-}
-
-/**
- * enable_rpm_wakeref_asserts - re-enable the RPM assert checks
- * @dev_priv: i915 device instance
- *
- * This function re-enables the RPM assert checks after disabling them with
- * disable_rpm_wakeref_asserts. It's meant to be used only in special
- * circumstances otherwise its use should be avoided.
- *
- * Any calls to this function must have a symmetric call to
- * disable_rpm_wakeref_asserts().
- */
-static inline void
-enable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
-{
-	atomic_dec(&dev_priv->pm.wakeref_count);
-}
-
 /* TODO: convert users of these to rely instead on proper RPM refcounting */
 #define DISABLE_RPM_WAKEREF_ASSERTS(dev_priv)	\
 	disable_rpm_wakeref_asserts(dev_priv)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 3a03646..ffb8e9e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -804,13 +804,14 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 
 static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
 {
+	struct drm_i915_private *dev_priv = to_i915(ringbuf->obj->base.dev);
 	uint32_t __iomem *virt;
 	int rem = ringbuf->size - ringbuf->tail;
 
 	virt = ringbuf->virtual_start + ringbuf->tail;
 	rem /= 4;
 	while (rem--)
-		iowrite32(MI_NOOP, virt++);
+		I915_IOWRITE32(MI_NOOP, virt++);
 
 	ringbuf->tail = 0;
 	intel_ring_update_space(ringbuf);
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index e6cda3e..1bdc260 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -73,23 +73,6 @@ static inline void intel_logical_ring_advance(struct intel_ringbuffer *ringbuf)
 {
 	ringbuf->tail &= ringbuf->size - 1;
 }
-/**
- * intel_logical_ring_emit() - write a DWORD to the ringbuffer.
- * @ringbuf: Ringbuffer to write to.
- * @data: DWORD to write.
- */
-static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
-					   u32 data)
-{
-	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
-	ringbuf->tail += 4;
-}
-static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
-					       i915_reg_t reg)
-{
-	intel_logical_ring_emit(ringbuf, i915_mmio_reg_offset(reg));
-}
-
 /* Logical Ring Contexts */
 
 /* One extra page is added before LRC for GuC as shared data */
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 9168413..9fcd620 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -594,6 +594,7 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
 				   struct overlay_registers __iomem *regs,
 				   struct put_image_params *params)
 {
+	struct drm_i915_private *dev_priv = to_i915(overlay->dev);
 	/* fixed point with a 12 bit shift */
 	u32 xscale, yscale, xscale_UV, yscale_UV;
 #define FP_SHIFT 12
@@ -630,17 +631,17 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
 	overlay->old_xscale = xscale;
 	overlay->old_yscale = yscale;
 
-	iowrite32(((yscale & FRACT_MASK) << 20) |
+	I915_IOWRITE32(((yscale & FRACT_MASK) << 20) |
 		  ((xscale >> FP_SHIFT)  << 16) |
 		  ((xscale & FRACT_MASK) << 3),
 		 &regs->YRGBSCALE);
 
-	iowrite32(((yscale_UV & FRACT_MASK) << 20) |
+	I915_IOWRITE32(((yscale_UV & FRACT_MASK) << 20) |
 		  ((xscale_UV >> FP_SHIFT)  << 16) |
 		  ((xscale_UV & FRACT_MASK) << 3),
 		 &regs->UVSCALE);
 
-	iowrite32((((yscale    >> FP_SHIFT) << 16) |
+	I915_IOWRITE32((((yscale    >> FP_SHIFT) << 16) |
 		   ((yscale_UV >> FP_SHIFT) << 0)),
 		 &regs->UVSCALEV);
 
@@ -653,6 +654,7 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
 static void update_colorkey(struct intel_overlay *overlay,
 			    struct overlay_registers __iomem *regs)
 {
+	struct drm_i915_private *dev_priv = to_i915(overlay->dev);
 	u32 key = overlay->color_key;
 	u32 flags;
 
@@ -682,8 +684,8 @@ static void update_colorkey(struct intel_overlay *overlay,
 		break;
 	}
 
-	iowrite32(key, &regs->DCLRKV);
-	iowrite32(flags, &regs->DCLRKM);
+	I915_IOWRITE32(key, &regs->DCLRKV);
+	I915_IOWRITE32(flags, &regs->DCLRKM);
 }
 
 static u32 overlay_cmd_reg(struct put_image_params *params)
@@ -735,6 +737,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 				      struct drm_i915_gem_object *new_bo,
 				      struct put_image_params *params)
 {
+	struct drm_i915_private *dev_priv = to_i915(overlay->dev);
 	int ret, tmp_width;
 	struct overlay_registers __iomem *regs;
 	bool scale_changed = false;
@@ -770,7 +773,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 			oconfig |= OCONF_CSC_MODE_BT709;
 		oconfig |= pipe == 0 ?
 			OCONF_PIPE_A : OCONF_PIPE_B;
-		iowrite32(oconfig, &regs->OCONFIG);
+		I915_IOWRITE32(oconfig, &regs->OCONFIG);
 		intel_overlay_unmap_regs(overlay, regs);
 
 		ret = intel_overlay_on(overlay);
@@ -784,8 +787,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 		goto out_unpin;
 	}
 
-	iowrite32((params->dst_y << 16) | params->dst_x, &regs->DWINPOS);
-	iowrite32((params->dst_h << 16) | params->dst_w, &regs->DWINSZ);
+	I915_IOWRITE32((params->dst_y << 16) | params->dst_x, &regs->DWINPOS);
+	I915_IOWRITE32((params->dst_h << 16) | params->dst_w, &regs->DWINSZ);
 
 	if (params->format & I915_OVERLAY_YUV_PACKED)
 		tmp_width = packed_width_bytes(params->format, params->src_w);
@@ -795,7 +798,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	swidth = params->src_w;
 	swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
 	sheight = params->src_h;
-	iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y);
+	I915_IOWRITE32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y);
 	ostride = params->stride_Y;
 
 	if (params->format & I915_OVERLAY_YUV_PLANAR) {
@@ -809,21 +812,21 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 				      params->src_w/uv_hscale);
 		swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
 		sheight |= (params->src_h/uv_vscale) << 16;
-		iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_U, &regs->OBUF_0U);
-		iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_V, &regs->OBUF_0V);
+		I915_IOWRITE32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_U, &regs->OBUF_0U);
+		I915_IOWRITE32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_V, &regs->OBUF_0V);
 		ostride |= params->stride_UV << 16;
 	}
 
-	iowrite32(swidth, &regs->SWIDTH);
-	iowrite32(swidthsw, &regs->SWIDTHSW);
-	iowrite32(sheight, &regs->SHEIGHT);
-	iowrite32(ostride, &regs->OSTRIDE);
+	I915_IOWRITE32(swidth, &regs->SWIDTH);
+	I915_IOWRITE32(swidthsw, &regs->SWIDTHSW);
+	I915_IOWRITE32(sheight, &regs->SHEIGHT);
+	I915_IOWRITE32(ostride, &regs->OSTRIDE);
 
 	scale_changed = update_scaling_factors(overlay, regs, params);
 
 	update_colorkey(overlay, regs);
 
-	iowrite32(overlay_cmd_reg(params), &regs->OCMD);
+	I915_IOWRITE32(overlay_cmd_reg(params), &regs->OCMD);
 
 	intel_overlay_unmap_regs(overlay, regs);
 
@@ -849,6 +852,7 @@ out_unpin:
 
 int intel_overlay_switch_off(struct intel_overlay *overlay)
 {
+	struct drm_i915_private *dev_priv = to_i915(overlay->dev);
 	struct overlay_registers __iomem *regs;
 	struct drm_device *dev = overlay->dev;
 	int ret;
@@ -868,7 +872,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
 		return ret;
 
 	regs = intel_overlay_map_regs(overlay);
-	iowrite32(0, &regs->OCMD);
+	I915_IOWRITE32(0, &regs->OCMD);
 	intel_overlay_unmap_regs(overlay, regs);
 
 	ret = intel_overlay_off(overlay);
@@ -1232,9 +1236,11 @@ out_free:
 static void update_reg_attrs(struct intel_overlay *overlay,
 			     struct overlay_registers __iomem *regs)
 {
-	iowrite32((overlay->contrast << 18) | (overlay->brightness & 0xff),
+	struct drm_i915_private *dev_priv = to_i915(overlay->dev);
+
+	I915_IOWRITE32((overlay->contrast << 18) | (overlay->brightness & 0xff),
 		  &regs->OCLRC0);
-	iowrite32(overlay->saturation, &regs->OCLRC1);
+	I915_IOWRITE32(overlay->saturation, &regs->OCLRC1);
 }
 
 static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 45ce45a..12dc8e0 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2331,13 +2331,14 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
 
 static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
 {
+	struct drm_i915_private *dev_priv = to_i915(ringbuf->ring->dev);
 	uint32_t __iomem *virt;
 	int rem = ringbuf->size - ringbuf->tail;
 
 	virt = ringbuf->virtual_start + ringbuf->tail;
 	rem /= 4;
 	while (rem--)
-		iowrite32(MI_NOOP, virt++);
+		I915_IOWRITE32(MI_NOOP, virt++);
 
 	ringbuf->tail = 0;
 	intel_ring_update_space(ringbuf);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 566b0ae..cae1aac 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -446,18 +446,7 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
 
 int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
 int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
-static inline void intel_ring_emit(struct intel_engine_cs *ring,
-				   u32 data)
-{
-	struct intel_ringbuffer *ringbuf = ring->buffer;
-	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
-	ringbuf->tail += 4;
-}
-static inline void intel_ring_emit_reg(struct intel_engine_cs *ring,
-				       i915_reg_t reg)
-{
-	intel_ring_emit(ring, i915_mmio_reg_offset(reg));
-}
+
 static inline void intel_ring_advance(struct intel_engine_cs *ring)
 {
 	struct intel_ringbuffer *ringbuf = ring->buffer;
-- 
2.5.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 6+ messages in thread

* Re: [RFC] drm/i915: Check if we hold a wakeref during ioread32/iowrite32
  2016-02-23 14:47 [RFC] drm/i915: Check if we hold a wakeref during ioread32/iowrite32 Imre Deak
@ 2016-02-23 14:55 ` Chris Wilson
  2016-02-23 15:09   ` Imre Deak
  2016-02-23 15:50 ` ✗ Fi.CI.BAT: failure for " Patchwork
  1 sibling, 1 reply; 6+ messages in thread
From: Chris Wilson @ 2016-02-23 14:55 UTC (permalink / raw)
  To: Imre Deak; +Cc: intel-gfx

On Tue, Feb 23, 2016 at 04:47:17PM +0200, Imre Deak wrote:
> The device needs to be in D0 state whenever we call these functions, so
> add the corresponding assert checks.

No. In quite a few of those cases we are calling iowrite to non-device
memory, even normal pages.

How's the separation of struct_mutex from rpm going so that we can forgo
adding assertions and use explicit power management instead?
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [RFC] drm/i915: Check if we hold a wakeref during ioread32/iowrite32
  2016-02-23 14:55 ` Chris Wilson
@ 2016-02-23 15:09   ` Imre Deak
  2016-02-23 16:54     ` Chris Wilson
  0 siblings, 1 reply; 6+ messages in thread
From: Imre Deak @ 2016-02-23 15:09 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

On ti, 2016-02-23 at 14:55 +0000, Chris Wilson wrote:
> On Tue, Feb 23, 2016 at 04:47:17PM +0200, Imre Deak wrote:
> > The device needs to be in D0 state whenever we call these
> > functions, so
> > add the corresponding assert checks.
> 
> No. In quite a few of those cases we are calling iowrite to non-
> device
> memory, even normal pages.

Hm right didn't think about that. I guess the only case we care about
then are accesses through the GTT.

> How's the separation of struct_mutex from rpm going so that we can
> forgo
> adding assertions and use explicit power management instead?

It's planned to be done, but no one is working on that yet. This is
something we could still have regardless, similarly to other helpers
accessing the device.

> -Chris
> 
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 6+ messages in thread

* ✗ Fi.CI.BAT: failure for drm/i915: Check if we hold a wakeref during ioread32/iowrite32
  2016-02-23 14:47 [RFC] drm/i915: Check if we hold a wakeref during ioread32/iowrite32 Imre Deak
  2016-02-23 14:55 ` Chris Wilson
@ 2016-02-23 15:50 ` Patchwork
  1 sibling, 0 replies; 6+ messages in thread
From: Patchwork @ 2016-02-23 15:50 UTC (permalink / raw)
  To: Imre Deak; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: Check if we hold a wakeref during ioread32/iowrite32
URL   : https://patchwork.freedesktop.org/series/3728/
State : failure

== Summary ==

Series 3728v1 drm/i915: Check if we hold a wakeref during ioread32/iowrite32
http://patchwork.freedesktop.org/api/1.0/series/3728/revisions/1/mbox/

Test gem_cs_prefetch:
        Subgroup basic-default:
                incomplete -> PASS       (ilk-hp8440p)
Test gem_sync:
        Subgroup basic-blt:
                pass       -> INCOMPLETE (ivb-t430s)
Test kms_force_connector_basic:
        Subgroup force-edid:
                skip       -> PASS       (snb-x220t)
                pass       -> SKIP       (ivb-t430s)
        Subgroup force-load-detect:
                dmesg-fail -> FAIL       (snb-x220t)
Test kms_pipe_crc_basic:
        Subgroup nonblocking-crc-pipe-c:
                pass       -> TIMEOUT    (ivb-t430s)
        Subgroup suspend-read-crc-pipe-a:
                pass       -> DMESG-WARN (skl-i5k-2) UNSTABLE
        Subgroup suspend-read-crc-pipe-b:
                dmesg-warn -> PASS       (skl-i5k-2) UNSTABLE
Test pm_rpm:
        Subgroup basic-rte:
                dmesg-warn -> PASS       (byt-nuc) UNSTABLE

bdw-nuci7        total:165  pass:154  dwarn:0   dfail:0   fail:0   skip:11 
bdw-ultra        total:168  pass:154  dwarn:0   dfail:0   fail:0   skip:14 
bsw-nuc-2        total:168  pass:137  dwarn:0   dfail:0   fail:1   skip:30 
byt-nuc          total:168  pass:143  dwarn:0   dfail:0   fail:0   skip:25 
hsw-gt2          total:168  pass:157  dwarn:0   dfail:1   fail:0   skip:10 
ilk-hp8440p      total:168  pass:118  dwarn:0   dfail:0   fail:1   skip:49 
ivb-t430s        total:63   pass:55   dwarn:0   dfail:0   fail:1   skip:5  
skl-i5k-2        total:168  pass:151  dwarn:1   dfail:0   fail:0   skip:16 
snb-dellxps      total:168  pass:145  dwarn:0   dfail:1   fail:0   skip:22 
snb-x220t        total:168  pass:145  dwarn:0   dfail:0   fail:2   skip:21 

Results at /archive/results/CI_IGT_test/Patchwork_1466/

08fc1b101049694778bff7559e1d05250d2e7072 drm-intel-nightly: 2016y-02m-22d-17h-30m-27s UTC integration manifest
c99c6cfec524f93a828a8b366929054e9c94164d drm/i915: Check if we hold a wakeref during ioread32/iowrite32

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [RFC] drm/i915: Check if we hold a wakeref during ioread32/iowrite32
  2016-02-23 15:09   ` Imre Deak
@ 2016-02-23 16:54     ` Chris Wilson
  2016-02-26 15:42       ` Imre Deak
  0 siblings, 1 reply; 6+ messages in thread
From: Chris Wilson @ 2016-02-23 16:54 UTC (permalink / raw)
  To: Imre Deak; +Cc: intel-gfx

On Tue, Feb 23, 2016 at 05:09:29PM +0200, Imre Deak wrote:
> On ti, 2016-02-23 at 14:55 +0000, Chris Wilson wrote:
> > On Tue, Feb 23, 2016 at 04:47:17PM +0200, Imre Deak wrote:
> > > The device needs to be in D0 state whenever we call these
> > > functions, so
> > > add the corresponding assert checks.
> > 
> > No. In quite a few of those cases we are calling iowrite to non-
> > device
> > memory, even normal pages.
> 
> Hm right didn't think about that. I guess the only case we care about
> then are accesses through the GTT.
> 
> > How's the separation of struct_mutex from rpm going so that we can
> > forgo
> > adding assertions and use explicit power management instead?
> 
> It's planned to be done, but no one is working on that yet. This is
> something we could still have regardless, similarly to other helpers
> accessing the device.

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 31b600d31158..b8687b6a6acb 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1430,24 +1430,6 @@ static int intel_runtime_suspend(struct device *device)
 
        DRM_DEBUG_KMS("Suspending device\n");
 
-       /*
-        * We could deadlock here in case another thread holding struct_mutex
-        * calls RPM suspend concurrently, since the RPM suspend will wait
-        * first for this RPM suspend to finish. In this case the concurrent
-        * RPM resume will be followed by its RPM suspend counterpart. Still
-        * for consistency return -EAGAIN, which will reschedule this suspend.
-        */
-       if (!mutex_trylock(&dev->struct_mutex)) {
-               DRM_DEBUG_KMS("device lock contention, deffering suspend\n");
-               /*
-                * Bump the expiration timestamp, otherwise the suspend won't
-                * be rescheduled.
-                */
-               pm_runtime_mark_last_busy(device);
-
-               return -EAGAIN;
-       }
-
        disable_rpm_wakeref_asserts(dev_priv);
 
        /*
@@ -1455,7 +1437,6 @@ static int intel_runtime_suspend(struct device *device)
         * an RPM reference.
         */
        i915_gem_release_all_mmaps(dev_priv);
-       mutex_unlock(&dev->struct_mutex);
 
        intel_guc_suspend(dev);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 79706621e6e4..4f6127466822 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1670,9 +1670,13 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
        /* Serialisation between user GTT access and our code depends upon
         * revoking the CPU's PTE whilst the mutex is held. The next user
         * pagefault then has to wait until we release the mutex.
+        *
+        * Note that RPM complicates this somewhat by adding an additional
+        * requirement that operations to the GGTT be made holding the RPM
+        * wakeref. This in turn allows us to release the mmap from within
+        * the RPM suspend code ignoring the struct_mutex serialisation in
+        * lieu of the RPM barriers.
         */
-       lockdep_assert_held(&obj->base.dev->struct_mutex);
-
        if (!obj->fault_mappable)
                return;
 
@@ -1685,11 +1689,21 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
        obj->fault_mappable = false;
 }
 
+static void assert_rpm_release_all_mmaps(struct drm_i915_private *dev_priv)
+{
+       assert_rpm_wakelock_held(dev_priv);
+}
+
 void
 i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
 {
        struct drm_i915_gem_object *obj;
 
+       /* This should only be called by RPM as we require the bound_list
+        * to be protected by the RPM barriers and not struct_mutex.
+        * We check that we are holding the wakeref whenever we manipulate
+        * the dev_priv->mm.bound_list (via assert_rpm_release_all_mmaps).
+        */
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
                i915_gem_release_mmap(obj);
 }
@@ -2224,9 +2238,11 @@ i915_gem_object_retire__read(struct i915_gem_active *active,
         * so that we don't steal from recently used but inactive objects
         * (unless we are forced to ofc!)
         */
-       if (obj->bind_count)
+       if (obj->bind_count) {
+               assert_rpm_release_all_mmaps(request->i915);
                list_move_tail(&obj->global_list,
                               &request->i915->mm.bound_list);
+       }
 
        if (i915_gem_object_has_active_reference(obj)) {
                i915_gem_object_unset_active_reference(obj);
@@ -2751,9 +2767,11 @@ int i915_vma_unbind(struct i915_vma *vma)
 
        /* Since the unbound list is global, only move to that list if
         * no more VMAs exist. */
-       if (--obj->bind_count == 0)
+       if (--obj->bind_count == 0) {
+               assert_rpm_release_all_mmaps(to_i915(obj->base.dev));
                list_move_tail(&obj->global_list,
                               &to_i915(obj->base.dev)->mm.unbound_list);
+       }
 
        /* And finally now the object is completely decoupled from this vma,
         * we can drop its hold on the backing storage and allow it to be
@@ -2913,6 +2931,7 @@ i915_vma_insert(struct i915_vma *vma,
                goto err_remove_node;
        }
 
+       assert_rpm_release_all_mmaps(dev_priv);
        list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
        list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
        obj->bind_count++;


-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 6+ messages in thread

* Re: [RFC] drm/i915: Check if we hold a wakeref during ioread32/iowrite32
  2016-02-23 16:54     ` Chris Wilson
@ 2016-02-26 15:42       ` Imre Deak
  0 siblings, 0 replies; 6+ messages in thread
From: Imre Deak @ 2016-02-26 15:42 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

On ti, 2016-02-23 at 16:54 +0000, Chris Wilson wrote:
> On Tue, Feb 23, 2016 at 05:09:29PM +0200, Imre Deak wrote:
> > On ti, 2016-02-23 at 14:55 +0000, Chris Wilson wrote:
> > > On Tue, Feb 23, 2016 at 04:47:17PM +0200, Imre Deak wrote:
> > [...]
> > > How's the separation of struct_mutex from rpm going so that we
> > > can
> > > forgo
> > > adding assertions and use explicit power management instead?
> > 
> > It's planned to be done, but no one is working on that yet. This is
> > something we could still have regardless, similarly to other
> > helpers
> > accessing the device.
> 
> diff --git a/drivers/gpu/drm/i915/i915_drv.c
> b/drivers/gpu/drm/i915/i915_drv.c
> index 31b600d31158..b8687b6a6acb 100644
> --- a/drivers/gpu/drm/i915/i915_drv.c
> +++ b/drivers/gpu/drm/i915/i915_drv.c
> @@ -1430,24 +1430,6 @@ static int intel_runtime_suspend(struct device
> *device)
>  
>         DRM_DEBUG_KMS("Suspending device\n");
>  
> -       /*
> -        * We could deadlock here in case another thread holding
> struct_mutex
> -        * calls RPM suspend concurrently, since the RPM suspend will
> wait
> -        * first for this RPM suspend to finish. In this case the
> concurrent
> -        * RPM resume will be followed by its RPM suspend
> counterpart. Still
> -        * for consistency return -EAGAIN, which will reschedule this
> suspend.
> -        */
> -       if (!mutex_trylock(&dev->struct_mutex)) {
> -               DRM_DEBUG_KMS("device lock contention, deffering
> suspend\n");
> -               /*
> -                * Bump the expiration timestamp, otherwise the
> suspend won't
> -                * be rescheduled.
> -                */
> -               pm_runtime_mark_last_busy(device);
> -
> -               return -EAGAIN;
> -       }
> -
>         disable_rpm_wakeref_asserts(dev_priv);
>  
>         /*
> @@ -1455,7 +1437,6 @@ static int intel_runtime_suspend(struct device
> *device)
>          * an RPM reference.
>          */
>         i915_gem_release_all_mmaps(dev_priv);
> -       mutex_unlock(&dev->struct_mutex);
>  
>         intel_guc_suspend(dev);
>  
> diff --git a/drivers/gpu/drm/i915/i915_gem.c
> b/drivers/gpu/drm/i915/i915_gem.c
> index 79706621e6e4..4f6127466822 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -1670,9 +1670,13 @@ i915_gem_release_mmap(struct
> drm_i915_gem_object *obj)
>         /* Serialisation between user GTT access and our code depends
> upon
>          * revoking the CPU's PTE whilst the mutex is held. The next
> user
>          * pagefault then has to wait until we release the mutex.
> +        *
> +        * Note that RPM complicates somewhat by adding an additional
> +        * requirement that operations to the GGTT be made holding
> the RPM
> +        * wakeref. This in turn allows us to release the mmap from
> within
> +        * the RPM suspend code ignoring the struct_mutex
> serialisation in
> +        * lieu of the RPM barriers.
>          */
> -       lockdep_assert_held(&obj->base.dev->struct_mutex);
> -
>         if (!obj->fault_mappable)
>                 return;
>  
> @@ -1685,11 +1689,21 @@ i915_gem_release_mmap(struct
> drm_i915_gem_object *obj)
>         obj->fault_mappable = false;
>  }
>  
> +static void assert_rpm_release_all_mmaps(struct drm_i915_private
> *dev_priv)
> +{
> +       assert_rpm_wakelock_held(dev_priv);
> +}
> +
>  void
>  i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
>  {
>         struct drm_i915_gem_object *obj;
>  
> +       /* This should only be called by RPM as we require the
> bound_list
> +        * to be protected by the RPM barriers and not struct_mutex.
> +        * We check that we are holding the wakeref whenever we
> manipulate
> +        * the dev_priv->mm.bound_list (via
> assert_rpm_release_all_mmaps).
> +        */
>         list_for_each_entry(obj, &dev_priv->mm.bound_list,
> global_list)
>                 i915_gem_release_mmap(obj);
>  }
> @@ -2224,9 +2238,11 @@ i915_gem_object_retire__read(struct
> i915_gem_active *active,
>          * so that we don't steal from recently used but inactive
> objects
>          * (unless we are forced to ofc!)
>          */
> -       if (obj->bind_count)
> +       if (obj->bind_count) {
> +               assert_rpm_release_all_mmaps(request->i915);
>                 list_move_tail(&obj->global_list,
>                                &request->i915->mm.bound_list);
> +       }
>  
>         if (i915_gem_object_has_active_reference(obj)) {
>                 i915_gem_object_unset_active_reference(obj);
> @@ -2751,9 +2767,11 @@ int i915_vma_unbind(struct i915_vma *vma)
>  
>         /* Since the unbound list is global, only move to that list
> if
>          * no more VMAs exist. */
> -       if (--obj->bind_count == 0)
> +       if (--obj->bind_count == 0) {
> +               assert_rpm_release_all_mmaps(to_i915(obj->base.dev));
>                 list_move_tail(&obj->global_list,
>                                &to_i915(obj->base.dev)-
> >mm.unbound_list);
> +       }
>  
>         /* And finally now the object is completely decoupled from
> this vma,
>          * we can drop its hold on the backing storage and allow it
> to be
> @@ -2913,6 +2931,7 @@ i915_vma_insert(struct i915_vma *vma,
>                 goto err_remove_node;
>         }
>  
> +       assert_rpm_release_all_mmaps(dev_priv);
>         list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
>         list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
>         obj->bind_count++;

Ok, so IIUC with the above we'd have the locking rule that to change
the mm.bound_list or obj->fault_mappable for an object on mm.bound_list
you need to hold both a wakeref _and_ struct_mutex. To iterate through
the mm.bound_list you only need to hold either a wakeref _or_
struct_mutex.

Based on these rules I don't see a problem with the above, but I would
also add the assert to i915_gem_object_bind_to_vm() and
i915_gem_shrink(). In fact I can't see that we take a wakeref on the
i915_gem_shrink() path.

Did you plan to send a complete patch?

--Imre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2016-02-26 15:43 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-02-23 14:47 [RFC] drm/i915: Check if we hold a wakeref during ioread32/iowrite32 Imre Deak
2016-02-23 14:55 ` Chris Wilson
2016-02-23 15:09   ` Imre Deak
2016-02-23 16:54     ` Chris Wilson
2016-02-26 15:42       ` Imre Deak
2016-02-23 15:50 ` ✗ Fi.CI.BAT: failure for " Patchwork

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.