* [PATCH] drm/msm/dpu: Convert to a chained irq chip
@ 2019-01-03 19:06 ` Stephen Boyd
  0 siblings, 0 replies; 6+ messages in thread
From: Stephen Boyd @ 2019-01-03 19:06 UTC (permalink / raw)
  To: Rob Clark
  Cc: Rajesh Yadav, linux-arm-msm, linux-kernel, dri-devel,
	Jayant Shekhar, Jordan Crouse, Sean Paul, Jeykumar Sankaran,
	freedreno, linux-arm-kernel

The devices that make up the DPU, e.g. the graphics card, request their
interrupts from this "virtual" interrupt chip. The interrupt chip is
built on top of a GIC SPI interrupt that is raised whenever any
interrupt in the DPU's irq status register fires. From the kernel's
perspective this is a chained irq chip, so requesting a flow handler
for the GIC SPI and then calling the generic IRQ handling code from
that handler is not entirely proper. It's better to convert this to a
chained irq so that the GIC SPI irq doesn't appear in /proc/interrupts,
can't have its CPU affinity changed, and isn't accounted for in irq
stats. Doing this also lets us specify a different lock class for the
chained interrupts, which silences a recursive lockdep warning that is
easy to trigger with 'threadirqs' on the kernel command line.

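To make the before/after concrete, here is a minimal sketch condensed
from the diff below. The handler names are invented for the sketch (in
the patch both variants are called dpu_mdss_irq) and the demultiplexing
loop is elided; the registration calls noted in the comments are the
only real difference in how the GIC SPI is hooked up:

 /* Before: a normally requested interrupt with its own /proc/interrupts
  * line, registered with request_irq(platform_get_irq(pdev, 0),
  * dpu_mdss_isr, 0, "dpu_mdss_isr", dpu_mdss). */
 static irqreturn_t dpu_mdss_isr(int irq, void *arg)
 {
 	struct dpu_mdss *dpu_mdss = arg;
 	u32 status = readl_relaxed(dpu_mdss->mmio + HW_INTR_STATUS);

 	/* ... dispatch each set bit via generic_handle_irq() ... */
 	return status ? IRQ_HANDLED : IRQ_NONE;
 }

 /* After: a chained flow handler, registered with
  * irq_set_chained_handler_and_data(platform_get_irq(pdev, 0),
  * dpu_mdss_chained_handler, dpu_mdss); the GIC SPI no longer shows up
  * in /proc/interrupts or irq stats. */
 static void dpu_mdss_chained_handler(struct irq_desc *desc)
 {
 	struct dpu_mdss *dpu_mdss = irq_desc_get_handler_data(desc);
 	struct irq_chip *chip = irq_desc_get_chip(desc);

 	chained_irq_enter(chip, desc);
 	/* ... read HW_INTR_STATUS from dpu_mdss->mmio and dispatch each
 	 * set bit via generic_handle_irq(), as in the hunk below ... */
 	chained_irq_exit(chip, desc);
 }
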
 WARNING: inconsistent lock state
 4.19.10 #76 Tainted: G        W
 --------------------------------
 inconsistent {IN-HARDIRQ-W} -> {HARDIRQ-ON-W} usage.
 irq/40-dpu_mdss/203 [HC0[0]:SC0[2]:HE1:SE0] takes:
 0000000053ea9021 (&irq_desc_lock_class){?.-.}, at: handle_level_irq+0x34/0x26c
 {IN-HARDIRQ-W} state was registered at:
   lock_acquire+0x244/0x360
   _raw_spin_lock+0x64/0xa0
   handle_fasteoi_irq+0x54/0x2ec
   generic_handle_irq+0x44/0x5c
   __handle_domain_irq+0x9c/0x11c
   gic_handle_irq+0x208/0x260
   el1_irq+0xb4/0x130
   arch_cpu_idle+0x178/0x3cc
   default_idle_call+0x3c/0x54
   do_idle+0x1a8/0x3dc
   cpu_startup_entry+0x24/0x28
   rest_init+0x240/0x270
   start_kernel+0x5a8/0x6bc
 irq event stamp: 18
 hardirqs last  enabled at (17): [<ffffff9042385e80>] _raw_spin_unlock_irq+0x40/0xc0
 hardirqs last disabled at (16): [<ffffff904237a1f4>] __schedule+0x20c/0x1bbc
 softirqs last  enabled at (0): [<ffffff9040f318d0>] copy_process+0xb50/0x3964
 softirqs last disabled at (18): [<ffffff9041036364>] local_bh_disable+0x8/0x20

 other info that might help us debug this:
  Possible unsafe locking scenario:

        CPU0
        ----
   lock(&irq_desc_lock_class);
   <Interrupt>
     lock(&irq_desc_lock_class);

  *** DEADLOCK ***

 no locks held by irq/40-dpu_mdss/203.

 stack backtrace:
 CPU: 0 PID: 203 Comm: irq/40-dpu_mdss Tainted: G        W         4.19.10 #76
 Call trace:
  dump_backtrace+0x0/0x2f8
  show_stack+0x20/0x2c
  __dump_stack+0x20/0x28
  dump_stack+0xcc/0x10c
  mark_lock+0xbe0/0xe24
  __lock_acquire+0x4cc/0x2708
  lock_acquire+0x244/0x360
  _raw_spin_lock+0x64/0xa0
  handle_level_irq+0x34/0x26c
  generic_handle_irq+0x44/0x5c
  dpu_mdss_irq+0x64/0xec
  irq_forced_thread_fn+0x58/0x9c
  irq_thread+0x120/0x1dc
  kthread+0x248/0x260
  ret_from_fork+0x10/0x18
 ------------[ cut here ]------------
 irq 169 handler irq_default_primary_handler+0x0/0x18 enabled interrupts

Cc: Sean Paul <seanpaul@chromium.org>
Cc: Jordan Crouse <jcrouse@codeaurora.org>
Cc: Jayant Shekhar <jshekhar@codeaurora.org>
Cc: Rajesh Yadav <ryadav@codeaurora.org>
Cc: Jeykumar Sankaran <jsanka@codeaurora.org>
Signed-off-by: Stephen Boyd <swboyd@chromium.org>
---

 drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c | 36 ++++++++++++++----------
 1 file changed, 21 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
index cb307a2abf06..7316b4ab1b85 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
@@ -23,11 +23,14 @@ struct dpu_mdss {
 	struct dpu_irq_controller irq_controller;
 };
 
-static irqreturn_t dpu_mdss_irq(int irq, void *arg)
+static void dpu_mdss_irq(struct irq_desc *desc)
 {
-	struct dpu_mdss *dpu_mdss = arg;
+	struct dpu_mdss *dpu_mdss = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	u32 interrupts;
 
+	chained_irq_enter(chip, desc);
+
 	interrupts = readl_relaxed(dpu_mdss->mmio + HW_INTR_STATUS);
 
 	while (interrupts) {
@@ -39,20 +42,20 @@ static irqreturn_t dpu_mdss_irq(int irq, void *arg)
 					   hwirq);
 		if (mapping == 0) {
 			DRM_ERROR("couldn't find irq mapping for %lu\n", hwirq);
-			return IRQ_NONE;
+			break;
 		}
 
 		rc = generic_handle_irq(mapping);
 		if (rc < 0) {
 			DRM_ERROR("handle irq fail: irq=%lu mapping=%u rc=%d\n",
 				  hwirq, mapping, rc);
-			return IRQ_NONE;
+			break;
 		}
 
 		interrupts &= ~(1 << hwirq);
 	}
 
-	return IRQ_HANDLED;
+	chained_irq_exit(chip, desc);
 }
 
 static void dpu_mdss_irq_mask(struct irq_data *irqd)
@@ -83,16 +86,16 @@ static struct irq_chip dpu_mdss_irq_chip = {
 	.irq_unmask = dpu_mdss_irq_unmask,
 };
 
+static struct lock_class_key dpu_mdss_lock_key, dpu_mdss_request_key;
+
 static int dpu_mdss_irqdomain_map(struct irq_domain *domain,
 		unsigned int irq, irq_hw_number_t hwirq)
 {
 	struct dpu_mdss *dpu_mdss = domain->host_data;
-	int ret;
 
+	irq_set_lockdep_class(irq, &dpu_mdss_lock_key, &dpu_mdss_request_key);
 	irq_set_chip_and_handler(irq, &dpu_mdss_irq_chip, handle_level_irq);
-	ret = irq_set_chip_data(irq, dpu_mdss);
-
-	return ret;
+	return irq_set_chip_data(irq, dpu_mdss);
 }
 
 static const struct irq_domain_ops dpu_mdss_irqdomain_ops = {
@@ -159,11 +162,13 @@ static void dpu_mdss_destroy(struct drm_device *dev)
 	struct msm_drm_private *priv = dev->dev_private;
 	struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss);
 	struct dss_module_power *mp = &dpu_mdss->mp;
+	int irq;
 
 	pm_runtime_suspend(dev->dev);
 	pm_runtime_disable(dev->dev);
 	_dpu_mdss_irq_domain_fini(dpu_mdss);
-	free_irq(platform_get_irq(pdev, 0), dpu_mdss);
+	irq = platform_get_irq(pdev, 0);
+	irq_set_chained_handler_and_data(irq, NULL, NULL);
 	msm_dss_put_clk(mp->clk_config, mp->num_clk);
 	devm_kfree(&pdev->dev, mp->clk_config);
 
@@ -187,6 +192,7 @@ int dpu_mdss_init(struct drm_device *dev)
 	struct dpu_mdss *dpu_mdss;
 	struct dss_module_power *mp;
 	int ret = 0;
+	int irq;
 
 	dpu_mdss = devm_kzalloc(dev->dev, sizeof(*dpu_mdss), GFP_KERNEL);
 	if (!dpu_mdss)
@@ -219,12 +225,12 @@ int dpu_mdss_init(struct drm_device *dev)
 	if (ret)
 		goto irq_domain_error;
 
-	ret = request_irq(platform_get_irq(pdev, 0),
-			dpu_mdss_irq, 0, "dpu_mdss_isr", dpu_mdss);
-	if (ret) {
-		DPU_ERROR("failed to init irq: %d\n", ret);
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
 		goto irq_error;
-	}
+
+	irq_set_chained_handler_and_data(irq, dpu_mdss_irq,
+					 dpu_mdss);
 
 	pm_runtime_enable(dev->dev);
 
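On the lockdep side: by default every irq descriptor shares the single
&irq_desc_lock_class seen in the splat above, so the class is recorded
as taken in hard irq context by the GIC and, with 'threadirqs', taken
again from the forced irq thread with interrupts enabled. Giving the
DPU's child interrupts their own keys in dpu_mdss_irqdomain_map() is
the usual pattern for demultiplexing chips; a condensed restatement of
that hunk (using domain->host_data directly instead of the local
dpu_mdss pointer):

 /* One pair of keys per demultiplexing chip, shared by all its children. */
 static struct lock_class_key dpu_mdss_lock_key, dpu_mdss_request_key;

 static int dpu_mdss_irqdomain_map(struct irq_domain *domain,
 		unsigned int irq, irq_hw_number_t hwirq)
 {
 	/* Move the child descriptors out of the default irq_desc_lock_class
 	 * so that handling them from the parent's context is no longer seen
 	 * as recursing on the same class. */
 	irq_set_lockdep_class(irq, &dpu_mdss_lock_key, &dpu_mdss_request_key);
 	irq_set_chip_and_handler(irq, &dpu_mdss_irq_chip, handle_level_irq);
 	return irq_set_chip_data(irq, domain->host_data);
 }
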
-- 
Sent by a computer through tubes

* Re: [PATCH] drm/msm/dpu: Convert to a chained irq chip
  2019-01-03 19:06 ` Stephen Boyd
@ 2019-01-22 19:00   ` Sean Paul
  -1 siblings, 0 replies; 6+ messages in thread
From: Sean Paul @ 2019-01-22 19:00 UTC (permalink / raw)
  To: Stephen Boyd
  Cc: Rajesh Yadav, linux-arm-msm, linux-kernel, dri-devel,
	Jayant Shekhar, Sean Paul, freedreno, linux-arm-kernel

On Thu, Jan 03, 2019 at 11:06:02AM -0800, Stephen Boyd wrote:
> The devices that make up the DPU, e.g. the graphics card, request their
> interrupts from this "virtual" interrupt chip. The interrupt chip is
> built on top of a GIC SPI interrupt that is raised whenever any
> interrupt in the DPU's irq status register fires. From the kernel's
> perspective this is a chained irq chip, so requesting a flow handler
> for the GIC SPI and then calling the generic IRQ handling code from
> that handler is not entirely proper. It's better to convert this to a
> chained irq so that the GIC SPI irq doesn't appear in /proc/interrupts,
> can't have its CPU affinity changed, and isn't accounted for in irq
> stats. Doing this also lets us specify a different lock class for the
> chained interrupts, which silences a recursive lockdep warning that is
> easy to trigger with 'threadirqs' on the kernel command line.
> 
>  WARNING: inconsistent lock state
>  4.19.10 #76 Tainted: G        W
>  --------------------------------
>  inconsistent {IN-HARDIRQ-W} -> {HARDIRQ-ON-W} usage.
>  irq/40-dpu_mdss/203 [HC0[0]:SC0[2]:HE1:SE0] takes:
>  0000000053ea9021 (&irq_desc_lock_class){?.-.}, at: handle_level_irq+0x34/0x26c
>  {IN-HARDIRQ-W} state was registered at:
>    lock_acquire+0x244/0x360
>    _raw_spin_lock+0x64/0xa0
>    handle_fasteoi_irq+0x54/0x2ec
>    generic_handle_irq+0x44/0x5c
>    __handle_domain_irq+0x9c/0x11c
>    gic_handle_irq+0x208/0x260
>    el1_irq+0xb4/0x130
>    arch_cpu_idle+0x178/0x3cc
>    default_idle_call+0x3c/0x54
>    do_idle+0x1a8/0x3dc
>    cpu_startup_entry+0x24/0x28
>    rest_init+0x240/0x270
>    start_kernel+0x5a8/0x6bc
>  irq event stamp: 18
>  hardirqs last  enabled at (17): [<ffffff9042385e80>] _raw_spin_unlock_irq+0x40/0xc0
>  hardirqs last disabled at (16): [<ffffff904237a1f4>] __schedule+0x20c/0x1bbc
>  softirqs last  enabled at (0): [<ffffff9040f318d0>] copy_process+0xb50/0x3964
>  softirqs last disabled at (18): [<ffffff9041036364>] local_bh_disable+0x8/0x20
> 
>  other info that might help us debug this:
>   Possible unsafe locking scenario:
> 
>         CPU0
>         ----
>    lock(&irq_desc_lock_class);
>    <Interrupt>
>      lock(&irq_desc_lock_class);
> 
>   *** DEADLOCK ***
> 
>  no locks held by irq/40-dpu_mdss/203.
> 
>  stack backtrace:
>  CPU: 0 PID: 203 Comm: irq/40-dpu_mdss Tainted: G        W         4.19.10 #76
>  Call trace:
>   dump_backtrace+0x0/0x2f8
>   show_stack+0x20/0x2c
>   __dump_stack+0x20/0x28
>   dump_stack+0xcc/0x10c
>   mark_lock+0xbe0/0xe24
>   __lock_acquire+0x4cc/0x2708
>   lock_acquire+0x244/0x360
>   _raw_spin_lock+0x64/0xa0
>   handle_level_irq+0x34/0x26c
>   generic_handle_irq+0x44/0x5c
>   dpu_mdss_irq+0x64/0xec
>   irq_forced_thread_fn+0x58/0x9c
>   irq_thread+0x120/0x1dc
>   kthread+0x248/0x260
>   ret_from_fork+0x10/0x18
>  ------------[ cut here ]------------
>  irq 169 handler irq_default_primary_handler+0x0/0x18 enabled interrupts
> 
> Cc: Sean Paul <seanpaul@chromium.org>
> Cc: Jordan Crouse <jcrouse@codeaurora.org>
> Cc: Jayant Shekhar <jshekhar@codeaurora.org>
> Cc: Rajesh Yadav <ryadav@codeaurora.org>
> Cc: Jeykumar Sankaran <jsanka@codeaurora.org>
> Signed-off-by: Stephen Boyd <swboyd@chromium.org>

LGTM, applied to dpu-staging.

Thanks,

Sean

> ---
> 
>  drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c | 36 ++++++++++++++----------
>  1 file changed, 21 insertions(+), 15 deletions(-)
> 
> diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
> index cb307a2abf06..7316b4ab1b85 100644
> --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
> +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
> @@ -23,11 +23,14 @@ struct dpu_mdss {
>  	struct dpu_irq_controller irq_controller;
>  };
>  
> -static irqreturn_t dpu_mdss_irq(int irq, void *arg)
> +static void dpu_mdss_irq(struct irq_desc *desc)
>  {
> -	struct dpu_mdss *dpu_mdss = arg;
> +	struct dpu_mdss *dpu_mdss = irq_desc_get_handler_data(desc);
> +	struct irq_chip *chip = irq_desc_get_chip(desc);
>  	u32 interrupts;
>  
> +	chained_irq_enter(chip, desc);
> +
>  	interrupts = readl_relaxed(dpu_mdss->mmio + HW_INTR_STATUS);
>  
>  	while (interrupts) {
> @@ -39,20 +42,20 @@ static irqreturn_t dpu_mdss_irq(int irq, void *arg)
>  					   hwirq);
>  		if (mapping == 0) {
>  			DRM_ERROR("couldn't find irq mapping for %lu\n", hwirq);
> -			return IRQ_NONE;
> +			break;
>  		}
>  
>  		rc = generic_handle_irq(mapping);
>  		if (rc < 0) {
>  			DRM_ERROR("handle irq fail: irq=%lu mapping=%u rc=%d\n",
>  				  hwirq, mapping, rc);
> -			return IRQ_NONE;
> +			break;
>  		}
>  
>  		interrupts &= ~(1 << hwirq);
>  	}
>  
> -	return IRQ_HANDLED;
> +	chained_irq_exit(chip, desc);
>  }
>  
>  static void dpu_mdss_irq_mask(struct irq_data *irqd)
> @@ -83,16 +86,16 @@ static struct irq_chip dpu_mdss_irq_chip = {
>  	.irq_unmask = dpu_mdss_irq_unmask,
>  };
>  
> +static struct lock_class_key dpu_mdss_lock_key, dpu_mdss_request_key;
> +
>  static int dpu_mdss_irqdomain_map(struct irq_domain *domain,
>  		unsigned int irq, irq_hw_number_t hwirq)
>  {
>  	struct dpu_mdss *dpu_mdss = domain->host_data;
> -	int ret;
>  
> +	irq_set_lockdep_class(irq, &dpu_mdss_lock_key, &dpu_mdss_request_key);
>  	irq_set_chip_and_handler(irq, &dpu_mdss_irq_chip, handle_level_irq);
> -	ret = irq_set_chip_data(irq, dpu_mdss);
> -
> -	return ret;
> +	return irq_set_chip_data(irq, dpu_mdss);
>  }
>  
>  static const struct irq_domain_ops dpu_mdss_irqdomain_ops = {
> @@ -159,11 +162,13 @@ static void dpu_mdss_destroy(struct drm_device *dev)
>  	struct msm_drm_private *priv = dev->dev_private;
>  	struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss);
>  	struct dss_module_power *mp = &dpu_mdss->mp;
> +	int irq;
>  
>  	pm_runtime_suspend(dev->dev);
>  	pm_runtime_disable(dev->dev);
>  	_dpu_mdss_irq_domain_fini(dpu_mdss);
> -	free_irq(platform_get_irq(pdev, 0), dpu_mdss);
> +	irq = platform_get_irq(pdev, 0);
> +	irq_set_chained_handler_and_data(irq, NULL, NULL);
>  	msm_dss_put_clk(mp->clk_config, mp->num_clk);
>  	devm_kfree(&pdev->dev, mp->clk_config);
>  
> @@ -187,6 +192,7 @@ int dpu_mdss_init(struct drm_device *dev)
>  	struct dpu_mdss *dpu_mdss;
>  	struct dss_module_power *mp;
>  	int ret = 0;
> +	int irq;
>  
>  	dpu_mdss = devm_kzalloc(dev->dev, sizeof(*dpu_mdss), GFP_KERNEL);
>  	if (!dpu_mdss)
> @@ -219,12 +225,12 @@ int dpu_mdss_init(struct drm_device *dev)
>  	if (ret)
>  		goto irq_domain_error;
>  
> -	ret = request_irq(platform_get_irq(pdev, 0),
> -			dpu_mdss_irq, 0, "dpu_mdss_isr", dpu_mdss);
> -	if (ret) {
> -		DPU_ERROR("failed to init irq: %d\n", ret);
> +	irq = platform_get_irq(pdev, 0);
> +	if (irq < 0)
>  		goto irq_error;
> -	}
> +
> +	irq_set_chained_handler_and_data(irq, dpu_mdss_irq,
> +					 dpu_mdss);
>  
>  	pm_runtime_enable(dev->dev);
>  
> -- 
> Sent by a computer through tubes
> 

-- 
Sean Paul, Software Engineer, Google / Chromium OS