From: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
To: dmaengine@vger.kernel.org
Cc: Michal Simek <michal.simek@xilinx.com>,
Hyun Kwon <hyun.kwon@xilinx.com>,
Tejas Upadhyay <tejasu@xilinx.com>,
Satish Kumar Nagireddy <SATISHNA@xilinx.com>
Subject: [PATCH v3 5/6] dmaengine: xilinx: dpdma: Add debugfs support
Date: Thu, 23 Jan 2020 04:29:38 +0200 [thread overview]
Message-ID: <20200123022939.9739-6-laurent.pinchart@ideasonboard.com> (raw)
In-Reply-To: <20200123022939.9739-1-laurent.pinchart@ideasonboard.com>
Expose statistics through debugfs when available. This helps when debugging
issues with the DPDMA driver.
Signed-off-by: Hyun Kwon <hyun.kwon@xilinx.com>
Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
---
Changes since v2:
- Refactor debugfs code
---
drivers/dma/xilinx/xilinx_dpdma.c | 227 ++++++++++++++++++++++++++++++
1 file changed, 227 insertions(+)
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index 15ba85aa63d9..a0df729e2034 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -10,6 +10,7 @@
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/clk.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
@@ -265,6 +266,228 @@ struct xilinx_dpdma_device {
bool ext_addr;
};
+/* -----------------------------------------------------------------------------
+ * DebugFS
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#define XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE 32
+#define XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR "65535"
+
+/* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */
+enum xilinx_dpdma_testcases {
+ DPDMA_TC_INTR_DONE,
+ DPDMA_TC_NONE
+};
+
+struct xilinx_dpdma_debugfs {
+ enum xilinx_dpdma_testcases testcase;
+ u16 xilinx_dpdma_irq_done_count;
+ unsigned int chan_id;
+};
+
+static struct xilinx_dpdma_debugfs dpdma_debugfs;
+struct xilinx_dpdma_debugfs_request {
+ const char *name;
+ enum xilinx_dpdma_testcases tc;
+ ssize_t (*read)(char *buf);
+ int (*write)(char *args);
+};
+
+static void xilinx_dpdma_debugfs_desc_done_irq(struct xilinx_dpdma_chan *chan)
+{
+ if (chan->id == dpdma_debugfs.chan_id)
+ dpdma_debugfs.xilinx_dpdma_irq_done_count++;
+}
+
+static ssize_t xilinx_dpdma_debugfs_desc_done_irq_read(char *buf)
+{
+ size_t out_str_len;
+
+ dpdma_debugfs.testcase = DPDMA_TC_NONE;
+
+ out_str_len = strlen(XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR);
+ out_str_len = min_t(size_t, XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE,
+ out_str_len);
+ snprintf(buf, out_str_len, "%d",
+ dpdma_debugfs.xilinx_dpdma_irq_done_count);
+
+ return 0;
+}
+
+static int xilinx_dpdma_debugfs_desc_done_irq_write(char *args)
+{
+ char *arg;
+ int ret;
+ u32 id;
+
+ arg = strsep(&args, " ");
+ if (!arg || strncasecmp(arg, "start", 5))
+ return -EINVAL;
+
+ arg = strsep(&args, " ");
+ if (!arg)
+ return -EINVAL;
+
+ ret = kstrtou32(arg, 0, &id);
+ if (ret < 0)
+ return ret;
+
+ if (id < ZYNQMP_DPDMA_VIDEO0 || id > ZYNQMP_DPDMA_AUDIO1)
+ return -EINVAL;
+
+ dpdma_debugfs.testcase = DPDMA_TC_INTR_DONE;
+ dpdma_debugfs.xilinx_dpdma_irq_done_count = 0;
+ dpdma_debugfs.chan_id = id;
+
+ return 0;
+}
+
+/* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */
+struct xilinx_dpdma_debugfs_request dpdma_debugfs_reqs[] = {
+ {
+ .name = "DESCRIPTOR_DONE_INTR",
+ .tc = DPDMA_TC_INTR_DONE,
+ .read = xilinx_dpdma_debugfs_desc_done_irq_read,
+ .write = xilinx_dpdma_debugfs_desc_done_irq_write,
+ },
+};
+
+static ssize_t xilinx_dpdma_debugfs_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ enum xilinx_dpdma_testcases testcase;
+ char *kern_buff;
+ int ret;
+
+ if (*pos != 0 || size <= 0)
+ return -EINVAL;
+
+ kern_buff = kzalloc(XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE, GFP_KERNEL);
+ if (!kern_buff) {
+ dpdma_debugfs.testcase = DPDMA_TC_NONE;
+ return -ENOMEM;
+ }
+
+ testcase = READ_ONCE(dpdma_debugfs.testcase);
+ if (testcase != DPDMA_TC_NONE) {
+ ret = dpdma_debugfs_reqs[testcase].read(kern_buff);
+ if (ret < 0)
+ goto done;
+ } else {
+ strlcpy(kern_buff, "No testcase executed",
+ XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE);
+ }
+
+ size = min(size, strlen(kern_buff));
+ ret = copy_to_user(buf, kern_buff, size);
+
+done:
+ kfree(kern_buff);
+ if (ret)
+ return ret;
+
+ *pos = size + 1;
+ return size;
+}
+
+static ssize_t xilinx_dpdma_debugfs_write(struct file *f,
+ const char __user *buf, size_t size,
+ loff_t *pos)
+{
+ char *kern_buff, *kern_buff_start;
+ char *testcase;
+ unsigned int i;
+ int ret;
+
+ if (*pos != 0 || size <= 0)
+ return -EINVAL;
+
+ /* Supporting single instance of test as of now. */
+ if (dpdma_debugfs.testcase != DPDMA_TC_NONE)
+ return -EBUSY;
+
+ kern_buff = kzalloc(size, GFP_KERNEL);
+ if (!kern_buff)
+ return -ENOMEM;
+ kern_buff_start = kern_buff;
+
+ ret = strncpy_from_user(kern_buff, buf, size);
+ if (ret < 0)
+ goto done;
+
+ /* Read the testcase name from a user request. */
+ testcase = strsep(&kern_buff, " ");
+
+ for (i = 0; i < ARRAY_SIZE(dpdma_debugfs_reqs); i++) {
+ if (!strcasecmp(testcase, dpdma_debugfs_reqs[i].name))
+ break;
+ }
+
+ if (i == ARRAY_SIZE(dpdma_debugfs_reqs)) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = dpdma_debugfs_reqs[i].write(kern_buff);
+ if (ret < 0)
+ goto done;
+
+ ret = size;
+
+done:
+ kfree(kern_buff_start);
+ return ret;
+}
+
+static const struct file_operations fops_xilinx_dpdma_dbgfs = {
+ .owner = THIS_MODULE,
+ .read = xilinx_dpdma_debugfs_read,
+ .write = xilinx_dpdma_debugfs_write,
+};
+
+static int xilinx_dpdma_debugfs_init(struct device *dev)
+{
+ int err;
+ struct dentry *xilinx_dpdma_debugfs_dir, *xilinx_dpdma_debugfs_file;
+
+ dpdma_debugfs.testcase = DPDMA_TC_NONE;
+
+ xilinx_dpdma_debugfs_dir = debugfs_create_dir("dpdma", NULL);
+ if (!xilinx_dpdma_debugfs_dir) {
+ dev_err(dev, "debugfs_create_dir failed\n");
+ return -ENODEV;
+ }
+
+ xilinx_dpdma_debugfs_file =
+ debugfs_create_file("testcase", 0444,
+ xilinx_dpdma_debugfs_dir, NULL,
+ &fops_xilinx_dpdma_dbgfs);
+ if (!xilinx_dpdma_debugfs_file) {
+ dev_err(dev, "debugfs_create_file testcase failed\n");
+ err = -ENODEV;
+ goto err_dbgfs;
+ }
+ return 0;
+
+err_dbgfs:
+ debugfs_remove_recursive(xilinx_dpdma_debugfs_dir);
+ xilinx_dpdma_debugfs_dir = NULL;
+ return err;
+}
+
+#else
+static int xilinx_dpdma_debugfs_init(struct device *dev)
+{
+ return 0;
+}
+
+static void xilinx_dpdma_debugfs_desc_done_irq(struct xilinx_dpdma_chan *chan)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
/* -----------------------------------------------------------------------------
* I/O Accessors
*/
@@ -840,6 +1063,8 @@ static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
spin_lock_irqsave(&chan->lock, flags);
+ xilinx_dpdma_debugfs_desc_done_irq(chan);
+
if (active)
vchan_cyclic_callback(&active->vdesc);
else
@@ -1469,6 +1694,8 @@ static int xilinx_dpdma_probe(struct platform_device *pdev)
xilinx_dpdma_enable_irq(xdev);
+ xilinx_dpdma_debugfs_init(&pdev->dev);
+
dev_info(&pdev->dev, "Xilinx DPDMA engine is probed\n");
return 0;
--
Regards,
Laurent Pinchart
next prev parent reply other threads:[~2020-01-23 2:30 UTC|newest]
Thread overview: 46+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-01-23 2:29 [PATCH v3 0/6] dma: Add Xilinx ZynqMP DPDMA driver Laurent Pinchart
2020-01-23 2:29 ` [PATCH v3 1/6] dt: bindings: dma: xilinx: dpdma: DT bindings for Xilinx DPDMA Laurent Pinchart
2020-01-23 2:29 ` [PATCH v3 2/6] dmaengine: Add interleaved cyclic transaction type Laurent Pinchart
2020-01-23 8:03 ` Peter Ujfalusi
2020-01-23 8:43 ` Vinod Koul
2020-01-23 8:51 ` Peter Ujfalusi
2020-01-23 12:23 ` Laurent Pinchart
2020-01-24 6:10 ` Vinod Koul
2020-01-24 8:50 ` Laurent Pinchart
2020-02-10 14:06 ` Laurent Pinchart
2020-02-13 13:29 ` Vinod Koul
2020-02-13 13:48 ` Laurent Pinchart
2020-02-13 14:07 ` Vinod Koul
2020-02-13 14:15 ` Peter Ujfalusi
2020-02-13 16:52 ` Laurent Pinchart
2020-02-14 4:23 ` Vinod Koul
2020-02-14 16:22 ` Laurent Pinchart
2020-02-17 10:00 ` Peter Ujfalusi
2020-02-19 9:25 ` Vinod Koul
2020-02-26 16:30 ` Laurent Pinchart
2020-03-02 3:47 ` Vinod Koul
2020-03-02 7:37 ` Laurent Pinchart
2020-03-03 4:32 ` Vinod Koul
2020-03-03 19:22 ` Laurent Pinchart
2020-03-04 5:13 ` Vinod Koul
2020-03-04 8:01 ` Laurent Pinchart
2020-03-04 15:37 ` Vinod Koul
2020-03-04 16:00 ` Laurent Pinchart
2020-03-04 16:24 ` Vinod Koul
[not found] ` <20200311155248.GA4772@pendragon.ideasonboard.com>
2020-03-18 15:14 ` Laurent Pinchart
2020-03-25 16:00 ` Laurent Pinchart
2020-03-26 7:02 ` Vinod Koul
2020-04-08 17:00 ` Laurent Pinchart
2020-04-15 15:12 ` Laurent Pinchart
2020-03-06 14:49 ` Peter Ujfalusi
2020-03-11 23:15 ` Laurent Pinchart
2020-02-26 16:24 ` Laurent Pinchart
2020-03-02 3:42 ` Vinod Koul
2020-01-24 7:20 ` Peter Ujfalusi
2020-01-24 7:38 ` Peter Ujfalusi
2020-01-24 8:58 ` Laurent Pinchart
2020-01-24 8:56 ` Laurent Pinchart
2020-01-23 2:29 ` [PATCH v3 3/6] dmaengine: virt-dma: Use lockdep to check locking requirements Laurent Pinchart
2020-01-23 2:29 ` [PATCH v3 4/6] dmaengine: xilinx: dpdma: Add the Xilinx DisplayPort DMA engine driver Laurent Pinchart
2020-01-23 2:29 ` Laurent Pinchart [this message]
2020-01-23 2:29 ` [PATCH v3 6/6] arm64: dts: zynqmp: Add DPDMA node Laurent Pinchart
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200123022939.9739-6-laurent.pinchart@ideasonboard.com \
--to=laurent.pinchart@ideasonboard.com \
--cc=SATISHNA@xilinx.com \
--cc=dmaengine@vger.kernel.org \
--cc=hyun.kwon@xilinx.com \
--cc=michal.simek@xilinx.com \
--cc=tejasu@xilinx.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).