From: Dipen Patel <dipenp@nvidia.com>
To: <thierry.reding@gmail.com>, <jonathanh@nvidia.com>,
	<linux-kernel@vger.kernel.org>, <linux-tegra@vger.kernel.org>,
	<linux-gpio@vger.kernel.org>, <linus.walleij@linaro.org>,
	<bgolaszewski@baylibre.com>, <warthog618@gmail.com>,
	<devicetree@vger.kernel.org>, <linux-doc@vger.kernel.org>,
	<robh+dt@kernel.org>
Cc: Dipen Patel <dipenp@nvidia.com>
Subject: [RFC v2 08/11] gpiolib: cdev: Add hardware timestamp clock type
Date: Thu, 30 Sep 2021 16:26:14 -0700
Message-ID: <20210930232617.6396-9-dipenp@nvidia.com>
In-Reply-To: <20210930232617.6396-1-dipenp@nvidia.com>

This patch adds a new clock type for GPIO controllers that can
timestamp GPIO lines in real time using hardware means. To expose this
functionality to userspace, the line create call now checks for the
new clock type and, if it is requested, calls the hardware timestamp
API from gpiolib.c. During a line change event, the HTE subsystem
pushes timestamp data through callbacks.
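
From userspace, the new flag is selected at line request time like any
other line flag. A minimal sketch of a consumer requesting hardware
timestamps and reading back hardware-stamped edge events (hypothetical
chip path, line offset and consumer name; error handling omitted):

  #include <fcntl.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <unistd.h>
  #include <linux/gpio.h>

  int main(void)
  {
	struct gpio_v2_line_request req;
	struct gpio_v2_line_event ev;
	int chip_fd;

	chip_fd = open("/dev/gpiochip0", O_RDONLY); /* hypothetical chip */

	memset(&req, 0, sizeof(req));
	req.offsets[0] = 10;	/* hypothetical line offset */
	req.num_lines = 1;
	req.config.flags = GPIO_V2_LINE_FLAG_INPUT |
			   GPIO_V2_LINE_FLAG_EDGE_RISING |
			   GPIO_V2_LINE_FLAG_EDGE_FALLING |
			   GPIO_V2_LINE_FLAG_EVENT_CLOCK_HARDWARE;
	strcpy(req.consumer, "hte-test");

	ioctl(chip_fd, GPIO_V2_GET_LINE_IOCTL, &req);

	/* ev.timestamp_ns now carries the HTE-provided timestamp */
	read(req.fd, &ev, sizeof(ev));

	return 0;
  }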

Signed-off-by: Dipen Patel <dipenp@nvidia.com>
---
Changes in v2:
- Added hte_dir and static structure hte_ts_desc.
- Added callbacks which get invoked by HTE when new data is available.
- Better use of hte_dir and seq from hte_ts_desc.
- Modified sw debounce function to accommodate hardware timestamping.
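
Note: the HTE callback typedefs referenced below (hte_ts_cb_t and
hte_ts_threaded_cb_t) are introduced by the HTE core patch earlier in
this series. As used by process_hw_ts() and process_hw_ts_thread(),
their assumed shapes are roughly:

  typedef hte_return_t (*hte_ts_cb_t)(struct hte_ts_data *ts, void *data);
  typedef hte_return_t (*hte_ts_threaded_cb_t)(void *data);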

 drivers/gpio/gpiolib-cdev.c | 161 ++++++++++++++++++++++++++++++++++--
 include/uapi/linux/gpio.h   |   1 +
 2 files changed, 153 insertions(+), 9 deletions(-)

diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index c7b5446d01fd..1736ad54e3ec 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -464,6 +464,12 @@ struct line {
 	 * stale value.
 	 */
 	unsigned int level;
+	/*
+	 * dir is touched in the HTE callbacks hte_ts_cb_t and
+	 * hte_ts_threaded_cb_t, which are mutually exclusive. It is unused
+	 * when HTE is not supported or is disabled.
+	 */
+	enum hte_dir dir;
 };
 
 /**
@@ -518,6 +524,7 @@ struct linereq {
 	 GPIO_V2_LINE_DRIVE_FLAGS | \
 	 GPIO_V2_LINE_EDGE_FLAGS | \
 	 GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME | \
+	 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HARDWARE | \
 	 GPIO_V2_LINE_BIAS_FLAGS)
 
 static void linereq_put_event(struct linereq *lr,
@@ -546,6 +553,94 @@ static u64 line_event_timestamp(struct line *line)
 	return ktime_get_ns();
 }
 
+static hte_return_t process_hw_ts_thread(void *p)
+{
+	struct line *line = p;
+	struct linereq *lr = line->req;
+	struct gpio_v2_line_event le;
+	u64 eflags;
+
+	memset(&le, 0, sizeof(le));
+
+	le.timestamp_ns = line->timestamp_ns;
+	line->timestamp_ns = 0;
+
+	if (line->dir >= HTE_DIR_NOSUPP) {
+		eflags = READ_ONCE(line->eflags);
+		if (eflags == GPIO_V2_LINE_FLAG_EDGE_BOTH) {
+			int level = gpiod_get_value_cansleep(line->desc);
+
+			if (level)
+				/* Emit low-to-high event */
+				le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
+			else
+				/* Emit high-to-low event */
+				le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
+		} else if (eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) {
+			/* Emit low-to-high event */
+			le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
+		} else if (eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) {
+			/* Emit high-to-low event */
+			le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
+		} else {
+			return HTE_CB_ERROR;
+		}
+	} else {
+		if (line->dir == HTE_RISING_EDGE_TS)
+			le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
+		else
+			le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
+	}
+
+	le.line_seqno = line->line_seqno;
+	le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
+	le.offset = gpio_chip_hwgpio(line->desc);
+
+	linereq_put_event(lr, &le);
+
+	return HTE_CB_HANDLED;
+}
+
+static hte_return_t process_hw_ts(struct hte_ts_data *ts, void *p)
+{
+	struct line *line = p;
+	struct linereq *lr = line->req;
+
+	if (!ts)
+		return HTE_CB_ERROR;
+
+	line->timestamp_ns = ts->tsc;
+	line->dir = ts->dir;
+
+	/*
+	 * The HTE engine may detect spurious edges on lines where software
+	 * debounce is enabled, in which case this primary callback is
+	 * invoked multiple times. It is better to let debounce_work_func
+	 * handle the event instead of process_hw_ts_thread. The
+	 * timestamp_ns is overwritten here, which is fine as we are only
+	 * interested in the last value anyway; debounce_work_func then
+	 * reads whatever line->timestamp_ns was stored last. Because this
+	 * callback can run multiple times per event, ts->seq is not of
+	 * interest here.
+	 */
+	if (!READ_ONCE(line->sw_debounced)) {
+		line->line_seqno = ts->seq;
+
+		/*
+		 * Increment here in case all the lines in the linereq are
+		 * enabled for hw timestamping. This also works when only a
+		 * subset of lines is enabled for hw timestamping, since the
+		 * edge_irq_* callbacks proceed as usual for the rest.
+		 */
+		if (lr->num_lines != 1)
+			line->req_seqno = atomic_inc_return(&lr->seqno);
+
+		return HTE_RUN_THREADED_CB;
+	}
+
+	return HTE_CB_HANDLED;
+}
+
 static irqreturn_t edge_irq_thread(int irq, void *p)
 {
 	struct line *line = p;
@@ -553,6 +648,10 @@ static irqreturn_t edge_irq_thread(int irq, void *p)
 	struct gpio_v2_line_event le;
 	u64 eflags;
 
+	/* Let process_hw_ts_thread handle this event */
+	if (test_bit(FLAG_EVENT_CLOCK_HARDWARE, &line->desc->flags))
+		return IRQ_HANDLED;
+
 	/* Do not leak kernel stack to userspace */
 	memset(&le, 0, sizeof(le));
 
@@ -604,6 +703,10 @@ static irqreturn_t edge_irq_handler(int irq, void *p)
 	struct line *line = p;
 	struct linereq *lr = line->req;
 
+	/* Let the HTE-supplied callbacks handle this event */
+	if (test_bit(FLAG_EVENT_CLOCK_HARDWARE, &line->desc->flags))
+		return IRQ_HANDLED;
+
 	/*
 	 * Just store the timestamp in hardirq context so we get it as
 	 * close in time as possible to the actual event.
@@ -682,14 +785,6 @@ static void debounce_work_func(struct work_struct *work)
 	/* Do not leak kernel stack to userspace */
 	memset(&le, 0, sizeof(le));
 
-	lr = line->req;
-	le.timestamp_ns = line_event_timestamp(line);
-	le.offset = gpio_chip_hwgpio(line->desc);
-	line->line_seqno++;
-	le.line_seqno = line->line_seqno;
-	le.seqno = (lr->num_lines == 1) ?
-		le.line_seqno : atomic_inc_return(&lr->seqno);
-
 	if (level)
 		/* Emit low-to-high event */
 		le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
@@ -697,6 +792,23 @@ static void debounce_work_func(struct work_struct *work)
 		/* Emit high-to-low event */
 		le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
 
+	if (test_bit(FLAG_EVENT_CLOCK_HARDWARE, &line->desc->flags)) {
+		le.timestamp_ns = line->timestamp_ns;
+		if (line->dir < HTE_DIR_NOSUPP)
+			le.id = (line->dir == HTE_RISING_EDGE_TS) ?
+				 GPIO_V2_LINE_EVENT_RISING_EDGE :
+				 GPIO_V2_LINE_EVENT_FALLING_EDGE;
+	} else {
+		le.timestamp_ns = line_event_timestamp(line);
+	}
+
+	lr = line->req;
+	le.offset = gpio_chip_hwgpio(line->desc);
+	line->line_seqno++;
+	le.line_seqno = line->line_seqno;
+	le.seqno = (lr->num_lines == 1) ?
+		le.line_seqno : atomic_inc_return(&lr->seqno);
+
 	linereq_put_event(lr, &le);
 }
 
@@ -891,7 +1003,6 @@ static int gpio_v2_line_flags_validate(u64 flags)
 	/* Return an error if an unknown flag is set */
 	if (flags & ~GPIO_V2_LINE_VALID_FLAGS)
 		return -EINVAL;
-
 	/*
 	 * Do not allow both INPUT and OUTPUT flags to be set as they are
 	 * contradictory.
@@ -900,6 +1011,11 @@ static int gpio_v2_line_flags_validate(u64 flags)
 	    (flags & GPIO_V2_LINE_FLAG_OUTPUT))
 		return -EINVAL;
 
+	/* Only allow one event clock source */
+	if ((flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME) &&
+	    (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HARDWARE))
+		return -EINVAL;
+
 	/* Edge detection requires explicit input. */
 	if ((flags & GPIO_V2_LINE_EDGE_FLAGS) &&
 	    !(flags & GPIO_V2_LINE_FLAG_INPUT))
@@ -992,6 +1108,8 @@ static void gpio_v2_line_config_flags_to_desc_flags(u64 flags,
 
 	assign_bit(FLAG_EVENT_CLOCK_REALTIME, flagsp,
 		   flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME);
+	assign_bit(FLAG_EVENT_CLOCK_HARDWARE, flagsp,
+		   flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HARDWARE);
 }
 
 static long linereq_get_values(struct linereq *lr, void __user *ip)
@@ -1154,6 +1272,21 @@ static long linereq_set_config_unlocked(struct linereq *lr,
 				return ret;
 		}
 
+		/* Check if new config sets hardware assisted clock */
+		if (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HARDWARE) {
+			ret = gpiod_req_hw_timestamp_ns(desc, process_hw_ts,
+							process_hw_ts_thread,
+							&lr->lines[i]);
+			if (ret)
+				return ret;
+		} else {
+			/*
+			 * The HTE subsystem does nothing if there is
+			 * nothing to release.
+			 */
+			gpiod_rel_hw_timestamp_ns(desc);
+		}
+
 		blocking_notifier_call_chain(&desc->gdev->notifier,
 					     GPIO_V2_LINE_CHANGED_CONFIG,
 					     desc);
@@ -1409,6 +1542,14 @@ static int linereq_create(struct gpio_device *gdev, void __user *ip)
 					flags & GPIO_V2_LINE_EDGE_FLAGS);
 			if (ret)
 				goto out_free_linereq;
+
+			if (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HARDWARE) {
+				ret = gpiod_req_hw_timestamp_ns(desc, process_hw_ts,
+							process_hw_ts_thread,
+							&lr->lines[i]);
+				if (ret)
+					goto out_free_linereq;
+			}
 		}
 
 		blocking_notifier_call_chain(&desc->gdev->notifier,
@@ -1959,6 +2100,8 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
 
 	if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &desc->flags))
 		info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;
+	else if (test_bit(FLAG_EVENT_CLOCK_HARDWARE, &desc->flags))
+		info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_HARDWARE;
 
 	debounce_period_us = READ_ONCE(desc->debounce_period_us);
 	if (debounce_period_us) {
diff --git a/include/uapi/linux/gpio.h b/include/uapi/linux/gpio.h
index eaaea3d8e6b4..d360545b4c21 100644
--- a/include/uapi/linux/gpio.h
+++ b/include/uapi/linux/gpio.h
@@ -80,6 +80,7 @@ enum gpio_v2_line_flag {
 	GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN	= _BITULL(9),
 	GPIO_V2_LINE_FLAG_BIAS_DISABLED		= _BITULL(10),
 	GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME	= _BITULL(11),
+	GPIO_V2_LINE_FLAG_EVENT_CLOCK_HARDWARE	= _BITULL(12),
 };
 
 /**
-- 
2.17.1

