linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
@ 2014-02-11 11:45 Philipp Zabel
       [not found] ` <20140320153804.35d5b835@samsung.com>
                   ` (4 more replies)
  0 siblings, 5 replies; 74+ messages in thread
From: Philipp Zabel @ 2014-02-11 11:45 UTC (permalink / raw)
  To: Russell King - ARM Linux, Mauro Carvalho Chehab
  Cc: Grant Likely, Rob Herring, Sylwester Nawrocki, Laurent Pinchart,
	Tomi Valkeinen, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel, Philipp Zabel

From: Philipp Zabel <philipp.zabel@gmail.com>

This patch moves the parsing helpers used to parse connected graphs
in the device tree, like the video interface bindings documented in
Documentation/devicetree/bindings/media/video-interfaces.txt, from
drivers/media/v4l2-core to drivers/of.

This allows to reuse the same parser code from outside the V4L2 framework,
most importantly from display drivers. There have been patches that duplicate
the code (and I am going to send one of my own), such as
http://lists.freedesktop.org/archives/dri-devel/2013-August/043308.html
and others that parse the same binding in a different way:
https://www.mail-archive.com/linux-omap@vger.kernel.org/msg100761.html

I think that all common video interface parsing helpers should be moved to a
single place, outside of the specific subsystems, so that it can be reused
by all drivers.

I moved v4l2_of_get_next_endpoint, v4l2_of_get_remote_port,
and v4l2_of_get_remote_port_parent. They are renamed to
of_graph_get_next_endpoint, of_graph_get_remote_port, and
of_graph_get_remote_port_parent, respectively.

Signed-off-by: Philipp Zabel <p.zabel@pengutronix.de>
---
 drivers/media/Kconfig             |   1 +
 drivers/media/v4l2-core/v4l2-of.c | 117 ---------------------------------
 drivers/of/Kconfig                |   3 +
 drivers/of/Makefile               |   1 +
 drivers/of/of_graph.c             | 133 ++++++++++++++++++++++++++++++++++++++
 include/linux/of_graph.h          |  23 +++++++
 include/media/v4l2-of.h           |  16 ++---
 7 files changed, 167 insertions(+), 127 deletions(-)
 create mode 100644 drivers/of/of_graph.c
 create mode 100644 include/linux/of_graph.h

diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 1d0758a..882faeb 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -96,6 +96,7 @@ config VIDEO_DEV
 	tristate
 	depends on MEDIA_SUPPORT
 	depends on MEDIA_CAMERA_SUPPORT || MEDIA_ANALOG_TV_SUPPORT || MEDIA_RADIO_SUPPORT
+	select OF_GRAPH if OF
 	default y
 
 config VIDEO_V4L2_SUBDEV_API
diff --git a/drivers/media/v4l2-core/v4l2-of.c b/drivers/media/v4l2-core/v4l2-of.c
index 42e3e8a..f919db3 100644
--- a/drivers/media/v4l2-core/v4l2-of.c
+++ b/drivers/media/v4l2-core/v4l2-of.c
@@ -152,120 +152,3 @@ int v4l2_of_parse_endpoint(const struct device_node *node,
 	return 0;
 }
 EXPORT_SYMBOL(v4l2_of_parse_endpoint);
-
-/**
- * v4l2_of_get_next_endpoint() - get next endpoint node
- * @parent: pointer to the parent device node
- * @prev: previous endpoint node, or NULL to get first
- *
- * Return: An 'endpoint' node pointer with refcount incremented. Refcount
- * of the passed @prev node is not decremented, the caller have to use
- * of_node_put() on it when done.
- */
-struct device_node *v4l2_of_get_next_endpoint(const struct device_node *parent,
-					struct device_node *prev)
-{
-	struct device_node *endpoint;
-	struct device_node *port = NULL;
-
-	if (!parent)
-		return NULL;
-
-	if (!prev) {
-		struct device_node *node;
-		/*
-		 * It's the first call, we have to find a port subnode
-		 * within this node or within an optional 'ports' node.
-		 */
-		node = of_get_child_by_name(parent, "ports");
-		if (node)
-			parent = node;
-
-		port = of_get_child_by_name(parent, "port");
-
-		if (port) {
-			/* Found a port, get an endpoint. */
-			endpoint = of_get_next_child(port, NULL);
-			of_node_put(port);
-		} else {
-			endpoint = NULL;
-		}
-
-		if (!endpoint)
-			pr_err("%s(): no endpoint nodes specified for %s\n",
-			       __func__, parent->full_name);
-		of_node_put(node);
-	} else {
-		port = of_get_parent(prev);
-		if (!port)
-			/* Hm, has someone given us the root node ?... */
-			return NULL;
-
-		/* Avoid dropping prev node refcount to 0. */
-		of_node_get(prev);
-		endpoint = of_get_next_child(port, prev);
-		if (endpoint) {
-			of_node_put(port);
-			return endpoint;
-		}
-
-		/* No more endpoints under this port, try the next one. */
-		do {
-			port = of_get_next_child(parent, port);
-			if (!port)
-				return NULL;
-		} while (of_node_cmp(port->name, "port"));
-
-		/* Pick up the first endpoint in this port. */
-		endpoint = of_get_next_child(port, NULL);
-		of_node_put(port);
-	}
-
-	return endpoint;
-}
-EXPORT_SYMBOL(v4l2_of_get_next_endpoint);
-
-/**
- * v4l2_of_get_remote_port_parent() - get remote port's parent node
- * @node: pointer to a local endpoint device_node
- *
- * Return: Remote device node associated with remote endpoint node linked
- *	   to @node. Use of_node_put() on it when done.
- */
-struct device_node *v4l2_of_get_remote_port_parent(
-			       const struct device_node *node)
-{
-	struct device_node *np;
-	unsigned int depth;
-
-	/* Get remote endpoint node. */
-	np = of_parse_phandle(node, "remote-endpoint", 0);
-
-	/* Walk 3 levels up only if there is 'ports' node. */
-	for (depth = 3; depth && np; depth--) {
-		np = of_get_next_parent(np);
-		if (depth == 2 && of_node_cmp(np->name, "ports"))
-			break;
-	}
-	return np;
-}
-EXPORT_SYMBOL(v4l2_of_get_remote_port_parent);
-
-/**
- * v4l2_of_get_remote_port() - get remote port node
- * @node: pointer to a local endpoint device_node
- *
- * Return: Remote port node associated with remote endpoint node linked
- *	   to @node. Use of_node_put() on it when done.
- */
-struct device_node *v4l2_of_get_remote_port(const struct device_node *node)
-{
-	struct device_node *np;
-
-	/* Get remote endpoint node. */
-	np = of_parse_phandle(node, "remote-endpoint", 0);
-	if (!np)
-		return NULL;
-	return of_get_next_parent(np);
-}
-EXPORT_SYMBOL(v4l2_of_get_remote_port);
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index c6973f1..1bfbb0e 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -75,4 +75,7 @@ config OF_MTD
 	depends on MTD
 	def_bool y
 
+config OF_GRAPH
+	bool
+
 endmenu # OF
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index efd0510..7ee8ab3 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_OF_MDIO)	+= of_mdio.o
 obj-$(CONFIG_OF_PCI)	+= of_pci.o
 obj-$(CONFIG_OF_PCI_IRQ)  += of_pci_irq.o
 obj-$(CONFIG_OF_MTD)	+= of_mtd.o
+obj-$(CONFIG_OF_GRAPH)	+= of_graph.o
diff --git a/drivers/of/of_graph.c b/drivers/of/of_graph.c
new file mode 100644
index 0000000..aa526d7
--- /dev/null
+++ b/drivers/of/of_graph.c
@@ -0,0 +1,133 @@
+/*
+ * OF graph binding parsing library
+ *
+ * Copyright (C) 2012 - 2013 Samsung Electronics Co., Ltd.
+ * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * Copyright (C) 2012 Renesas Electronics Corp.
+ * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/types.h>
+
+/**
+ * of_graph_get_next_endpoint() - get next endpoint node
+ * @parent: pointer to the parent device node
+ * @prev: previous endpoint node, or NULL to get first
+ *
+ * Return: An 'endpoint' node pointer with refcount incremented. Refcount
+ * of the passed @prev node is not decremented, the caller have to use
+ * of_node_put() on it when done.
+ */
+struct device_node *of_graph_get_next_endpoint(const struct device_node *parent,
+					struct device_node *prev)
+{
+	struct device_node *endpoint;
+	struct device_node *port = NULL;
+
+	if (!parent)
+		return NULL;
+
+	if (!prev) {
+		struct device_node *node;
+		/*
+		 * It's the first call, we have to find a port subnode
+		 * within this node or within an optional 'ports' node.
+		 */
+		node = of_get_child_by_name(parent, "ports");
+		if (node)
+			parent = node;
+
+		port = of_get_child_by_name(parent, "port");
+
+		if (port) {
+			/* Found a port, get an endpoint. */
+			endpoint = of_get_next_child(port, NULL);
+			of_node_put(port);
+		} else {
+			endpoint = NULL;
+		}
+
+		if (!endpoint)
+			pr_err("%s(): no endpoint nodes specified for %s\n",
+			       __func__, parent->full_name);
+		of_node_put(node);
+	} else {
+		port = of_get_parent(prev);
+		if (!port)
+			/* Hm, has someone given us the root node ?... */
+			return NULL;
+
+		/* Avoid dropping prev node refcount to 0. */
+		of_node_get(prev);
+		endpoint = of_get_next_child(port, prev);
+		if (endpoint) {
+			of_node_put(port);
+			return endpoint;
+		}
+
+		/* No more endpoints under this port, try the next one. */
+		do {
+			port = of_get_next_child(parent, port);
+			if (!port)
+				return NULL;
+		} while (of_node_cmp(port->name, "port"));
+
+		/* Pick up the first endpoint in this port. */
+		endpoint = of_get_next_child(port, NULL);
+		of_node_put(port);
+	}
+
+	return endpoint;
+}
+EXPORT_SYMBOL(of_graph_get_next_endpoint);
+
+/**
+ * of_graph_get_remote_port_parent() - get remote port's parent node
+ * @node: pointer to a local endpoint device_node
+ *
+ * Return: Remote device node associated with remote endpoint node linked
+ *	   to @node. Use of_node_put() on it when done.
+ */
+struct device_node *of_graph_get_remote_port_parent(
+			       const struct device_node *node)
+{
+	struct device_node *np;
+	unsigned int depth;
+
+	/* Get remote endpoint node. */
+	np = of_parse_phandle(node, "remote-endpoint", 0);
+
+	/* Walk 3 levels up only if there is 'ports' node. */
+	for (depth = 3; depth && np; depth--) {
+		np = of_get_next_parent(np);
+		if (depth == 2 && of_node_cmp(np->name, "ports"))
+			break;
+	}
+	return np;
+}
+EXPORT_SYMBOL(of_graph_get_remote_port_parent);
+
+/**
+ * of_graph_get_remote_port() - get remote port node
+ * @node: pointer to a local endpoint device_node
+ *
+ * Return: Remote port node associated with remote endpoint node linked
+ *	   to @node. Use of_node_put() on it when done.
+ */
+struct device_node *of_graph_get_remote_port(const struct device_node *node)
+{
+	struct device_node *np;
+
+	/* Get remote endpoint node. */
+	np = of_parse_phandle(node, "remote-endpoint", 0);
+	if (!np)
+		return NULL;
+	return of_get_next_parent(np);
+}
+EXPORT_SYMBOL(of_graph_get_remote_port);
diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h
new file mode 100644
index 0000000..352306a
--- /dev/null
+++ b/include/linux/of_graph.h
@@ -0,0 +1,23 @@
+/*
+ * OF graph binding parsing helpers
+ *
+ * Copyright (C) 2012 - 2013 Samsung Electronics Co., Ltd.
+ * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * Copyright (C) 2012 Renesas Electronics Corp.
+ * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+#ifndef __LINUX_OF_GRAPH_H
+#define __LINUX_OF_GRAPH_H
+
+struct device_node *of_graph_get_next_endpoint(const struct device_node *parent,
+					struct device_node *previous);
+struct device_node *of_graph_get_remote_port_parent(
+					const struct device_node *node);
+struct device_node *of_graph_get_remote_port(const struct device_node *node);
+
+#endif /* __LINUX_OF_GRAPH_H */
diff --git a/include/media/v4l2-of.h b/include/media/v4l2-of.h
index 541cea4..404a493 100644
--- a/include/media/v4l2-of.h
+++ b/include/media/v4l2-of.h
@@ -17,6 +17,7 @@
 #include <linux/list.h>
 #include <linux/types.h>
 #include <linux/errno.h>
+#include <linux/of_graph.h>
 
 #include <media/v4l2-mediabus.h>
 
@@ -72,11 +73,6 @@ struct v4l2_of_endpoint {
 #ifdef CONFIG_OF
 int v4l2_of_parse_endpoint(const struct device_node *node,
 			   struct v4l2_of_endpoint *endpoint);
-struct device_node *v4l2_of_get_next_endpoint(const struct device_node *parent,
-					struct device_node *previous);
-struct device_node *v4l2_of_get_remote_port_parent(
-					const struct device_node *node);
-struct device_node *v4l2_of_get_remote_port(const struct device_node *node);
 #else /* CONFIG_OF */
 
 static inline int v4l2_of_parse_endpoint(const struct device_node *node,
@@ -85,25 +81,25 @@ static inline int v4l2_of_parse_endpoint(const struct device_node *node,
 	return -ENOSYS;
 }
 
+#endif /* CONFIG_OF */
+
 static inline struct device_node *v4l2_of_get_next_endpoint(
 					const struct device_node *parent,
 					struct device_node *previous)
 {
-	return NULL;
+	return of_graph_get_next_endpoint(parent, previous);
 }
 
 static inline struct device_node *v4l2_of_get_remote_port_parent(
 					const struct device_node *node)
 {
-	return NULL;
+	return of_graph_get_remote_port_parent(node);
 }
 
 static inline struct device_node *v4l2_of_get_remote_port(
 					const struct device_node *node)
 {
-	return NULL;
+	return of_graph_get_remote_port(node);
 }
 
-#endif /* CONFIG_OF */
-
 #endif /* _V4L2_OF_H */
-- 
1.8.5.3


^ permalink raw reply related	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-02-11 11:45 [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of Philipp Zabel
                   ` (3 preceding siblings ...)
       [not found] ` <20140310145815.17595C405FA@trevor.secretlab.ca>
@ 2014-02-11 13:56 ` Rob Herring
  2014-02-11 14:52   ` Russell King - ARM Linux
                     ` (2 more replies)
  4 siblings, 3 replies; 74+ messages in thread
From: Rob Herring @ 2014-02-11 13:56 UTC (permalink / raw)
  To: Philipp Zabel
  Cc: Russell King - ARM Linux, Mauro Carvalho Chehab, Grant Likely,
	Rob Herring, Sylwester Nawrocki, Laurent Pinchart,
	Tomi Valkeinen, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

On Tue, Feb 11, 2014 at 5:45 AM, Philipp Zabel <p.zabel@pengutronix.de> wrote:
> From: Philipp Zabel <philipp.zabel@gmail.com>
>
> This patch moves the parsing helpers used to parse connected graphs
> in the device tree, like the video interface bindings documented in
> Documentation/devicetree/bindings/media/video-interfaces.txt, from
> drivers/media/v4l2-core to drivers/of.

This is the opposite direction things have been moving...

> This allows to reuse the same parser code from outside the V4L2 framework,
> most importantly from display drivers. There have been patches that duplicate
> the code (and I am going to send one of my own), such as
> http://lists.freedesktop.org/archives/dri-devel/2013-August/043308.html
> and others that parse the same binding in a different way:
> https://www.mail-archive.com/linux-omap@vger.kernel.org/msg100761.html
>
> I think that all common video interface parsing helpers should be moved to a
> single place, outside of the specific subsystems, so that it can be reused
> by all drivers.

Perhaps that should be done rather than moving to drivers/of now and
then again to somewhere else.

> I moved v4l2_of_get_next_endpoint, v4l2_of_get_remote_port,
> and v4l2_of_get_remote_port_parent. They are renamed to
> of_graph_get_next_endpoint, of_graph_get_remote_port, and
> of_graph_get_remote_port_parent, respectively.
>
> Signed-off-by: Philipp Zabel <p.zabel@pengutronix.de>
> ---
>  drivers/media/Kconfig             |   1 +
>  drivers/media/v4l2-core/v4l2-of.c | 117 ---------------------------------
>  drivers/of/Kconfig                |   3 +
>  drivers/of/Makefile               |   1 +
>  drivers/of/of_graph.c             | 133 ++++++++++++++++++++++++++++++++++++++
>  include/linux/of_graph.h          |  23 +++++++
>  include/media/v4l2-of.h           |  16 ++---
>  7 files changed, 167 insertions(+), 127 deletions(-)
>  create mode 100644 drivers/of/of_graph.c
>  create mode 100644 include/linux/of_graph.h

[snip]

> diff --git a/include/media/v4l2-of.h b/include/media/v4l2-of.h
> index 541cea4..404a493 100644
> --- a/include/media/v4l2-of.h
> +++ b/include/media/v4l2-of.h
> @@ -17,6 +17,7 @@
>  #include <linux/list.h>
>  #include <linux/types.h>
>  #include <linux/errno.h>
> +#include <linux/of_graph.h>
>
>  #include <media/v4l2-mediabus.h>
>
> @@ -72,11 +73,6 @@ struct v4l2_of_endpoint {
>  #ifdef CONFIG_OF
>  int v4l2_of_parse_endpoint(const struct device_node *node,
>                            struct v4l2_of_endpoint *endpoint);
> -struct device_node *v4l2_of_get_next_endpoint(const struct device_node *parent,
> -                                       struct device_node *previous);
> -struct device_node *v4l2_of_get_remote_port_parent(
> -                                       const struct device_node *node);
> -struct device_node *v4l2_of_get_remote_port(const struct device_node *node);
>  #else /* CONFIG_OF */
>
>  static inline int v4l2_of_parse_endpoint(const struct device_node *node,
> @@ -85,25 +81,25 @@ static inline int v4l2_of_parse_endpoint(const struct device_node *node,
>         return -ENOSYS;
>  }
>
> +#endif /* CONFIG_OF */
> +
>  static inline struct device_node *v4l2_of_get_next_endpoint(
>                                         const struct device_node *parent,
>                                         struct device_node *previous)
>  {
> -       return NULL;
> +       return of_graph_get_next_endpoint(parent, previous);

Won't this break for !OF?

Rob

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-02-11 13:56 ` Rob Herring
@ 2014-02-11 14:52   ` Russell King - ARM Linux
  2014-02-11 15:23     ` Laurent Pinchart
  2014-02-11 16:30     ` Rob Herring
  2014-02-11 15:27   ` Philipp Zabel
  2014-02-17 18:14   ` Grant Likely
  2 siblings, 2 replies; 74+ messages in thread
From: Russell King - ARM Linux @ 2014-02-11 14:52 UTC (permalink / raw)
  To: Rob Herring
  Cc: Philipp Zabel, Mauro Carvalho Chehab, Grant Likely, Rob Herring,
	Sylwester Nawrocki, Laurent Pinchart, Tomi Valkeinen,
	Kyungmin Park, linux-kernel, linux-media, devicetree,
	Philipp Zabel

On Tue, Feb 11, 2014 at 07:56:33AM -0600, Rob Herring wrote:
> On Tue, Feb 11, 2014 at 5:45 AM, Philipp Zabel <p.zabel@pengutronix.de> wrote:
> > This allows to reuse the same parser code from outside the V4L2 framework,
> > most importantly from display drivers. There have been patches that duplicate
> > the code (and I am going to send one of my own), such as
> > http://lists.freedesktop.org/archives/dri-devel/2013-August/043308.html
> > and others that parse the same binding in a different way:
> > https://www.mail-archive.com/linux-omap@vger.kernel.org/msg100761.html
> >
> > I think that all common video interface parsing helpers should be moved to a
> > single place, outside of the specific subsystems, so that it can be reused
> > by all drivers.
> 
> Perhaps that should be done rather than moving to drivers/of now and
> then again to somewhere else.

Do you have a better suggestion where it should move to?

drivers/gpu/drm - no, because v4l2 wants to use it
drivers/media/video - no, because DRM drivers want to use it
drivers/video - no, because v4l2 and drm drivers want to use it

Maybe drivers/of-graph/ ?  Or maybe it's just as good a place to move it
into drivers/of ?

-- 
FTTC broadband for 0.8mile line: 5.8Mbps down 500kbps up.  Estimation
in database were 13.1 to 19Mbit for a good line, about 7.5+ for a bad.
Estimate before purchase was "up to 13.2Mbit".

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-02-11 14:52   ` Russell King - ARM Linux
@ 2014-02-11 15:23     ` Laurent Pinchart
  2014-02-11 16:36       ` Philipp Zabel
  2014-02-11 16:30     ` Rob Herring
  1 sibling, 1 reply; 74+ messages in thread
From: Laurent Pinchart @ 2014-02-11 15:23 UTC (permalink / raw)
  To: Russell King - ARM Linux
  Cc: Rob Herring, Philipp Zabel, Mauro Carvalho Chehab, Grant Likely,
	Rob Herring, Sylwester Nawrocki, Tomi Valkeinen, Kyungmin Park,
	linux-kernel, linux-media, devicetree, Philipp Zabel

Hi Russell,

On Tuesday 11 February 2014 14:52:48 Russell King - ARM Linux wrote:
> On Tue, Feb 11, 2014 at 07:56:33AM -0600, Rob Herring wrote:
> > On Tue, Feb 11, 2014 at 5:45 AM, Philipp Zabel wrote:
> > > This allows to reuse the same parser code from outside the V4L2
> > > framework, most importantly from display drivers. There have been
> > > patches that duplicate the code (and I am going to send one of my own),
> > > such as
> > > http://lists.freedesktop.org/archives/dri-devel/2013-August/043308.html
> > > and others that parse the same binding in a different way:
> > > https://www.mail-archive.com/linux-omap@vger.kernel.org/msg100761.html
> > > 
> > > I think that all common video interface parsing helpers should be moved
> > > to a single place, outside of the specific subsystems, so that it can
> > > be reused by all drivers.
> > 
> > Perhaps that should be done rather than moving to drivers/of now and
> > then again to somewhere else.
> 
> Do you have a better suggestion where it should move to?
> 
> drivers/gpu/drm - no, because v4l2 wants to use it
> drivers/media/video - no, because DRM drivers want to use it
> drivers/video - no, because v4l2 and drm drivers want to use it

Just pointing out a missing location (which might be rejected due to similar 
concerns), there's also drivers/media, which isn't V4L-specific.

> Maybe drivers/of-graph/ ?  Or maybe it's just as good a place to move it
> into drivers/of ?

-- 
Regards,

Laurent Pinchart


^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-02-11 13:56 ` Rob Herring
  2014-02-11 14:52   ` Russell King - ARM Linux
@ 2014-02-11 15:27   ` Philipp Zabel
  2014-02-17 18:14   ` Grant Likely
  2 siblings, 0 replies; 74+ messages in thread
From: Philipp Zabel @ 2014-02-11 15:27 UTC (permalink / raw)
  To: Rob Herring
  Cc: Russell King - ARM Linux, Mauro Carvalho Chehab, Grant Likely,
	Rob Herring, Sylwester Nawrocki, Laurent Pinchart,
	Tomi Valkeinen, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

Hi Rob,

Am Dienstag, den 11.02.2014, 07:56 -0600 schrieb Rob Herring:
> On Tue, Feb 11, 2014 at 5:45 AM, Philipp Zabel <p.zabel@pengutronix.de> wrote:
> > From: Philipp Zabel <philipp.zabel@gmail.com>
> >
> > This patch moves the parsing helpers used to parse connected graphs
> > in the device tree, like the video interface bindings documented in
> > Documentation/devicetree/bindings/media/video-interfaces.txt, from
> > drivers/media/v4l2-core to drivers/of.
> 
> This is the opposite direction things have been moving...

I understand subsystem specific functionality is moving from drivers/of
into the subsystems. In this case three subsystems all could benefit
from the same set of parsing functions, so it is not clear to me where
if not drivers/of would be the correct place for this code.

> > This allows to reuse the same parser code from outside the V4L2 framework,
> > most importantly from display drivers. There have been patches that duplicate
> > the code (and I am going to send one of my own), such as
> > http://lists.freedesktop.org/archives/dri-devel/2013-August/043308.html
> > and others that parse the same binding in a different way:
> > https://www.mail-archive.com/linux-omap@vger.kernel.org/msg100761.html
> >
> > I think that all common video interface parsing helpers should be moved to a
> > single place, outside of the specific subsystems, so that it can be reused
> > by all drivers.
> 
> Perhaps that should be done rather than moving to drivers/of now and
> then again to somewhere else.
>
> > I moved v4l2_of_get_next_endpoint, v4l2_of_get_remote_port,
> > and v4l2_of_get_remote_port_parent. They are renamed to
> > of_graph_get_next_endpoint, of_graph_get_remote_port, and
> > of_graph_get_remote_port_parent, respectively.
> >
> > Signed-off-by: Philipp Zabel <p.zabel@pengutronix.de>
> > ---
> >  drivers/media/Kconfig             |   1 +
> >  drivers/media/v4l2-core/v4l2-of.c | 117 ---------------------------------
> >  drivers/of/Kconfig                |   3 +
> >  drivers/of/Makefile               |   1 +
> >  drivers/of/of_graph.c             | 133 ++++++++++++++++++++++++++++++++++++++
> >  include/linux/of_graph.h          |  23 +++++++
> >  include/media/v4l2-of.h           |  16 ++---
> >  7 files changed, 167 insertions(+), 127 deletions(-)
> >  create mode 100644 drivers/of/of_graph.c
> >  create mode 100644 include/linux/of_graph.h
> 
> [snip]
> 
> > diff --git a/include/media/v4l2-of.h b/include/media/v4l2-of.h
> > index 541cea4..404a493 100644
> > --- a/include/media/v4l2-of.h
> > +++ b/include/media/v4l2-of.h
> > @@ -17,6 +17,7 @@
> >  #include <linux/list.h>
> >  #include <linux/types.h>
> >  #include <linux/errno.h>
> > +#include <linux/of_graph.h>
> >
> >  #include <media/v4l2-mediabus.h>
> >
> > @@ -72,11 +73,6 @@ struct v4l2_of_endpoint {
> >  #ifdef CONFIG_OF
> >  int v4l2_of_parse_endpoint(const struct device_node *node,
> >                            struct v4l2_of_endpoint *endpoint);
> > -struct device_node *v4l2_of_get_next_endpoint(const struct device_node *parent,
> > -                                       struct device_node *previous);
> > -struct device_node *v4l2_of_get_remote_port_parent(
> > -                                       const struct device_node *node);
> > -struct device_node *v4l2_of_get_remote_port(const struct device_node *node);
> >  #else /* CONFIG_OF */
> >
> >  static inline int v4l2_of_parse_endpoint(const struct device_node *node,
> > @@ -85,25 +81,25 @@ static inline int v4l2_of_parse_endpoint(const struct device_node *node,
> >         return -ENOSYS;
> >  }
> >
> > +#endif /* CONFIG_OF */
> > +
> >  static inline struct device_node *v4l2_of_get_next_endpoint(
> >                                         const struct device_node *parent,
> >                                         struct device_node *previous)
> >  {
> > -       return NULL;
> > +       return of_graph_get_next_endpoint(parent, previous);
> 
> Won't this break for !OF?

It will. The of_graph_* functions should get their own stubs for that
case.

regards
Philipp


^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-02-11 14:52   ` Russell King - ARM Linux
  2014-02-11 15:23     ` Laurent Pinchart
@ 2014-02-11 16:30     ` Rob Herring
  1 sibling, 0 replies; 74+ messages in thread
From: Rob Herring @ 2014-02-11 16:30 UTC (permalink / raw)
  To: Russell King - ARM Linux
  Cc: Philipp Zabel, Mauro Carvalho Chehab, Grant Likely, Rob Herring,
	Sylwester Nawrocki, Laurent Pinchart, Tomi Valkeinen,
	Kyungmin Park, linux-kernel, linux-media, devicetree,
	Philipp Zabel

On Tue, Feb 11, 2014 at 8:52 AM, Russell King - ARM Linux
<linux@arm.linux.org.uk> wrote:
> On Tue, Feb 11, 2014 at 07:56:33AM -0600, Rob Herring wrote:
>> On Tue, Feb 11, 2014 at 5:45 AM, Philipp Zabel <p.zabel@pengutronix.de> wrote:
>> > This allows to reuse the same parser code from outside the V4L2 framework,
>> > most importantly from display drivers. There have been patches that duplicate
>> > the code (and I am going to send one of my own), such as
>> > http://lists.freedesktop.org/archives/dri-devel/2013-August/043308.html
>> > and others that parse the same binding in a different way:
>> > https://www.mail-archive.com/linux-omap@vger.kernel.org/msg100761.html
>> >
>> > I think that all common video interface parsing helpers should be moved to a
>> > single place, outside of the specific subsystems, so that it can be reused
>> > by all drivers.
>>
>> Perhaps that should be done rather than moving to drivers/of now and
>> then again to somewhere else.
>
> Do you have a better suggestion where it should move to?

No.

> drivers/gpu/drm - no, because v4l2 wants to use it
> drivers/media/video - no, because DRM drivers want to use it
> drivers/video - no, because v4l2 and drm drivers want to use it

I don't believe it exists currently, so it would need to be created.
Perhaps adding a layer of directory to combine these. This patch alone
is not enough to really justify that, but if there's a lot more shared
code possible then it would be the right direction.

> Maybe drivers/of-graph/ ?  Or maybe it's just as good a place to move it
> into drivers/of ?

I assume you weren't serious, but no for /of-graph. If a better place
can't be found/made, I'll take it.

Rob

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-02-11 15:23     ` Laurent Pinchart
@ 2014-02-11 16:36       ` Philipp Zabel
  2014-02-11 17:22         ` Sylwester Nawrocki
  2014-02-11 17:24         ` Laurent Pinchart
  0 siblings, 2 replies; 74+ messages in thread
From: Philipp Zabel @ 2014-02-11 16:36 UTC (permalink / raw)
  To: Laurent Pinchart
  Cc: Russell King - ARM Linux, Rob Herring, Mauro Carvalho Chehab,
	Grant Likely, Rob Herring, Sylwester Nawrocki, Tomi Valkeinen,
	Kyungmin Park, linux-kernel, linux-media, devicetree,
	Philipp Zabel

Am Dienstag, den 11.02.2014, 16:23 +0100 schrieb Laurent Pinchart:
> Hi Russell,
> 
> On Tuesday 11 February 2014 14:52:48 Russell King - ARM Linux wrote:
> > On Tue, Feb 11, 2014 at 07:56:33AM -0600, Rob Herring wrote:
> > > On Tue, Feb 11, 2014 at 5:45 AM, Philipp Zabel wrote:
> > > > This allows to reuse the same parser code from outside the V4L2
> > > > framework, most importantly from display drivers. There have been
> > > > patches that duplicate the code (and I am going to send one of my own),
> > > > such as
> > > > http://lists.freedesktop.org/archives/dri-devel/2013-August/043308.html
> > > > and others that parse the same binding in a different way:
> > > > https://www.mail-archive.com/linux-omap@vger.kernel.org/msg100761.html
> > > > 
> > > > I think that all common video interface parsing helpers should be moved
> > > > to a single place, outside of the specific subsystems, so that it can
> > > > be reused by all drivers.
> > > 
> > > Perhaps that should be done rather than moving to drivers/of now and
> > > then again to somewhere else.
> > 
> > Do you have a better suggestion where it should move to?
> > 
> > drivers/gpu/drm - no, because v4l2 wants to use it
> > drivers/media/video - no, because DRM drivers want to use it
> > drivers/video - no, because v4l2 and drm drivers want to use it
> 
> Just pointing out a missing location (which might be rejected due to similar 
> concerns), there's also drivers/media, which isn't V4L-specific.

Since drivers/Makefile has media/ in obj-y, moving the graph helpers to
drivers/media should technically work.

> > Maybe drivers/of-graph/ ?  Or maybe it's just as good a place to move it
> > into drivers/of ?

include/media/of_graph.h,
drivers/media/of_graph.c?

regards
Philipp


^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-02-11 16:36       ` Philipp Zabel
@ 2014-02-11 17:22         ` Sylwester Nawrocki
  2014-02-11 17:41           ` Mauro Carvalho Chehab
  2014-02-11 21:46           ` Philipp Zabel
  2014-02-11 17:24         ` Laurent Pinchart
  1 sibling, 2 replies; 74+ messages in thread
From: Sylwester Nawrocki @ 2014-02-11 17:22 UTC (permalink / raw)
  To: Philipp Zabel, Laurent Pinchart
  Cc: Russell King - ARM Linux, Rob Herring, Mauro Carvalho Chehab,
	Grant Likely, Rob Herring, Tomi Valkeinen, Kyungmin Park,
	linux-kernel, linux-media, devicetree, Philipp Zabel,
	Guennadi Liakhovetski

(adding Guennadi to Cc)

On 11/02/14 17:36, Philipp Zabel wrote:
> Am Dienstag, den 11.02.2014, 16:23 +0100 schrieb Laurent Pinchart:
>> Hi Russell,
>>
>> On Tuesday 11 February 2014 14:52:48 Russell King - ARM Linux wrote:
>>> On Tue, Feb 11, 2014 at 07:56:33AM -0600, Rob Herring wrote:
>>>> On Tue, Feb 11, 2014 at 5:45 AM, Philipp Zabel wrote:
>>>>> This allows to reuse the same parser code from outside the V4L2
>>>>> framework, most importantly from display drivers. There have been
>>>>> patches that duplicate the code (and I am going to send one of my own),
>>>>> such as
>>>>> http://lists.freedesktop.org/archives/dri-devel/2013-August/043308.html
>>>>> and others that parse the same binding in a different way:
>>>>> https://www.mail-archive.com/linux-omap@vger.kernel.org/msg100761.html
>>>>>
>>>>> I think that all common video interface parsing helpers should be moved
>>>>> to a single place, outside of the specific subsystems, so that it can
>>>>> be reused by all drivers.
>>>>
>>>> Perhaps that should be done rather than moving to drivers/of now and
>>>> then again to somewhere else.
>>>
>>> Do you have a better suggestion where it should move to?
>>>
>>> drivers/gpu/drm - no, because v4l2 wants to use it
>>> drivers/media/video - no, because DRM drivers want to use it
>>> drivers/video - no, because v4l2 and drm drivers want to use it
>>
>> Just pointing out a missing location (which might be rejected due to similar 
>> concerns), there's also drivers/media, which isn't V4L-specific.
> 
> Since drivers/Makefile has media/ in obj-y, moving the graph helpers to
> drivers/media should technically work.
> 
>>> Maybe drivers/of-graph/ ?  Or maybe it's just as good a place to move it
>>> into drivers/of ?
> 
> include/media/of_graph.h,
> drivers/media/of_graph.c?

drivers/media sounds like a good alternative to me.

I would just remove also v4l2_of_{parse/get}* and update the users
to call of_graph_* directly, there should not be many of them.

--
Thanks,
Sylwester


^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-02-11 16:36       ` Philipp Zabel
  2014-02-11 17:22         ` Sylwester Nawrocki
@ 2014-02-11 17:24         ` Laurent Pinchart
  1 sibling, 0 replies; 74+ messages in thread
From: Laurent Pinchart @ 2014-02-11 17:24 UTC (permalink / raw)
  To: Philipp Zabel
  Cc: Russell King - ARM Linux, Rob Herring, Mauro Carvalho Chehab,
	Grant Likely, Rob Herring, Sylwester Nawrocki, Tomi Valkeinen,
	Kyungmin Park, linux-kernel, linux-media, devicetree,
	Philipp Zabel

Hi Philipp,

On Tuesday 11 February 2014 17:36:57 Philipp Zabel wrote:
> Am Dienstag, den 11.02.2014, 16:23 +0100 schrieb Laurent Pinchart:
> > On Tuesday 11 February 2014 14:52:48 Russell King - ARM Linux wrote:
> > > On Tue, Feb 11, 2014 at 07:56:33AM -0600, Rob Herring wrote:
> > > > On Tue, Feb 11, 2014 at 5:45 AM, Philipp Zabel wrote:
> > > > > This allows to reuse the same parser code from outside the V4L2
> > > > > framework, most importantly from display drivers. There have been
> > > > > patches that duplicate the code (and I am going to send one of my
> > > > > own),
> > > > > such as
> > > > > http://lists.freedesktop.org/archives/dri-devel/2013-August/043308.h
> > > > > tml
> > > > > and others that parse the same binding in a different way:
> > > > > https://www.mail-archive.com/linux-omap@vger.kernel.org/msg100761.ht
> > > > > ml
> > > > > 
> > > > > I think that all common video interface parsing helpers should be
> > > > > moved to a single place, outside of the specific subsystems, so that
> > > > > it can be reused by all drivers.
> > > > 
> > > > Perhaps that should be done rather than moving to drivers/of now and
> > > > then again to somewhere else.
> > > 
> > > Do you have a better suggestion where it should move to?
> > > 
> > > drivers/gpu/drm - no, because v4l2 wants to use it
> > > drivers/media/video - no, because DRM drivers want to use it
> > > drivers/video - no, because v4l2 and drm drivers want to use it
> > 
> > Just pointing out a missing location (which might be rejected due to
> > similar concerns), there's also drivers/media, which isn't V4L-specific.
> 
> Since drivers/Makefile has media/ in obj-y, moving the graph helpers to
> drivers/media should technically work.
> 
> > > Maybe drivers/of-graph/ ?  Or maybe it's just as good a place to move it
> > > into drivers/of ?
> 
> include/media/of_graph.h,
> drivers/media/of_graph.c?

I'm personally fine with that.

-- 
Regards,

Laurent Pinchart


^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-02-11 17:22         ` Sylwester Nawrocki
@ 2014-02-11 17:41           ` Mauro Carvalho Chehab
  2014-02-11 21:00             ` Guennadi Liakhovetski
  2014-02-11 21:46           ` Philipp Zabel
  1 sibling, 1 reply; 74+ messages in thread
From: Mauro Carvalho Chehab @ 2014-02-11 17:41 UTC (permalink / raw)
  To: Sylwester Nawrocki
  Cc: Philipp Zabel, Laurent Pinchart, Russell King - ARM Linux,
	Rob Herring, Grant Likely, Rob Herring, Tomi Valkeinen,
	Kyungmin Park, linux-kernel, linux-media, devicetree,
	Philipp Zabel, Guennadi Liakhovetski

Em Tue, 11 Feb 2014 18:22:34 +0100
Sylwester Nawrocki <s.nawrocki@samsung.com> escreveu:

> (adding Guennadi to Cc)
> 
> On 11/02/14 17:36, Philipp Zabel wrote:
> > Am Dienstag, den 11.02.2014, 16:23 +0100 schrieb Laurent Pinchart:
> >> Hi Russell,
> >>
> >> On Tuesday 11 February 2014 14:52:48 Russell King - ARM Linux wrote:
> >>> On Tue, Feb 11, 2014 at 07:56:33AM -0600, Rob Herring wrote:
> >>>> On Tue, Feb 11, 2014 at 5:45 AM, Philipp Zabel wrote:
> >>>>> This allows to reuse the same parser code from outside the V4L2
> >>>>> framework, most importantly from display drivers. There have been
> >>>>> patches that duplicate the code (and I am going to send one of my own),
> >>>>> such as
> >>>>> http://lists.freedesktop.org/archives/dri-devel/2013-August/043308.html
> >>>>> and others that parse the same binding in a different way:
> >>>>> https://www.mail-archive.com/linux-omap@vger.kernel.org/msg100761.html
> >>>>>
> >>>>> I think that all common video interface parsing helpers should be moved
> >>>>> to a single place, outside of the specific subsystems, so that it can
> >>>>> be reused by all drivers.
> >>>>
> >>>> Perhaps that should be done rather than moving to drivers/of now and
> >>>> then again to somewhere else.
> >>>
> >>> Do you have a better suggestion where it should move to?
> >>>
> >>> drivers/gpu/drm - no, because v4l2 wants to use it
> >>> drivers/media/video - no, because DRM drivers want to use it
> >>> drivers/video - no, because v4l2 and drm drivers want to use it
> >>
> >> Just pointing out a missing location (which might be rejected due to similar 
> >> concerns), there's also drivers/media, which isn't V4L-specific.
> > 
> > Since drivers/Makefile has media/ in obj-y, moving the graph helpers to
> > drivers/media should technically work.
> > 
> >>> Maybe drivers/of-graph/ ?  Or maybe it's just as good a place to move it
> >>> into drivers/of ?
> > 
> > include/media/of_graph.h,
> > drivers/media/of_graph.c?
> 
> drivers/media sounds like a good alternative to me.

From my side, I'm ok with putting them at drivers/media. You may add my acked-by
for such change:

Acked-by: Mauro Carvalho Chehab <m.chehab@samsung.com>

> I would just remove also v4l2_of_{parse/get}* and update the users
> to call of_graph_* directly, there should not be many of them.
> 
> --
> Thanks,
> Sylwester
> 


-- 

Cheers,
Mauro

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-02-11 17:41           ` Mauro Carvalho Chehab
@ 2014-02-11 21:00             ` Guennadi Liakhovetski
  0 siblings, 0 replies; 74+ messages in thread
From: Guennadi Liakhovetski @ 2014-02-11 21:00 UTC (permalink / raw)
  To: Mauro Carvalho Chehab
  Cc: Sylwester Nawrocki, Philipp Zabel, Laurent Pinchart,
	Russell King - ARM Linux, Rob Herring, Grant Likely, Rob Herring,
	Tomi Valkeinen, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

On Wed, 12 Feb 2014, Mauro Carvalho Chehab wrote:

> Em Tue, 11 Feb 2014 18:22:34 +0100
> Sylwester Nawrocki <s.nawrocki@samsung.com> escreveu:
> 
> > (adding Guennadi to Cc)
> > 
> > On 11/02/14 17:36, Philipp Zabel wrote:
> > > Am Dienstag, den 11.02.2014, 16:23 +0100 schrieb Laurent Pinchart:
> > >> Hi Russell,
> > >>
> > >> On Tuesday 11 February 2014 14:52:48 Russell King - ARM Linux wrote:
> > >>> On Tue, Feb 11, 2014 at 07:56:33AM -0600, Rob Herring wrote:
> > >>>> On Tue, Feb 11, 2014 at 5:45 AM, Philipp Zabel wrote:
> > >>>>> This allows to reuse the same parser code from outside the V4L2
> > >>>>> framework, most importantly from display drivers. There have been
> > >>>>> patches that duplicate the code (and I am going to send one of my own),
> > >>>>> such as
> > >>>>> http://lists.freedesktop.org/archives/dri-devel/2013-August/043308.html
> > >>>>> and others that parse the same binding in a different way:
> > >>>>> https://www.mail-archive.com/linux-omap@vger.kernel.org/msg100761.html
> > >>>>>
> > >>>>> I think that all common video interface parsing helpers should be moved
> > >>>>> to a single place, outside of the specific subsystems, so that it can
> > >>>>> be reused by all drivers.
> > >>>>
> > >>>> Perhaps that should be done rather than moving to drivers/of now and
> > >>>> then again to somewhere else.
> > >>>
> > >>> Do you have a better suggestion where it should move to?
> > >>>
> > >>> drivers/gpu/drm - no, because v4l2 wants to use it
> > >>> drivers/media/video - no, because DRM drivers want to use it
> > >>> drivers/video - no, because v4l2 and drm drivers want to use it
> > >>
> > >> Just pointing out a missing location (which might be rejected due to similar 
> > >> concerns), there's also drivers/media, which isn't V4L-specific.
> > > 
> > > Since drivers/Makefile has media/ in obj-y, moving the graph helpers to
> > > drivers/media should technically work.
> > > 
> > >>> Maybe drivers/of-graph/ ?  Or maybe it's just as good a place to move it
> > >>> into drivers/of ?
> > > 
> > > include/media/of_graph.h,
> > > drivers/media/of_graph.c?
> > 
> > drivers/media sounds like a good alternative to me.
> 
> From my side, I'm ok with putting them at drivers/media. You may add my acked-by
> for such change:
> 
> Acked-by: Mauro Carvalho Chehab <m.chehab@samsung.com>

Cannot see any problems with this

Acked-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>

Thanks
Guennadi

> > I would just remove also v4l2_of_{parse/get}* and update the users
> > to call of_graph_* directly, there should not be many of them.
> > 
> > --
> > Thanks,
> > Sylwester
> 
> -- 
> 
> Cheers,
> Mauro
> 

---
Guennadi Liakhovetski, Ph.D.
Freelance Open-Source Software Developer
http://www.open-technology.de/

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-02-11 17:22         ` Sylwester Nawrocki
  2014-02-11 17:41           ` Mauro Carvalho Chehab
@ 2014-02-11 21:46           ` Philipp Zabel
  1 sibling, 0 replies; 74+ messages in thread
From: Philipp Zabel @ 2014-02-11 21:46 UTC (permalink / raw)
  To: Sylwester Nawrocki
  Cc: Philipp Zabel, Laurent Pinchart, Russell King - ARM Linux,
	Rob Herring, Mauro Carvalho Chehab, Grant Likely, Rob Herring,
	Tomi Valkeinen, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel, Guennadi Liakhovetski

Hi Sylwester,

On Tue, Feb 11, 2014 at 06:22:34PM +0100, Sylwester Nawrocki wrote:
> drivers/media sounds like a good alternative to me.
> 
> I would just remove also v4l2_of_{parse/get}* and update the users
> to call of_graph_* directly, there should not be many of them.

For now I'd like to skip v4l2_of_parse_endpoint. The others can just be
copied verbatim, but this one also depends on struct v4l2_of_endpoint,
struct v4l2_of_bus_*, and <media/v4l2-mediabus.h>.

regards
Philipp

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-02-11 13:56 ` Rob Herring
  2014-02-11 14:52   ` Russell King - ARM Linux
  2014-02-11 15:27   ` Philipp Zabel
@ 2014-02-17 18:14   ` Grant Likely
  2014-02-18  7:06     ` Sascha Hauer
                       ` (2 more replies)
  2 siblings, 3 replies; 74+ messages in thread
From: Grant Likely @ 2014-02-17 18:14 UTC (permalink / raw)
  To: Rob Herring, Philipp Zabel
  Cc: Russell King - ARM Linux, Mauro Carvalho Chehab, Rob Herring,
	Sylwester Nawrocki, Laurent Pinchart, Tomi Valkeinen,
	Kyungmin Park, linux-kernel, linux-media, devicetree,
	Philipp Zabel

On Tue, 11 Feb 2014 07:56:33 -0600, Rob Herring <robherring2@gmail.com> wrote:
> On Tue, Feb 11, 2014 at 5:45 AM, Philipp Zabel <p.zabel@pengutronix.de> wrote:
> > From: Philipp Zabel <philipp.zabel@gmail.com>
> >
> > This patch moves the parsing helpers used to parse connected graphs
> > in the device tree, like the video interface bindings documented in
> > Documentation/devicetree/bindings/media/video-interfaces.txt, from
> > drivers/media/v4l2-core to drivers/of.
> 
> This is the opposite direction things have been moving...
> 
> > This allows to reuse the same parser code from outside the V4L2 framework,
> > most importantly from display drivers. There have been patches that duplicate
> > the code (and I am going to send one of my own), such as
> > http://lists.freedesktop.org/archives/dri-devel/2013-August/043308.html
> > and others that parse the same binding in a different way:
> > https://www.mail-archive.com/linux-omap@vger.kernel.org/msg100761.html
> >
> > I think that all common video interface parsing helpers should be moved to a
> > single place, outside of the specific subsystems, so that it can be reused
> > by all drivers.
> 
> Perhaps that should be done rather than moving to drivers/of now and
> then again to somewhere else.

This is just parsing helpers though, isn't it? I have no problem pulling
helper functions into drivers/of if they are usable by multiple
subsystems. I don't really understand the model being used though. I
would appreciate a description of the usage model for these functions
for poor folks like me who can't keep track of what is going on in
subsystems.

g.

> 
> > I moved v4l2_of_get_next_endpoint, v4l2_of_get_remote_port,
> > and v4l2_of_get_remote_port_parent. They are renamed to
> > of_graph_get_next_endpoint, of_graph_get_remote_port, and
> > of_graph_get_remote_port_parent, respectively.
> >
> > Signed-off-by: Philipp Zabel <p.zabel@pengutronix.de>
> > ---
> >  drivers/media/Kconfig             |   1 +
> >  drivers/media/v4l2-core/v4l2-of.c | 117 ---------------------------------
> >  drivers/of/Kconfig                |   3 +
> >  drivers/of/Makefile               |   1 +
> >  drivers/of/of_graph.c             | 133 ++++++++++++++++++++++++++++++++++++++
> >  include/linux/of_graph.h          |  23 +++++++
> >  include/media/v4l2-of.h           |  16 ++---
> >  7 files changed, 167 insertions(+), 127 deletions(-)
> >  create mode 100644 drivers/of/of_graph.c
> >  create mode 100644 include/linux/of_graph.h
> 
> [snip]
> 
> > diff --git a/include/media/v4l2-of.h b/include/media/v4l2-of.h
> > index 541cea4..404a493 100644
> > --- a/include/media/v4l2-of.h
> > +++ b/include/media/v4l2-of.h
> > @@ -17,6 +17,7 @@
> >  #include <linux/list.h>
> >  #include <linux/types.h>
> >  #include <linux/errno.h>
> > +#include <linux/of_graph.h>
> >
> >  #include <media/v4l2-mediabus.h>
> >
> > @@ -72,11 +73,6 @@ struct v4l2_of_endpoint {
> >  #ifdef CONFIG_OF
> >  int v4l2_of_parse_endpoint(const struct device_node *node,
> >                            struct v4l2_of_endpoint *endpoint);
> > -struct device_node *v4l2_of_get_next_endpoint(const struct device_node *parent,
> > -                                       struct device_node *previous);
> > -struct device_node *v4l2_of_get_remote_port_parent(
> > -                                       const struct device_node *node);
> > -struct device_node *v4l2_of_get_remote_port(const struct device_node *node);
> >  #else /* CONFIG_OF */
> >
> >  static inline int v4l2_of_parse_endpoint(const struct device_node *node,
> > @@ -85,25 +81,25 @@ static inline int v4l2_of_parse_endpoint(const struct device_node *node,
> >         return -ENOSYS;
> >  }
> >
> > +#endif /* CONFIG_OF */
> > +
> >  static inline struct device_node *v4l2_of_get_next_endpoint(
> >                                         const struct device_node *parent,
> >                                         struct device_node *previous)
> >  {
> > -       return NULL;
> > +       return of_graph_get_next_endpoint(parent, previous);
> 
> Won't this break for !OF?
> 
> Rob


^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-02-17 18:14   ` Grant Likely
@ 2014-02-18  7:06     ` Sascha Hauer
  2014-02-18 16:26       ` Grant Likely
  2014-02-18 10:43     ` Sylwester Nawrocki
  2014-02-18 13:41     ` Philipp Zabel
  2 siblings, 1 reply; 74+ messages in thread
From: Sascha Hauer @ 2014-02-18  7:06 UTC (permalink / raw)
  To: Grant Likely
  Cc: Rob Herring, Philipp Zabel, Russell King - ARM Linux,
	Mauro Carvalho Chehab, Rob Herring, Sylwester Nawrocki,
	Laurent Pinchart, Tomi Valkeinen, Kyungmin Park, linux-kernel,
	linux-media, devicetree, Philipp Zabel

Hi Grant,

On Mon, Feb 17, 2014 at 06:14:51PM +0000, Grant Likely wrote:
> On Tue, 11 Feb 2014 07:56:33 -0600, Rob Herring <robherring2@gmail.com> wrote:
> > On Tue, Feb 11, 2014 at 5:45 AM, Philipp Zabel <p.zabel@pengutronix.de> wrote:
> > > From: Philipp Zabel <philipp.zabel@gmail.com>
> > >
> > > This patch moves the parsing helpers used to parse connected graphs
> > > in the device tree, like the video interface bindings documented in
> > > Documentation/devicetree/bindings/media/video-interfaces.txt, from
> > > drivers/media/v4l2-core to drivers/of.
> > 
> > This is the opposite direction things have been moving...
> > 
> > > This allows to reuse the same parser code from outside the V4L2 framework,
> > > most importantly from display drivers. There have been patches that duplicate
> > > the code (and I am going to send one of my own), such as
> > > http://lists.freedesktop.org/archives/dri-devel/2013-August/043308.html
> > > and others that parse the same binding in a different way:
> > > https://www.mail-archive.com/linux-omap@vger.kernel.org/msg100761.html
> > >
> > > I think that all common video interface parsing helpers should be moved to a
> > > single place, outside of the specific subsystems, so that it can be reused
> > > by all drivers.
> > 
> > Perhaps that should be done rather than moving to drivers/of now and
> > then again to somewhere else.
> 
> This is just parsing helpers though, isn't it? I have no problem pulling
> helper functions into drivers/of if they are usable by multiple
> subsystems. I don't really understand the model being used though. I
> would appreciate a description of the usage model for these functions
> for poor folks like me who can't keep track of what is going on in
> subsystems.

You can find it under Documentation/devicetree/bindings/media/video-interfaces.txt

Sascha

-- 
Pengutronix e.K.                           |                             |
Industrial Linux Solutions                 | http://www.pengutronix.de/  |
Peiner Str. 6-8, 31137 Hildesheim, Germany | Phone: +49-5121-206917-0    |
Amtsgericht Hildesheim, HRA 2686           | Fax:   +49-5121-206917-5555 |

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-02-17 18:14   ` Grant Likely
  2014-02-18  7:06     ` Sascha Hauer
@ 2014-02-18 10:43     ` Sylwester Nawrocki
  2014-02-18 13:41     ` Philipp Zabel
  2 siblings, 0 replies; 74+ messages in thread
From: Sylwester Nawrocki @ 2014-02-18 10:43 UTC (permalink / raw)
  To: Grant Likely
  Cc: Rob Herring, Philipp Zabel, Russell King - ARM Linux,
	Mauro Carvalho Chehab, Rob Herring, Laurent Pinchart,
	Tomi Valkeinen, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

On 17/02/14 19:14, Grant Likely wrote:
> On Tue, 11 Feb 2014 07:56:33 -0600, Rob Herring <robherring2@gmail.com> wrote:
>> > On Tue, Feb 11, 2014 at 5:45 AM, Philipp Zabel <p.zabel@pengutronix.de> wrote:
>>> > > From: Philipp Zabel <philipp.zabel@gmail.com>
>>> > >
>>> > > This patch moves the parsing helpers used to parse connected graphs
>>> > > in the device tree, like the video interface bindings documented in
>>> > > Documentation/devicetree/bindings/media/video-interfaces.txt, from
>>> > > drivers/media/v4l2-core to drivers/of.
>> > 
>> > This is the opposite direction things have been moving...
>> > 
>>> > > This allows to reuse the same parser code from outside the V4L2 framework,
>>> > > most importantly from display drivers. There have been patches that duplicate
>>> > > the code (and I am going to send one of my own), such as
>>> > > http://lists.freedesktop.org/archives/dri-devel/2013-August/043308.html
>>> > > and others that parse the same binding in a different way:
>>> > > https://www.mail-archive.com/linux-omap@vger.kernel.org/msg100761.html
>>> > >
>>> > > I think that all common video interface parsing helpers should be moved to a
>>> > > single place, outside of the specific subsystems, so that it can be reused
>>> > > by all drivers.
>> > 
>> > Perhaps that should be done rather than moving to drivers/of now and
>> > then again to somewhere else.
>
> This is just parsing helpers though, isn't it? I have no problem pulling
> helper functions into drivers/of if they are usable by multiple
> subsystems. I don't really understand the model being used though. I
> would appreciate a description of the usage model for these functions
> for poor folks like me who can't keep track of what is going on in
> subsystems.

Yes, this is (mostly) just parsing helpers to walk graph of connected (sub-)
devices. Originally I thought about adding this code to drivers/of/of_video.c, 
nevertheless it ended up in drivers/media/vl2-core/v4l2-of.c. However those
helpers, likely only after some improvements/enhancements, could be used 
in other subsystems like drivers/video or drivers/gpu/drm, wherever a whole
device consists of multiple connected sub-devices. 

These helpers are supposed to be used (generically) to walk a graph of 
sub-devices, e.g. to find a remote sub-device (e.g. a data transmitter) 
to some sub-device (e.g. a data receiver) in order to configure it for 
data transmission.

This parsing helpers code could be somehow related to rmk's componentized 
device handling [1], in a sense it is supposed to be similarly generic.

I think having those helpers in a common location and shared by subsystems
could be useful in terms of consistent DT bindings for same devices (e.g.
displays) handled currently by multiple kernel subsystems, e.g. DRM, FB, 
V4L2.

Of course there might be still some work needed so these helpers are usable
for all (e.g. simplifying corresponding DT binding to have less bloated
description for very simple devices - this has been on my todo list), but 
it would be really nice to first agree to the common location. 


[1] https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/drivers/base/component.c?id=2a41e6070dd7ef539d0f3b1652b4839d04378e11
 
--
Thanks,
Sylwester

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-02-17 18:14   ` Grant Likely
  2014-02-18  7:06     ` Sascha Hauer
  2014-02-18 10:43     ` Sylwester Nawrocki
@ 2014-02-18 13:41     ` Philipp Zabel
  2 siblings, 0 replies; 74+ messages in thread
From: Philipp Zabel @ 2014-02-18 13:41 UTC (permalink / raw)
  To: Grant Likely
  Cc: Rob Herring, Russell King - ARM Linux, Mauro Carvalho Chehab,
	Rob Herring, Sylwester Nawrocki, Laurent Pinchart,
	Tomi Valkeinen, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

Hi Grant,

Am Montag, den 17.02.2014, 18:14 +0000 schrieb Grant Likely:
> On Tue, 11 Feb 2014 07:56:33 -0600, Rob Herring <robherring2@gmail.com> wrote:
> > On Tue, Feb 11, 2014 at 5:45 AM, Philipp Zabel <p.zabel@pengutronix.de> wrote:
> > > From: Philipp Zabel <philipp.zabel@gmail.com>
> > >
> > > This patch moves the parsing helpers used to parse connected graphs
> > > in the device tree, like the video interface bindings documented in
> > > Documentation/devicetree/bindings/media/video-interfaces.txt, from
> > > drivers/media/v4l2-core to drivers/of.
> > 
> > This is the opposite direction things have been moving...
> > 
> > > This allows to reuse the same parser code from outside the V4L2 framework,
> > > most importantly from display drivers. There have been patches that duplicate
> > > the code (and I am going to send one of my own), such as
> > > http://lists.freedesktop.org/archives/dri-devel/2013-August/043308.html
> > > and others that parse the same binding in a different way:
> > > https://www.mail-archive.com/linux-omap@vger.kernel.org/msg100761.html
> > >
> > > I think that all common video interface parsing helpers should be moved to a
> > > single place, outside of the specific subsystems, so that it can be reused
> > > by all drivers.
> > 
> > Perhaps that should be done rather than moving to drivers/of now and
> > then again to somewhere else.
> 
> This is just parsing helpers though, isn't it? I have no problem pulling
> helper functions into drivers/of if they are usable by multiple
> subsystems. I don't really understand the model being used though. I
> would appreciate a description of the usage model for these functions
> for poor folks like me who can't keep track of what is going on in
> subsystems.

I have taken the liberty to put you on Cc: for the i.MX drm series that
I'd like to use these helpers in. The patch in question is
"[RFC PATCH v3 3/9] staging: imx-drm-core: Use OF graph to find
components and connections between encoder and crtcs"
(http://www.spinics.net/lists/arm-kernel/msg308542.html).
It currently uses local copies (s/of_graph/imx_drm_of/) of the
get_next_endpoint, get_remote_port, and get_remote_port_parent
functions to obtain all necessary components for the componentized
imx-drm device, and to map the connections between crtcs (display
interface ports) and encoders.

regards
Philipp


^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-02-18  7:06     ` Sascha Hauer
@ 2014-02-18 16:26       ` Grant Likely
       [not found]         ` < 1393263389.3091.82.camel@pizza.hi.pengutronix.de>
  2014-02-24 17:36         ` Philipp Zabel
  0 siblings, 2 replies; 74+ messages in thread
From: Grant Likely @ 2014-02-18 16:26 UTC (permalink / raw)
  To: Sascha Hauer
  Cc: Rob Herring, Philipp Zabel, Russell King - ARM Linux,
	Mauro Carvalho Chehab, Rob Herring, Sylwester Nawrocki,
	Laurent Pinchart, Tomi Valkeinen, Kyungmin Park, linux-kernel,
	linux-media, devicetree, Philipp Zabel

On Tue, 18 Feb 2014 08:06:24 +0100, Sascha Hauer <s.hauer@pengutronix.de> wrote:
> Hi Grant,
> 
> On Mon, Feb 17, 2014 at 06:14:51PM +0000, Grant Likely wrote:
> > On Tue, 11 Feb 2014 07:56:33 -0600, Rob Herring <robherring2@gmail.com> wrote:
> > > On Tue, Feb 11, 2014 at 5:45 AM, Philipp Zabel <p.zabel@pengutronix.de> wrote:
> > > > From: Philipp Zabel <philipp.zabel@gmail.com>
> > > >
> > > > This patch moves the parsing helpers used to parse connected graphs
> > > > in the device tree, like the video interface bindings documented in
> > > > Documentation/devicetree/bindings/media/video-interfaces.txt, from
> > > > drivers/media/v4l2-core to drivers/of.
> > > 
> > > This is the opposite direction things have been moving...
> > > 
> > > > This allows to reuse the same parser code from outside the V4L2 framework,
> > > > most importantly from display drivers. There have been patches that duplicate
> > > > the code (and I am going to send one of my own), such as
> > > > http://lists.freedesktop.org/archives/dri-devel/2013-August/043308.html
> > > > and others that parse the same binding in a different way:
> > > > https://www.mail-archive.com/linux-omap@vger.kernel.org/msg100761.html
> > > >
> > > > I think that all common video interface parsing helpers should be moved to a
> > > > single place, outside of the specific subsystems, so that it can be reused
> > > > by all drivers.
> > > 
> > > Perhaps that should be done rather than moving to drivers/of now and
> > > then again to somewhere else.
> > 
> > This is just parsing helpers though, isn't it? I have no problem pulling
> > helper functions into drivers/of if they are usable by multiple
> > subsystems. I don't really understand the model being used though. I
> > would appreciate a description of the usage model for these functions
> > for poor folks like me who can't keep track of what is going on in
> > subsystems.
> 
> You can find it under Documentation/devicetree/bindings/media/video-interfaces.txt

Okay, I think I'm okay with moving the helpers, but I will make one
requirement. I would like to have a short binding document describing
the pattern being used. The document above talks a lot about video
specific issues, but the helpers appear to be specifically about the
tree walking properties of the API.

g.

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-02-18 16:26       ` Grant Likely
       [not found]         ` < 1393263389.3091.82.camel@pizza.hi.pengutronix.de>
@ 2014-02-24 17:36         ` Philipp Zabel
  2014-02-26 11:01           ` Grant Likely
  1 sibling, 1 reply; 74+ messages in thread
From: Philipp Zabel @ 2014-02-24 17:36 UTC (permalink / raw)
  To: Grant Likely
  Cc: Sascha Hauer, Rob Herring, Russell King - ARM Linux,
	Mauro Carvalho Chehab, Rob Herring, Sylwester Nawrocki,
	Laurent Pinchart, Tomi Valkeinen, Kyungmin Park, linux-kernel,
	linux-media, devicetree, Philipp Zabel

Am Dienstag, den 18.02.2014, 16:26 +0000 schrieb Grant Likely:
> On Tue, 18 Feb 2014 08:06:24 +0100, Sascha Hauer <s.hauer@pengutronix.de> wrote:
> > Hi Grant,
> > 
> > On Mon, Feb 17, 2014 at 06:14:51PM +0000, Grant Likely wrote:
> > > On Tue, 11 Feb 2014 07:56:33 -0600, Rob Herring <robherring2@gmail.com> wrote:
> > > > On Tue, Feb 11, 2014 at 5:45 AM, Philipp Zabel <p.zabel@pengutronix.de> wrote:
> > > > > From: Philipp Zabel <philipp.zabel@gmail.com>
> > > > >
> > > > > This patch moves the parsing helpers used to parse connected graphs
> > > > > in the device tree, like the video interface bindings documented in
> > > > > Documentation/devicetree/bindings/media/video-interfaces.txt, from
> > > > > drivers/media/v4l2-core to drivers/of.
> > > > 
> > > > This is the opposite direction things have been moving...
> > > > 
> > > > > This allows to reuse the same parser code from outside the V4L2 framework,
> > > > > most importantly from display drivers. There have been patches that duplicate
> > > > > the code (and I am going to send one of my own), such as
> > > > > http://lists.freedesktop.org/archives/dri-devel/2013-August/043308.html
> > > > > and others that parse the same binding in a different way:
> > > > > https://www.mail-archive.com/linux-omap@vger.kernel.org/msg100761.html
> > > > >
> > > > > I think that all common video interface parsing helpers should be moved to a
> > > > > single place, outside of the specific subsystems, so that it can be reused
> > > > > by all drivers.
> > > > 
> > > > Perhaps that should be done rather than moving to drivers/of now and
> > > > then again to somewhere else.
> > > 
> > > This is just parsing helpers though, isn't it? I have no problem pulling
> > > helper functions into drivers/of if they are usable by multiple
> > > subsystems. I don't really understand the model being used though. I
> > > would appreciate a description of the usage model for these functions
> > > for poor folks like me who can't keep track of what is going on in
> > > subsystems.
> > 
> > You can find it under Documentation/devicetree/bindings/media/video-interfaces.txt
> 
> Okay, I think I'm okay with moving the helpers, but I will make one
> requirement. I would like to have a short binding document describing
> the pattern being used. The document above talks a lot about video
> specific issues, but the helpers appear to be specifically about the
> tree walking properties of the API.

Reusing the non-video-specific parts of video-interfaces.txt, how about
the following:

"Common bindings for device graphs"

General concept
---------------

The hierarchical organisation of the device tree is well suited to describe
control flow to devices, but data flow between devices that work together to
form a logical compound device can follow arbitrarily complex graphs.
The device tree graph bindings allow describing data bus connections between
individual devices that can't be inferred from device tree parent-child
relationships. The common bindings do not contain any information about the
direction or type of data flow; they just map connections. Specific properties
of the connections can be set depending on the type of connection. To see
how this binding applies to video pipelines, see for example
Documentation/devicetree/bindings/media/video-interfaces.txt.

Data interfaces on devices are described by their child 'port' nodes. The port
node contains an 'endpoint' subnode for each remote device connected to this
port via a bus. If a port is connected to more than one remote device on the
same bus, an 'endpoint' child node must be provided for each of them. If more
than one port is present in a device node or there is more than one endpoint at
a port, or a port node needs to be associated with a selected hardware interface,
a common scheme using '#address-cells', '#size-cells' and 'reg' properties is
used.

device {
	...
	#address-cells = <1>;
	#size-cells = <0>;

	port@0 {
		...
		endpoint@0 { ... };
		endpoint@1 { ... };
	};

	port@1 { ... };
};

All 'port' nodes can be grouped under an optional 'ports' node, which allows
specifying the #address-cells, #size-cells properties independently for the
'port' and 'endpoint' nodes and any child device nodes a device might have.

device {
	...
	ports {
		#address-cells = <1>;
		#size-cells = <0>;

		port@0 {
			...
			endpoint@0 { ... };
			endpoint@1 { ... };
		};

		port@1 { ... };
	};
};

Each endpoint can contain a 'remote-endpoint' phandle property that points to
the corresponding endpoint in the port of the remote device. Two 'endpoint'
nodes are linked with each other through their 'remote-endpoint' phandles.

device_1 {
	port {
		device_1_output: endpoint {
			remote-endpoint = <&device_2_input>;
		};
	};
};

device_2 {
	port {
		device_2_input: endpoint {
			remote-endpoint = <&device_1_output>;
		};
	};
};


Required properties
-------------------

If there is more than one 'port' or more than one 'endpoint' node or 'reg'
property is present in port and/or endpoint nodes the following properties
are required in a relevant parent node:

 - #address-cells : number of cells required to define port/endpoint
		    identifier, should be 1.
 - #size-cells    : should be zero.

Optional endpoint properties
----------------------------

- remote-endpoint: phandle to an 'endpoint' subnode of a remote device node.



^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-02-24 17:36         ` Philipp Zabel
@ 2014-02-26 11:01           ` Grant Likely
  2014-02-26 14:48             ` Philipp Zabel
  0 siblings, 1 reply; 74+ messages in thread
From: Grant Likely @ 2014-02-26 11:01 UTC (permalink / raw)
  To: Philipp Zabel
  Cc: Sascha Hauer, Rob Herring, Russell King - ARM Linux,
	Mauro Carvalho Chehab, Rob Herring, Sylwester Nawrocki,
	Laurent Pinchart, Tomi Valkeinen, Kyungmin Park, linux-kernel,
	linux-media, devicetree, Philipp Zabel

On Mon, 24 Feb 2014 18:36:29 +0100, Philipp Zabel <p.zabel@pengutronix.de> wrote:
> Am Dienstag, den 18.02.2014, 16:26 +0000 schrieb Grant Likely:
> > > 
> > > You can find it under Documentation/devicetree/bindings/media/video-interfaces.txt
> > 
> > Okay, I think I'm okay with moving the helpers, but I will make one
> > requirement. I would like to have a short binding document describing
> > the pattern being used. The document above talks a lot about video
> > specific issues, but the helpers appear to be specifically about the
> > tree walking properties of the API.
> 
> Reusing the non-video-specific parts of video-interfaces.txt, how about
> the following:

This is good, but I have some comments. This document describes itself
as the common way for doing a device graph within the device tree, but
there is already a well established pattern for device graphs that is
used by the interrupts-extended, clocks and other bindings. Those are
all domain-specific bindings, but the core concept is one device uses
a resource provided by another device. The resource references construct
a graph independent from the natural FDT node graph. (ie. the interrupts
binding forms the interrupt graph. Same for the clock binding).

So, while this binding does describe a pattern for separate device
graphs, it is by no means the only common way of doing so.

I would like the document to acknowledge the difference from the
phandle+args pattern used elsewhere and a description of when it would
be appropriate to use this instead of a simpler binding.

> 
> "Common bindings for device graphs"
> 
> General concept
> ---------------
> 
> The hierarchical organisation of the device tree is well suited to describe
> control flow to devices, but data flow between devices that work together to
> form a logical compound device can follow arbitrarily complex graphs.

I would argue that this pattern isn't necessarily restricted to data
flow descriptions. It wants to describe linkage between devices that are
sufficiently complex that the simple binding doesn't do the job.

> The device tree graph bindings allow to describe data bus connections between
> individual devices, that can't be inferred from device tree parent-child
> relationships. The common bindings do not contain any information about the
> direction or type of data flow, they just map connections. Specific properties
> of the connections can be set depending on the type of connection. To see
> how this binding applies to video pipelines, see for example
> Documentation/devicetree/bindings/media/video-interfaces.txt.

Even if you don't want to declare the direction of data flow, there does
need to be some guidance as to how the binding is constructed. Does
device A point to device B? Or the other way around? Why would someone
choose one over the other? I don't want to see a situation where A & B
point to each other. Things get complex if the graph is allowed to be
cyclical.

> Data interfaces on devices are described by their child 'port' nodes. The port
> node contains an 'endpoint' subnode for each remote device connected to this
> port via a bus. If a port is connected to more than one remote device on the
> same bus, an 'endpoint' child node must be provided for each of them. If more
> than one port is present in a device node or there is more than one endpoint at
> a port, or port node needs to be associated with a selected hardware interface,
> a common scheme using '#address-cells', '#size-cells' and 'reg' properties is
> used.
> 
> device {
> 	...
> 	#address-cells = <1>;
> 	#size-cells = <0>;
> 
> 	port@0 {
> 		...
> 		endpoint@0 { ... };
> 		endpoint@1 { ... };
> 	};
> 
> 	port@1 { ... };
> };
> 
> All 'port' nodes can be grouped under optional 'ports' node, which allows to
> specify #address-cells, #size-cells properties independently for the 'port'
> and 'endpoint' nodes and any child device nodes a device might have.
> 
> device {
> 	...
> 	ports {
> 		#address-cells = <1>;
> 		#size-cells = <0>;
> 
> 		port@0 {
> 			...
> 			endpoint@0 { ... };
> 			endpoint@1 { ... };
> 		};
> 
> 		port@1 { ... };
> 	};
> };
> 
> Each endpoint can contain a 'remote-endpoint' phandle property that points to
> the corresponding endpoint in the port of the remote device. Two 'endpoint'
> nodes are linked with each other through their 'remote-endpoint' phandles.

I really don't like this aspect. It is far too easy to get wrong. Graphs
should be one direction only.

> 
> device_1 {
> 	port {
> 		device_1_output: endpoint {
> 			remote-endpoint = <&device_2_input>;
> 		};
> 	};
> };
> 
> device_2 {
> 	port {
> 		device_2_input: endpoint {
> 			remote-endpoint = <&device_1_output>;
> 		};
> 	};
> };
> 
> 
> Required properties
> -------------------
> 
> If there is more than one 'port' or more than one 'endpoint' node or 'reg'
> property is present in port and/or endpoint nodes the following properties
> are required in a relevant parent node:
> 
>  - #address-cells : number of cells required to define port/endpoint
> 		    identifier, should be 1.
>  - #size-cells    : should be zero.
> 
> Optional endpoint properties
> ----------------------------
> 
> - remote-endpoint: phandle to an 'endpoint' subnode of a remote device node.
> 
> 


^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-02-26 11:01           ` Grant Likely
@ 2014-02-26 14:48             ` Philipp Zabel
  2014-02-27  8:36               ` Tomi Valkeinen
  2014-03-07 17:05               ` Grant Likely
  0 siblings, 2 replies; 74+ messages in thread
From: Philipp Zabel @ 2014-02-26 14:48 UTC (permalink / raw)
  To: Grant Likely
  Cc: Sascha Hauer, Rob Herring, Russell King - ARM Linux,
	Mauro Carvalho Chehab, Rob Herring, Sylwester Nawrocki,
	Laurent Pinchart, Tomi Valkeinen, Kyungmin Park, linux-kernel,
	linux-media, devicetree, Philipp Zabel

Hi Grant,

thank you for the comments.

Am Mittwoch, den 26.02.2014, 11:01 +0000 schrieb Grant Likely:
> On Mon, 24 Feb 2014 18:36:29 +0100, Philipp Zabel <p.zabel@pengutronix.de> wrote:
> > Am Dienstag, den 18.02.2014, 16:26 +0000 schrieb Grant Likely:
> > > > 
> > > > You can find it under Documentation/devicetree/bindings/media/video-interfaces.txt
> > > 
> > > Okay, I think I'm okay with moving the helpers, but I will make one
> > > requirement. I would like to have a short binding document describing
> > > the pattern being used. The document above talks a lot about video
> > > specific issues, but the helpers appear to be specifically about the
> > > tree walking properties of the API.
> > 
> > Reusing the non-video-specific parts of video-interfaces.txt, how about
> > the following:
> 
> This is good, but I have some comments. This document describes itself
> as the common way for doing a device graph within the device tree, but
> there is already a well established pattern for device graphs that is
> used by the interrupts-extended, clocks and other bindings. Those are
> all domain-specific bindings, but the core concept is one device uses
> a resource provided by another device. The resource references construct
> a graph independent from the natural FDT node graph. (ie. the interrupts
> binding forms the interrupt graph. Same for the clock binding).
> 
> So, while this binding does describe a pattern for separate device
> graphs, it is by no means the only common way of doing so.
> 
> I would like the document to acknowledge the difference from the
> phandle+args pattern used elsewhere and a description of when it would
> be appropriate to use this instead of a simpler binding.

Alright. The main point of this binding is that the devices may have
multiple distinct ports that each can be connected to other devices.
So, contrary to the other graph bindings, devices are not simple nodes
in a directed graph. For example a single capture device with two
separate inputs connected to camera sensors:

	,--------.
	| cam [port]-.   ,-----------------.
	`--------´    `-[port@0] capture   |
	,---------.   ,-[port@1] interface |
	| cam [port]-´   `-----------------´
	`---------´

Or two separate display controllers with two parallel outputs each, a
4-port multiplexer, and an encoder (e.g. lvds):

	,------------------.
	| display    [port@0]--.   ,-------------.
	| controller [port@1]-. `-[port@0]       |
	`------------------´   `--[port@1]  mux  |
	,------------------.   ,--[port@2]       |    ,-------------.
	| display    [port@0]-´ ,-[port@3] [port@4]--[port] encoder |
	| controller [port@1]--´   `-------------´    `-------------´
	`------------------´

Optionally, the same with the multiplexer integrated into the
encoder device:

	,------------------.
	| display    [port@0]--.   ,----------------.
	| controller [port@1]-. `-[port@0]          |
	`------------------´   `--[port@1] encoder  |
	,------------------.   ,--[port@2] with mux |
	| display    [port@0]-´ ,-[port@3]          |
	| controller [port@1]--´   `----------------´
	`------------------´

Or display controller, encoder, and panel:

 ,--------.    ,---------.
 | dc [port]--[port@0]   |
 `--------´    | encoder |    ,-----------.
               |   [port@1]--[port] panel |
               `---------´    `-----------´ 

> > "Common bindings for device graphs"
> > 
> > General concept
> > ---------------
> > 
> > The hierarchical organisation of the device tree is well suited to describe
> > control flow to devices, but data flow between devices that work together to
> > form a logical compound device can follow arbitrarily complex graphs.
> 
> I would argue that this pattern isn't necessarily restricted to data
> flow descriptions. It wants to describe linkage between devices that are
> sufficiently complex that the simple binding doesn't do the job.

Ok. In principle it would be possible to use the same scheme for other
connections than data links.

> > The device tree graph bindings allow to describe data bus connections between
> > individual devices, that can't be inferred from device tree parent-child
> > relationships. The common bindings do not contain any information about the
> > direction or type of data flow, they just map connections. Specific properties
> > of the connections can be set depending on the type of connection. To see
> > how this binding applies to video pipelines, see for example
> > Documentation/devicetree/bindings/media/video-interfaces.txt.
> 
> Even if you don't want to declare the direction of data flow, there does
> need to be some guidance as to how the binding is constructed. Does
> device A point to device B? Or the other way around? Why would someone
> choose one over the other? I don't want to see a situation where A & B
> point to each other. Things get complex if the graph is allowed to be
> cyclical.

According to video-interfaces.txt, it is expected that endpoints contain
phandles pointing to the remote endpoint on both sides. I'd like to
leave this up to the more specialized bindings, but I can see that this
makes enumerating the connections starting from each device tree node
easier, for example at probe time.
If the back links are not provided in the device tree, a device at the
receiving end of a remote-endpoint phandle can only know about connected
remote ports after the kernel parsed the whole graph and created the
back links itself.

> > Each endpoint can contain a 'remote-endpoint' phandle property that points to
> > the corresponding endpoint in the port of the remote device. Two 'endpoint'
> > nodes are linked with each other through their 'remote-endpoint' phandles.
> 
> I really don't like this aspect. It is far too easy to get wrong.

On the other hand it is really easy to test for and warn about missing
or misdirected backlinks.

>  Graphs should be one direction only.

But this is not what the current binding in video-interfaces.txt
describes. I don't think it is a good idea to explicitly forbid
backlinks in this binding.

regards
Philipp


^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-02-26 14:48             ` Philipp Zabel
@ 2014-02-27  8:36               ` Tomi Valkeinen
  2014-03-07 17:06                 ` Grant Likely
  2014-03-07 17:05               ` Grant Likely
  1 sibling, 1 reply; 74+ messages in thread
From: Tomi Valkeinen @ 2014-02-27  8:36 UTC (permalink / raw)
  To: Philipp Zabel, Grant Likely
  Cc: Sascha Hauer, Rob Herring, Russell King - ARM Linux,
	Mauro Carvalho Chehab, Rob Herring, Sylwester Nawrocki,
	Laurent Pinchart, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

[-- Attachment #1: Type: text/plain, Size: 895 bytes --]

On 26/02/14 16:48, Philipp Zabel wrote:

>> I would like the document to acknowledge the difference from the
>> phandle+args pattern used elsewhere and a description of when it would
>> be appropriate to use this instead of a simpler binding.
> 
> Alright. The main point of this binding is that the devices may have
> multiple distinct ports that each can be connected to other devices.

The other main point with this binding are multiple endpoints per port.
So you can have, say, a display controller, with single port, which has
two endpoints going to two separate LCD panels.

In physical level that would usually mean that the same pins from the
display controller are connected to two panels. Most likely this would
mean that only one panel can be used at a time, possibly with different
settings (say, 16 RGB pins for one panel, 24 RGB pins for the other).

 Tomi



[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 901 bytes --]

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-02-26 14:48             ` Philipp Zabel
  2014-02-27  8:36               ` Tomi Valkeinen
@ 2014-03-07 17:05               ` Grant Likely
  2014-03-08 10:33                 ` Tomi Valkeinen
  2014-03-08 11:54                 ` Russell King - ARM Linux
  1 sibling, 2 replies; 74+ messages in thread
From: Grant Likely @ 2014-03-07 17:05 UTC (permalink / raw)
  To: Philipp Zabel
  Cc: Sascha Hauer, Rob Herring, Russell King - ARM Linux,
	Mauro Carvalho Chehab, Rob Herring, Sylwester Nawrocki,
	Laurent Pinchart, Tomi Valkeinen, Kyungmin Park, linux-kernel,
	linux-media, devicetree, Philipp Zabel

[-- Warning: decoded text below may be mangled, UTF-8 assumed --]
[-- Attachment #1: Type: text/plain, Size: 9389 bytes --]

On Wed, 26 Feb 2014 15:48:49 +0100, Philipp Zabel <p.zabel@pengutronix.de> wrote:
> Hi Grant,
> 
> thank you for the comments.

Hi Philipp,

I've got lots of comments and questions below, but I must say thank you
for doing this. It is a helpful description.

> 
> Am Mittwoch, den 26.02.2014, 11:01 +0000 schrieb Grant Likely:
> > On Mon, 24 Feb 2014 18:36:29 +0100, Philipp Zabel <p.zabel@pengutronix.de> wrote:
> > > Am Dienstag, den 18.02.2014, 16:26 +0000 schrieb Grant Likely:
> > > > > 
> > > > > You can find it under Documentation/devicetree/bindings/media/video-interfaces.txt
> > > > 
> > > > Okay, I think I'm okay with moving the helpers, but I will make one
> > > > requirement. I would like to have a short binding document describing
> > > > the pattern being used. The document above talks a lot about video
> > > > specific issues, but the helpers appear to be specifically about the
> > > > tree walking properties of the API.
> > > 
> > > Reusing the non-video-specific parts of video-interfaces.txt, how about
> > > the following:
> > 
> > This is good, but I have some comments. This document describes itself
> > as the common way for doing a device graph within the device tree, but
> > there is already a well established pattern for device graphs that is
> > used by the interrupts-extended, clocks and other bindings. Those are
> > all domain-specific bindings, but the core concept is one device uses
> > a resource provided by another device. The resource references construct
> > a graph independent from the natural FDT node graph. (ie. the interrupts
> > binding forms the interrupt graph. Same for the clock binding).
> > 
> > So, while this binding does describe a pattern for separate device
> > graphs, it is by no means the only common way of doing so.
> > 
> > I would like the document to acknowledge the difference from the
> > phandle+args pattern used elsewhere and a description of when it would
> > be appropriate to use this instead of a simpler binding.
> 
> Alright. The main point of this binding is that the devices may have
> multiple distinct ports that each can be connected to other devices.
> So, contrary to the other graph bindings, devices are not simple nodes
> in a directed graph. For example a single capture device with two
> separate inputs connected to camera sensors:
> 
> 	,--------.
> 	| cam [port]-.   ,-----------------.
> 	`--------´    `-[port@0] capture   |
> 	,---------.   ,-[port@1] interface |
> 	| cam [port]-´   `-----------------´
> 	`---------´

Bear with me, I'm going to comment on each point. I'm not criticizing,
but I do want to understand the HW design. In particular I want to
understand why the linkage is treated as bidirectional instead of one
device being the master.

In this specific example, what would be managing the transfer? Is there
an overarching driver that assembles the pieces, or would you expect
individual drivers to find each other?

> Or two separate display controllers with two parallel outputs each, a
> 4-port multiplexer, and an encoder (e.g. lvds):
> 
> 	,------------------.
> 	| display    [port@0]--.   ,-------------.
> 	| controller [port@1]-. `-[port@0]       |
> 	`------------------´   `--[port@1]  mux  |
> 	,------------------.   ,--[port@2]       |    ,-------------.
> 	| display    [port@0]-´ ,-[port@3] [port@4]--[port] encoder |
> 	| controller [port@1]--´   `-------------´    `-------------´
> 	`------------------´

Similar question here, how is the above complex assembled into a logical
device? Is there an overarching controller device for this component
complex?

> Optionally, the same with the multiplexer integrated into the
> encoder device:
> 
> 	,------------------.
> 	| display    [port@0]--.   ,----------------.
> 	| controller [port@1]-. `-[port@0]          |
> 	`------------------´   `--[port@1] encoder  |
> 	,------------------.   ,--[port@2] with mux |
> 	| display    [port@0]-´ ,-[port@3]          |
> 	| controller [port@1]--´   `----------------´
> 	`------------------´
> 
> Or display controller, encoder, and panel:
> 
>  ,--------.    ,---------.
>  | dc [port]--[port@0]   |
>  `--------´    | encoder |    ,-----------.
>                |   [port@1]--[port] panel |
>                `---------´    `-----------´ 

In all of the above examples I see a unidirectional data flow. There are
producer ports and consumer ports. Is there any (reasonable) possibility
of peer ports that are bidirectional?

> 
> > > "Common bindings for device graphs"
> > > 
> > > General concept
> > > ---------------
> > > 
> > > The hierarchical organisation of the device tree is well suited to describe
> > > control flow to devices, but data flow between devices that work together to
> > > form a logical compound device can follow arbitrarily complex graphs.
> > 
> > I would argue that this pattern isn't necessarily restricted to data
> > flow descriptions. It wants to describe linkage between devices that are
> > sufficiently complex that the simple binding doesn't do the job.
> 
> Ok. In principle it would be possible to use the same scheme for other
> connections than data links.
> 
> > > The device tree graph bindings allow to describe data bus connections between
> > > individual devices, that can't be inferred from device tree parent-child
> > > relationships. The common bindings do not contain any information about the
> > > direction or type of data flow, they just map connections. Specific properties
> > > of the connections can be set depending on the type of connection. To see
> > > how this binding applies to video pipelines, see for example
> > > Documentation/devicetree/bindings/media/video-interfaces.txt.
> > 
> > Even if you don't want to declare the direction of data flow, there does
> > need to be some guidance as to how the binding is constructed. Does
> > device A point to device B? Or the other way around? Why would someone
> > choose one over the other? I don't want to see a situation where A & B
> > point to each other. Things get complex if the graph is allowed to be
> > cyclical.
> 
> According to video-interfaces.txt, it is expected that endpoints contain
> phandles pointing to the remote endpoint on both sides. I'd like to
> leave this up to the more specialized bindings, but I can see that this
> makes enumerating the connections starting from each device tree node
> easier, for example at probe time.

This has come up in the past. That approach comes down to premature
optimization at the expense of making the data structure more prone to
inconsistency. I consider it to be a bad pattern.

Backlinks are good for things like dynamically linked lists that need to
be fast and the software fully manages the links. For a data structure like
the FDT it is better to have the data in one place, and one place only.
Besides, computers are *good* at parsing data structures. :-)

I appreciate that existing drivers may be using the backlinks, but I
strongly recommend not doing it for new users.

> If the back links are not provided in the device tree, a device at the
> receiving end of a remote-endpoint phandle can only know about connected
> remote ports after the kernel parsed the whole graph and created the
> back links itself.

A whole tree parse isn't expensive. We do it all the time.

Besides, nothing useful can be done until a driver has taken
responsibility for each side of the connection. I would expect to need a
registry anyway for pairing things up. If one side gets connected
without having the backlink, then it should get registered and wait
until another port gets registered that does have the linkage. In which
case a whole tree parse isn't ever necessary.

I think the common code would be even more useful if it implemented a
port registry helper in addition to the parsing. That would make the
decoding transparent.

> > > Each endpoint can contain a 'remote-endpoint' phandle property that points to
> > > the corresponding endpoint in the port of the remote device. Two 'endpoint'
> > > nodes are linked with each other through their 'remote-endpoint' phandles.
> > 
> > I really don't like this aspect. It is far too easy to get wrong.
> 
> On the other hand it is really easy to test for and warn about missing
> or misdirected backlinks.

I don't think that makes it a better data structure.

> >  Graphs should be one direction only.
> 
> But this is not what the current binding in video-interfaces.txt
> describes. I don't think it is a good idea to explicitly forbid
> backlinks in this binding.

Nah, I disagree. The binding document /should/ forbid it to make it
really clear what the common pattern is. The code doesn't have to
enforce it (you don't want to break existing platforms), but the
document should be blunt.

Another thought. In terms of the pattern, I would add a recommendation
that there should be a way to identify ports of a particular type. ie.
If I were using the pattern to implement an patch bay of DSP filters,
where each input and output, then each target node should have a unique
identifier property analogous to "interrupt-controller" or
"gpio-controller". In this fictitious example I would probably choose
"audiostream-input-port" and "audiostream-output-port" as empty
properties in the port nodes. (I'm not suggesting a change to the
existing binding, but making a recommendation to new users).

g.

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-02-27  8:36               ` Tomi Valkeinen
@ 2014-03-07 17:06                 ` Grant Likely
  2014-03-08 10:57                   ` Tomi Valkeinen
  0 siblings, 1 reply; 74+ messages in thread
From: Grant Likely @ 2014-03-07 17:06 UTC (permalink / raw)
  To: Tomi Valkeinen, Philipp Zabel
  Cc: Sascha Hauer, Rob Herring, Russell King - ARM Linux,
	Mauro Carvalho Chehab, Rob Herring, Sylwester Nawrocki,
	Laurent Pinchart, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

On Thu, 27 Feb 2014 10:36:36 +0200, Tomi Valkeinen <tomi.valkeinen@ti.com> wrote:
> On 26/02/14 16:48, Philipp Zabel wrote:
> 
> >> I would like the document to acknowledge the difference from the
> >> phandle+args pattern used elsewhere and a description of when it would
> >> be appropriate to use this instead of a simpler binding.
> > 
> > Alright. The main point of this binding is that the devices may have
> > multiple distinct ports that each can be connected to other devices.
> 
> The other main point with this binding are multiple endpoints per port.
> So you can have, say, a display controller, with single port, which has
> two endpoints going to two separate LCD panels.
> 
> In physical level that would usually mean that the same pins from the
> display controller are connected to two panels. Most likely this would
> mean that only one panel can be used at a time, possibly with different
> settings (say, 16 RGB pins for one panel, 24 RGB pins for the other).

What device is in control in that scenario?

g.

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-07 17:05               ` Grant Likely
@ 2014-03-08 10:33                 ` Tomi Valkeinen
  2014-03-08 11:41                   ` Grant Likely
  2014-03-08 11:54                 ` Russell King - ARM Linux
  1 sibling, 1 reply; 74+ messages in thread
From: Tomi Valkeinen @ 2014-03-08 10:33 UTC (permalink / raw)
  To: Grant Likely, Philipp Zabel
  Cc: Sascha Hauer, Rob Herring, Russell King - ARM Linux,
	Mauro Carvalho Chehab, Rob Herring, Sylwester Nawrocki,
	Laurent Pinchart, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

[-- Attachment #1: Type: text/plain, Size: 5824 bytes --]

On 07/03/14 19:05, Grant Likely wrote:
> On Wed, 26 Feb 2014 15:48:49 +0100, Philipp Zabel <p.zabel@pengutronix.de> wrote:
>> Hi Grant,
>>
>> thank you for the comments.
> 
> Hi Philipp,
> 
> I've got lots of comments and questions below, but I must say thank you
> for doing this. It is a helpful description.

Thanks for the comments. I'll answer from my point of view, which may be
different than Philipp's.

> Bear with me, I'm going to comment on each point. I'm not criticizing,
> but I do want to understand the HW design. In particular I want to
> understand why the linkage is treated as bidirectional instead of one
> device being the master.
> 
> In this specific example, what would be managing the transfer? Is there
> an overarching driver that assembles the pieces, or would you expect
> individual drivers to find each other?

The direction of the dataflow depends on the case. For camera, the data
flows from external components towards the SoC. For display, vice versa.
I don't know much about camera, but for display there are bi-directional
buses, like MIPI DBI and MIPI DSI. However, in those cases, the main
flow direction is clear, towards the display.

Then again, these are meant to be generic graph bindings, and the
dataflow could be fully bi-directional.

As for the driver model (does it matter when we are talking about DT
bindings?), I think there are very different opinions. My thought for
display is that there is always an overarching driver, something that
manages the video pipeline as a whole.

However, each of the individual drivers in the video pipeline will
control its input ports. So, say, a panel driver gets a command from the
overarching control driver to enable the panel device. The panel driver
reacts by calling the encoder (i.e. the one that outputs pixels to the
panel), commanding it to enable the video stream.

I know some (many?) people think the other way around, that the encoder
commands the panel to enable itself. I don't think that's versatile
enough. As a theoretical, slightly exaggerated, example, we could have a
panel that wants the pixel clock to be enabled for a short while before
enabling the actual pixel stream. And maybe the panel driver needs to do
something before the pixel stream is enabled (say, send a command to the
panel).

The above is very easy to do if the panel driver is in control of the
received video stream. It's rather difficult to do if the encoder is in
control.

Also, I think a panel (or any encoder) can be compared to a display
controller: a display controller uses DMA to transfer pixel data from
the memory to the display controller. A panel or encoder uses the
incoming video bus to transfer pixel data from the previous display
component to the panel or encoder device. And I don't think anyone says
the DMA driver should be in control of the display controller.

But this is going quite a bit into the SW architecture. Does it matter
when we are talking about the representation of HW in the DT data?

> In all of the above examples I see a unidirectional data flow. There are
> producer ports and consumer ports. Is there any (reasonable) possibility
> of peer ports that are bidirectional?

Yes, as I mentioned above. However, I do believe that at the moment all
the cases have a clear main direction. But then, if these are generic
bindings, do we want to rely on that?

>> According to video-interfaces.txt, it is expected that endpoints contain
>> phandles pointing to the remote endpoint on both sides. I'd like to
>> leave this up to the more specialized bindings, but I can see that this
>> makes enumerating the connections starting from each device tree node
>> easier, for example at probe time.
> 
> This has come up in the past. That approach comes down to premature
> optimization at the expense of making the data structure more prone to
> inconsistency. I consider it to be a bad pattern.
> 
> Backlinks are good for things like dynamically linked lists that need to
> be fast and the software fully manages the links. For a data structure like
> the FDT it is better to have the data in one place, and one place only.
> Besides, computers are *good* at parsing data structures. :-)
> 
> I appreciate that existing drivers may be using the backlinks, but I
> strongly recommend not doing it for new users.

Ok. If we go for single directional link, the question is then: which
way? And is the direction different for display and camera, which are
kind of reflections of each other?

> Another thought. In terms of the pattern, I would add a recommendation
> that there should be a way to identify ports of a particular type. ie.
> If I were using the pattern to implement a patch bay of DSP filters,
> where each input and output, then each target node should have a unique
> identifier property analogous to "interrupt-controller" or
> "gpio-controller". In this fictitious example I would probably choose
> "audiostream-input-port" and "audiostream-output-port" as empty
> properties in the port nodes. (I'm not suggesting a change to the
> existing binding, but making a recommendation to new users).

I don't see any harm in that, but I don't right away see what could be
done with them? Did you have something in mind?

I guess those could be used to study the graph before the drivers have
been loaded. After the drivers have been loaded, the drivers should
somehow register themselves and the ports/endpoints. And as the driver
knows what kind of ports they are, it can supply this information in the
runtime data structures.

If we do that, would it be better to have two pieces of data:
input/output/bi-directional, and the port type (say, mipi-dpi, lvds, etc.)?

 Tomi



[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 901 bytes --]

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-07 17:06                 ` Grant Likely
@ 2014-03-08 10:57                   ` Tomi Valkeinen
  0 siblings, 0 replies; 74+ messages in thread
From: Tomi Valkeinen @ 2014-03-08 10:57 UTC (permalink / raw)
  To: Grant Likely, Philipp Zabel
  Cc: Sascha Hauer, Rob Herring, Russell King - ARM Linux,
	Mauro Carvalho Chehab, Rob Herring, Sylwester Nawrocki,
	Laurent Pinchart, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

[-- Attachment #1: Type: text/plain, Size: 1324 bytes --]

On 07/03/14 19:06, Grant Likely wrote:
> On Thu, 27 Feb 2014 10:36:36 +0200, Tomi Valkeinen <tomi.valkeinen@ti.com> wrote:
>> On 26/02/14 16:48, Philipp Zabel wrote:
>>
>>>> I would like the document to acknowledge the difference from the
>>>> phandle+args pattern used elsewhere and a description of when it would
>>>> be appropriate to use this instead of a simpler binding.
>>>
>>> Alright. The main point of this binding is that the devices may have
>>> multiple distinct ports that each can be connected to other devices.
>>
>> The other main point with this binding are multiple endpoints per port.
>> So you can have, say, a display controller, with single port, which has
>> two endpoints going to two separate LCD panels.
>>
>> In physical level that would usually mean that the same pins from the
>> display controller are connected to two panels. Most likely this would
>> mean that only one panel can be used at a time, possibly with different
>> settings (say, 16 RGB pins for one panel, 24 RGB pins for the other).
> 
> What device is in control in that scenario?

The endpoints in a single port are exclusive, only one can be active at
a time. So the control for the active path would be no different than in
single panel case (for which people have different opinions).

 Tomi



[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 901 bytes --]

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-08 10:33                 ` Tomi Valkeinen
@ 2014-03-08 11:41                   ` Grant Likely
  2014-03-08 23:25                     ` Sylwester Nawrocki
  2014-03-10 10:18                     ` Tomi Valkeinen
  0 siblings, 2 replies; 74+ messages in thread
From: Grant Likely @ 2014-03-08 11:41 UTC (permalink / raw)
  To: Tomi Valkeinen, Philipp Zabel
  Cc: Sascha Hauer, Rob Herring, Russell King - ARM Linux,
	Mauro Carvalho Chehab, Rob Herring, Sylwester Nawrocki,
	Laurent Pinchart, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

On Sat, 8 Mar 2014 12:33:12 +0200, Tomi Valkeinen <tomi.valkeinen@ti.com> wrote:
> On 07/03/14 19:05, Grant Likely wrote:
> > On Wed, 26 Feb 2014 15:48:49 +0100, Philipp Zabel <p.zabel@pengutronix.de> wrote:
> >> Hi Grant,
> >>
> >> thank you for the comments.
> > 
> > Hi Philipp,
> > 
> > I've got lots of comments and questions below, but I must say thank you
> > for doing this. It is a helpful description.
> 
> Thanks for the comments. I'll answer from my point of view, which may be
> different than Philipp's.
> 
> > Bear with me, I'm going to comment on each point. I'm not criticizing,
> > but I do want to understand the HW design. In particular I want to
> > understand why the linkage is treated as bidirectional instead of one
> > device being the master.
> > 
> > In this specific example, what would be managing the transfer? Is there
> > an overarching driver that assembles the pieces, or would you expect
> > individual drivers to find each other?
> 
> The direction of the dataflow depends on the case. For camera, the data
> flows from external components towards the SoC. For display, vice versa.
> I don't know much about camera, but for display there are bi-directional
> buses, like MIPI DBI and MIPI DSI. However, in those cases, the main
> flow direction is clear, towards the display.
> 
> Then again, these are meant to be generic graph bindings, and the
> dataflow could be fully bi-directional.
> 
> As for the driver model (does it matter when we are talking about DT
> bindings?), I think there are very different opinions. My thought for
> display is that there is always an overarching driver, something that
> manages the video pipeline as a whole.
> 
> However, each of the individual drivers in the video pipeline will
> control its input ports. So, say, a panel driver gets a command from the
> overarching control driver to enable the panel device. The panel driver
> reacts by calling the encoder (i.e. the one that outputs pixels to the
> panel), commanding it to enable the video stream.
> 
> I know some (many?) people think the other way around, that the encoder
> commands the panel to enable itself. I don't think that's versatile
> enough. As a theoretical, slightly exaggerated, example, we could have a
> panel that wants the pixel clock to be enabled for a short while before
> enabling the actual pixel stream. And maybe the panel driver needs to do
> something before the pixel stream is enabled (say, send a command to the
> panel).
> 
> The above is very easy to do if the panel driver is in control of the
> received video stream. It's rather difficult to do if the encoder is in
> control.
> 
> Also, I think a panel (or any encoder) can be compared to a display
> controller: a display controller uses DMA to transfer pixel data from
> the memory to the display controller. A panel or encoder uses the
> incoming video bus to transfer pixel data from the previous display
> component to the panel or encoder device. And I don't think anyone says
> the DMA driver should be in control of the display controller.
> 
> But this is going quite a bit into the SW architecture. Does it matter
> when we are talking about the representation of HW in the DT data?

It matters to the point that the binding author makes a decision about
how to represent the hardware. The driver architecture doesn't need to
match the binding layout, but it is nice when they line up.

> > In all of the above examples I see a unidirectional data flow. There are
> > producer ports and consumer ports. Is there any (reasonable) possibility
> > of peer ports that are bidirectional?
> 
> Yes, as I mentioned above. However, I do believe that at the moment all
> the cases have a clear main direction. But then, if these are generic
> bindings, do we want to rely on that?

Most of my questions are making sure I've got a full understanding. Even
if there are typical devices that have bidirectional interfaces I think
my decisions as a binding author would be the same. Given an arbitrary
graph of devices, I would still choose one place to describe a
connection. If it was a complex setup I may use a completely separate
node that collects all the element devices. On more straightforward
setups I would choose one of the devices as the 'master' and have it
describe the connections to other component devices.

> >> According to video-interfaces.txt, it is expected that endpoints contain
> >> phandles pointing to the remote endpoint on both sides. I'd like to
> >> leave this up to the more specialized bindings, but I can see that this
> >> makes enumerating the connections starting from each device tree node
> >> easier, for example at probe time.
> > 
> > This has come up in the past. That approach comes down to premature
> > optimization at the expense of making the data structure more prone to
> > inconsistency. I consider it to be a bad pattern.
> > 
> > Backlinks are good for things like dynamically linked lists that need to
> > be fast and the software fully manages the links. For a data structure like
> > the FDT it is better to have the data in one place, and one place only.
> > Besides, computers are *good* at parsing data structures. :-)
> > 
> > I appreciate that existing drivers may be using the backlinks, but I
> > strongly recommend not doing it for new users.
> 
> Ok. If we go for single directional link, the question is then: which
> way? And is the direction different for display and camera, which are
> kind of reflections of each other?

In general I would recommend choosing whichever device you would
sensibly think of as a master. In the camera case I would choose the
camera controller node instead of the camera itself, and in the display
case I would choose the display controller instead of the panel. The
binding author needs to choose what she thinks makes the most sense, but
drivers can still use it if it turns out to be 'backwards'.

> > Another thought. In terms of the pattern, I would add a recommendation
> > that there should be a way to identify ports of a particular type. ie.
> > If I were using the pattern to implement a patch bay of DSP filters,
> > where each input and output, then each target node should have a unique
> > identifier property analogous to "interrupt-controller" or
> > "gpio-controller". In this fictitious example I would probably choose
> > "audiostream-input-port" and "audiostream-output-port" as empty
> > properties in the port nodes. (I'm not suggesting a change to the
> > existing binding, but making a recommendation to new users).
> 
> I don't see any harm in that, but I don't right away see what could be
> done with them? Did you have something in mind?

It would help with schema validation and allow ports of the same
interface to get grouped together.

> I guess those could be used to study the graph before the drivers have
> been loaded. After the drivers have been loaded, the drivers should
> somehow register themselves and the ports/endpoints. And as the driver
> knows what kind of ports they are, it can supply this information in the
> runtime data structures.
> 
> If we do that, would it be better to have two pieces of data:
> input/output/bi-directional, and the port type (say, mipi-dpi, lvds, etc.)?

Sure. That's worth considering.

g.


^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-07 17:05               ` Grant Likely
  2014-03-08 10:33                 ` Tomi Valkeinen
@ 2014-03-08 11:54                 ` Russell King - ARM Linux
  1 sibling, 0 replies; 74+ messages in thread
From: Russell King - ARM Linux @ 2014-03-08 11:54 UTC (permalink / raw)
  To: Grant Likely
  Cc: Philipp Zabel, Sascha Hauer, Rob Herring, Mauro Carvalho Chehab,
	Rob Herring, Sylwester Nawrocki, Laurent Pinchart,
	Tomi Valkeinen, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

On Sat, Mar 08, 2014 at 01:05:50AM +0800, Grant Likely wrote:
> On Wed, 26 Feb 2014 15:48:49 +0100, Philipp Zabel <p.zabel@pengutronix.de> wrote:
> > Or two separate display controllers with two parallel outputs each, a
> > 4-port multiplexer, and an encoder (e.g. lvds):
> > 
> > 	,------------------.
> > 	| display    [port@0]--.   ,-------------.
> > 	| controller [port@1]-. `-[port@0]       |
> > 	`------------------´   `--[port@1]  mux  |
> > 	,------------------.   ,--[port@2]       |    ,-------------.
> > 	| display    [port@0]-´ ,-[port@3] [port@4]--[port] encoder |
> > 	| controller [port@1]--´   `-------------´    `-------------´
> > 	`------------------´
> 
> Similar question here, how is the above complex assembled into a logical
> device? Is there an overarching controller device for this component
> complex?

This is exactly the problem we discussed at Kernel Summit during the DRM
et.al. session.  It's the multiple component problem.  We have this solved
in a subsystem and firmware independent manner.

The way this works in imx-drm is that the graph of connections is scanned
to discover which component devices are required, and once all the
components are present, we initialise the logical device.  Further than
that, the driver structure is an implementation-specific detail.

The starting point is the CPU facing part, because that's what provides
the logical interfaces, especially as there may be more than one logical
interface (which each of which may be entirely separate kernel
subsystems - display vs capture.)

> >  ,--------.    ,---------.
> >  | dc [port]--[port@0]   |
> >  `--------´    | encoder |    ,-----------.
> >                |   [port@1]--[port] panel |
> >                `---------´    `-----------´ 
> 
> In all of the above examples I see a unidirectional data flow. There are
> producer ports and consumer ports. Is there any (reasonable) possibility
> of peer ports that are bidirectional?

Generally, they will be unidirectional, but when you realise that you want
to be able to walk the graph from the CPU point of view, the data flow
direction is meaningless.  What you need to know is how the components
are connected together.

If you model the links as per the data flow direction, then while you may
be able to walk the display side - eg, dc -> encoder -> panel due to your
DT links, you wouldn't be able to walk from the CPU interface down to your
camera sensor - you would need to do a full scan of the DT for every
iteration to reverse the direction of the linkages.

> > According to video-interfaces.txt, it is expected that endpoints contain
> > phandles pointing to the remote endpoint on both sides. I'd like to
> > leave this up to the more specialized bindings, but I can see that this
> > makes enumerating the connections starting from each device tree node
> > easier, for example at probe time.
> 
> This has come up in the past. That approach comes down to premature
> optimization at the expense of making the data structure more prone to
> inconsistency. I consider it to be a bad pattern.
> 
> Backlinks are good for things like dynamically linked lists that need to
> be fast and the software fully manages the links. For a data structure like
> the FDT it is better to have the data in one place, and one place only.
> Besides, computers are *good* at parsing data structures. :-)
> 
> I appreciate that existing drivers may be using the backlinks, but I
> strongly recommend not doing it for new users.

"Backlinks" doesn't convey to me exactly what you mean here.  Are you
talking about the ones in the opposite direction to data flow, or the
ones pointing upstream towards the CPU?  (I'm going to use upstream to
refer to the ones pointing towards the CPU.)

That matters: as I've said above, when creating logical devices, you
tend to start at the CPU interfaces and work your way down the graph.
Working the opposite way is much harder, especially if you hit some
kind of muxing arrangement which results in two sensors which combine
into one logical grabbing device.

> > If the back links are not provided in the device tree, a device at the
> > receiving end of a remote-endpoint phandle can only know about connected
> > remote ports after the kernel parsed the whole graph and created the
> > back links itself.
> 
> A whole tree parse isn't expensive. We do it all the time.

A whole tree parse may not be expensive, but to do it multiple times
increases the cost manyfold.  Taking the first example above, it may
be needed to do that five times, one time per link, and that may be
just one subsystem.  It could very easily become 10 times, at which
point the expense is an order of magnitude more than it was.

We do have real hardware which is arranged as per that first diagram.
It's the iMX6Q - two IPUs each with two display controllers, giving a
total of four output video streams.  Those are then fed into a set of
muxes which then feed into a set of encoders.  Effectively, all those
muxes make a switching matrix, allowing you to connect any encoder to
any of the four output video streams - even connecting two or more
encoders to the same video stream.

There's HDMI, TV, LDB, and parallel.  So that's 20 links in total (in
reality, it's 16 because we don't represent the mux-to-encoder link
because it's not relevant.)  So, is 16x "a whole tree parse which isn't
expensive" still not expensive?

I'm not saying that we /need/ this, I'm merely pointing out that your
comment may be correct for one-offs, but that may not be what happens
in reality.

I think for the case I mention above, it's entirely possible to do
without the upstream links - whether that suits everyone, I'm not sure.

> > >  Graphs should be one direction only.
> > 
> > But this is not what the current binding in video-interfaces.txt
> > describes. I don't think it is a good idea to explicitly forbid
> > backlinks in this binding.
> 
> Nah, I disagree. The binding document /should/ forbid it to make it
> really clear what the common pattern is. The code doesn't have to
> enforce it (you don't want to break existing platforms), but the
> document should be blunt.
> 
> Another thought. In terms of the pattern, I would add a recommendation
> that there should be a way to identify ports of a particular type. ie.
> If I were using the pattern to implement a patch bay of DSP filters,
> where each input and output, then each target node should have a unique
> identifier property analogous to "interrupt-controller" or
> "gpio-controller". In this fictitious example I would probably choose
> "audiostream-input-port" and "audiostream-output-port" as empty
> properties in the port nodes. (I'm not suggesting a change to the
> existing binding, but making a recommendation to new users).

Given the amount of discussion of these bindings which are already in
the kernel, I have to ask a really obvious question: were they reviewed
before they were merged into the kernel?  It sounds like the above
discussion should already have happened...

-- 
FTTC broadband for 0.8mile line: now at 9.7Mbps down 460kbps up... slowly
improving, and getting towards what was expected from it.

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-08 11:41                   ` Grant Likely
@ 2014-03-08 23:25                     ` Sylwester Nawrocki
  2014-03-10 10:18                     ` Tomi Valkeinen
  1 sibling, 0 replies; 74+ messages in thread
From: Sylwester Nawrocki @ 2014-03-08 23:25 UTC (permalink / raw)
  To: Grant Likely, Tomi Valkeinen
  Cc: Philipp Zabel, Sascha Hauer, Rob Herring,
	Russell King - ARM Linux, Mauro Carvalho Chehab, Rob Herring,
	Sylwester Nawrocki, Laurent Pinchart, Kyungmin Park,
	linux-kernel, linux-media, devicetree, Philipp Zabel

On 03/08/2014 12:41 PM, Grant Likely wrote:
>>> Another thought. In terms of the pattern, I would add a recommendation
>>> >  >  that there should be a way to identify ports of a particular type. ie.
>>> >  >  If I were using the pattern to implement a patch bay of DSP filters,
>>> >  >  where each input and output, then each target node should have a unique
>>> >  >  identifier property analogous to "interrupt-controller" or
>>> >  >  "gpio-controller". In this fictitious example I would probably choose
>>> >  >  "audiostream-input-port" and "audiostream-output-port" as empty
>>> >  >  properties in the port nodes. (I'm not suggesting a change to the
>>> >  >  existing binding, but making a recommendation to new users).
>> >
>> >  I don't see any harm in that, but I don't right away see what could be
>> >  done with them? Did you have something in mind?
>
> It would help with schema validation and allow ports of the same
> interface to get grouped together.
>
>> >  I guess those could be used to study the graph before the drivers have
>> >  been loaded. After the drivers have been loaded, the drivers should
>> >  somehow register themselves and the ports/endpoints. And as the driver
>> >  knows what kind of ports they are, it can supply this information in the
>> >  runtime data structures.
>> >
>> >  If we do that, would it be better to have two pieces of data:
>> >  input/output/bi-directional, and the port type (say, mipi-dpi, lvds, etc.)?

I'm not sure about the direction information (it also came up when we
originally discussed this binding), but the port type information would
be useful. As it turns out, it is not always possible to describe a data
bus interface type/data transmission protocol with a set of primitive
properties in an endpoint node. Especially in cases when the physical
layer is basically identical, for instance in case of MIPI CSI-2 and
SMIA CCP2, or in future MIPI CSI-3 and MIPI CSI-2.

In general there could likely be more than one supported, if both the
transmitter and the receiver are sufficiently configurable.

Then, to be able to fully describe a data port, perhaps we could
add a property like 'interface-type' or 'bus-type' with values like
"mipi-dbi", mipi-dsi", "mipi-csi2", "smia-ccp2", "hdmi", etc.

> Sure. That's worth considering.

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-08 11:41                   ` Grant Likely
  2014-03-08 23:25                     ` Sylwester Nawrocki
@ 2014-03-10 10:18                     ` Tomi Valkeinen
  2014-03-10 13:52                       ` Laurent Pinchart
  2014-03-10 14:41                       ` Grant Likely
  1 sibling, 2 replies; 74+ messages in thread
From: Tomi Valkeinen @ 2014-03-10 10:18 UTC (permalink / raw)
  To: Grant Likely, Philipp Zabel
  Cc: Sascha Hauer, Rob Herring, Russell King - ARM Linux,
	Mauro Carvalho Chehab, Rob Herring, Sylwester Nawrocki,
	Laurent Pinchart, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

[-- Attachment #1: Type: text/plain, Size: 1758 bytes --]

On 08/03/14 13:41, Grant Likely wrote:

>> Ok. If we go for single directional link, the question is then: which
>> way? And is the direction different for display and camera, which are
>> kind of reflections of each other?
> 
> In general I would recommend choosing whichever device you would
> sensibly think of as a master. In the camera case I would choose the
> camera controller node instead of the camera itself, and in the display
> case I would choose the display controller instead of the panel. The
> binding author needs to choose what she thinks makes the most sense, but
> drivers can still use it if it turns out to be 'backwards'.

I would perhaps choose the same approach, but at the same time I think
it's all but clear. The display controller doesn't control the panel any
more than a DMA controller controls, say, the display controller.

In fact, in earlier versions of OMAP DSS DT support I had a simpler port
description, and in that I had the panel as the master (i.e. link from
panel to dispc) because the panel driver uses the display controller's
features to provide the panel device a data stream.

And even with the current OMAP DSS DT version, which uses the v4l2 style
ports/endpoints, the driver model is still the same, and only links
towards upstream are used.

So one reason I'm happy with the dual-linking is that I can easily
follow the links from the downstream entities to upstream entities, and
other people, who have different driver model, can easily do the opposite.

But I agree that single-linking is enough and this can be handled at
runtime, even if it makes the code more complex. And perhaps requires
extra data in the dts, to give the start points for the graph.

 Tomi



[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 901 bytes --]

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-10 10:18                     ` Tomi Valkeinen
@ 2014-03-10 13:52                       ` Laurent Pinchart
  2014-03-10 14:10                         ` Tomi Valkeinen
                                           ` (2 more replies)
  2014-03-10 14:41                       ` Grant Likely
  1 sibling, 3 replies; 74+ messages in thread
From: Laurent Pinchart @ 2014-03-10 13:52 UTC (permalink / raw)
  To: Tomi Valkeinen
  Cc: Grant Likely, Philipp Zabel, Sascha Hauer, Rob Herring,
	Russell King - ARM Linux, Mauro Carvalho Chehab, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

[-- Attachment #1: Type: text/plain, Size: 3583 bytes --]

On Monday 10 March 2014 12:18:20 Tomi Valkeinen wrote:
> On 08/03/14 13:41, Grant Likely wrote:
> >> Ok. If we go for single directional link, the question is then: which
> >> way? And is the direction different for display and camera, which are
> >> kind of reflections of each other?
> > 
> > In general I would recommend choosing whichever device you would
> > sensibly think of as a master. In the camera case I would choose the
> > camera controller node instead of the camera itself, and in the display
> > case I would choose the display controller instead of the panel. The
> > binding author needs to choose what she thinks makes the most sense, but
> > drivers can still use it if it turns out to be 'backwards'
> 
> I would perhaps choose the same approach, but at the same time I think
> it's all but clear. The display controller doesn't control the panel any
> more than a DMA controller controls, say, the display controller.
> 
> In fact, in earlier versions of OMAP DSS DT support I had a simpler port
> description, and in that I had the panel as the master (i.e. link from
> panel to dispc) because the panel driver uses the display controller's
> features to provide the panel device a data stream.
> 
> And even with the current OMAP DSS DT version, which uses the v4l2 style
> ports/endpoints, the driver model is still the same, and only links
> towards upstream are used.
> 
> So one reason I'm happy with the dual-linking is that I can easily
> follow the links from the downstream entities to upstream entities, and
> other people, who have different driver model, can easily do the opposite.
> 
> But I agree that single-linking is enough and this can be handled at
> runtime, even if it makes the code more complex. And perhaps requires
> extra data in the dts, to give the start points for the graph.

In theory unidirectional links in DT are indeed enough. However, let's not 
forget the following.

- There's no such thing as single start points for graphs. Sure, in some 
simple cases the graph will have a single start point, but that's not a 
generic rule. For instance the camera graphs 
http://ideasonboard.org/media/omap3isp.ps and 
http://ideasonboard.org/media/eyecam.ps have two camera sensors, and thus two 
starting points from a data flow point of view. And if you want a better 
understanding of how complex media graphs can become, have a look at 
http://ideasonboard.org/media/vsp1.0.pdf (that's a real world example, albeit 
all connections are internal to the SoC in that particular case, and don't 
need to be described in DT).

- There's also no such thing as a master device that can just point to slave 
devices. Once again simple cases exist where that model could work, but real 
world examples exist of complex pipelines with dozens of elements all 
implemented by a separate IP core and handled by separate drivers, forming a 
graph with long chains and branches. We thus need real graph bindings.

- Finally, having no backlinks in DT would make the software implementation 
very complex. We need to be able to walk the graph in a generic way without 
having any of the IP core drivers loaded, and without any specific starting 
point. We would thus need to parse the complete DT tree, looking at all nodes 
and trying to find out whether they're part of the graph we're trying to walk. 
The complexity of the operation would be at best quadratic to the number of 
nodes in the whole DT and to the number of nodes in the graph.

-- 
Regards,

Laurent Pinchart

[-- Attachment #2: This is a digitally signed message part. --]
[-- Type: application/pgp-signature, Size: 490 bytes --]

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-10 13:52                       ` Laurent Pinchart
@ 2014-03-10 14:10                         ` Tomi Valkeinen
  2014-03-10 14:58                         ` Grant Likely
  2014-03-12 10:25                         ` Russell King - ARM Linux
  2 siblings, 0 replies; 74+ messages in thread
From: Tomi Valkeinen @ 2014-03-10 14:10 UTC (permalink / raw)
  To: Laurent Pinchart
  Cc: Grant Likely, Philipp Zabel, Sascha Hauer, Rob Herring,
	Russell King - ARM Linux, Mauro Carvalho Chehab, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

[-- Attachment #1: Type: text/plain, Size: 2213 bytes --]

On 10/03/14 15:52, Laurent Pinchart wrote:

> In theory unidirectional links in DT are indeed enough. However, let's not 
> forget the following.
> 
> - There's no such thing as single start points for graphs. Sure, in some 
> simple cases the graph will have a single start point, but that's not a 
> generic rule. For instance the camera graphs 
> http://ideasonboard.org/media/omap3isp.ps and 
> http://ideasonboard.org/media/eyecam.ps have two camera sensors, and thus two 
> starting points from a data flow point of view. And if you want a better 
> understanding of how complex media graphs can become, have a look at 
> http://ideasonboard.org/media/vsp1.0.pdf (that's a real world example, albeit 
> all connections are internal to the SoC in that particular case, and don't 
> need to be described in DT).
> 
> - There's also no such thing as a master device that can just point to slave 
> devices. Once again simple cases exist where that model could work, but real 
> world examples exist of complex pipelines with dozens of elements all 
> implemented by a separate IP core and handled by separate drivers, forming a 
> graph with long chains and branches. We thus need real graph bindings.
> 
> - Finally, having no backlinks in DT would make the software implementation 
> very complex. We need to be able to walk the graph in a generic way without 
> having any of the IP core drivers loaded, and without any specific starting 
> point. We would thus need to parse the complete DT tree, looking at all nodes 
> and trying to find out whether they're part of the graph we're trying to walk. 
> The complexity of the operation would be at best quadratic to the number of 
> nodes in the whole DT and to the number of nodes in the graph.

I did use plural when I said "to give the start points...".

If you have a list of starting points in the DT, a "graph helper" or
something could create a runtime representation of the graph at some
early phase during the boot, which would include backlinks. The
individual drivers could use that runtime graph, instead of the DT graph.

But it still sounds considerably more complex than double-links in DT.

 Tomi



[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 901 bytes --]

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-10 10:18                     ` Tomi Valkeinen
  2014-03-10 13:52                       ` Laurent Pinchart
@ 2014-03-10 14:41                       ` Grant Likely
  1 sibling, 0 replies; 74+ messages in thread
From: Grant Likely @ 2014-03-10 14:41 UTC (permalink / raw)
  To: Tomi Valkeinen, Philipp Zabel
  Cc: Sascha Hauer, Rob Herring, Russell King - ARM Linux,
	Mauro Carvalho Chehab, Rob Herring, Sylwester Nawrocki,
	Laurent Pinchart, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

On Mon, 10 Mar 2014 12:18:20 +0200, Tomi Valkeinen <tomi.valkeinen@ti.com> wrote:
> On 08/03/14 13:41, Grant Likely wrote:
> 
> >> Ok. If we go for single directional link, the question is then: which
> >> way? And is the direction different for display and camera, which are
> >> kind of reflections of each other?
> > 
> > In general I would recommend choosing whichever device you would
> > sensibly think of as a master. In the camera case I would choose the
> > camera controller node instead of the camera itself, and in the display
> > case I would choose the display controller instead of the panel. The
> > binding author needs to choose what she thinks makes the most sense, but
> > drivers can still use it if it turns out to be 'backwards'
> 
> I would perhaps choose the same approach, but at the same time I think
> it's all but clear. The display controller doesn't control the panel any
> more than a DMA controller controls, say, the display controller.

In a sense it doesn't actually matter. You sensibly want to choose the
most likely direction that drivers would go looking for a device it
depends on, but it can be matched up at runtime regardless of the
direction chosen by the binding.

g.

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-10 13:52                       ` Laurent Pinchart
  2014-03-10 14:10                         ` Tomi Valkeinen
@ 2014-03-10 14:58                         ` Grant Likely
  2014-03-10 15:15                           ` Laurent Pinchart
  2014-03-11 15:07                           ` Philipp Zabel
  2014-03-12 10:25                         ` Russell King - ARM Linux
  2 siblings, 2 replies; 74+ messages in thread
From: Grant Likely @ 2014-03-10 14:58 UTC (permalink / raw)
  To: Laurent Pinchart, Tomi Valkeinen
  Cc: Philipp Zabel, Sascha Hauer, Rob Herring,
	Russell King - ARM Linux, Mauro Carvalho Chehab, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

On Mon, 10 Mar 2014 14:52:53 +0100, Laurent Pinchart <laurent.pinchart@ideasonboard.com> wrote:
> On Monday 10 March 2014 12:18:20 Tomi Valkeinen wrote:
> > On 08/03/14 13:41, Grant Likely wrote:
> > >> Ok. If we go for single directional link, the question is then: which
> > >> way? And is the direction different for display and camera, which are
> > >> kind of reflections of each other?
> > > 
> > > In general I would recommend choosing whichever device you would
> > > sensibly think of as a master. In the camera case I would choose the
> > > camera controller node instead of the camera itself, and in the display
> > > case I would choose the display controller instead of the panel. The
> > > binding author needs to choose what she thinks makes the most sense, but
> > > drivers can still use it if it turns out to be 'backwards'
> > 
> > I would perhaps choose the same approach, but at the same time I think
> > it's all but clear. The display controller doesn't control the panel any
> > more than a DMA controller controls, say, the display controller.
> > 
> > In fact, in earlier versions of OMAP DSS DT support I had a simpler port
> > description, and in that I had the panel as the master (i.e. link from
> > panel to dispc) because the panel driver uses the display controller's
> > features to provide the panel device a data stream.
> > 
> > And even with the current OMAP DSS DT version, which uses the v4l2 style
> > ports/endpoints, the driver model is still the same, and only links
> > towards upstream are used.
> > 
> > So one reason I'm happy with the dual-linking is that I can easily
> > follow the links from the downstream entities to upstream entities, and
> > other people, who have different driver model, can easily do the opposite.
> > 
> > But I agree that single-linking is enough and this can be handled at
> > runtime, even if it makes the code more complex. And perhaps requires
> > extra data in the dts, to give the start points for the graph.
> 
> In theory unidirectional links in DT are indeed enough. However, let's not 
> forget the following.
> 
> - There's no such thing as single start points for graphs. Sure, in some 
> simple cases the graph will have a single start point, but that's not a 
> generic rule. For instance the camera graphs 
> http://ideasonboard.org/media/omap3isp.ps and 
> http://ideasonboard.org/media/eyecam.ps have two camera sensors, and thus two 
> starting points from a data flow point of view. And if you want a better 
> understanding of how complex media graphs can become, have a look at 
> http://ideasonboard.org/media/vsp1.0.pdf (that's a real world example, albeit 
> all connections are internal to the SoC in that particular case, and don't 
> need to be described in DT).
> 
> - There's also no such thing as a master device that can just point to slave 
> devices. Once again simple cases exist where that model could work, but real 
> world examples exist of complex pipelines with dozens of elements all 
> implemented by a separate IP core and handled by separate drivers, forming a 
> graph with long chains and branches. We thus need real graph bindings.
> 
> - Finally, having no backlinks in DT would make the software implementation 
> very complex. We need to be able to walk the graph in a generic way without 
> having any of the IP core drivers loaded, and without any specific starting 
> point. We would thus need to parse the complete DT tree, looking at all nodes 
> and trying to find out whether they're part of the graph we're trying to walk. 
> The complexity of the operation would be at best quadratic to the number of 
> nodes in the whole DT and to the number of nodes in the graph.

Not really. To begin with, you cannot determine any meaning of a node
across the tree (aside from it being an endpoint) without also
understanding the binding that the node is a part of. That means you
need to have something matching against the compatible string on both
ends of the linkage. For instance:

panel {
	compatible = "acme,lvds-panel";
	lvds-port: port {
	};
};

display-controller {
	compatible = "encom,video";
	port {
		remote-endpoint = <&lvds-port>;
	};
};

In the above example, the encom,video driver has absolutely zero
information about what the acme,lvds-panel binding actually implements.
There needs to be both a driver for the "acme,lvds-panel" binding and
one for the "encom,video" binding (even if the acme,lvds-panel binding
is very thin and defers the functionality to the video controller).

What you want here is the drivers to register each side of the
connection. That could be modeled with something like the following
(pseudocode):

struct of_endpoint {
	struct list_head list;
	struct device_node *ep_node;
	void *context;
	void (*cb)(struct of_endpoint *ep, void *data);
}

int of_register_port(struct device *node, void (*cb)(struct of_endpoint *ep, void *data), void *data)
{
	struct of_endpoint *ep = kzalloc(sizeof(*ep), GFP_KERNEL);

	ep->ep_node = node;
	ep->data = data;
	ep->callback = cb;

	/* store the endpoint to a list */
	/* check if the endpoint has a remote-endpoint link */
		/* If so, then link the two together and call the
		 * callbacks */
}

That's neither expensive nor complicated.

Originally I suggested walking the whole tree multiple times, but as
mentioned that doesn't scale, and as I thought about the above it isn't
even a valid thing to do. Everything has to be driven by drivers, so
even if the backlinks are there, nothing can be done with the link until
the other side goes through enumeration independently.

g.

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-10 14:58                         ` Grant Likely
@ 2014-03-10 15:15                           ` Laurent Pinchart
  2014-03-10 15:40                             ` Philipp Zabel
                                               ` (2 more replies)
  2014-03-11 15:07                           ` Philipp Zabel
  1 sibling, 3 replies; 74+ messages in thread
From: Laurent Pinchart @ 2014-03-10 15:15 UTC (permalink / raw)
  To: Grant Likely
  Cc: Tomi Valkeinen, Philipp Zabel, Sascha Hauer, Rob Herring,
	Russell King - ARM Linux, Mauro Carvalho Chehab, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

Hi Grant,

On Monday 10 March 2014 14:58:15 Grant Likely wrote:
> On Mon, 10 Mar 2014 14:52:53 +0100, Laurent Pinchart wrote:
> > On Monday 10 March 2014 12:18:20 Tomi Valkeinen wrote:
> > > On 08/03/14 13:41, Grant Likely wrote:
> > > >> Ok. If we go for single directional link, the question is then: which
> > > >> way? And is the direction different for display and camera, which are
> > > >> kind of reflections of each other?
> > > > 
> > > > In general I would recommend choosing whichever device you would
> > > > sensibly think of as a master. In the camera case I would choose the
> > > > camera controller node instead of the camera itself, and in the
> > > > display case I would choose the display controller instead of the
> > > > panel. The binding author needs to choose what she thinks makes the
> > > > most sense, but drivers can still use it if it turns out to be
> > > > 'backwards'
> > > 
> > > I would perhaps choose the same approach, but at the same time I think
> > > it's all but clear. The display controller doesn't control the panel any
> > > more than a DMA controller controls, say, the display controller.
> > > 
> > > In fact, in earlier versions of OMAP DSS DT support I had a simpler port
> > > description, and in that I had the panel as the master (i.e. link from
> > > panel to dispc) because the panel driver uses the display controller's
> > > features to provide the panel device a data stream.
> > > 
> > > And even with the current OMAP DSS DT version, which uses the v4l2 style
> > > ports/endpoints, the driver model is still the same, and only links
> > > towards upstream are used.
> > > 
> > > So one reason I'm happy with the dual-linking is that I can easily
> > > follow the links from the downstream entities to upstream entities, and
> > > other people, who have different driver model, can easily do the
> > > opposite.
> > > 
> > > But I agree that single-linking is enough and this can be handled at
> > > runtime, even if it makes the code more complex. And perhaps requires
> > > extra data in the dts, to give the start points for the graph.
> > 
> > In theory unidirectional links in DT are indeed enough. However, let's not
> > forget the following.
> > 
> > - There's no such thing as single start points for graphs. Sure, in some
> > simple cases the graph will have a single start point, but that's not a
> > generic rule. For instance the camera graphs
> > http://ideasonboard.org/media/omap3isp.ps and
> > http://ideasonboard.org/media/eyecam.ps have two camera sensors, and thus
> > two starting points from a data flow point of view. And if you want a
> > better understanding of how complex media graphs can become, have a look
> > at http://ideasonboard.org/media/vsp1.0.pdf (that's a real world example,
> > albeit all connections are internal to the SoC in that particular case,
> > and don't need to be described in DT).
> > 
> > - There's also no such thing as a master device that can just point to
> > slave devices. Once again simple cases exist where that model could work,
> > but real world examples exist of complex pipelines with dozens of
> > elements all implemented by a separate IP core and handled by separate
> > drivers, forming a graph with long chains and branches. We thus need real
> > graph bindings.
> > 
> > - Finally, having no backlinks in DT would make the software
> > implementation very complex. We need to be able to walk the graph in a
> > generic way without having any of the IP core drivers loaded, and without
> > any specific starting point. We would thus need to parse the complete DT
> > tree, looking at all nodes and trying to find out whether they're part of
> > the graph we're trying to walk. The complexity of the operation would be
> > at best quadratic to the number of nodes in the whole DT and to the number
> > of nodes in the graph.
> 
> Not really. To begin with, you cannot determine any meaning of a node
> across the tree (aside from it being an endpoint)

That's the important part. I can assume the target node of the remote-endpoint 
phandle to be an endpoint, and can thus assume that it implements the of-graph 
bindings. That's all I need to be able to walk the graph in a generic way.

> without also understanding the binding that the node is a part of. That
> means you need to have something matching against the compatible string on
> both ends of the linkage. For instance:
> 
> panel {
> 	compatible = "acme,lvds-panel";
> 	lvds-port: port {
> 	};
> };
> 
> display-controller {
> 	compatible = "encom,video";
> 	port {
> 		remote-endpoint = <&lvds-port>;
> 	};
> };
> 
> In the above example, the encom,video driver has absolutely zero
> information about what the acme,lvds-panel binding actually implements.
> There needs to be both a driver for the "acme,lvds-panel" binding and
> one for the "encom,video" binding (even if the acme,lvds-panel binding
> is very thin and defers the functionality to the video controller).

I absolutely agree with that. We need a driver for each device (in this case 
the acme panel and the encom display controller), and we need those drivers to 
register entities (in the generic sense of the term) for them to be able to 
communicate with each other. The display controller driver must not try to 
parse panel-specific properties from the panel node. However, as described 
above, I believe it can parse ports and endpoints to walk the graph.

> What you want here is the drivers to register each side of the
> connection. That could be modeled with something like the following
> (pseudocode):
> 
> struct of_endpoint {
> 	struct list_head list;
> 	struct device_node *ep_node;
> 	void *context;
> 	void (*cb)(struct of_endpoint *ep, void *data);
> }
> 
> int of_register_port(struct device *node, void (*cb)(struct of_endpoint *ep,
> void *data), void *data) {
> 	struct of_endpoint *ep = kzalloc(sizeof(*ep), GFP_KERNEL);
> 
> 	ep->ep_node = node;
> 	ep->data = data;
> 	ep->callback = cb;
> 
> 	/* store the endpoint to a list */
> 	/* check if the endpoint has a remote-endpoint link */
> 		/* If so, then link the two together and call the
> 		 * callbacks */
> }
> 
> That's neither expensive nor complicated.
> 
> Originally I suggested walking the whole tree multiple times, but as
> mentioned that doesn't scale, and as I thought about the above it isn't
> even a valid thing to do. Everything has to be driven by drivers, so
> even if the backlinks are there, nothing can be done with the link until
> the other side goes through enumeration independently.

For such composite devices, what we need from a driver's point of view is a 
mechanism to wait for all components to be in place before proceeding. This 
isn't DT-related as such, but the graph is obviously described in DT for DT-
based platforms.

There are at least two mainline implementations of such a mechanism. One of 
them can be found in drivers/media/v4l2-core/v4l2-async.c, another more recent 
one in drivers/base/component.c. Neither of them is DT-specific, and they 
don't try to parse DT content.

The main problem, from a DT point of view, is that we need to pick a master 
driver that will initiate the process of waiting for all components to be in 
place. This is usually the driver of the main component inside the SoC. For a 
camera capture pipeline the master is the SoC camera device driver that will 
create the V4L2 device node(s). For a display pipeline the master is the SoC 
display driver that will create the DRM/KMS devices.

The master device driver needs to create a list of all components it needs, 
and wait until all those components have been probed by their respective 
driver. Creating such a list requires walking the graph, starting at the 
master device (using a CPU-centric view as described by Russell). This is why 
we need the backlinks, as the master device can have inbound links.

-- 
Regards,

Laurent Pinchart


^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-10 15:15                           ` Laurent Pinchart
@ 2014-03-10 15:40                             ` Philipp Zabel
  2014-03-11 11:43                               ` Laurent Pinchart
  2014-03-11 13:04                             ` Andrzej Hajda
  2014-03-20 17:01                             ` Grant Likely
  2 siblings, 1 reply; 74+ messages in thread
From: Philipp Zabel @ 2014-03-10 15:40 UTC (permalink / raw)
  To: Laurent Pinchart
  Cc: Grant Likely, Tomi Valkeinen, Sascha Hauer, Rob Herring,
	Russell King - ARM Linux, Mauro Carvalho Chehab, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

Am Montag, den 10.03.2014, 16:15 +0100 schrieb Laurent Pinchart:
> Hi Grant,
> 
> On Monday 10 March 2014 14:58:15 Grant Likely wrote:
> > On Mon, 10 Mar 2014 14:52:53 +0100, Laurent Pinchart wrote:
> > > On Monday 10 March 2014 12:18:20 Tomi Valkeinen wrote:
> > > > On 08/03/14 13:41, Grant Likely wrote:
> > > > >> Ok. If we go for single directional link, the question is then: which
> > > > >> way? And is the direction different for display and camera, which are
> > > > >> kind of reflections of each other?
> > > > > 
> > > > > In general I would recommend choosing whichever device you would
> > > > > sensibly think of as a master. In the camera case I would choose the
> > > > > camera controller node instead of the camera itself, and in the
> > > > > display case I would choose the display controller instead of the
> > > > > panel. The binding author needs to choose what she thinks makes the
> > > > > most sense, but drivers can still use it if it turns out to be
> > > > > 'backwards'
> > > > 
> > > > I would perhaps choose the same approach, but at the same time I think
> > > > it's all but clear. The display controller doesn't control the panel any
> > > > more than a DMA controller controls, say, the display controller.
> > > > 
> > > > In fact, in earlier versions of OMAP DSS DT support I had a simpler port
> > > > description, and in that I had the panel as the master (i.e. link from
> > > > panel to dispc) because the panel driver uses the display controller's
> > > > features to provide the panel device a data stream.
> > > > 
> > > > And even with the current OMAP DSS DT version, which uses the v4l2 style
> > > > ports/endpoints, the driver model is still the same, and only links
> > > > towards upstream are used.
> > > > 
> > > > So one reason I'm happy with the dual-linking is that I can easily
> > > > follow the links from the downstream entities to upstream entities, and
> > > > other people, who have different driver model, can easily do the
> > > > opposite.
> > > > 
> > > > But I agree that single-linking is enough and this can be handled at
> > > > runtime, even if it makes the code more complex. And perhaps requires
> > > > extra data in the dts, to give the start points for the graph.
> > > 
> > > In theory unidirectional links in DT are indeed enough. However, let's not
> > > forget the following.
> > > 
> > > - There's no such thing as single start points for graphs. Sure, in some
> > > simple cases the graph will have a single start point, but that's not a
> > > generic rule. For instance the camera graphs
> > > http://ideasonboard.org/media/omap3isp.ps and
> > > http://ideasonboard.org/media/eyecam.ps have two camera sensors, and thus
> > > two starting points from a data flow point of view. And if you want a
> > > better understanding of how complex media graphs can become, have a look
> > > at http://ideasonboard.org/media/vsp1.0.pdf (that's a real world example,
> > > albeit all connections are internal to the SoC in that particular case,
> > > and don't need to be described in DT).
> > > 
> > > - There's also no such thing as a master device that can just point to
> > > slave devices. Once again simple cases exist where that model could work,
> > > but real world examples exist of complex pipelines with dozens of
> > > elements all implemented by a separate IP core and handled by separate
> > > drivers, forming a graph with long chains and branches. We thus need real
> > > graph bindings.
> > > 
> > > - Finally, having no backlinks in DT would make the software
> > > implementation very complex. We need to be able to walk the graph in a
> > > generic way without having any of the IP core drivers loaded, and without
> > > any specific starting point. We would thus need to parse the complete DT
> > > tree, looking at all nodes and trying to find out whether they're part of
> > > the graph we're trying to walk. The complexity of the operation would be
> > > at best quadratic to the number of nodes in the whole DT and to the number
> > > of nodes in the graph.
> > 
> > Not really. To begin with, you cannot determine any meaning of a node
> > across the tree (aside from it being an endpoint)
> 
> That's the important part. I can assume the target node of the remote-endpoint 
> phandle to be an endpoint, and can thus assume that it implements the of-graph 
> bindings. That's all I need to be able to walk the graph in a generic way.
> 
> > without also understanding the binding that the node is a part of. That
> > means you need to have something matching against the compatible string on
> > both ends of the linkage. For instance:
> > 
> > panel {
> > 	compatible = "acme,lvds-panel";
> > 	lvds-port: port {
> > 	};
> > };
> > 
> > display-controller {
> > 	compatible = "encom,video";
> > 	port {
> > 		remote-endpoint = <&lvds-port>;
> > 	};
> > };
> > 
> > In the above example, the encom,video driver has absolutely zero
> > information about what the acme,lvds-panel binding actually implements.
> > There needs to be both a driver for the "acme,lvds-panel" binding and
> > one for the "encom,video" binding (even if the acme,lvds-panel binding
> > is very thin and defers the functionality to the video controller).
> 
> I absolutely agree with that. We need a driver for each device (in this case 
> the acme panel and the encom display controller), and we need those drivers to 
> register entities (in the generic sense of the term) for them to be able to 
> communicate with each other. The display controller driver must not try to 
> parse panel-specific properties from the panel node. However, as described 
> above, I believe it can parse ports and endpoints to walk the graph.
> 
> > What you want here is the drivers to register each side of the
> > connection. That could be modeled with something like the following
> > (pseudocode):
> > 
> > struct of_endpoint {
> > 	struct list_head list;
> > 	struct device_node *ep_node;
> > 	void *context;
> > 	void (*cb)(struct of_endpoint *ep, void *data);
> > }
> > 
> > int of_register_port(struct device *node, void (*cb)(struct of_endpoint *ep,
> > void *data), void *data) {
> > 	struct of_endpoint *ep = kzalloc(sizeof(*ep), GFP_KERNEL);
> > 
> > 	ep->ep_node = node;
> > 	ep->data = data;
> > 	ep->callback = cb;
> > 
> > 	/* store the endpoint to a list */
> > 	/* check if the endpoint has a remote-endpoint link */
> > 		/* If so, then link the two together and call the
> > 		 * callbacks */
> > }
> > 
> > That's neither expensive nor complicated.
> > 
> > Originally I suggested walking the whole tree multiple times, but as
> > mentioned that doesn't scale, and as I thought about the above it isn't
> > even a valid thing to do. Everything has to be driven by drivers, so
> > even if the backlinks are there, nothing can be done with the link until
> > the other side goes through enumeration independently.
> 
> For such composite devices, what we need from a driver's point of view is a 
> mechanism to wait for all components to be in place before proceeding. This 
> isn't DT-related as such, but the graph is obviously described in DT for DT-
> based platforms.
> 
> There are at least two mainline implementations of such a mechanism. One of 
> them can be found in drivers/media/v4l2-core/v4l2-async.c, another more recent 
> one in drivers/base/component.c. Neither of them is DT-specific, and they 
> don't try to parse DT content.
> 
> The main problem, from a DT point of view, is that we need to pick a master 
> driver that will initiate the process of waiting for all components to be in 
> place. This is usually the driver of the main component inside the SoC. For a 
> camera capture pipeline the master is the SoC camera device driver that will 
> create the V4L2 device node(s). For a display pipeline the master is the SoC 
> display driver that will create the DRM/KMS devices.
> 
> The master device driver needs to create a list of all components it needs, 
> and wait until all those components have been probed by their respective 
> driver. Creating such a list requires walking the graph, starting at the 
> master device (using a CPU-centric view as described by Russell). This is why 
> we need the backlinks, as the master device can have inbound links.

We could scan the whole tree for entities, ports and endpoints once, in
the base oftree code, and put that into a graph structure, adding the
backlinks.
The of_graph_* helpers could then use that graph instead of the device
tree.

regards
Philipp


^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-10 15:40                             ` Philipp Zabel
@ 2014-03-11 11:43                               ` Laurent Pinchart
  2014-03-11 12:59                                 ` Tomi Valkeinen
  2014-03-20 17:09                                 ` Grant Likely
  0 siblings, 2 replies; 74+ messages in thread
From: Laurent Pinchart @ 2014-03-11 11:43 UTC (permalink / raw)
  To: Philipp Zabel
  Cc: Grant Likely, Tomi Valkeinen, Sascha Hauer, Rob Herring,
	Russell King - ARM Linux, Mauro Carvalho Chehab, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

Hi Philipp,

On Monday 10 March 2014 16:40:30 Philipp Zabel wrote:
> Am Montag, den 10.03.2014, 16:15 +0100 schrieb Laurent Pinchart:
> > On Monday 10 March 2014 14:58:15 Grant Likely wrote:
> > > On Mon, 10 Mar 2014 14:52:53 +0100, Laurent Pinchart wrote:
> > > > On Monday 10 March 2014 12:18:20 Tomi Valkeinen wrote:
> > > > > On 08/03/14 13:41, Grant Likely wrote:
> > > > > >> Ok. If we go for single directional link, the question is then:
> > > > > >> which way? And is the direction different for display and camera,
> > > > > >> which are kind of reflections of each other?
> > > > > > 
> > > > > > In general I would recommend choosing whichever device you would
> > > > > > sensibly think of as a master. In the camera case I would choose
> > > > > > the camera controller node instead of the camera itself, and in
> > > > > > the display case I would choose the display controller instead of
> > > > > > the panel. The binding author needs to choose what she thinks
> > > > > > makes the most sense, but drivers can still use it if it turns out
> > > > > > to be 'backwards'
> > > > > 
> > > > > I would perhaps choose the same approach, but at the same time I
> > > > > think it's all but clear. The display controller doesn't control the
> > > > > panel any more than a DMA controller controls, say, the display
> > > > > controller.
> > > > > 
> > > > > In fact, in earlier versions of OMAP DSS DT support I had a simpler
> > > > > port description, and in that I had the panel as the master (i.e.
> > > > > link from panel to dispc) because the panel driver uses the display
> > > > > controller's features to provide the panel device a data stream.
> > > > > 
> > > > > And even with the current OMAP DSS DT version, which uses the v4l2
> > > > > style ports/endpoints, the driver model is still the same, and only
> > > > > links towards upstream are used.
> > > > > 
> > > > > So one reason I'm happy with the dual-linking is that I can easily
> > > > > follow the links from the downstream entities to upstream entities,
> > > > > and other people, who have different driver model, can easily do the
> > > > > opposite.
> > > > > 
> > > > > But I agree that single-linking is enough and this can be handled at
> > > > > runtime, even if it makes the code more complex. And perhaps
> > > > > requires extra data in the dts, to give the start points for the
> > > > > graph.
> > > > 
> > > > In theory unidirectional links in DT are indeed enough. However, let's
> > > > not forget the following.
> > > > 
> > > > - There's no such thing as single start points for graphs. Sure, in
> > > > some simple cases the graph will have a single start point, but that's
> > > > not a generic rule. For instance the camera graphs
> > > > http://ideasonboard.org/media/omap3isp.ps and
> > > > http://ideasonboard.org/media/eyecam.ps have two camera sensors, and
> > > > thus two starting points from a data flow point of view. And if you
> > > > want a better understanding of how complex media graphs can become,
> > > > have a look at http://ideasonboard.org/media/vsp1.0.pdf (that's a real
> > > > world example, albeit all connections are internal to the SoC in that
> > > > particular case, and don't need to be described in DT).
> > > > 
> > > > - There's also no such thing as a master device that can just point to
> > > > slave devices. Once again simple cases exist where that model could
> > > > work, but real world examples exist of complex pipelines with dozens
> > > > of elements all implemented by a separate IP core and handled by
> > > > separate drivers, forming a graph with long chains and branches. We
> > > > thus need real graph bindings.
> > > > 
> > > > - Finally, having no backlinks in DT would make the software
> > > > implementation very complex. We need to be able to walk the graph in a
> > > > generic way without having any of the IP core drivers loaded, and
> > > > without any specific starting point. We would thus need to parse the
> > > > complete DT tree, looking at all nodes and trying to find out whether
> > > > they're part of the graph we're trying to walk. The complexity of the
> > > > operation would be at best quadratic to the number of nodes in the
> > > > whole DT and to the number of nodes in the graph.
> > > 
> > > Not really. To begin with, you cannot determine any meaning of a node
> > > across the tree (aside from it being an endpoint)
> > 
> > That's the important part. I can assume the target node of the
> > remote-endpoint phandle to be an endpoint, and can thus assume that it
> > implements the of-graph bindings. That's all I need to be able to walk
> > the graph in a generic way.
> >
> > > without also understanding the binding that the node is a part of. That
> > > means you need to have something matching against the compatible string
> > > on both ends of the linkage. For instance:
> > > 
> > > panel {
> > > 	compatible = "acme,lvds-panel";
> > > 	lvds-port: port {
> > > 	};
> > > };
> > > 
> > > display-controller {
> > > 	compatible = "encom,video";
> > > 	port {
> > > 		remote-endpoint = <&lvds-port>;
> > > 	};
> > > };
> > > 
> > > In the above example, the encom,video driver has absolutely zero
> > > information about what the acme,lvds-panel binding actually implements.
> > > There needs to be both a driver for the "acme,lvds-panel" binding and
> > > one for the "encom,video" binding (even if the acme,lvds-panel binding
> > > is very thin and defers the functionality to the video controller).
> > 
> > I absolutely agree with that. We need a driver for each device (in this
> > case the acme panel and the encom display controller), and we need those
> > drivers to register entities (in the generic sense of the term) for them
> > to be able to communicate with each other. The display controller driver
> > must not try to parse panel-specific properties from the panel node.
> > However, as described above, I believe it can parse ports and endpoints
> > to walk the graph.
> >
> > > What you want here is the drivers to register each side of the
> > > connection. That could be modeled with something like the following
> > > (pseudocode):
> > > 
> > > struct of_endpoint {
> > > 	struct list_head list;
> > > 	struct device_node *ep_node;
> > > 	void *context;
> > > 	void (*cb)(struct of_endpoint *ep, void *data);
> > > }
> > > 
> > > int of_register_port(struct device *node, void (*cb)(struct of_endpoint
> > > *ep, void *data), void *data) {
> > > 	struct of_endpoint *ep = kzalloc(sizeof(*ep), GFP_KERNEL);
> > > 	
> > > 	ep->ep_node = node;
> > > 	ep->data = data;
> > > 	ep->callback = cb;
> > > 	
> > > 	/* store the endpoint to a list */
> > > 	/* check if the endpoint has a remote-endpoint link */
> > > 		/* If so, then link the two together and call the
> > > 		 * callbacks */
> > > }
> > > 
> > > That's neither expensive or complicated.
> > > 
> > > Originally I suggested walking the whole tree multiple times, but as
> > > mentioned that doesn't scale, and as I thought about the above it isn't
> > > even a valid thing to do. Everything has to be driven by drivers, so
> > > even if the backlinks are there, nothing can be done with the link until
> > > the other side goes through enumeration independently.
> > 
> > For such composite devices, what we need from a drivers point of view is a
> > mechanism to wait for all components to be in place before proceeding.
> > This isn't DT-related as such, but the graph is obviously described in DT
> > for DT- based platforms.
> > 
> > There are at least two mainline implementations of such a mechanism. One of
> > them can be found in drivers/media/v4l2-core/v4l2-async.c, another more
> > recent one in drivers/base/component.c. Neither of them is DT-specific,
> > and they don't try to parse DT content.
> > 
> > The main problem, from a DT point of view, is that we need to pick a
> > master driver that will initiate the process of waiting for all components
> > to be in place. This is usually the driver of the main component inside
> > the SoC. For a camera capture pipeline the master is the SoC camera device
> > driver that will create the V4L2 device node(s). For a display pipeline
> > the master is the SoC display driver that will create the DRM/KMS
> > devices.
> > 
> > The master device driver needs to create a list of all components it
> > needs, and wait until all those components have been probed by their
> > respective driver. Creating such a list requires walking the graph,
> > starting at the master device (using a CPU-centric view as described by
> > Russell). This is why we need the backlinks, as the master device can have
> > inbound links.
>
> We could scan the whole tree for entities, ports and endpoints once, in
> the base oftree code, and put that into a graph structure, adding the
> backlinks.
> The of_graph_* helpers could then use that graph instead of the device
> tree.

That could work. The complexity would still be quadratic, but we would parse 
the full device tree once only.

The runtime complexity would still be increased, as the graph helpers would 
need to find the endpoint object in the parsed graph corresponding to the DT 
node they get as an argument. That's proportional to the number of graph 
elements, not the total number of DT nodes, so I suppose it's not too bad.

We also need to make sure this would work with insertion of DT fragments at 
runtime. Probably not a big deal, but it has to be kept in mind.

-- 
Regards,

Laurent Pinchart


^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-11 11:43                               ` Laurent Pinchart
@ 2014-03-11 12:59                                 ` Tomi Valkeinen
       [not found]                                   ` <1883687.VdfitvQEN3@samsung.com>
  2014-03-11 13:16                                   ` Laurent Pinchart
  2014-03-20 17:09                                 ` Grant Likely
  1 sibling, 2 replies; 74+ messages in thread
From: Tomi Valkeinen @ 2014-03-11 12:59 UTC (permalink / raw)
  To: Laurent Pinchart, Philipp Zabel, Grant Likely
  Cc: Sascha Hauer, Rob Herring, Russell King - ARM Linux,
	Mauro Carvalho Chehab, Rob Herring, Sylwester Nawrocki,
	Kyungmin Park, linux-kernel, linux-media, devicetree,
	Philipp Zabel

[-- Attachment #1: Type: text/plain, Size: 2283 bytes --]

On 11/03/14 13:43, Laurent Pinchart wrote:

>> We could scan the whole tree for entities, ports and endpoints once, in
>> the base oftree code, and put that into a graph structure, adding the
>> backlinks.
>> The of_graph_* helpers could then use that graph instead of the device
>> tree.
> 
> That could work. The complexity would still be quadratic, but we would parse 
> the full device tree once only.
> 
> The runtime complexity would still be increased, as the graph helpers would 
> need to find the endpoint object in the parsed graph corresponding to the DT 
> node they get as an argument. That's proportional to the number of graph 
> elements, not the total number of DT nodes, so I suppose it's not too bad.
> 
> We also need to make sure this would work with insertion of DT fragments at 
> runtime. Probably not a big deal, but it has to be kept in mind.

About the endpoint linking direction... As I think was suggested, the
base logic would be to make endpoints point "outward" from the SoC, i.e.
a display controller would point to a panel, and a capture controller
would point to a sensor.

But how about this case:

I have a simple video pipeline with a display controller, an encoder and
a panel, as follows:

dispc -> encoder -> panel

Here the arrows show which way the remote-endpoint links point. So
looking at the encoder, the encoder's input port is pointed at by the
dispc, and the encoder's output port points at the panel.

Then, I have a capture pipeline, with a capture controller, an encoder
(the same one that was used for display above) and a sensor, as follows:

camc -> encoder -> sensor

Again the arrows show the links. Note that here the encoder's _output_
port is pointed at by the camc, and the encoder's _input_ port points at
the sensor.

So depending on the use case, the endpoints would point to opposite
direction from the encoder's point of view.

And if I gathered Grant's opinion correctly (correct me if I'm wrong),
he thinks things should be explicit, i.e. the bindings for, say, an
encoder should state that the encoder's output endpoint _must_ contain a
remote-endpoint property, whereas the encoder's input endpoint _must
not_ contain a remote-endpoint property.

 Tomi



[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 901 bytes --]

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-10 15:15                           ` Laurent Pinchart
  2014-03-10 15:40                             ` Philipp Zabel
@ 2014-03-11 13:04                             ` Andrzej Hajda
  2014-03-20 17:33                               ` Grant Likely
  2014-03-20 17:01                             ` Grant Likely
  2 siblings, 1 reply; 74+ messages in thread
From: Andrzej Hajda @ 2014-03-11 13:04 UTC (permalink / raw)
  To: Laurent Pinchart, Grant Likely
  Cc: Tomi Valkeinen, Philipp Zabel, Sascha Hauer, Rob Herring,
	Russell King - ARM Linux, Mauro Carvalho Chehab, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

On 03/10/2014 04:15 PM, Laurent Pinchart wrote:
> Hi Grant,
> 
> On Monday 10 March 2014 14:58:15 Grant Likely wrote:
>> On Mon, 10 Mar 2014 14:52:53 +0100, Laurent Pinchart wrote:
>>> On Monday 10 March 2014 12:18:20 Tomi Valkeinen wrote:
>>>> On 08/03/14 13:41, Grant Likely wrote:
>>>>>> Ok. If we go for single directional link, the question is then: which
>>>>>> way? And is the direction different for display and camera, which are
>>>>>> kind of reflections of each other?
>>>>>
>>>>> In general I would recommend choosing whichever device you would
>>>>> sensibly think of as a master. In the camera case I would choose the
>>>>> camera controller node instead of the camera itself, and in the
>>>>> display case I would choose the display controller instead of the
>>>>> panel. The binding author needs to choose what she things makes the
>>>>> most sense, but drivers can still use if it it turns out to be
>>>>> 'backwards'
>>>>
>>>> I would perhaps choose the same approach, but at the same time I think
>>>> it's all but clear. The display controller doesn't control the panel any
>>>> more than a DMA controller controls, say, the display controller.
>>>>
>>>> In fact, in earlier versions of OMAP DSS DT support I had a simpler port
>>>> description, and in that I had the panel as the master (i.e. link from
>>>> panel to dispc) because the panel driver uses the display controller's
>>>> features to provide the panel device a data stream.
>>>>
>>>> And even with the current OMAP DSS DT version, which uses the v4l2 style
>>>> ports/endpoints, the driver model is still the same, and only links
>>>> towards upstream are used.
>>>>
>>>> So one reason I'm happy with the dual-linking is that I can easily
>>>> follow the links from the downstream entities to upstream entities, and
>>>> other people, who have different driver model, can easily do the
>>>> opposite.
>>>>
>>>> But I agree that single-linking is enough and this can be handled at
>>>> runtime, even if it makes the code more complex. And perhaps requires
>>>> extra data in the dts, to give the start points for the graph.
>>>
>>> In theory unidirectional links in DT are indeed enough. However, let's not
>>> forget the following.
>>>
>>> - There's no such thing as single start points for graphs. Sure, in some
>>> simple cases the graph will have a single start point, but that's not a
>>> generic rule. For instance the camera graphs
>>> http://ideasonboard.org/media/omap3isp.ps and
>>> http://ideasonboard.org/media/eyecam.ps have two camera sensors, and thus
>>> two starting points from a data flow point of view. And if you want a
>>> better understanding of how complex media graphs can become, have a look
>>> at http://ideasonboard.org/media/vsp1.0.pdf (that's a real world example,
>>> albeit all connections are internal to the SoC in that particular case,
>>> and don't need to be described in DT).
>>>
>>> - There's also no such thing as a master device that can just point to
>>> slave devices. Once again simple cases exist where that model could work,
>>> but real world examples exist of complex pipelines with dozens of
>>> elements all implemented by a separate IP core and handled by separate
>>> drivers, forming a graph with long chains and branches. We thus need real
>>> graph bindings.
>>>
>>> - Finally, having no backlinks in DT would make the software
>>> implementation very complex. We need to be able to walk the graph in a
>>> generic way without having any of the IP core drivers loaded, and without
>>> any specific starting point. We would thus need to parse the complete DT
>>> tree, looking at all nodes and trying to find out whether they're part of
>>> the graph we're trying to walk. The complexity of the operation would be
>>> at best quadratic to the number of nodes in the whole DT and to the number
>>> of nodes in the graph.
>>
>> Not really. To begin with, you cannot determine any meaning of a node
>> across the tree (aside from it being an endpoint)
> 
> That's the important part. I can assume the target node of the remote-endpoint 
> phandle to be an endpoint, and can thus assume that it implements the of-graph 
> bindings. That's all I need to be able to walk the graph in a generic way.
> 
>> without also understanding the binding that the node is a part of. That
>> means you need to have something matching against the compatible string on
>> both ends of the linkage. For instance:
>>
>> panel {
>> 	compatible = "acme,lvds-panel";
>> 	lvds-port: port {
>> 	};
>> };
>>
>> display-controller {
>> 	compatible = "encom,video";
>> 	port {
>> 		remote-endpoint = <&lvds-port>;
>> 	};
>> };
>>
>> In the above example, the encom,video driver has absolutely zero
>> information about what the acme,lvds-panel binding actually implements.
>> There needs to be both a driver for the "acme,lvds-panel" binding and
>> one for the "encom,video" binding (even if the acme,lvds-panel binding
>> is very thin and defers the functionality to the video controller).
> 
> I absolutely agree with that. We need a driver for each device (in this case 
> the acme panel and the encom display controller), and we need those drivers to 
> register entities (in the generic sense of the term) for them to be able to 
> communicate with each other. The display controller driver must not try to 
> parse panel-specific properties from the panel node. However, as described 
> above, I believe it can parse ports and endpoints to walk the graph.
> 
>> What you want here is the drivers to register each side of the
>> connection. That could be modeled with something like the following
>> (pseudocode):
>>
>> struct of_endpoint {
>> 	struct list_head list;
>> 	struct device_node *ep_node;
>> 	void *context;
>> 	void (*cb)(struct of_endpoint *ep, void *data);
>> }
>>
>> int of_register_port(struct device *node, void (*cb)(struct of_endpoint *ep,
>> void *data), void *data) {
>> 	struct of_endpoint *ep = kzalloc(sizeof(*ep), GFP_KERNEL);
>>
>> 	ep->ep_node = node;
>> 	ep->data = data;
>> 	ep->callback = cb;
>>
>> 	/* store the endpoint to a list */
>> 	/* check if the endpoint has a remote-endpoint link */
>> 		/* If so, then link the two together and call the
>> 		 * callbacks */
>> }
>>
>> That's neither expensive or complicated.
>>
>> Originally I suggested walking the whole tree multiple times, but as
>> mentioned that doesn't scale, and as I thought about the above it isn't
>> even a valid thing to do. Everything has to be driven by drivers, so
>> even if the backlinks are there, nothing can be done with the link until
>> the other side goes through enumeration independently.
> 
> For such composite devices, what we need from a drivers point of view is a 
> mechanism to wait for all components to be in place before proceeding. This 
> isn't DT-related as such, but the graph is obviously described in DT for DT-
> based platforms.
> 
> There are at least two mainline implementations of such a mechanism. One of 
> them can be found in drivers/media/v4l2-core/v4l2-async.c, another more recent 
> one in drivers/base/component.c. Neither of them is DT-specific, and they 
> don't try to parse DT content.
> 
> The main problem, from a DT point of view, is that we need to pick a master 
> driver that will initiate the process of waiting for all components to be in 
> place. This is usually the driver of the main component inside the SoC. For a 
> camera capture pipeline the master is the SoC camera device driver that will 
> create the V4L2 device node(s). For a display pipeline the master is the SoC 
> display driver that will create the DRM/KMS devices.
> 
> The master device driver needs to create a list of all components it needs, 
> and wait until all those components have been probed by their respective 
> driver. Creating such a list requires walking the graph, starting at the 
> master device (using a CPU-centric view as described by Russell). This is why 
> we need the backlinks, as the master device can have inbound links.
> 

I am not sure if the approach with one device driver parsing links
between two other devices is the correct one. For example some links can
be optional, some irrelevant to the pipeline the master device tries to
create,.... I guess it could be sometimes solved using additional
properties, but this will complicate things and will not work if the
routing decision can be taken only during specific driver probe or later.

Regards
Andrzej



^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-11 12:59                                 ` Tomi Valkeinen
       [not found]                                   ` <1883687.VdfitvQEN3@samsung.com>
@ 2014-03-11 13:16                                   ` Laurent Pinchart
  2014-03-11 13:27                                     ` Tomi Valkeinen
  2014-03-20 17:23                                     ` Grant Likely
  1 sibling, 2 replies; 74+ messages in thread
From: Laurent Pinchart @ 2014-03-11 13:16 UTC (permalink / raw)
  To: Tomi Valkeinen
  Cc: Philipp Zabel, Grant Likely, Sascha Hauer, Rob Herring,
	Russell King - ARM Linux, Mauro Carvalho Chehab, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

[-- Attachment #1: Type: text/plain, Size: 2859 bytes --]

Hi Tomi,

On Tuesday 11 March 2014 14:59:20 Tomi Valkeinen wrote:
> On 11/03/14 13:43, Laurent Pinchart wrote:
> >> We could scan the whole tree for entities, ports and endpoints once, in
> >> the base oftree code, and put that into a graph structure, adding the
> >> backlinks. The of_graph_* helpers could then use that graph instead of
> >> the device tree.
> > 
> > That could work. The complexity would still be quadratic, but we would
> > parse the full device tree once only.
> > 
> > The runtime complexity would still be increased, as the graph helpers
> > would need to find the endpoint object in the parsed graph corresponding
> > to the DT node they get as an argument. That's proportional to the number
> > of graph elements, not the total number of DT nodes, so I suppose it's not
> > too bad.
> > 
> > We also need to make sure this would work with insertion of DT fragments
> > at runtime. Probably not a big deal, but it has to be kept in mind.
> 
> About the endpoint linking direction... As I think was suggested, the
> base logic would be to make endpoints point "outward" from the SoC, i.e.
> a display controller would point to a panel, and a capture controller
> would point to a sensor.
>
> But how about this case:
> 
> I have a simple video pipeline with a display controller, an encoder and
> a panel, as follows:
> 
> dispc -> encoder -> panel
> 
> Here the arrows show which way the remote-endpoint links point. So
> looking at the encoder, the encoder's input port is pointed at by the
> dispc, and the encoder's output port points at the panel.
> 
> Then, I have a capture pipeline, with a capture controller, an encoder
> (the same one that was used for display above) and a sensor, as follows:
> 
> camc -> encoder -> sensor
> 
> Again the arrows show the links. Note that here the encoder's _output_
> port is pointed at by the camc, and the encoder's _input_ port points at
> the sensor.
> 
> So depending on the use case, the endpoints would point to opposite
> direction from the encoder's point of view.
> 
> And if I gathered Grant's opinion correctly (correct me if I'm wrong),
> he thinks things should be explicit, i.e. the bindings for, say, an
> encoder should state that the encoder's output endpoint _must_ contain a
> remote-endpoint property, whereas the encoder's input endpoint _must
> not_ contain a remote-endpoint property.

Actually my understanding was that DT links would have the same direction as the 
data flow. There would be no ambiguity in that case as the direction of the 
data flow is known. What happens for bidirectional data flows still needs to be 
discussed though. And if we want to use the of-graph bindings to describe 
graphs without a data flow, a decision will need to be taken there too.

-- 
Regards,

Laurent Pinchart

[-- Attachment #2: This is a digitally signed message part. --]
[-- Type: application/pgp-signature, Size: 490 bytes --]

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-11 13:16                                   ` Laurent Pinchart
@ 2014-03-11 13:27                                     ` Tomi Valkeinen
  2014-03-11 13:44                                       ` Philipp Zabel
  2014-03-20 17:23                                     ` Grant Likely
  1 sibling, 1 reply; 74+ messages in thread
From: Tomi Valkeinen @ 2014-03-11 13:27 UTC (permalink / raw)
  To: Laurent Pinchart
  Cc: Philipp Zabel, Grant Likely, Sascha Hauer, Rob Herring,
	Russell King - ARM Linux, Mauro Carvalho Chehab, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

[-- Attachment #1: Type: text/plain, Size: 1581 bytes --]

On 11/03/14 15:16, Laurent Pinchart wrote:

>> And if I gathered Grant's opinion correctly (correct me if I'm wrong),
>> he thinks things should be explicit, i.e. the bindings for, say, an
>> encoder should state that the encoder's output endpoint _must_ contain a
>> remote-endpoint property, whereas the encoder's input endpoint _must
>> not_ contain a remote-endpoint property.
> 
> Actually my understanding was that DT links would have the same direction as the 
> data flow. There would be no ambiguity in that case as the direction of the 

Ok. At least at some point in the discussion the rule of thumb was to
have the "master" point at the "slave", which are a bit vague terms, but
I think both display and camera controllers were given as examples of
"master".

> data flow is known. What happens for bidirectional data flows still needs to be 
> discussed though. And if we want to use the of-graph bindings to describe 
> graphs without a data flow, a decision will need to be taken there too.

Yep. My worry is that if the link direction is defined in the bindings
for the device, somebody will at some point have a complex board which
happens to use two devices in such a way, that either neither of them
point to each other, or both point to each other.

Maybe we can make sure that never happens, but I feel a bit nervous
about it especially for bi-directional cases. If the link has no clear
main-dataflow direction, it may be a bit difficult to say which way to link.

With double-linking all those concerns just disappear.

 Tomi



[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 901 bytes --]

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-11 13:27                                     ` Tomi Valkeinen
@ 2014-03-11 13:44                                       ` Philipp Zabel
  0 siblings, 0 replies; 74+ messages in thread
From: Philipp Zabel @ 2014-03-11 13:44 UTC (permalink / raw)
  To: Tomi Valkeinen
  Cc: Laurent Pinchart, Grant Likely, Sascha Hauer, Rob Herring,
	Russell King - ARM Linux, Mauro Carvalho Chehab, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

Hi,

Am Dienstag, den 11.03.2014, 15:27 +0200 schrieb Tomi Valkeinen:
> On 11/03/14 15:16, Laurent Pinchart wrote:
> 
> >> And if I gathered Grant's opinion correctly (correct me if I'm wrong),
> >> he thinks things should be explicit, i.e. the bindings for, say, an
> >> encoder should state that the encoder's output endpoint _must_ contain a
> >> remote-endpoint property, whereas the encoder's input endpoint _must
> >> not_ contain a remote-endpoint property.
> > 
> > Actually my understanding was that DT links would have the same direction as the 
> > data flow. There would be no ambiguity in that case as the direction of the 
> 
> Ok. At least at some point in the discussion the rule of thumb was to
> have the "master" point at the "slave", which are a bit vague terms, but
> I think both display and camera controllers were given as examples of
> "master".
> 
> > data flow is known. What happens for bidirectional data flows still needs to be 
> > discussed though. And if we want to use the of-graph bindings to describe 
> > graphs without a data flow, a decision will need to be taken there too.
> 
> Yep. My worry is that if the link direction is defined in the bindings
> for the device, somebody will at some point have a complex board which
> happens to use two devices in such a way, that either neither of them
> point to each other, or both point to each other.
> 
> Maybe we can make sure that never happens, but I feel a bit nervous
> about it especially for bi-directional cases. If the link has no clear
> main-dataflow direction, it may be a bit difficult to say which way to link.
> 
> With double-linking all those concerns just disappear.

another possibility would be to just leave it open in the generic
bindings:

"Two endpoints are considered linked together if any of them contains a
 'remote-endpoint' phandle property that points to the other. If both
 contain a 'remote-endpoint' property, both must point at each other."

As long as we make sure that the generic code is able to generate the
undirected graph from this, we could then let more specific bindings
(like video-interfaces.txt) define that the phandle links always should
point in both directions, in data flow direction, north, or outwards
from the 'master'.

regards
Philipp


^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-10 14:58                         ` Grant Likely
  2014-03-10 15:15                           ` Laurent Pinchart
@ 2014-03-11 15:07                           ` Philipp Zabel
  2014-03-11 15:21                             ` Laurent Pinchart
  2014-03-20 17:36                             ` Grant Likely
  1 sibling, 2 replies; 74+ messages in thread
From: Philipp Zabel @ 2014-03-11 15:07 UTC (permalink / raw)
  To: Grant Likely
  Cc: Laurent Pinchart, Tomi Valkeinen, Sascha Hauer, Rob Herring,
	Russell King - ARM Linux, Mauro Carvalho Chehab, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

Hi Grant,

Am Montag, den 10.03.2014, 14:58 +0000 schrieb Grant Likely:
> On Mon, 10 Mar 2014 14:52:53 +0100, Laurent Pinchart <laurent.pinchart@ideasonboard.com> wrote:
> > On Monday 10 March 2014 12:18:20 Tomi Valkeinen wrote:
> > > On 08/03/14 13:41, Grant Likely wrote:
> > > >> Ok. If we go for single directional link, the question is then: which
> > > >> way? And is the direction different for display and camera, which are
> > > >> kind of reflections of each other?
> > > > 
> > > > In general I would recommend choosing whichever device you would
> > > > sensibly think of as a master. In the camera case I would choose the
> > > > camera controller node instead of the camera itself, and in the display
> > > > case I would choose the display controller instead of the panel. The
> > > > binding author needs to choose what she thinks makes the most sense, but
> > > > drivers can still use it if it turns out to be 'backwards'
> > > 
> > > I would perhaps choose the same approach, but at the same time I think
> > > it's all but clear. The display controller doesn't control the panel any
> > > more than a DMA controller controls, say, the display controller.
> > > 
> > > In fact, in earlier versions of OMAP DSS DT support I had a simpler port
> > > description, and in that I had the panel as the master (i.e. link from
> > > panel to dispc) because the panel driver uses the display controller's
> > > features to provide the panel device a data stream.
> > > 
> > > And even with the current OMAP DSS DT version, which uses the v4l2 style
> > > ports/endpoints, the driver model is still the same, and only links
> > > towards upstream are used.
> > > 
> > > So one reason I'm happy with the dual-linking is that I can easily
> > > follow the links from the downstream entities to upstream entities, and
> > > other people, who have different driver model, can easily do the opposite.
> > > 
> > > But I agree that single-linking is enough and this can be handled at
> > > runtime, even if it makes the code more complex. And perhaps requires
> > > extra data in the dts, to give the start points for the graph.
> > 
> > In theory unidirectional links in DT are indeed enough. However, let's not 
> > forget the following.
> > 
> > - There's no such thing as single start points for graphs. Sure, in some 
> > simple cases the graph will have a single start point, but that's not a 
> > generic rule. For instance the camera graphs 
> > http://ideasonboard.org/media/omap3isp.ps and 
> > http://ideasonboard.org/media/eyecam.ps have two camera sensors, and thus two 
> > starting points from a data flow point of view. And if you want a better 
> > understanding of how complex media graphs can become, have a look at 
> > http://ideasonboard.org/media/vsp1.0.pdf (that's a real world example, albeit 
> > all connections are internal to the SoC in that particular case, and don't 
> > need to be described in DT).
> > 
> > - There's also no such thing as a master device that can just point to slave 
> > devices. Once again simple cases exist where that model could work, but real 
> > world examples exist of complex pipelines with dozens of elements all 
> > implemented by a separate IP core and handled by separate drivers, forming a 
> > graph with long chains and branches. We thus need real graph bindings.
> > 
> > - Finally, having no backlinks in DT would make the software implementation 
> > very complex. We need to be able to walk the graph in a generic way without 
> > having any of the IP core drivers loaded, and without any specific starting 
> > point. We would thus need to parse the complete DT tree, looking at all nodes 
> > and trying to find out whether they're part of the graph we're trying to walk. 
> > The complexity of the operation would be at best quadratic to the number of 
> > nodes in the whole DT and to the number of nodes in the graph.
> 
> Not really. To begin with, you cannot determine any meaning of a node
> across the tree (aside from it being an endpoint) without also
> understanding the binding that the node is a part of. That means you
> need to have something matching against the compatible string on both
> ends of the linkage. For instance:
> 
> panel {
> 	compatible = "acme,lvds-panel";
> 	lvds-port: port {
> 	};
> };
> 
> display-controller {
> 	compatible = "encom,video";
> 	port {
> 		remote-endpoint = <&lvds-port>;
> 	};
> };
> 
> In the above example, the encom,video driver has absolutely zero
> information about what the acme,lvds-panel binding actually implements.
> There needs to be both a driver for the "acme,lvds-panel" binding and
> one for the "encom,video" binding (even if the acme,lvds-panel binding
> is very thin and defers the functionality to the video controller).
> 
> What you want here is the drivers to register each side of the
> connection. That could be modeled with something like the following
> (pseudocode):
> 
> struct of_endpoint {
> 	struct list_head list;
> 	struct device_node *ep_node;
> 	void *context;
> 	void (*cb)(struct of_endpoint *ep, void *data);
> }
> 
> int of_register_port(struct device *node, void (*cb)(struct of_endpoint *ep, void *data), void *data)
> {
> 	struct of_endpoint *ep = kzalloc(sizeof(*ep), GFP_KERNEL);
> 
> 	ep->ep_node = node;
> 	ep->data = data;
> 	ep->callback = cb;
> 
> 	/* store the endpoint to a list */
> 	/* check if the endpoint has a remote-endpoint link */
> 		/* If so, then link the two together and call the
> 		 * callbacks */
> }
> 
> That's neither expensive nor complicated.
> 
> Originally I suggested walking the whole tree multiple times, but as
> mentioned that doesn't scale, and as I thought about the above it isn't
> even a valid thing to do. Everything has to be driven by drivers, so
> even if the backlinks are there, nothing can be done with the link until
> the other side goes through enumeration independently.

I have implemented your suggestion as follows. Basically, this allows
either endpoint to contain the remote-endpoint link, as long as all
drivers register their endpoints in the probe function and return
-EPROBE_DEFER from their component framework bind callback until all
their endpoints are connected.

>From fdda1fb2bd133200d4620adcbb28697cb360e1cb Mon Sep 17 00:00:00 2001
From: Philipp Zabel <p.zabel@pengutronix.de>
Date: Tue, 11 Mar 2014 15:56:18 +0100
Subject: [PATCH] of: Implement of_graph_register_endpoint

This patch adds a function that lets drivers register their endpoints in a
global list. Newly registered endpoints are compared against known endpoints
to check if a connection should be made. If so, the driver is notified via
a simple callback.

Signed-off-by: Philipp Zabel <p.zabel@pengutronix.de>
---
 drivers/of/base.c        | 69 +++++++++++++++++++++++++++++++++++++++++++++---
 include/linux/of_graph.h | 20 +++++++++++++-
 2 files changed, 84 insertions(+), 5 deletions(-)

diff --git a/drivers/of/base.c b/drivers/of/base.c
index ebb001a..77ae54a 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -29,6 +29,7 @@
 #include "of_private.h"
 
 LIST_HEAD(aliases_lookup);
+LIST_HEAD(endpoint_list);
 
 struct device_node *of_allnodes;
 EXPORT_SYMBOL(of_allnodes);
@@ -2002,6 +2003,7 @@ int of_graph_parse_endpoint(const struct device_node *node,
 	memset(endpoint, 0, sizeof(*endpoint));
 
 	endpoint->local_node = node;
+	endpoint->remote_node = of_parse_phandle(node, "remote-endpoint", 0);
 	/*
 	 * It doesn't matter whether the two calls below succeed.
 	 * If they don't then the default value 0 is used.
@@ -2126,6 +2128,19 @@ struct device_node *of_graph_get_next_endpoint(const struct device_node *parent,
 }
 EXPORT_SYMBOL(of_graph_get_next_endpoint);
 
+static struct of_endpoint *__of_graph_lookup_endpoint(
+				const struct device_node *node)
+{
+	struct of_endpoint *ep;
+
+	list_for_each_entry(ep, &endpoint_list, list) {
+		if (ep->local_node == node)
+			return ep;
+	}
+
+	return NULL;
+}
+
 /**
  * of_graph_get_remote_port_parent() - get remote port's parent node
  * @node: pointer to a local endpoint device_node
@@ -2136,11 +2151,15 @@ EXPORT_SYMBOL(of_graph_get_next_endpoint);
 struct device_node *of_graph_get_remote_port_parent(
 			       const struct device_node *node)
 {
+	struct of_endpoint *ep;
 	struct device_node *np;
 	unsigned int depth;
 
 	/* Get remote endpoint node. */
-	np = of_parse_phandle(node, "remote-endpoint", 0);
+	ep = __of_graph_lookup_endpoint(node);
+	if (!ep || !ep->remote_node)
+		return NULL;
+	np = ep->remote_node;
 
 	/* Walk 3 levels up only if there is 'ports' node */
 	for (depth = 3; depth && np; depth--) {
@@ -2163,13 +2182,14 @@ EXPORT_SYMBOL(of_graph_get_remote_port_parent);
  */
 struct device_node *of_graph_get_remote_port(const struct device_node *node)
 {
+	struct of_endpoint *ep;
 	struct device_node *np;
 
 	/* Get remote endpoint node. */
-	np = of_parse_phandle(node, "remote-endpoint", 0);
-	if (!np)
+	ep = __of_graph_lookup_endpoint(node);
+	if (!ep || !ep->remote_node)
 		return NULL;
-	np = of_get_next_parent(np);
+	np = of_get_next_parent(ep->remote_node);
 	if (of_node_cmp(np->name, "port")) {
 		of_node_put(np);
 		return NULL;
@@ -2177,3 +2197,44 @@ struct device_node *of_graph_get_remote_port(const struct device_node *node)
 	return np;
 }
 EXPORT_SYMBOL(of_graph_get_remote_port);
+
+int of_graph_register_endpoint(const struct device_node *node,
+		void (*cb)(struct of_endpoint *ep, void *data), void *data)
+{
+	struct of_endpoint *remote_ep, *ep = kmalloc(sizeof(*ep), GFP_KERNEL);
+	if (!ep)
+		return -ENOMEM;
+
+	of_graph_parse_endpoint(node, ep);
+	ep->callback = cb;
+	ep->data = data;
+
+	list_add(&ep->list, &endpoint_list);
+
+	list_for_each_entry(remote_ep, &endpoint_list, list) {
+		struct of_endpoint *from, *to;
+		if (ep->remote_node) {
+			from = ep;
+			to = remote_ep;
+		} else {
+			from = remote_ep;
+			to = ep;
+		}
+		if (from->remote_node &&
+		    from->remote_node == to->local_node) {
+			WARN_ON(to->remote_node &&
+				to->remote_node != from->local_node);
+			to->remote_node = from->local_node;
+			to->remote_ep = from;
+			from->remote_ep = to;
+			if (from->callback)
+				from->callback(from, from->data);
+			if (to->callback)
+				to->callback(to, to->data);
+			return 0;
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(of_graph_register_endpoint);
diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h
index 3a3c5a9..f00ac4e 100644
--- a/include/linux/of_graph.h
+++ b/include/linux/of_graph.h
@@ -23,7 +23,14 @@
 struct of_endpoint {
 	unsigned int port;
 	unsigned int id;
-	const struct device_node *local_node;
+	struct device_node *local_node;
+	struct device_node *remote_node;
+	struct of_endpoint *remote_ep;
+
+	/* Internal use only */
+	struct list_head list;
+	void (*callback)(struct of_endpoint *ep, void *data);
+	void *data;
 };
 
 #ifdef CONFIG_OF
@@ -35,6 +42,10 @@ struct device_node *of_graph_get_next_endpoint(const struct device_node *parent,
 struct device_node *of_graph_get_remote_port_parent(
 					const struct device_node *node);
 struct device_node *of_graph_get_remote_port(const struct device_node *node);
+
+int of_graph_register_endpoint(const struct device_node *ep_node,
+				void (*cb)(struct of_endpoint *ep, void *data),
+				void *data);
 #else
 
 static inline int of_graph_parse_endpoint(const struct device_node *node,
@@ -68,6 +79,13 @@ static inline struct device_node *of_graph_get_remote_port(
 	return NULL;
 }
 
+static inline int of_graph_register_endpoint(const struct device_node *ep_node,
+				void (*cb)(struct of_endpoint *ep, void *data),
+				void *data);
+{
+	return -ENOSYS;
+}
+
 #endif /* CONFIG_OF */
 
 #endif /* __LINUX_OF_GRAPH_H */
-- 
1.9.0



^ permalink raw reply related	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-11 15:07                           ` Philipp Zabel
@ 2014-03-11 15:21                             ` Laurent Pinchart
  2014-03-11 18:52                               ` Philipp Zabel
  2014-03-20 17:53                               ` Grant Likely
  2014-03-20 17:36                             ` Grant Likely
  1 sibling, 2 replies; 74+ messages in thread
From: Laurent Pinchart @ 2014-03-11 15:21 UTC (permalink / raw)
  To: Philipp Zabel
  Cc: Grant Likely, Tomi Valkeinen, Sascha Hauer, Rob Herring,
	Russell King - ARM Linux, Mauro Carvalho Chehab, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

Hi Philipp,

On Tuesday 11 March 2014 16:07:00 Philipp Zabel wrote:
> Am Montag, den 10.03.2014, 14:58 +0000 schrieb Grant Likely:
> > On Mon, 10 Mar 2014 14:52:53 +0100, Laurent Pinchart wrote:

[snip]

> > > In theory unidirectional links in DT are indeed enough. However, let's
> > > not forget the following.
> > > 
> > > - There's no such thing as single start points for graphs. Sure, in some
> > > simple cases the graph will have a single start point, but that's not a
> > > generic rule. For instance the camera graphs
> > > http://ideasonboard.org/media/omap3isp.ps and
> > > http://ideasonboard.org/media/eyecam.ps have two camera sensors, and
> > > thus two starting points from a data flow point of view. And if you
> > > want a better understanding of how complex media graphs can become,
> > > have a look at http://ideasonboard.org/media/vsp1.0.pdf (that's a real
> > > world example, albeit all connections are internal to the SoC in that
> > > particular case, and don't need to be described in DT).
> > > 
> > > - There's also no such thing as a master device that can just point to
> > > slave devices. Once again simple cases exist where that model could
> > > work, but real world examples exist of complex pipelines with dozens of
> > > elements all implemented by a separate IP core and handled by separate
> > > drivers, forming a graph with long chains and branches. We thus need
> > > real graph bindings.
> > > 
> > > - Finally, having no backlinks in DT would make the software
> > > implementation very complex. We need to be able to walk the graph in a
> > > generic way without having any of the IP core drivers loaded, and
> > > without any specific starting point. We would thus need to parse the
> > > complete DT tree, looking at all nodes and trying to find out whether
> > > they're part of the graph we're trying to walk. The complexity of the
> > > operation would be at best quadratic to the number of nodes in the whole
> > > DT and to the number of nodes in the graph.
> > 
> > Not really. To begin with, you cannot determine any meaning of a node
> > across the tree (aside from it being an endpoint) without also
> > understanding the binding that the node is a part of. That means you
> > need to have something matching against the compatible string on both
> > ends of the linkage. For instance:
> > 
> > panel {
> > 	compatible = "acme,lvds-panel";
> > 	lvds-port: port {
> > 	};
> > };
> > 
> > display-controller {
> > 	compatible = "encom,video";
> > 	port {
> > 		remote-endpoint = <&lvds-port>;
> > 	};
> > };
> > 
> > In the above example, the encom,video driver has absolutely zero
> > information about what the acme,lvds-panel binding actually implements.
> > There needs to be both a driver for the "acme,lvds-panel" binding and
> > one for the "encom,video" binding (even if the acme,lvds-panel binding
> > is very thin and defers the functionality to the video controller).
> > 
> > What you want here is the drivers to register each side of the
> > connection. That could be modeled with something like the following
> > (pseudocode):
> > 
> > struct of_endpoint {
> > 
> > 	struct list_head list;
> > 	struct device_node *ep_node;
> > 	void *context;
> > 	void (*cb)(struct of_endpoint *ep, void *data);
> > 
> > }
> > 
> > int of_register_port(struct device *node, void (*cb)(struct of_endpoint
> > *ep, void *data), void *data) {
> > 
> > 	struct of_endpoint *ep = kzalloc(sizeof(*ep), GFP_KERNEL);
> > 	
> > 	ep->ep_node = node;
> > 	ep->data = data;
> > 	ep->callback = cb;
> > 	
> > 	/* store the endpoint to a list */
> > 	/* check if the endpoint has a remote-endpoint link */
> > 		/* If so, then link the two together and call the
> > 		 * callbacks */
> > }
> > 
> > That's neither expensive nor complicated.
> > 
> > Originally I suggested walking the whole tree multiple times, but as
> > mentioned that doesn't scale, and as I thought about the above it isn't
> > even a valid thing to do. Everything has to be driven by drivers, so
> > even if the backlinks are there, nothing can be done with the link until
> > the other side goes through enumeration independently.
> 
> I have implemented your suggestion as follows. Basically, this allows
> either endpoint to contain the remote-endpoint link, as long as all
> drivers register their endpoints in the probe function and return
> -EPROBE_DEFER from their component framework bind callback until all
> their endpoints are connected.

Beside bringing the whole graph down when a single component can't be probed 
(either because the corresponding hardware devices is missing, broken, or the 
driver isn't loaded), that's adding even one more level of complexity with an 
additional callback. I'm afraid I can't accept it as-is, the result is just 
too complex for device drivers and not flexible enough.

I want to keep the ability to walk the graph without requiring all components 
to be probed by their respective driver. What happened to your suggestion of 
parsing the whole DT once at boot time ?

> From fdda1fb2bd133200d4620adcbb28697cb360e1cb Mon Sep 17 00:00:00 2001
> From: Philipp Zabel <p.zabel@pengutronix.de>
> Date: Tue, 11 Mar 2014 15:56:18 +0100
> Subject: [PATCH] of: Implement of_graph_register_endpoint
> 
> This patch adds a function that lets drivers register their endpoints in a
> global list. Newly registered endpoints are compared against known endpoints
> to check if a connection should be made. If so, the driver is notified via
> a simple callback.
> 
> Signed-off-by: Philipp Zabel <p.zabel@pengutronix.de>
> ---
>  drivers/of/base.c        | 69 ++++++++++++++++++++++++++++++++++++++++++---
>  include/linux/of_graph.h | 20 +++++++++++++-
>  2 files changed, 84 insertions(+), 5 deletions(-)
> 
> diff --git a/drivers/of/base.c b/drivers/of/base.c
> index ebb001a..77ae54a 100644
> --- a/drivers/of/base.c
> +++ b/drivers/of/base.c
> @@ -29,6 +29,7 @@
>  #include "of_private.h"
> 
>  LIST_HEAD(aliases_lookup);
> +LIST_HEAD(endpoint_list);
> 
>  struct device_node *of_allnodes;
>  EXPORT_SYMBOL(of_allnodes);
> @@ -2002,6 +2003,7 @@ int of_graph_parse_endpoint(const struct device_node
> *node, memset(endpoint, 0, sizeof(*endpoint));
> 
>  	endpoint->local_node = node;
> +	endpoint->remote_node = of_parse_phandle(node, "remote-endpoint", 0);
>  	/*
>  	 * It doesn't matter whether the two calls below succeed.
>  	 * If they don't then the default value 0 is used.
> @@ -2126,6 +2128,19 @@ struct device_node *of_graph_get_next_endpoint(const
> struct device_node *parent, }
>  EXPORT_SYMBOL(of_graph_get_next_endpoint);
> 
> +static struct of_endpoint *__of_graph_lookup_endpoint(
> +				const struct device_node *node)
> +{
> +	struct of_endpoint *ep;
> +
> +	list_for_each_entry(ep, &endpoint_list, list) {
> +		if (ep->local_node == node)
> +			return ep;
> +	}
> +
> +	return NULL;
> +}
> +
>  /**
>   * of_graph_get_remote_port_parent() - get remote port's parent node
>   * @node: pointer to a local endpoint device_node
> @@ -2136,11 +2151,15 @@ EXPORT_SYMBOL(of_graph_get_next_endpoint);
>  struct device_node *of_graph_get_remote_port_parent(
>  			       const struct device_node *node)
>  {
> +	struct of_endpoint *ep;
>  	struct device_node *np;
>  	unsigned int depth;
> 
>  	/* Get remote endpoint node. */
> -	np = of_parse_phandle(node, "remote-endpoint", 0);
> +	ep = __of_graph_lookup_endpoint(node);
> +	if (!ep || !ep->remote_node)
> +		return NULL;
> +	np = ep->remote_node;
> 
>  	/* Walk 3 levels up only if there is 'ports' node */
>  	for (depth = 3; depth && np; depth--) {
> @@ -2163,13 +2182,14 @@ EXPORT_SYMBOL(of_graph_get_remote_port_parent);
>   */
>  struct device_node *of_graph_get_remote_port(const struct device_node
> *node) {
> +	struct of_endpoint *ep;
>  	struct device_node *np;
> 
>  	/* Get remote endpoint node. */
> -	np = of_parse_phandle(node, "remote-endpoint", 0);
> -	if (!np)
> +	ep = __of_graph_lookup_endpoint(node);
> +	if (!ep || !ep->remote_node)
>  		return NULL;
> -	np = of_get_next_parent(np);
> +	np = of_get_next_parent(ep->remote_node);
>  	if (of_node_cmp(np->name, "port")) {
>  		of_node_put(np);
>  		return NULL;
> @@ -2177,3 +2197,44 @@ struct device_node *of_graph_get_remote_port(const
> struct device_node *node) return np;
>  }
>  EXPORT_SYMBOL(of_graph_get_remote_port);
> +
> +int of_graph_register_endpoint(const struct device_node *node,
> +		void (*cb)(struct of_endpoint *ep, void *data), void *data)
> +{
> +	struct of_endpoint *remote_ep, *ep = kmalloc(sizeof(*ep), GFP_KERNEL);
> +	if (!ep)
> +		return -ENOMEM;
> +
> +	of_graph_parse_endpoint(node, ep);
> +	ep->callback = cb;
> +	ep->data = data;
> +
> +	list_add(&ep->list, &endpoint_list);
> +
> +	list_for_each_entry(remote_ep, &endpoint_list, list) {
> +		struct of_endpoint *from, *to;
> +		if (ep->remote_node) {
> +			from = ep;
> +			to = remote_ep;
> +		} else {
> +			from = remote_ep;
> +			to = ep;
> +		}
> +		if (from->remote_node &&
> +		    from->remote_node == to->local_node) {
> +			WARN_ON(to->remote_node &&
> +				to->remote_node != from->local_node);
> +			to->remote_node = from->local_node;
> +			to->remote_ep = from;
> +			from->remote_ep = to;
> +			if (from->callback)
> +				from->callback(from, from->data);
> +			if (to->callback)
> +				to->callback(to, to->data);
> +			return 0;
> +		}
> +	}
> +
> +	return 0;
> +}
> +EXPORT_SYMBOL(of_graph_register_endpoint);
> diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h
> index 3a3c5a9..f00ac4e 100644
> --- a/include/linux/of_graph.h
> +++ b/include/linux/of_graph.h
> @@ -23,7 +23,14 @@
>  struct of_endpoint {
>  	unsigned int port;
>  	unsigned int id;
> -	const struct device_node *local_node;
> +	struct device_node *local_node;
> +	struct device_node *remote_node;
> +	struct of_endpoint *remote_ep;
> +
> +	/* Internal use only */
> +	struct list_head list;
> +	void (*callback)(struct of_endpoint *ep, void *data);
> +	void *data;
>  };
> 
>  #ifdef CONFIG_OF
> @@ -35,6 +42,10 @@ struct device_node *of_graph_get_next_endpoint(const
> struct device_node *parent, struct device_node
> *of_graph_get_remote_port_parent(
>  					const struct device_node *node);
>  struct device_node *of_graph_get_remote_port(const struct device_node
> *node); +
> +int of_graph_register_endpoint(const struct device_node *ep_node,
> +				void (*cb)(struct of_endpoint *ep, void *data),
> +				void *data);
>  #else
> 
>  static inline int of_graph_parse_endpoint(const struct device_node *node,
> @@ -68,6 +79,13 @@ static inline struct device_node
> *of_graph_get_remote_port( return NULL;
>  }
> 
> +static inline int of_graph_register_endpoint(const struct device_node
> *ep_node, +				void (*cb)(struct of_endpoint *ep, void *data),
> +				void *data);
> +{
> +	return -ENOSYS;
> +}
> +
>  #endif /* CONFIG_OF */
> 
>  #endif /* __LINUX_OF_GRAPH_H */

-- 
Regards,

Laurent Pinchart


^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-11 15:21                             ` Laurent Pinchart
@ 2014-03-11 18:52                               ` Philipp Zabel
  2014-03-20 17:53                               ` Grant Likely
  1 sibling, 0 replies; 74+ messages in thread
From: Philipp Zabel @ 2014-03-11 18:52 UTC (permalink / raw)
  To: Laurent Pinchart
  Cc: Grant Likely, Tomi Valkeinen, Sascha Hauer, Rob Herring,
	Russell King - ARM Linux, Mauro Carvalho Chehab, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

Hi Laurent,

Am Dienstag, den 11.03.2014, 16:21 +0100 schrieb Laurent Pinchart:
> Hi Philipp,
> 
> On Tuesday 11 March 2014 16:07:00 Philipp Zabel wrote:
> > Am Montag, den 10.03.2014, 14:58 +0000 schrieb Grant Likely:
> > > On Mon, 10 Mar 2014 14:52:53 +0100, Laurent Pinchart wrote:
> 
> [snip]
> 
> > > > In theory unidirectional links in DT are indeed enough. However, let's
> > > > not forget the following.
> > > > 
> > > > - There's no such thing as single start points for graphs. Sure, in some
> > > > simple cases the graph will have a single start point, but that's not a
> > > > generic rule. For instance the camera graphs
> > > > http://ideasonboard.org/media/omap3isp.ps and
> > > > http://ideasonboard.org/media/eyecam.ps have two camera sensors, and
> > > > thus two starting points from a data flow point of view. And if you
> > > > want a better understanding of how complex media graphs can become,
> > > > have a look at http://ideasonboard.org/media/vsp1.0.pdf (that's a real
> > > > world example, albeit all connections are internal to the SoC in that
> > > > particular case, and don't need to be described in DT).
> > > > 
> > > > - There's also no such thing as a master device that can just point to
> > > > slave devices. Once again simple cases exist where that model could
> > > > work, but real world examples exist of complex pipelines with dozens of
> > > > elements all implemented by a separate IP core and handled by separate
> > > > drivers, forming a graph with long chains and branches. We thus need
> > > > real graph bindings.
> > > > 
> > > > - Finally, having no backlinks in DT would make the software
> > > > implementation very complex. We need to be able to walk the graph in a
> > > > generic way without having any of the IP core drivers loaded, and
> > > > without any specific starting point. We would thus need to parse the
> > > > complete DT tree, looking at all nodes and trying to find out whether
> > > > they're part of the graph we're trying to walk. The complexity of the
> > > > operation would be at best quadratic to the number of nodes in the whole
> > > > DT and to the number of nodes in the graph.
> > > 
> > > Not really. To begin with, you cannot determine any meaning of a node
> > > across the tree (aside from it being an endpoint) without also
> > > understanding the binding that the node is a part of. That means you
> > > need to have something matching against the compatible string on both
> > > ends of the linkage. For instance:
> > > 
> > > panel {
> > > 	compatible = "acme,lvds-panel";
> > > 	lvds-port: port {
> > > 	};
> > > };
> > > 
> > > display-controller {
> > > 	compatible = "encom,video";
> > > 	port {
> > > 		remote-endpoint = <&lvds-port>;
> > > 	};
> > > };
> > > 
> > > In the above example, the encom,video driver has absolutely zero
> > > information about what the acme,lvds-panel binding actually implements.
> > > There needs to be both a driver for the "acme,lvds-panel" binding and
> > > one for the "encom,video" binding (even if the acme,lvds-panel binding
> > > is very thin and defers the functionality to the video controller).
> > > 
> > > What you want here is the drivers to register each side of the
> > > connection. That could be modeled with something like the following
> > > (pseudocode):
> > > 
> > > struct of_endpoint {
> > > 
> > > 	struct list_head list;
> > > 	struct device_node *ep_node;
> > > 	void *context;
> > > 	void (*cb)(struct of_endpoint *ep, void *data);
> > > 
> > > }
> > > 
> > > int of_register_port(struct device *node, void (*cb)(struct of_endpoint
> > > *ep, void *data), void *data) {
> > > 
> > > 	struct of_endpoint *ep = kzalloc(sizeof(*ep), GFP_KERNEL);
> > > 	
> > > 	ep->ep_node = node;
> > > 	ep->data = data;
> > > 	ep->callback = cb;
> > > 	
> > > 	/* store the endpoint to a list */
> > > 	/* check if the endpoint has a remote-endpoint link */
> > > 		/* If so, then link the two together and call the
> > > 		 * callbacks */
> > > }
> > > 
> > > That's neither expensive nor complicated.
> > > 
> > > Originally I suggested walking the whole tree multiple times, but as
> > > mentioned that doesn't scale, and as I thought about the above it isn't
> > > even a valid thing to do. Everything has to be driven by drivers, so
> > > even if the backlinks are there, nothing can be done with the link until
> > > the other side goes through enumeration independently.
> > 
> > I have implemented your suggestion as follows. Basically, this allows
> > either endpoint to contain the remote-endpoint link, as long as all
> > drivers register their endpoints in the probe function and return
> > -EPROBE_DEFER from their component framework bind callback until all
> > their endpoints are connected.
> 
> Beside bringing the whole graph down when a single component can't be probed 
> (either because the corresponding hardware devices is missing, broken, or the 
> driver isn't loaded), that's adding even one more level of complexity with an 
> additional callback.

That callback could be completely optional. But you are right that with
this model, each device in the graph can't look beyond the directly
connected devices (easily). On the other hand, I'm not quite sure how
useful this really is given that you can't know how a device's ports
relate to each other internally, until the driver is probed.

>  I'm afraid I can't accept it as-is, the result is just 
> too complex for device drivers and not flexible enough.

With the right wrapper, most subdevices probably could just call
	of_graph_register_all_endpoints(dev->of_node);
in their probe function. Anyhow, see below...

> I want to keep the ability to walk the graph without requiring all components 
> to be probed by their respective driver.

Could you explain why this is needed? Because for imx-drm, I probably
could live without that.

>What happened to your suggestion of parsing the whole DT once at boot
>time ?

It's not quite ready yet. My current draft looks somewhat like this:

>From 0ae3201b2fb7b7aac47f5a4dfe69b24ff1442040 Mon Sep 17 00:00:00 2001
From: Philipp Zabel <p.zabel@pengutronix.de>
Date: Tue, 11 Mar 2014 15:56:18 +0100
Subject: [PATCH] of: Parse OF graph into graph structure

This patch adds a function of_graph_populate() that parses the complete
device tree for port and endpoint nodes and stores them in a graph
structure.

Signed-off-by: Philipp Zabel <p.zabel@pengutronix.de>
---
 drivers/of/base.c        | 243
++++++++++++++++++++++++++++++++++++++++-------
 include/linux/of_graph.h |  17 +++-
 2 files changed, 224 insertions(+), 36 deletions(-)

diff --git a/drivers/of/base.c b/drivers/of/base.c
index ebb001a..a844ca2 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -28,7 +28,33 @@
 
 #include "of_private.h"
 
+struct of_graph_entity {
+	struct device_node *of_node;
+	struct list_head list;
+	struct list_head ports;
+};
+
+struct of_graph_port {
+	struct device_node *of_node;
+	struct of_graph_entity *parent;
+	struct list_head list;
+	struct list_head endpoints;
+	int id;
+};
+
+struct of_graph_endpoint {
+	struct device_node *of_node;
+	struct of_graph_port *parent;
+	struct list_head list;
+	struct list_head global_list;
+	struct device_node *remote_node;
+	struct of_graph_endpoint *remote_endpoint;
+	int id;
+};
+
 LIST_HEAD(aliases_lookup);
+LIST_HEAD(entity_list);
+LIST_HEAD(endpoint_list);
 
 struct device_node *of_allnodes;
 EXPORT_SYMBOL(of_allnodes);
@@ -1984,6 +2010,19 @@ struct device_node *of_find_next_cache_node(const
struct device_node *np)
 	return NULL;
 }
 
+static struct of_graph_endpoint *__of_graph_lookup_endpoint(
+					const struct device_node *node)
+{
+	struct of_graph_endpoint *endpoint;
+
+	list_for_each_entry(endpoint, &endpoint_list, global_list) {
+		if (endpoint->of_node == node)
+			return endpoint;
+	}
+
+	return NULL;
+}
+
 /**
  * of_graph_parse_endpoint() - parse common endpoint node properties
  * @node: pointer to endpoint device_node
@@ -1994,22 +2033,17 @@ struct device_node
*of_find_next_cache_node(const struct device_node *np)
 int of_graph_parse_endpoint(const struct device_node *node,
 			    struct of_endpoint *endpoint)
 {
-	struct device_node *port_node = of_get_parent(node);
-
-	WARN_ONCE(!port_node, "%s(): endpoint %s has no parent node\n",
-		  __func__, node->full_name);
+	struct of_graph_endpoint *ep;
 
 	memset(endpoint, 0, sizeof(*endpoint));
 
-	endpoint->local_node = node;
-	/*
-	 * It doesn't matter whether the two calls below succeed.
-	 * If they don't then the default value 0 is used.
-	 */
-	of_property_read_u32(port_node, "reg", &endpoint->port);
-	of_property_read_u32(node, "reg", &endpoint->id);
+	ep = __of_graph_lookup_endpoint(node);
+	if (!ep || !ep->parent)
+		return -EINVAL;
 
-	of_node_put(port_node);
+	endpoint->local_node = ep->of_node;
+	endpoint->port = ep->parent->id;
+	endpoint->id = ep->id;
 
 	return 0;
 }
@@ -2136,21 +2170,16 @@ EXPORT_SYMBOL(of_graph_get_next_endpoint);
 struct device_node *of_graph_get_remote_port_parent(
 			       const struct device_node *node)
 {
-	struct device_node *np;
-	unsigned int depth;
+	struct of_graph_port *port;
+	struct of_graph_endpoint *ep  = __of_graph_lookup_endpoint(node);
+	if (!ep || !ep->remote_endpoint)
+		return NULL;
 
-	/* Get remote endpoint node. */
-	np = of_parse_phandle(node, "remote-endpoint", 0);
+	port = ep->remote_endpoint->parent;
+	if (!port || !port->parent)
+		return NULL;
 
-	/* Walk 3 levels up only if there is 'ports' node */
-	for (depth = 3; depth && np; depth--) {
-		np = of_get_next_parent(np);
-		if (depth == 3 && of_node_cmp(np->name, "port"))
-			break;
-		if (depth == 2 && of_node_cmp(np->name, "ports"))
-			break;
-	}
-	return np;
+	return port->parent->of_node;
 }
 EXPORT_SYMBOL(of_graph_get_remote_port_parent);
 
@@ -2163,17 +2192,161 @@ EXPORT_SYMBOL(of_graph_get_remote_port_parent);
  */
 struct device_node *of_graph_get_remote_port(const struct device_node
*node)
 {
-	struct device_node *np;
+	struct of_graph_endpoint *ep;
 
-	/* Get remote endpoint node. */
-	np = of_parse_phandle(node, "remote-endpoint", 0);
-	if (!np)
+	ep = __of_graph_lookup_endpoint(node);
+	if (!ep || !ep->remote_endpoint || !ep->remote_endpoint->parent)
 		return NULL;
-	np = of_get_next_parent(np);
-	if (of_node_cmp(np->name, "port")) {
-		of_node_put(np);
-		return NULL;
-	}
-	return np;
+
+	return ep->remote_endpoint->parent->of_node;
 }
 EXPORT_SYMBOL(of_graph_get_remote_port);
+
+static int of_graph_add_endpoint(struct of_graph_port *port,
+				struct device_node *node)
+{
+	struct of_graph_endpoint *ep = kzalloc(sizeof(*ep), GFP_KERNEL);
+	if (!ep)
+		return -ENOMEM;
+
+	ep->of_node = node;
+	ep->parent = port;
+	of_property_read_u32(node, "reg", &ep->id);
+	ep->remote_node = of_parse_phandle(node, "remote-endpoint", 0);
+
+	list_add_tail(&ep->global_list, &endpoint_list);
+	list_add_tail(&ep->list, &port->endpoints);
+	return 0;
+}
+
+static int of_graph_add_port(struct of_graph_entity *entity,
+				struct device_node *node)
+{
+	struct device_node *child;
+	struct of_graph_port *port = kzalloc(sizeof(*port), GFP_KERNEL);
+	if (!port)
+		return -ENOMEM;
+
+	port->of_node = node;
+	port->parent = entity;
+	of_property_read_u32(node, "reg", &port->id);
+	INIT_LIST_HEAD(&port->endpoints);
+
+	for_each_child_of_node(node, child) {
+		int rc;
+		if (of_node_cmp(child->name, "endpoint") != 0) {
+			pr_warn("%s(): non-endpoint node inside port %s\n",
+				__func__, node->full_name);
+			continue;
+		}
+
+		rc = of_graph_add_endpoint(port, child);
+		if (rc)
+			return rc;
+	}
+
+	list_add_tail(&port->list, &entity->ports);
+	return 0;
+}
+
+static int of_graph_add_entity(struct device_node *node,
+				struct device_node *child, bool ports_node)
+{
+	struct device_node *parent = node;
+	struct of_graph_entity *entity;
+	int rc = 0;
+
+	entity = kzalloc(sizeof(*entity), GFP_KERNEL);
+	if (!entity)
+		return -ENOMEM;
+
+	entity->of_node = node;
+	INIT_LIST_HEAD(&entity->ports);
+
+	if (ports_node) {
+		parent = child;
+		child = of_get_next_child(parent, NULL);
+	}
+	while (child) {
+		rc = of_graph_add_port(entity, child);
+		if (rc)
+			return rc;
+
+		do {
+			child = of_get_next_child(parent, child);
+			if (!child)
+				break;
+		} while (of_node_cmp(child->name, "port"));
+	}
+
+	list_add_tail(&entity->list, &entity_list);
+	return 0;
+}
+
+static int of_graph_recurse(struct device_node *node)
+{
+	struct device_node *child;
+	int rc = 0;
+
+	for_each_child_of_node(node, child) {
+		if (of_node_cmp(child->name, "ports") == 0) {
+			rc = of_graph_add_entity(node, child, true);
+			break;
+		} else if ((of_node_cmp(child->name, "port") == 0) &&
+			   (!of_find_property(child, "compatible", NULL))) {
+			rc = of_graph_add_entity(node, child, false);
+			break;
+		} else {
+			rc = of_graph_recurse(child);
+		}
+	}
+
+	return rc;
+}
+
+int of_graph_populate(struct device_node *root)
+{
+	struct of_graph_endpoint *ep1, *ep2;
+	struct of_graph_entity *entity;
+	struct of_graph_port *port;
+	int rc = 0;
+
+	/* Skip if the graph is already populated */
+	if (!list_empty(&endpoint_list))
+		return 0;
+
+	root = root ? of_node_get(root) : of_find_node_by_path("/");
+
+	/* Parse device tree */
+	rc = of_graph_recurse(root);
+	if (rc)
+		return rc;
+
+	/* Connect endpoints */
+	list_for_each_entry(ep1, &endpoint_list, global_list) {
+		ep2 = ep1;
+		list_for_each_entry_continue(ep2, &endpoint_list, global_list) {
+			struct of_graph_endpoint *from, *to;
+
+			if (ep1->remote_node) {
+				from = ep1;
+				to = ep2;
+			} else {
+				from = ep2;
+				to = ep1;
+			}
+			if (from->remote_node &&
+			    from->remote_node == to->of_node) {
+				WARN_ON(to->remote_node &&
+					to->remote_node != from->of_node);
+				to->remote_node = from->of_node;
+				to->remote_endpoint = from;
+				from->remote_endpoint = to;
+			}
+		}
+	}
+
+	of_node_put(root);
+	return rc;
+}
+EXPORT_SYMBOL(of_graph_populate);
diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h
index 3a3c5a9..70f5809 100644
--- a/include/linux/of_graph.h
+++ b/include/linux/of_graph.h
@@ -23,7 +23,14 @@
 struct of_endpoint {
 	unsigned int port;
 	unsigned int id;
-	const struct device_node *local_node;
+	struct device_node *local_node;
+	struct device_node *remote_node;
+	struct of_endpoint *remote_ep;
+
+	/* Internal use only */
+	struct list_head list;
+	void (*callback)(struct of_endpoint *ep, void *data);
+	void *data;
 };
 
 #ifdef CONFIG_OF
@@ -35,6 +42,9 @@ struct device_node *of_graph_get_next_endpoint(const
struct device_node *parent,
 struct device_node *of_graph_get_remote_port_parent(
 					const struct device_node *node);
 struct device_node *of_graph_get_remote_port(const struct device_node
*node);
+
+int of_graph_populate(struct device_node *root);
+
 #else
 
 static inline int of_graph_parse_endpoint(const struct device_node
*node,
@@ -68,6 +78,11 @@ static inline struct device_node
*of_graph_get_remote_port(
 	return NULL;
 }
 
+static inline int of_graph_populate(struct device_node *root)
+{
+	return -ENOSYS;
+}
+
 #endif /* CONFIG_OF */
 
 #endif /* __LINUX_OF_GRAPH_H */
-- 
1.9.0

regards
Philipp


^ permalink raw reply related	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-10 13:52                       ` Laurent Pinchart
  2014-03-10 14:10                         ` Tomi Valkeinen
  2014-03-10 14:58                         ` Grant Likely
@ 2014-03-12 10:25                         ` Russell King - ARM Linux
  2014-03-12 10:47                           ` Tomi Valkeinen
  2014-03-20 17:54                           ` Grant Likely
  2 siblings, 2 replies; 74+ messages in thread
From: Russell King - ARM Linux @ 2014-03-12 10:25 UTC (permalink / raw)
  To: Laurent Pinchart
  Cc: Tomi Valkeinen, Grant Likely, Philipp Zabel, Sascha Hauer,
	Rob Herring, Mauro Carvalho Chehab, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

On Mon, Mar 10, 2014 at 02:52:53PM +0100, Laurent Pinchart wrote:
> In theory unidirectional links in DT are indeed enough. However, let's not 
> forget the following.
> 
> - There's no such thing as single start points for graphs. Sure, in some 
> simple cases the graph will have a single start point, but that's not a 
> generic rule. For instance the camera graphs 
> http://ideasonboard.org/media/omap3isp.ps and 
> http://ideasonboard.org/media/eyecam.ps have two camera sensors, and thus two 
> starting points from a data flow point of view.

I think we need to stop thinking of a graph linked in terms of data
flow - that's really not useful.

Consider a display subsystem.  The CRTC is the primary interface for
the CPU - this is the "most interesting" interface, it's the interface
which provides access to the picture to be displayed for the CPU.  Other
interfaces are secondary to that purpose - reading the I2C DDC bus for
the display information is all secondary to the primary purpose of
displaying a picture.

For a capture subsystem, the primary interface for the CPU is the frame
grabber (whether it be an already encoded frame or not.)  The sensor
devices are all secondary to that.

So, the primary software interface in each case is where the data for
the primary purpose is transferred.  This is the point at which these
graphs should commence since this is where we would normally start
enumeration of the secondary interfaces.

V4L2 even provides interfaces for this: you open the capture device,
which then allows you to enumerate the capture device's inputs, and
this in turn allows you to enumerate their properties.  You don't open
a particular sensor and work back up the tree.

I believe trying to do this according to the flow of data is just wrong.
You should always describe things from the primary device for the CPU
towards the peripheral devices and never the opposite direction.

-- 
FTTC broadband for 0.8mile line: now at 9.7Mbps down 460kbps up... slowly
improving, and getting towards what was expected from it.

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-12 10:25                         ` Russell King - ARM Linux
@ 2014-03-12 10:47                           ` Tomi Valkeinen
  2014-03-12 11:21                             ` Laurent Pinchart
  2014-03-20 17:54                           ` Grant Likely
  1 sibling, 1 reply; 74+ messages in thread
From: Tomi Valkeinen @ 2014-03-12 10:47 UTC (permalink / raw)
  To: Russell King - ARM Linux, Laurent Pinchart
  Cc: Grant Likely, Philipp Zabel, Sascha Hauer, Rob Herring,
	Mauro Carvalho Chehab, Rob Herring, Sylwester Nawrocki,
	Kyungmin Park, linux-kernel, linux-media, devicetree,
	Philipp Zabel

[-- Attachment #1: Type: text/plain, Size: 3402 bytes --]

On 12/03/14 12:25, Russell King - ARM Linux wrote:
> On Mon, Mar 10, 2014 at 02:52:53PM +0100, Laurent Pinchart wrote:
>> In theory unidirectional links in DT are indeed enough. However, let's not 
>> forget the following.
>>
>> - There's no such thing as single start points for graphs. Sure, in some 
>> simple cases the graph will have a single start point, but that's not a 
>> generic rule. For instance the camera graphs 
>> http://ideasonboard.org/media/omap3isp.ps and 
>> http://ideasonboard.org/media/eyecam.ps have two camera sensors, and thus two 
>> starting points from a data flow point of view.
> 
> I think we need to stop thinking of a graph linked in terms of data
> flow - that's really not useful.
> 
> Consider a display subsystem.  The CRTC is the primary interface for
> the CPU - this is the "most interesting" interface, it's the interface
> which provides access to the picture to be displayed for the CPU.  Other
> interfaces are secondary to that purpose - reading the I2C DDC bus for
> the display information is all secondary to the primary purpose of
> displaying a picture.
> 
> For a capture subsystem, the primary interface for the CPU is the frame
> grabber (whether it be an already encoded frame or not.)  The sensor
> devices are all secondary to that.
> 
> So, the primary software interface in each case is where the data for
> the primary purpose is transferred.  This is the point at which these
> graphs should commence since this is where we would normally start
> enumeration of the secondary interfaces.
> 
> V4L2 even provides interfaces for this: you open the capture device,
> which then allows you to enumerate the capture device's inputs, and
> this in turn allows you to enumerate their properties.  You don't open
> a particular sensor and work back up the tree.

We do it the other way around in OMAP DSS. It's the displays the user is
interested in, so we enumerate the displays, and if the user wants to
enable a display, we then follow the links from the display towards the
SoC, configuring and enabling the components on the way.

I don't have a strong opinion on the direction, I think both have their
pros. In any case, that's more of a driver model thing, and I'm fine
with linking in the DT outwards from the SoC (presuming the
double-linking is not ok, which I still like best).

> I believe trying to do this according to the flow of data is just wrong.
> You should always describe things from the primary device for the CPU
> towards the peripheral devices and never the opposite direction.

In that case there's possibly the issue I mentioned in other email in
this thread: an encoder can be used in both a display and a capture
pipeline. Describing the links outwards from CPU means that sometimes
the encoder's input port is pointed at, and sometimes the encoder's
output port is pointed at.

That's possibly ok, but I think Grant was of the opinion that things
should be explicitly described in the binding documentation: either a
device's port must contain a 'remote-endpoint' property, or it must not,
but no "sometimes". But maybe I took his words too literally.

Then there's also the audio example Philipp mentioned, where there is no
clear "outward from Soc" direction for the link, as the link was
bi-directional and between two non-SoC devices.

 Tomi



[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 901 bytes --]

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-12 10:47                           ` Tomi Valkeinen
@ 2014-03-12 11:21                             ` Laurent Pinchart
  0 siblings, 0 replies; 74+ messages in thread
From: Laurent Pinchart @ 2014-03-12 11:21 UTC (permalink / raw)
  To: Tomi Valkeinen
  Cc: Russell King - ARM Linux, Grant Likely, Philipp Zabel,
	Sascha Hauer, Rob Herring, Mauro Carvalho Chehab, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

[-- Attachment #1: Type: text/plain, Size: 5015 bytes --]

Hi Russell and Tomi,

On Wednesday 12 March 2014 12:47:09 Tomi Valkeinen wrote:
> On 12/03/14 12:25, Russell King - ARM Linux wrote:
> > On Mon, Mar 10, 2014 at 02:52:53PM +0100, Laurent Pinchart wrote:
> >> In theory unidirectional links in DT are indeed enough. However, let's
> >> not forget the following.
> >> 
> >> - There's no such thing as single start points for graphs. Sure, in some
> >> simple cases the graph will have a single start point, but that's not a
> >> generic rule. For instance the camera graphs
> >> http://ideasonboard.org/media/omap3isp.ps and
> >> http://ideasonboard.org/media/eyecam.ps have two camera sensors, and thus
> >> two starting points from a data flow point of view.
> > 
> > I think we need to stop thinking of a graph linked in terms of data
> > flow - that's really not useful.
> > 
> > Consider a display subsystem.  The CRTC is the primary interface for
> > the CPU - this is the "most interesting" interface, it's the interface
> > which provides access to the picture to be displayed for the CPU.  Other
> > interfaces are secondary to that purpose - reading the I2C DDC bus for
> > the display information is all secondary to the primary purpose of
> > displaying a picture.
> > 
> > For a capture subsystem, the primary interface for the CPU is the frame
> > grabber (whether it be an already encoded frame or not.)  The sensor
> > devices are all secondary to that.
> > 
> > So, the primary software interface in each case is where the data for
> > the primary purpose is transferred.  This is the point at which these
> > graphs should commence since this is where we would normally start
> > enumeration of the secondary interfaces.
> > 
> > V4L2 even provides interfaces for this: you open the capture device,
> > which then allows you to enumerate the capture device's inputs, and
> > this in turn allows you to enumerate their properties.  You don't open
> > a particular sensor and work back up the tree.

Please note that this has partly changed a couple of years ago with the 
introduction of the media controller framework. Userspace now opens a logical 
media device that describes the topology of the hardware, and then accesses 
individual components directly, from sensor to DMA engine.

> We do it the other way around in OMAP DSS. It's the displays the user is
> interested in, so we enumerate the displays, and if the user wants to
> enable a display, we then follow the links from the display towards the
> SoC, configuring and enabling the components on the way.

The logical view of a device from a CPU perspective evolves over time, as APIs 
are refactored or created to support new hardware that comes with new 
paradigms and additional complexity. The hardware data flow direction, 
however, doesn't change. Only modeling the data flow direction in DT might be 
tempting but is probably too hasty a conclusion: if DT should model 
the hardware, it ends up modeling a logical view of the hardware, and is thus 
not as closed as one might believe.

In the particular case of display devices I believe that using the data flow 
direction for links (assuming we can't use bidirectional links in DT) is a 
good model. It would allow parsing the whole graph at a reasonable cost (still 
higher than with bidirectional links) while making clear how to represent 
links. Let's not forget that with more complex devices not all components can 
be referenced directly from the CPU-side display controller.

> I don't have a strong opinion on the direction, I think both have their
> pros. In any case, that's more of a driver model thing, and I'm fine
> with linking in the DT outwards from the SoC (presuming the
> double-linking is not ok, which I still like best).
> 
> > I believe trying to do this according to the flow of data is just wrong.
> > You should always describe things from the primary device for the CPU
> > towards the peripheral devices and never the opposite direction.
> 
> In that case there's possibly the issue I mentioned in other email in
> this thread: an encoder can be used in both a display and a capture
> pipeline. Describing the links outwards from CPU means that sometimes
> the encoder's input port is pointed at, and sometimes the encoder's
> output port is pointed at.
> 
> That's possibly ok, but I think Grant was of the opinion that things
> should be explicitly described in the binding documentation: either a
> device's port must contain a 'remote-endpoint' property, or it must not,
> but no "sometimes". But maybe I took his words too literally.
> 
> Then there's also the audio example Philipp mentioned, where there is no
> clear "outward from Soc" direction for the link, as the link was
> bi-directional and between two non-SoC devices.

Even if the link was unidirectional the "outward from SoC" direction isn't 
always defined for links between non-SoC devices.

-- 
Regards,

Laurent Pinchart

[-- Attachment #2: This is a digitally signed message part. --]
[-- Type: application/pgp-signature, Size: 490 bytes --]

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-10 15:15                           ` Laurent Pinchart
  2014-03-10 15:40                             ` Philipp Zabel
  2014-03-11 13:04                             ` Andrzej Hajda
@ 2014-03-20 17:01                             ` Grant Likely
  2014-03-21 12:10                               ` Tomi Valkeinen
  2 siblings, 1 reply; 74+ messages in thread
From: Grant Likely @ 2014-03-20 17:01 UTC (permalink / raw)
  To: Laurent Pinchart
  Cc: Tomi Valkeinen, Philipp Zabel, Sascha Hauer, Rob Herring,
	Russell King - ARM Linux, Mauro Carvalho Chehab, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

On Mon, 10 Mar 2014 16:15:37 +0100, Laurent Pinchart <laurent.pinchart@ideasonboard.com> wrote:
> Hi Grant,
> 
> On Monday 10 March 2014 14:58:15 Grant Likely wrote:
> > On Mon, 10 Mar 2014 14:52:53 +0100, Laurent Pinchart wrote:
> > > On Monday 10 March 2014 12:18:20 Tomi Valkeinen wrote:
> > > > On 08/03/14 13:41, Grant Likely wrote:
> > > > >> Ok. If we go for single directional link, the question is then: which
> > > > >> way? And is the direction different for display and camera, which are
> > > > >> kind of reflections of each other?
> > > > > 
> > > > > In general I would recommend choosing whichever device you would
> > > > > sensibly think of as a master. In the camera case I would choose the
> > > > > camera controller node instead of the camera itself, and in the
> > > > > display case I would choose the display controller instead of the
> > > > > panel. The binding author needs to choose what she thinks makes the
> > > > > most sense, but drivers can still use it if it turns out to be
> > > > > 'backwards'
> > > > 
> > > > I would perhaps choose the same approach, but at the same time I think
> > > > it's all but clear. The display controller doesn't control the panel any
> > > > more than a DMA controller controls, say, the display controller.
> > > > 
> > > > In fact, in earlier versions of OMAP DSS DT support I had a simpler port
> > > > description, and in that I had the panel as the master (i.e. link from
> > > > panel to dispc) because the panel driver uses the display controller's
> > > > features to provide the panel device a data stream.
> > > > 
> > > > And even with the current OMAP DSS DT version, which uses the v4l2 style
> > > > ports/endpoints, the driver model is still the same, and only links
> > > > towards upstream are used.
> > > > 
> > > > So one reason I'm happy with the dual-linking is that I can easily
> > > > follow the links from the downstream entities to upstream entities, and
> > > > other people, who have different driver model, can easily do the
> > > > opposite.
> > > > 
> > > > But I agree that single-linking is enough and this can be handled at
> > > > runtime, even if it makes the code more complex. And perhaps requires
> > > > extra data in the dts, to give the start points for the graph.
> > > 
> > > In theory unidirectional links in DT are indeed enough. However, let's not
> > > forget the following.
> > > 
> > > - There's no such thing as single start points for graphs. Sure, in some
> > > simple cases the graph will have a single start point, but that's not a
> > > generic rule. For instance the camera graphs
> > > http://ideasonboard.org/media/omap3isp.ps and
> > > http://ideasonboard.org/media/eyecam.ps have two camera sensors, and thus
> > > two starting points from a data flow point of view. And if you want a
> > > better understanding of how complex media graphs can become, have a look
> > > at http://ideasonboard.org/media/vsp1.0.pdf (that's a real world example,
> > > albeit all connections are internal to the SoC in that particular case,
> > > and don't need to be described in DT).
> > > 
> > > - There's also no such thing as a master device that can just point to
> > > slave devices. Once again simple cases exist where that model could work,
> > > but real world examples exist of complex pipelines with dozens of
> > > elements all implemented by a separate IP core and handled by separate
> > > drivers, forming a graph with long chains and branches. We thus need real
> > > graph bindings.
> > > 
> > > - Finally, having no backlinks in DT would make the software
> > > implementation very complex. We need to be able to walk the graph in a
> > > generic way without having any of the IP core drivers loaded, and without
> > > any specific starting point. We would thus need to parse the complete DT
> > > tree, looking at all nodes and trying to find out whether they're part of
> > > the graph we're trying to walk. The complexity of the operation would be
> > > at best quadratic to the number of nodes in the whole DT and to the number
> > > of nodes in the graph.
> > 
> > Not really. To being with, you cannot determine any meaning of a node
> > across the tree (aside from it being an endpoint)
> 
> That's the important part. I can assume the target node of the remote-endpoint 
> phandle to be an endpoint, and can thus assume that it implements the of-graph 
> bindings. That's all I need to be able to walk the graph in a generic way.

Yes, you can assume the target node is an endpoint, but you cannot
assume anything beyond that. It is not a good idea to go looking for
more endpoint links in the local hierarchy that terminates the link.

> 
> > without also understanding the binding that the node is a part of. That
> > means you need to have something matching against the compatible string on
> > both ends of the linkage. For instance:
> > 
> > panel {
> > 	compatible = "acme,lvds-panel";
> > 	lvds-port: port {
> > 	};
> > };
> > 
> > display-controller {
> > 	compatible = "encom,video";
> > 	port {
> > 		remote-endpoint = <&lvds-port>;
> > 	};
> > };
> > 
> > In the above example, the encom,video driver has absolutely zero
> > information about what the acme,lvds-panel binding actually implements.
> > There needs to be both a driver for the "acme,lvds-panel" binding and
> > one for the "encom,video" binding (even if the acme,lvds-panel binding
> > is very thin and defers the functionality to the video controller).
> 
> I absolutely agree with that. We need a driver for each device (in this case 
> the acme panel and the encom display controller), and we need those drivers to 
> register entities (in the generic sense of the term) for them to be able to 
> communicate with each other. The display controller driver must not try to 
> parse panel-specific properties from the panel node. However, as described 
> above, I believe it can parse ports and endpoints to walk the graph.

I think depending on a generic graph walk is where I have the biggest
concern about the design. I don't think it is a good idea for the master
device to try a generic walk over the graph looking for other devices
that might be components because it cannot know whether or not further
links are relevant, or even if other endpoint nodes in the target
hierarchy actually conform to the graph binding in the same way.

Consider the example of a system with two video controllers (one
embedded and one discrete), a display mux, and a panel. The display
controller depends on the mux, and the mux depends on the panel. It
would be entirely reasonable to start up the display subsystem with the
embedded controller without the discrete adapter being available, but
the way the current graph pattern is proposed there is no dependency
information between the devices.

I really do think the dependency direction needs to be explicit so that
a driver knows whether or not a given link is relevant for it to start,
and there must be a driver that knows how to interpret the target
node. A device that is a master needs to know which links are
dependencies, and which are not.

I'm not even talking about the bi-directional link issue. This issue
remains regardless of whether or not bidirectional links are used.

I would solve it in one of the following two ways:

1) Make masters only look at one level of dependency. Make the component
driver responsible for checking /its/ dependencies. If its dependencies
aren't yet met, then don't register the component as ready. We could
probably make the drivers/base/component code allow components to pull
in additional components as required. This approach shouldn't even
require a change to the binding and eliminates any need for walking the
full graph.

2) Give the master node an explicit list of all devices it depends on. I
don't like this solution as much, but it does the job.

g.

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-11 11:43                               ` Laurent Pinchart
  2014-03-11 12:59                                 ` Tomi Valkeinen
@ 2014-03-20 17:09                                 ` Grant Likely
  1 sibling, 0 replies; 74+ messages in thread
From: Grant Likely @ 2014-03-20 17:09 UTC (permalink / raw)
  To: Laurent Pinchart, Philipp Zabel
  Cc: Tomi Valkeinen, Sascha Hauer, Rob Herring,
	Russell King - ARM Linux, Mauro Carvalho Chehab, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

On Tue, 11 Mar 2014 12:43:44 +0100, Laurent Pinchart <laurent.pinchart@ideasonboard.com> wrote:
> Hi Philipp,
> 
> On Monday 10 March 2014 16:40:30 Philipp Zabel wrote:
> > Am Montag, den 10.03.2014, 16:15 +0100 schrieb Laurent Pinchart:
> > > On Monday 10 March 2014 14:58:15 Grant Likely wrote:
> > > > On Mon, 10 Mar 2014 14:52:53 +0100, Laurent Pinchart wrote:
> > > > > On Monday 10 March 2014 12:18:20 Tomi Valkeinen wrote:
> > > > > > On 08/03/14 13:41, Grant Likely wrote:
> > > > > > >> Ok. If we go for single directional link, the question is then:
> > > > > > >> which way? And is the direction different for display and camera,
> > > > > > >> which are kind of reflections of each other?
> > > > > > > 
> > > > > > > In general I would recommend choosing whichever device you would
> > > > > > > sensibly think of as a master. In the camera case I would choose
> > > > > > > the camera controller node instead of the camera itself, and in
> > > > > > > the display case I would choose the display controller instead of
> > > > > > > the panel. The binding author needs to choose what she thinks
> > > > > > > makes the most sense, but drivers can still use it if it turns out
> > > > > > > to be 'backwards'
> > > > > > 
> > > > > > I would perhaps choose the same approach, but at the same time I
> > > > > > think it's all but clear. The display controller doesn't control the
> > > > > > panel any more than a DMA controller controls, say, the display
> > > > > > controller.
> > > > > > 
> > > > > > In fact, in earlier versions of OMAP DSS DT support I had a simpler
> > > > > > port description, and in that I had the panel as the master (i.e.
> > > > > > link from panel to dispc) because the panel driver uses the display
> > > > > > controller's features to provide the panel device a data stream.
> > > > > > 
> > > > > > And even with the current OMAP DSS DT version, which uses the v4l2
> > > > > > style ports/endpoints, the driver model is still the same, and only
> > > > > > links towards upstream are used.
> > > > > > 
> > > > > > So one reason I'm happy with the dual-linking is that I can easily
> > > > > > follow the links from the downstream entities to upstream entities,
> > > > > > and other people, who have different driver model, can easily do the
> > > > > > opposite.
> > > > > > 
> > > > > > But I agree that single-linking is enough and this can be handled at
> > > > > > runtime, even if it makes the code more complex. And perhaps
> > > > > > requires extra data in the dts, to give the start points for the
> > > > > > graph.
> > > > > 
> > > > > In theory unidirectional links in DT are indeed enough. However, let's
> > > > > not forget the following.
> > > > > 
> > > > > - There's no such thing as single start points for graphs. Sure, in
> > > > > some simple cases the graph will have a single start point, but that's
> > > > > not a generic rule. For instance the camera graphs
> > > > > http://ideasonboard.org/media/omap3isp.ps and
> > > > > http://ideasonboard.org/media/eyecam.ps have two camera sensors, and
> > > > > thus two starting points from a data flow point of view. And if you
> > > > > want a better understanding of how complex media graphs can become,
> > > > > have a look at http://ideasonboard.org/media/vsp1.0.pdf (that's a real
> > > > > world example, albeit all connections are internal to the SoC in that
> > > > > particular case, and don't need to be described in DT).
> > > > > 
> > > > > - There's also no such thing as a master device that can just point to
> > > > > slave devices. Once again simple cases exist where that model could
> > > > > work, but real world examples exist of complex pipelines with dozens
> > > > > of elements all implemented by a separate IP core and handled by
> > > > > separate drivers, forming a graph with long chains and branches. We
> > > > > thus need real graph bindings.
> > > > > 
> > > > > - Finally, having no backlinks in DT would make the software
> > > > > implementation very complex. We need to be able to walk the graph in a
> > > > > generic way without having any of the IP core drivers loaded, and
> > > > > without any specific starting point. We would thus need to parse the
> > > > > complete DT tree, looking at all nodes and trying to find out whether
> > > > > they're part of the graph we're trying to walk. The complexity of the
> > > > > operation would be at best quadratic to the number of nodes in the
> > > > > whole DT and to the number of nodes in the graph.
> > > > 
> > > > Not really. To begin with, you cannot determine any meaning of a node
> > > > across the tree (aside from it being an endpoint)
> > > 
> > > That's the important part. I can assume the target node of the
> > > remote-endpoint phandle to be an endpoint, and can thus assume that it
> > > implements the of-graph bindings. That's all I need to be able to walk
> > > the graph in a generic way.
> > >
> > > > without also understanding the binding that the node is a part of. That
> > > > means you need to have something matching against the compatible string
> > > > on both ends of the linkage. For instance:
> > > > 
> > > > panel {
> > > > 	compatible = "acme,lvds-panel";
> > > > 	lvds-port: port {
> > > > 	};
> > > > };
> > > > 
> > > > display-controller {
> > > > 	compatible = "encom,video";
> > > > 	port {
> > > > 		remote-endpoint = <&lvds-port>;
> > > > 	};
> > > > };
> > > > 
> > > > In the above example, the encom,video driver has absolutely zero
> > > > information about what the acme,lvds-panel binding actually implements.
> > > > There needs to be both a driver for the "acme,lvds-panel" binding and
> > > > one for the "encom,video" binding (even if the acme,lvds-panel binding
> > > > is very thin and defers the functionality to the video controller).
> > > 
> > > I absolutely agree with that. We need a driver for each device (in this
> > > case the acme panel and the encom display controller), and we need those
> > > drivers to register entities (in the generic sense of the term) for them
> > > to be able to communicate with each other. The display controller driver
> > > must not try to parse panel-specific properties from the panel node.
> > > However, as described above, I believe it can parse ports and endpoints
> > > to walk the graph.
> > >
> > > > What you want here is the drivers to register each side of the
> > > > connection. That could be modeled with something like the following
> > > > (pseudocode):
> > > > 
> > > > struct of_endpoint {
> > > > 	struct list_head list;
> > > > 	struct device_node *ep_node;
> > > > 	void *context;
> > > > 	void (*cb)(struct of_endpoint *ep, void *data);
> > > > }
> > > > 
> > > > int of_register_port(struct device *node, void (*cb)(struct of_endpoint
> > > > *ep, void *data), void *data) {
> > > > 	struct of_endpoint *ep = kzalloc(sizeof(*ep), GFP_KERNEL);
> > > > 	
> > > > 	ep->ep_node = node;
> > > > 	ep->data = data;
> > > > 	ep->callback = cb;
> > > > 	
> > > > 	/* store the endpoint to a list */
> > > > 	/* check if the endpoint has a remote-endpoint link */
> > > > 		/* If so, then link the two together and call the
> > > > 		 * callbacks */
> > > > }
> > > > 
> > > > That's neither expensive nor complicated.
> > > > 
> > > > Originally I suggested walking the whole tree multiple times, but as
> > > > mentioned that doesn't scale, and as I thought about the above it isn't
> > > > even a valid thing to do. Everything has to be driven by drivers, so
> > > > even if the backlinks are there, nothing can be done with the link until
> > > > the other side goes through enumeration independently.
> > > 
> > > For such composite devices, what we need from a drivers point of view is a
> > > mechanism to wait for all components to be in place before proceeding.
> > > This isn't DT-related as such, but the graph is obviously described in DT
> > > for DT- based platforms.
> > > 
> > > There are at least two mainline implementations of such a mechanism. One of
> > > them can be found in drivers/media/v4l2-core/v4l2-async.c, another more
> > > recent one in drivers/base/component.c. Neither of them is DT-specific,
> > > and they don't try to parse DT content.
> > > 
> > > The main problem, from a DT point of view, is that we need to pick a
> > > master driver that will initiate the process of waiting for all components
> > > to be in place. This is usually the driver of the main component inside
> > > the SoC. For a camera capture pipeline the master is the SoC camera device
> > > driver that will create the V4L2 device node(s). For a display pipeline
> > > the master is the SoC display driver that will create the DRM/KMS
> > > devices.
> > > 
> > > The master device driver needs to create a list of all components it
> > > needs, and wait until all those components have been probed by their
> > > respective driver. Creating such a list requires walking the graph,
> > > starting at the master device (using a CPU-centric view as described by
> > > Russell). This is why we need the backlinks, as the master device can have
> > > inbound links.
> >
> > We could scan the whole tree for entities, ports and endpoints once, in
> > the base oftree code, and put that into a graph structure, adding the
> > backlinks.
> > The of_graph_* helpers could then use that graph instead of the device
> > tree.
> 
> That could work. The complexity would still be quadratic, but we would parse 
> the full device tree once only.
> 
> The runtime complexity would still be increased, as the graph helpers would 
> need to find the endpoint object in the parsed graph corresponding to the DT 
> node they get as an argument. That's proportional to the number of graph 
> elements, not the total number of DT nodes, so I suppose it's not too bad.
> 
> We also need to make sure this would work with insertion of DT fragments at 
> runtime. Probably not a big deal, but it has to be kept in mind.

That is also an important aspect. Another reason to be explicit about
what depends on what. It is a lot easier to handle if each driver knows
which links it has a dependency on, and if the dependency tree is
acyclic.

g.


^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-11 13:16                                   ` Laurent Pinchart
  2014-03-11 13:27                                     ` Tomi Valkeinen
@ 2014-03-20 17:23                                     ` Grant Likely
  2014-03-21 10:44                                       ` Andrzej Hajda
  1 sibling, 1 reply; 74+ messages in thread
From: Grant Likely @ 2014-03-20 17:23 UTC (permalink / raw)
  To: Laurent Pinchart, Tomi Valkeinen
  Cc: Philipp Zabel, Sascha Hauer, Rob Herring,
	Russell King - ARM Linux, Mauro Carvalho Chehab, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

On Tue, 11 Mar 2014 14:16:37 +0100, Laurent Pinchart <laurent.pinchart@ideasonboard.com> wrote:
> On Tuesday 11 March 2014 14:59:20 Tomi Valkeinen wrote:
> > So depending on the use case, the endpoints would point to opposite
> > direction from the encoder's point of view.
> > 
> > And if I gathered Grant's opinion correctly (correct me if I'm wrong),
> > he thinks things should be explicit, i.e. the bindings for, say, an
> > encoder should state that the encoder's output endpoint _must_ contain a
> > remote-endpoint property, whereas the encoder's input endpoint _must
> > not_ contain a remote-endpoint property.
> 
> Actually my understanding was that DT links would have the same direction as the 
> data flow. There would be no ambiguity in that case as the direction of the 
> data flow is known. What happens for bidirectional data flows still needs to be 
> discussed though. And if we want to use the of-graph bindings to describe 
> graphs without a data flow, a decision will need to be taken there too.

On further thinking, I would say linkage direction should be in the
direction that would be considered the dependency order... I'm going to
soften my position though. I think the generic pattern should still
recommend unidirection links in direction of device dependency, but
I'm okay with allowing the bidirection option if the helper functions
are modified to validate the target endpoint. I think it needs to test
for the following:
- Make sure the endpoint either:
  - does not have a backlink, or
  - the backlink points back to the origin node
- If the target is an endpoint node, then make sure the parent doesn't
  have a link of any kind
- If the target is a port node, make sure it doesn't have any endpoint
  children nodes at all.

g.



^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-11 13:04                             ` Andrzej Hajda
@ 2014-03-20 17:33                               ` Grant Likely
  0 siblings, 0 replies; 74+ messages in thread
From: Grant Likely @ 2014-03-20 17:33 UTC (permalink / raw)
  To: Andrzej Hajda, Laurent Pinchart
  Cc: Tomi Valkeinen, Philipp Zabel, Sascha Hauer, Rob Herring,
	Russell King - ARM Linux, Mauro Carvalho Chehab, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

On Tue, 11 Mar 2014 14:04:34 +0100, Andrzej Hajda <a.hajda@samsung.com> wrote:
> On 03/10/2014 04:15 PM, Laurent Pinchart wrote:
> > Hi Grant,
> > 
> > On Monday 10 March 2014 14:58:15 Grant Likely wrote:
> >> On Mon, 10 Mar 2014 14:52:53 +0100, Laurent Pinchart wrote:
> >>> On Monday 10 March 2014 12:18:20 Tomi Valkeinen wrote:
> >>>> On 08/03/14 13:41, Grant Likely wrote:
> >>>>>> Ok. If we go for single directional link, the question is then: which
> >>>>>> way? And is the direction different for display and camera, which are
> >>>>>> kind of reflections of each other?
> >>>>>
> >>>>> In general I would recommend choosing whichever device you would
> >>>>> sensibly think of as a master. In the camera case I would choose the
> >>>>> camera controller node instead of the camera itself, and in the
> >>>>> display case I would choose the display controller instead of the
> >>>>> panel. The binding author needs to choose what she thinks makes the
> >>>>> most sense, but drivers can still use it if it turns out to be
> >>>>> 'backwards'
> >>>>
> >>>> I would perhaps choose the same approach, but at the same time I think
> >>>> it's all but clear. The display controller doesn't control the panel any
> >>>> more than a DMA controller controls, say, the display controller.
> >>>>
> >>>> In fact, in earlier versions of OMAP DSS DT support I had a simpler port
> >>>> description, and in that I had the panel as the master (i.e. link from
> >>>> panel to dispc) because the panel driver uses the display controller's
> >>>> features to provide the panel device a data stream.
> >>>>
> >>>> And even with the current OMAP DSS DT version, which uses the v4l2 style
> >>>> ports/endpoints, the driver model is still the same, and only links
> >>>> towards upstream are used.
> >>>>
> >>>> So one reason I'm happy with the dual-linking is that I can easily
> >>>> follow the links from the downstream entities to upstream entities, and
> >>>> other people, who have different driver model, can easily do the
> >>>> opposite.
> >>>>
> >>>> But I agree that single-linking is enough and this can be handled at
> >>>> runtime, even if it makes the code more complex. And perhaps requires
> >>>> extra data in the dts, to give the start points for the graph.
> >>>
> >>> In theory unidirectional links in DT are indeed enough. However, let's not
> >>> forget the following.
> >>>
> >>> - There's no such thing as single start points for graphs. Sure, in some
> >>> simple cases the graph will have a single start point, but that's not a
> >>> generic rule. For instance the camera graphs
> >>> http://ideasonboard.org/media/omap3isp.ps and
> >>> http://ideasonboard.org/media/eyecam.ps have two camera sensors, and thus
> >>> two starting points from a data flow point of view. And if you want a
> >>> better understanding of how complex media graphs can become, have a look
> >>> at http://ideasonboard.org/media/vsp1.0.pdf (that's a real world example,
> >>> albeit all connections are internal to the SoC in that particular case,
> >>> and don't need to be described in DT).
> >>>
> >>> - There's also no such thing as a master device that can just point to
> >>> slave devices. Once again simple cases exist where that model could work,
> >>> but real world examples exist of complex pipelines with dozens of
> >>> elements all implemented by a separate IP core and handled by separate
> >>> drivers, forming a graph with long chains and branches. We thus need real
> >>> graph bindings.
> >>>
> >>> - Finally, having no backlinks in DT would make the software
> >>> implementation very complex. We need to be able to walk the graph in a
> >>> generic way without having any of the IP core drivers loaded, and without
> >>> any specific starting point. We would thus need to parse the complete DT
> >>> tree, looking at all nodes and trying to find out whether they're part of
> >>> the graph we're trying to walk. The complexity of the operation would be
> >>> at best quadratic to the number of nodes in the whole DT and to the number
> >>> of nodes in the graph.
> >>
> >> Not really. To begin with, you cannot determine any meaning of a node
> >> across the tree (aside from it being an endpoint)
> > 
> > That's the important part. I can assume the target node of the remote-endpoint 
> > phandle to be an endpoint, and can thus assume that it implements the of-graph 
> > bindings. That's all I need to be able to walk the graph in a generic way.
> > 
> >> without also understanding the binding that the node is a part of. That
> >> means you need to have something matching against the compatible string on
> >> both ends of the linkage. For instance:
> >>
> >> panel {
> >> 	compatible = "acme,lvds-panel";
> >> 	lvds-port: port {
> >> 	};
> >> };
> >>
> >> display-controller {
> >> 	compatible = "encom,video";
> >> 	port {
> >> 		remote-endpoint = <&lvds-port>;
> >> 	};
> >> };
> >>
> >> In the above example, the encom,video driver has absolutely zero
> >> information about what the acme,lvds-panel binding actually implements.
> >> There needs to be both a driver for the "acme,lvds-panel" binding and
> >> one for the "encom,video" binding (even if the acme,lvds-panel binding
> >> is very thin and defers the functionality to the video controller).
> > 
> > I absolutely agree with that. We need a driver for each device (in this case 
> > the acme panel and the encom display controller), and we need those drivers to 
> > register entities (in the generic sense of the term) for them to be able to 
> > communicate with each other. The display controller driver must not try to 
> > parse panel-specific properties from the panel node. However, as described 
> > above, I believe it can parse ports and endpoints to walk the graph.
> > 
> >> What you want here is the drivers to register each side of the
> >> connection. That could be modeled with something like the following
> >> (pseudocode):
> >>
> >> struct of_endpoint {
> >> 	struct list_head list;
> >> 	struct device_node *ep_node;
> >> 	void *context;
> >> 	void (*cb)(struct of_endpoint *ep, void *data);
> >> }
> >>
> >> int of_register_port(struct device *node, void (*cb)(struct of_endpoint *ep,
> >> void *data), void *data) {
> >> 	struct of_endpoint *ep = kzalloc(sizeof(*ep), GFP_KERNEL);
> >>
> >> 	ep->ep_node = node;
> >> 	ep->data = data;
> >> 	ep->callback = cb;
> >>
> >> 	/* store the endpoint to a list */
> >> 	/* check if the endpoint has a remote-endpoint link */
> >> 		/* If so, then link the two together and call the
> >> 		 * callbacks */
> >> }
> >>
> >> That's neither expensive nor complicated.
> >>
> >> Originally I suggested walking the whole tree multiple times, but as
> >> mentioned that doesn't scale, and as I thought about the above it isn't
> >> even a valid thing to do. Everything has to be driven by drivers, so
> >> even if the backlinks are there, nothing can be done with the link until
> >> the other side goes through enumeration independently.
> > 
> > For such composite devices, what we need from a drivers point of view is a 
> > mechanism to wait for all components to be in place before proceeding. This 
> > isn't DT-related as such, but the graph is obviously described in DT for DT-
> > based platforms.
> > 
> > There are at least two mainline implementations of such a mechanism. One of 
> > them can be found in drivers/media/v4l2-core/v4l2-async.c, another more recent 
> > one in drivers/base/component.c. Neither of them is DT-specific, and they 
> > don't try to parse DT content.
> > 
> > The main problem, from a DT point of view, is that we need to pick a master 
> > driver that will initiate the process of waiting for all components to be in 
> > place. This is usually the driver of the main component inside the SoC. For a 
> > camera capture pipeline the master is the SoC camera device driver that will 
> > create the V4L2 device node(s). For a display pipeline the master is the SoC 
> > display driver that will create the DRM/KMS devices.
> > 
> > The master device driver needs to create a list of all components it needs, 
> > and wait until all those components have been probed by their respective 
> > driver. Creating such a list requires walking the graph, starting at the 
> > master device (using a CPU-centric view as described by Russell). This is why 
> > we need the backlinks, as the master device can have inbound links.
> > 
> 
> I am not sure if the approach with one device driver parsing links
> between two other devices is the correct one. For example some links can
> be optional, some irrelevant to the pipeline the master device tries to
> create,....

Yes, that is exactly what I'm concerned about. I think it is important
to be able to have an unambiguous dependency graph. In most cases, only
the driver for a component will actually know which links are
dependencies, and which are optional.... And that doesn't even address hot
plug!

> I guess it could be sometimes solved using additional
> properties, but this will complicate things and will not work if the
> routing decision can be taken only during specific driver probe or later.

If a component is sufficiently complex that the routing is dynamic and
'downstream' components may or may not be present/usable, then there
just has to be a driver that understands its behaviour; either
integrated into the master driver, or a separate driver that abstracts
it from the master driver.

g.

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-11 15:07                           ` Philipp Zabel
  2014-03-11 15:21                             ` Laurent Pinchart
@ 2014-03-20 17:36                             ` Grant Likely
  1 sibling, 0 replies; 74+ messages in thread
From: Grant Likely @ 2014-03-20 17:36 UTC (permalink / raw)
  To: Philipp Zabel
  Cc: Laurent Pinchart, Tomi Valkeinen, Sascha Hauer, Rob Herring,
	Russell King - ARM Linux, Mauro Carvalho Chehab, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

On Tue, 11 Mar 2014 16:07:00 +0100, Philipp Zabel <p.zabel@pengutronix.de> wrote:
> Hi Grant,
> 
> Am Montag, den 10.03.2014, 14:58 +0000 schrieb Grant Likely:
> > On Mon, 10 Mar 2014 14:52:53 +0100, Laurent Pinchart <laurent.pinchart@ideasonboard.com> wrote:
> > > On Monday 10 March 2014 12:18:20 Tomi Valkeinen wrote:
> > > > On 08/03/14 13:41, Grant Likely wrote:
> > > > >> Ok. If we go for single directional link, the question is then: which
> > > > >> way? And is the direction different for display and camera, which are
> > > > >> kind of reflections of each other?
> > > > > 
> > > > > In general I would recommend choosing whichever device you would
> > > > > sensibly think of as a master. In the camera case I would choose the
> > > > > camera controller node instead of the camera itself, and in the display
> > > > > case I would choose the display controller instead of the panel. The
> > > > > binding author needs to choose what she thinks makes the most sense, but
> > > > > drivers can still use it if it turns out to be 'backwards'
> > > > 
> > > > I would perhaps choose the same approach, but at the same time I think
> > > > it's all but clear. The display controller doesn't control the panel any
> > > > more than a DMA controller controls, say, the display controller.
> > > > 
> > > > In fact, in earlier versions of OMAP DSS DT support I had a simpler port
> > > > description, and in that I had the panel as the master (i.e. link from
> > > > panel to dispc) because the panel driver uses the display controller's
> > > > features to provide the panel device a data stream.
> > > > 
> > > > And even with the current OMAP DSS DT version, which uses the v4l2 style
> > > > ports/endpoints, the driver model is still the same, and only links
> > > > towards upstream are used.
> > > > 
> > > > So one reason I'm happy with the dual-linking is that I can easily
> > > > follow the links from the downstream entities to upstream entities, and
> > > > other people, who have different driver model, can easily do the opposite.
> > > > 
> > > > But I agree that single-linking is enough and this can be handled at
> > > > runtime, even if it makes the code more complex. And perhaps requires
> > > > extra data in the dts, to give the start points for the graph.
> > > 
> > > In theory unidirectional links in DT are indeed enough. However, let's not 
> > > forget the following.
> > > 
> > > - There's no such thing as single start points for graphs. Sure, in some 
> > > simple cases the graph will have a single start point, but that's not a 
> > > generic rule. For instance the camera graphs 
> > > http://ideasonboard.org/media/omap3isp.ps and 
> > > http://ideasonboard.org/media/eyecam.ps have two camera sensors, and thus two 
> > > starting points from a data flow point of view. And if you want a better 
> > > understanding of how complex media graphs can become, have a look at 
> > > http://ideasonboard.org/media/vsp1.0.pdf (that's a real world example, albeit 
> > > all connections are internal to the SoC in that particular case, and don't 
> > > need to be described in DT).
> > > 
> > > - There's also no such thing as a master device that can just point to slave 
> > > devices. Once again simple cases exist where that model could work, but real 
> > > world examples exist of complex pipelines with dozens of elements all 
> > > implemented by a separate IP core and handled by separate drivers, forming a 
> > > graph with long chains and branches. We thus need real graph bindings.
> > > 
> > > - Finally, having no backlinks in DT would make the software implementation 
> > > very complex. We need to be able to walk the graph in a generic way without 
> > > having any of the IP core drivers loaded, and without any specific starting 
> > > point. We would thus need to parse the complete DT tree, looking at all nodes 
> > > and trying to find out whether they're part of the graph we're trying to walk. 
> > > The complexity of the operation would be at best quadratic to the number of 
> > > nodes in the whole DT and to the number of nodes in the graph.
> > 
> > Not really. To begin with, you cannot determine any meaning of a node
> > across the tree (aside from it being an endpoint) without also
> > understanding the binding that the node is a part of. That means you
> > need to have something matching against the compatible string on both
> > ends of the linkage. For instance:
> > 
> > panel {
> > 	compatible = "acme,lvds-panel";
> > 	lvds-port: port {
> > 	};
> > };
> > 
> > display-controller {
> > 	compatible = "encom,video";
> > 	port {
> > 		remote-endpoint = <&lvds-port>;
> > 	};
> > };
> > 
> > In the above example, the encom,video driver has absolutely zero
> > information about what the acme,lvds-panel binding actually implements.
> > There needs to be both a driver for the "acme,lvds-panel" binding and
> > one for the "encom,video" binding (even if the acme,lvds-panel binding
> > is very thin and defers the functionality to the video controller).
> > 
> > What you want here is the drivers to register each side of the
> > connection. That could be modeled with something like the following
> > (pseudocode):
> > 
> > struct of_endpoint {
> > 	struct list_head list;
> > 	struct device_node *ep_node;
> > 	void *context;
> > 	void (*cb)(struct of_endpoint *ep, void *data);
> > }
> > 
> > int of_register_port(struct device *node, void (*cb)(struct of_endpoint *ep, void *data), void *data)
> > {
> > 	struct of_endpoint *ep = kzalloc(sizeof(*ep), GFP_KERNEL);
> > 
> > 	ep->ep_node = node;
> > 	ep->data = data;
> > 	ep->callback = cb;
> > 
> > 	/* store the endpoint to a list */
> > 	/* check if the endpoint has a remote-endpoint link */
> > 		/* If so, then link the two together and call the
> > 		 * callbacks */
> > }
> > 
> > That's neither expensive nor complicated.
> > 
> > Originally I suggested walking the whole tree multiple times, but as
> > mentioned that doesn't scale, and as I thought about the above it isn't
> > even a valid thing to do. Everything has to be driven by drivers, so
> > even if the backlinks are there, nothing can be done with the link until
> > the other side goes through enumeration independently.
> 
> I have implemented your suggestion as follows. Basically, this allows
> either endpoint to contain the remote-endpoint link, as long as all
> drivers register their endpoints in the probe function and return
> -EPROBE_DEFER from their component framework bind callback until all
> their endpoints are connected.

This looks reasonable.

g.

> 
> From fdda1fb2bd133200d4620adcbb28697cb360e1cb Mon Sep 17 00:00:00 2001
> From: Philipp Zabel <p.zabel@pengutronix.de>
> Date: Tue, 11 Mar 2014 15:56:18 +0100
> Subject: [PATCH] of: Implement of_graph_register_endpoint
> 
> This patch adds a function that lets drivers register their endpoints in a
> global list. Newly registered endpoints are compared against known endpoints
> to check if a connection should be made. If so, the driver is notified via
> a simple callback.
> 
> Signed-off-by: Philipp Zabel <p.zabel@pengutronix.de>
> ---
>  drivers/of/base.c        | 69 +++++++++++++++++++++++++++++++++++++++++++++---
>  include/linux/of_graph.h | 20 +++++++++++++-
>  2 files changed, 84 insertions(+), 5 deletions(-)
> 
> diff --git a/drivers/of/base.c b/drivers/of/base.c
> index ebb001a..77ae54a 100644
> --- a/drivers/of/base.c
> +++ b/drivers/of/base.c
> @@ -29,6 +29,7 @@
>  #include "of_private.h"
>  
>  LIST_HEAD(aliases_lookup);
> +LIST_HEAD(endpoint_list);
>  
>  struct device_node *of_allnodes;
>  EXPORT_SYMBOL(of_allnodes);
> @@ -2002,6 +2003,7 @@ int of_graph_parse_endpoint(const struct device_node *node,
>  	memset(endpoint, 0, sizeof(*endpoint));
>  
>  	endpoint->local_node = node;
> +	endpoint->remote_node = of_parse_phandle(node, "remote-endpoint", 0);
>  	/*
>  	 * It doesn't matter whether the two calls below succeed.
>  	 * If they don't then the default value 0 is used.
> @@ -2126,6 +2128,19 @@ struct device_node *of_graph_get_next_endpoint(const struct device_node *parent,
>  }
>  EXPORT_SYMBOL(of_graph_get_next_endpoint);
>  
> +static struct of_endpoint *__of_graph_lookup_endpoint(
> +				const struct device_node *node)
> +{
> +	struct of_endpoint *ep;
> +
> +	list_for_each_entry(ep, &endpoint_list, list) {
> +		if (ep->local_node == node)
> +			return ep;
> +	}
> +
> +	return NULL;
> +}
> +
>  /**
>   * of_graph_get_remote_port_parent() - get remote port's parent node
>   * @node: pointer to a local endpoint device_node
> @@ -2136,11 +2151,15 @@ EXPORT_SYMBOL(of_graph_get_next_endpoint);
>  struct device_node *of_graph_get_remote_port_parent(
>  			       const struct device_node *node)
>  {
> +	struct of_endpoint *ep;
>  	struct device_node *np;
>  	unsigned int depth;
>  
>  	/* Get remote endpoint node. */
> -	np = of_parse_phandle(node, "remote-endpoint", 0);
> +	ep = __of_graph_lookup_endpoint(node);
> +	if (!ep || !ep->remote_node)
> +		return NULL;
> +	np = ep->remote_node;
>  
>  	/* Walk 3 levels up only if there is 'ports' node */
>  	for (depth = 3; depth && np; depth--) {
> @@ -2163,13 +2182,14 @@ EXPORT_SYMBOL(of_graph_get_remote_port_parent);
>   */
>  struct device_node *of_graph_get_remote_port(const struct device_node *node)
>  {
> +	struct of_endpoint *ep;
>  	struct device_node *np;
>  
>  	/* Get remote endpoint node. */
> -	np = of_parse_phandle(node, "remote-endpoint", 0);
> -	if (!np)
> +	ep = __of_graph_lookup_endpoint(node);
> +	if (!ep || !ep->remote_node)
>  		return NULL;
> -	np = of_get_next_parent(np);
> +	np = of_get_next_parent(ep->remote_node);
>  	if (of_node_cmp(np->name, "port")) {
>  		of_node_put(np);
>  		return NULL;
> @@ -2177,3 +2197,44 @@ struct device_node *of_graph_get_remote_port(const struct device_node *node)
>  	return np;
>  }
>  EXPORT_SYMBOL(of_graph_get_remote_port);
> +
> +int of_graph_register_endpoint(const struct device_node *node,
> +		void (*cb)(struct of_endpoint *ep, void *data), void *data)
> +{
> +	struct of_endpoint *remote_ep, *ep = kmalloc(sizeof(*ep), GFP_KERNEL);
> +	if (!ep)
> +		return -ENOMEM;
> +
> +	of_graph_parse_endpoint(node, ep);
> +	ep->callback = cb;
> +	ep->data = data;
> +
> +	list_add(&ep->list, &endpoint_list);
> +
> +	list_for_each_entry(remote_ep, &endpoint_list, list) {
> +		struct of_endpoint *from, *to;
> +		if (ep->remote_node) {
> +			from = ep;
> +			to = remote_ep;
> +		} else {
> +			from = remote_ep;
> +			to = ep;
> +		}
> +		if (from->remote_node &&
> +		    from->remote_node == to->local_node) {
> +			WARN_ON(to->remote_node &&
> +				to->remote_node != from->local_node);
> +			to->remote_node = from->local_node;
> +			to->remote_ep = from;
> +			from->remote_ep = to;
> +			if (from->callback)
> +				from->callback(from, from->data);
> +			if (to->callback)
> +				to->callback(to, to->data);
> +			return 0;
> +		}
> +	}
> +
> +	return 0;
> +}
> +EXPORT_SYMBOL(of_graph_register_endpoint);
> diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h
> index 3a3c5a9..f00ac4e 100644
> --- a/include/linux/of_graph.h
> +++ b/include/linux/of_graph.h
> @@ -23,7 +23,14 @@
>  struct of_endpoint {
>  	unsigned int port;
>  	unsigned int id;
> -	const struct device_node *local_node;
> +	struct device_node *local_node;
> +	struct device_node *remote_node;
> +	struct of_endpoint *remote_ep;
> +
> +	/* Internal use only */
> +	struct list_head list;
> +	void (*callback)(struct of_endpoint *ep, void *data);
> +	void *data;
>  };
>  
>  #ifdef CONFIG_OF
> @@ -35,6 +42,10 @@ struct device_node *of_graph_get_next_endpoint(const struct device_node *parent,
>  struct device_node *of_graph_get_remote_port_parent(
>  					const struct device_node *node);
>  struct device_node *of_graph_get_remote_port(const struct device_node *node);
> +
> +int of_graph_register_endpoint(const struct device_node *ep_node,
> +				void (*cb)(struct of_endpoint *ep, void *data),
> +				void *data);
>  #else
>  
>  static inline int of_graph_parse_endpoint(const struct device_node *node,
> @@ -68,6 +79,13 @@ static inline struct device_node *of_graph_get_remote_port(
>  	return NULL;
>  }
>  
> +static inline int of_graph_register_endpoint(const struct device_node *ep_node,
> +				void (*cb)(struct of_endpoint *ep, void *data),
> +				void *data);
> +{
> +	return -ENOSYS;
> +}
> +
>  #endif /* CONFIG_OF */
>  
>  #endif /* __LINUX_OF_GRAPH_H */
> -- 
> 1.9.0
> 
> 


^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-11 15:21                             ` Laurent Pinchart
  2014-03-11 18:52                               ` Philipp Zabel
@ 2014-03-20 17:53                               ` Grant Likely
  1 sibling, 0 replies; 74+ messages in thread
From: Grant Likely @ 2014-03-20 17:53 UTC (permalink / raw)
  To: Laurent Pinchart, Philipp Zabel
  Cc: Tomi Valkeinen, Sascha Hauer, Rob Herring,
	Russell King - ARM Linux, Mauro Carvalho Chehab, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

On Tue, 11 Mar 2014 16:21:49 +0100, Laurent Pinchart <laurent.pinchart@ideasonboard.com> wrote:
> Hi Philipp,
> 
> On Tuesday 11 March 2014 16:07:00 Philipp Zabel wrote:
> > Am Montag, den 10.03.2014, 14:58 +0000 schrieb Grant Likely:
> > > On Mon, 10 Mar 2014 14:52:53 +0100, Laurent Pinchart wrote:
> 
> [snip]
> 
> > > > In theory unidirectional links in DT are indeed enough. However, let's
> > > > not forget the following.
> > > > 
> > > > - There's no such thing as single start points for graphs. Sure, in some
> > > > simple cases the graph will have a single start point, but that's not a
> > > > generic rule. For instance the camera graphs
> > > > http://ideasonboard.org/media/omap3isp.ps and
> > > > http://ideasonboard.org/media/eyecam.ps have two camera sensors, and
> > > > thus two starting points from a data flow point of view. And if you
> > > > want a better understanding of how complex media graphs can become,
> > > > have a look at http://ideasonboard.org/media/vsp1.0.pdf (that's a real
> > > > world example, albeit all connections are internal to the SoC in that
> > > > particular case, and don't need to be described in DT).
> > > > 
> > > > - There's also no such thing as a master device that can just point to
> > > > slave devices. Once again simple cases exist where that model could
> > > > work, but real world examples exist of complex pipelines with dozens of
> > > > elements all implemented by a separate IP core and handled by separate
> > > > drivers, forming a graph with long chains and branches. We thus need
> > > > real graph bindings.
> > > > 
> > > > - Finally, having no backlinks in DT would make the software
> > > > implementation very complex. We need to be able to walk the graph in a
> > > > generic way without having any of the IP core drivers loaded, and
> > > > without any specific starting point. We would thus need to parse the
> > > > complete DT tree, looking at all nodes and trying to find out whether
> > > > they're part of the graph we're trying to walk. The complexity of the
> > > > operation would be at best quadratic to the number of nodes in the whole
> > > > DT and to the number of nodes in the graph.
> > > 
> > > Not really. To begin with, you cannot determine any meaning of a node
> > > across the tree (aside from it being an endpoint) without also
> > > understanding the binding that the node is a part of. That means you
> > > need to have something matching against the compatible string on both
> > > ends of the linkage. For instance:
> > > 
> > > panel {
> > > 	compatible = "acme,lvds-panel";
> > > 	lvds-port: port {
> > > 	};
> > > };
> > > 
> > > display-controller {
> > > 	compatible = "encom,video";
> > > 	port {
> > > 		remote-endpoint = <&lvds-port>;
> > > 	};
> > > };
> > > 
> > > In the above example, the encom,video driver has absolutely zero
> > > information about what the acme,lvds-panel binding actually implements.
> > > There needs to be both a driver for the "acme,lvds-panel" binding and
> > > one for the "encom,video" binding (even if the acme,lvds-panel binding
> > > is very thin and defers the functionality to the video controller).
> > > 
> > > What you want here is the drivers to register each side of the
> > > connection. That could be modeled with something like the following
> > > (pseudocode):
> > > 
> > > struct of_endpoint {
> > > 
> > > 	struct list_head list;
> > > 	struct device_node *ep_node;
> > > 	void *context;
> > > 	void (*cb)(struct of_endpoint *ep, void *data);
> > > 
> > > }
> > > 
> > > int of_register_port(struct device *node, void (*cb)(struct of_endpoint
> > > *ep, void *data), void *data) {
> > > 
> > > 	struct of_endpoint *ep = kzalloc(sizeof(*ep), GFP_KERNEL);
> > > 	
> > > 	ep->ep_node = node;
> > > 	ep->data = data;
> > > 	ep->callback = cb;
> > > 	
> > > 	/* store the endpoint to a list */
> > > 	/* check if the endpoint has a remote-endpoint link */
> > > 		/* If so, then link the two together and call the
> > > 		 * callbacks */
> > > }
> > > 
> > > That's neither expensive or complicated.
> > > 
> > > Originally I suggested walking the whole tree multiple times, but as
> > > mentioned that doesn't scale, and as I thought about the above it isn't
> > > even a valid thing to do. Everything has to be driven by drivers, so
> > > even if the backlinks are there, nothing can be done with the link until
> > > the other side goes through enumeration independently.
> > 
> > I have implemented your suggestion as follows. Basically, this allows
> > either endpoint to contain the remote-endpoint link, as long as all
> > drivers register their endpoints in the probe function and return
> > -EPROBE_DEFER from their component framework bind callback until all
> > their endpoints are connected.
> 
> Beside bringing the whole graph down when a single component can't be probed 
> (either because the corresponding hardware devices is missing, broken, or the 
> driver isn't loaded),

I don't think that follows. It creates a list of all port connections as
drivers 'announce' that the device is available. Any master driver can
query the list and determine what links are important. If something is
missing, then with Philipp's approach the master driver can choose to
ignore it.

> that's adding even one more level of complexity with an 
> additional callback. I'm afraid I can't accept it as-is, the result is just 
> too complex for device drivers and not flexible enough.

How is it not flexible enough? It seems to be more flexible to me
because each driver gets to determine the meaning of each link and
whether or not it is important.

The interface could be modifed though to be simpler for drivers by
eliminating the callback and adding a notifier for linkage additions or
removals. Then only code that actually cares about links needs to handle
events.

> I want to keep the ability to walk the graph without requiring all components 
> to be probed by their respective driver. What happened to your suggestion of 
> parsing the whole DT once at boot time ?
> 
> > From fdda1fb2bd133200d4620adcbb28697cb360e1cb Mon Sep 17 00:00:00 2001
> > From: Philipp Zabel <p.zabel@pengutronix.de>
> > Date: Tue, 11 Mar 2014 15:56:18 +0100
> > Subject: [PATCH] of: Implement of_graph_register_endpoint
> > 
> > This patch adds a function that lets drivers register their endpoints in a
> > global list. Newly registered endpoints are compared against known endpoints
> > to check if a connection should be made. If so, the driver is notified via
> > a simple callback.
> > 
> > Signed-off-by: Philipp Zabel <p.zabel@pengutronix.de>
> > ---
> >  drivers/of/base.c        | 69 ++++++++++++++++++++++++++++++++++++++++++---
> >  include/linux/of_graph.h | 20 +++++++++++++-
> >  2 files changed, 84 insertions(+), 5 deletions(-)
> > 
> > diff --git a/drivers/of/base.c b/drivers/of/base.c
> > index ebb001a..77ae54a 100644
> > --- a/drivers/of/base.c
> > +++ b/drivers/of/base.c
> > @@ -29,6 +29,7 @@
> >  #include "of_private.h"
> > 
> >  LIST_HEAD(aliases_lookup);
> > +LIST_HEAD(endpoint_list);
> > 
> >  struct device_node *of_allnodes;
> >  EXPORT_SYMBOL(of_allnodes);
> > @@ -2002,6 +2003,7 @@ int of_graph_parse_endpoint(const struct device_node
> > *node, memset(endpoint, 0, sizeof(*endpoint));
> > 
> >  	endpoint->local_node = node;
> > +	endpoint->remote_node = of_parse_phandle(node, "remote-endpoint", 0);
> >  	/*
> >  	 * It doesn't matter whether the two calls below succeed.
> >  	 * If they don't then the default value 0 is used.
> > @@ -2126,6 +2128,19 @@ struct device_node *of_graph_get_next_endpoint(const
> > struct device_node *parent, }
> >  EXPORT_SYMBOL(of_graph_get_next_endpoint);
> > 
> > +static struct of_endpoint *__of_graph_lookup_endpoint(
> > +				const struct device_node *node)
> > +{
> > +	struct of_endpoint *ep;
> > +
> > +	list_for_each_entry(ep, &endpoint_list, list) {
> > +		if (ep->local_node == node)
> > +			return ep;
> > +	}
> > +
> > +	return NULL;
> > +}
> > +
> >  /**
> >   * of_graph_get_remote_port_parent() - get remote port's parent node
> >   * @node: pointer to a local endpoint device_node
> > @@ -2136,11 +2151,15 @@ EXPORT_SYMBOL(of_graph_get_next_endpoint);
> >  struct device_node *of_graph_get_remote_port_parent(
> >  			       const struct device_node *node)
> >  {
> > +	struct of_endpoint *ep;
> >  	struct device_node *np;
> >  	unsigned int depth;
> > 
> >  	/* Get remote endpoint node. */
> > -	np = of_parse_phandle(node, "remote-endpoint", 0);
> > +	ep = __of_graph_lookup_endpoint(node);
> > +	if (!ep || !ep->remote_node)
> > +		return NULL;
> > +	np = ep->remote_node;
> > 
> >  	/* Walk 3 levels up only if there is 'ports' node */
> >  	for (depth = 3; depth && np; depth--) {
> > @@ -2163,13 +2182,14 @@ EXPORT_SYMBOL(of_graph_get_remote_port_parent);
> >   */
> >  struct device_node *of_graph_get_remote_port(const struct device_node
> > *node) {
> > +	struct of_endpoint *ep;
> >  	struct device_node *np;
> > 
> >  	/* Get remote endpoint node. */
> > -	np = of_parse_phandle(node, "remote-endpoint", 0);
> > -	if (!np)
> > +	ep = __of_graph_lookup_endpoint(node);
> > +	if (!ep || !ep->remote_node)
> >  		return NULL;
> > -	np = of_get_next_parent(np);
> > +	np = of_get_next_parent(ep->remote_node);
> >  	if (of_node_cmp(np->name, "port")) {
> >  		of_node_put(np);
> >  		return NULL;
> > @@ -2177,3 +2197,44 @@ struct device_node *of_graph_get_remote_port(const
> > struct device_node *node) return np;
> >  }
> >  EXPORT_SYMBOL(of_graph_get_remote_port);
> > +
> > +int of_graph_register_endpoint(const struct device_node *node,
> > +		void (*cb)(struct of_endpoint *ep, void *data), void *data)
> > +{
> > +	struct of_endpoint *remote_ep, *ep = kmalloc(sizeof(*ep), GFP_KERNEL);
> > +	if (!ep)
> > +		return -ENOMEM;
> > +
> > +	of_graph_parse_endpoint(node, ep);
> > +	ep->callback = cb;
> > +	ep->data = data;
> > +
> > +	list_add(&ep->list, &endpoint_list);
> > +
> > +	list_for_each_entry(remote_ep, &endpoint_list, list) {
> > +		struct of_endpoint *from, *to;
> > +		if (ep->remote_node) {
> > +			from = ep;
> > +			to = remote_ep;
> > +		} else {
> > +			from = remote_ep;
> > +			to = ep;
> > +		}
> > +		if (from->remote_node &&
> > +		    from->remote_node == to->local_node) {
> > +			WARN_ON(to->remote_node &&
> > +				to->remote_node != from->local_node);
> > +			to->remote_node = from->local_node;
> > +			to->remote_ep = from;
> > +			from->remote_ep = to;
> > +			if (from->callback)
> > +				from->callback(from, from->data);
> > +			if (to->callback)
> > +				to->callback(to, to->data);
> > +			return 0;
> > +		}
> > +	}
> > +
> > +	return 0;
> > +}
> > +EXPORT_SYMBOL(of_graph_register_endpoint);
> > diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h
> > index 3a3c5a9..f00ac4e 100644
> > --- a/include/linux/of_graph.h
> > +++ b/include/linux/of_graph.h
> > @@ -23,7 +23,14 @@
> >  struct of_endpoint {
> >  	unsigned int port;
> >  	unsigned int id;
> > -	const struct device_node *local_node;
> > +	struct device_node *local_node;
> > +	struct device_node *remote_node;
> > +	struct of_endpoint *remote_ep;
> > +
> > +	/* Internal use only */
> > +	struct list_head list;
> > +	void (*callback)(struct of_endpoint *ep, void *data);
> > +	void *data;
> >  };
> > 
> >  #ifdef CONFIG_OF
> > @@ -35,6 +42,10 @@ struct device_node *of_graph_get_next_endpoint(const
> > struct device_node *parent, struct device_node
> > *of_graph_get_remote_port_parent(
> >  					const struct device_node *node);
> >  struct device_node *of_graph_get_remote_port(const struct device_node
> > *node); +
> > +int of_graph_register_endpoint(const struct device_node *ep_node,
> > +				void (*cb)(struct of_endpoint *ep, void *data),
> > +				void *data);
> >  #else
> > 
> >  static inline int of_graph_parse_endpoint(const struct device_node *node,
> > @@ -68,6 +79,13 @@ static inline struct device_node
> > *of_graph_get_remote_port( return NULL;
> >  }
> > 
> > +static inline int of_graph_register_endpoint(const struct device_node
> > *ep_node, +				void (*cb)(struct of_endpoint *ep, void *data),
> > +				void *data);
> > +{
> > +	return -ENOSYS;
> > +}
> > +
> >  #endif /* CONFIG_OF */
> > 
> >  #endif /* __LINUX_OF_GRAPH_H */
> 
> -- 
> Regards,
> 
> Laurent Pinchart
> 


^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-12 10:25                         ` Russell King - ARM Linux
  2014-03-12 10:47                           ` Tomi Valkeinen
@ 2014-03-20 17:54                           ` Grant Likely
  2014-03-20 18:16                             ` Laurent Pinchart
  2014-03-20 18:38                             ` Mauro Carvalho Chehab
  1 sibling, 2 replies; 74+ messages in thread
From: Grant Likely @ 2014-03-20 17:54 UTC (permalink / raw)
  To: Russell King - ARM Linux, Laurent Pinchart
  Cc: Tomi Valkeinen, Philipp Zabel, Sascha Hauer, Rob Herring,
	Mauro Carvalho Chehab, Rob Herring, Sylwester Nawrocki,
	Kyungmin Park, linux-kernel, linux-media, devicetree,
	Philipp Zabel

On Wed, 12 Mar 2014 10:25:56 +0000, Russell King - ARM Linux <linux@arm.linux.org.uk> wrote:
> On Mon, Mar 10, 2014 at 02:52:53PM +0100, Laurent Pinchart wrote:
> > In theory unidirectional links in DT are indeed enough. However, let's not 
> > forget the following.
> > 
> > - There's no such thing as single start points for graphs. Sure, in some 
> > simple cases the graph will have a single start point, but that's not a 
> > generic rule. For instance the camera graphs 
> > http://ideasonboard.org/media/omap3isp.ps and 
> > http://ideasonboard.org/media/eyecam.ps have two camera sensors, and thus two 
> > starting points from a data flow point of view.
> 
> I think we need to stop thinking of a graph linked in terms of data
> flow - that's really not useful.
> 
> Consider a display subsystem.  The CRTC is the primary interface for
> the CPU - this is the "most interesting" interface, it's the interface
> which provides access to the picture to be displayed for the CPU.  Other
> interfaces are secondary to that purpose - reading the I2C DDC bus for
> the display information is all secondary to the primary purpose of
> displaying a picture.
> 
> For a capture subsystem, the primary interface for the CPU is the frame
> grabber (whether it be an already encoded frame or not.)  The sensor
> devices are all secondary to that.
> 
> So, the primary software interface in each case is where the data for
> the primary purpose is transferred.  This is the point at which these
> graphs should commence since this is where we would normally start
> enumeration of the secondary interfaces.
> 
> V4L2 even provides interfaces for this: you open the capture device,
> which then allows you to enumerate the capture device's inputs, and
> this in turn allows you to enumerate their properties.  You don't open
> a particular sensor and work back up the tree.
> 
> I believe trying to do this according to the flow of data is just wrong.
> You should always describe things from the primary device for the CPU
> towards the peripheral devices and never the opposite direction.

Agreed.

g.

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-20 17:54                           ` Grant Likely
@ 2014-03-20 18:16                             ` Laurent Pinchart
  2014-03-20 18:18                               ` Russell King - ARM Linux
  2014-03-20 18:38                             ` Mauro Carvalho Chehab
  1 sibling, 1 reply; 74+ messages in thread
From: Laurent Pinchart @ 2014-03-20 18:16 UTC (permalink / raw)
  To: Grant Likely
  Cc: Russell King - ARM Linux, Tomi Valkeinen, Philipp Zabel,
	Sascha Hauer, Rob Herring, Mauro Carvalho Chehab, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

On Thursday 20 March 2014 17:54:31 Grant Likely wrote:
> On Wed, 12 Mar 2014 10:25:56 +0000, Russell King - ARM Linux wrote:
> > On Mon, Mar 10, 2014 at 02:52:53PM +0100, Laurent Pinchart wrote:
> > > In theory unidirectional links in DT are indeed enough. However, let's
> > > not forget the following.
> > > 
> > > - There's no such thing as single start points for graphs. Sure, in some
> > > simple cases the graph will have a single start point, but that's not a
> > > generic rule. For instance the camera graphs
> > > http://ideasonboard.org/media/omap3isp.ps and
> > > http://ideasonboard.org/media/eyecam.ps have two camera sensors, and
> > > thus two starting points from a data flow point of view.
> > 
> > I think we need to stop thinking of a graph linked in terms of data
> > flow - that's really not useful.
> > 
> > Consider a display subsystem.  The CRTC is the primary interface for
> > the CPU - this is the "most interesting" interface, it's the interface
> > which provides access to the picture to be displayed for the CPU.  Other
> > interfaces are secondary to that purpose - reading the I2C DDC bus for
> > the display information is all secondary to the primary purpose of
> > displaying a picture.
> > 
> > For a capture subsystem, the primary interface for the CPU is the frame
> > grabber (whether it be an already encoded frame or not.)  The sensor
> > devices are all secondary to that.
> > 
> > So, the primary software interface in each case is where the data for
> > the primary purpose is transferred.  This is the point at which these
> > graphs should commence since this is where we would normally start
> > enumeration of the secondary interfaces.
> > 
> > V4L2 even provides interfaces for this: you open the capture device,
> > which then allows you to enumerate the capture device's inputs, and
> > this in turn allows you to enumerate their properties.  You don't open
> > a particular sensor and work back up the tree.
> > 
> > I believe trying to do this according to the flow of data is just wrong.
> > You should always describe things from the primary device for the CPU
> > towards the peripheral devices and never the opposite direction.
> 
> Agreed.

Absolutely not agreed. The whole concept of CPU towards peripherals only makes 
sense for very simple devices and breaks as soon as the hardware gets more 
complex. There's no such thing as CPU towards peripherals when peripherals 
communicate directly.

Please consider use cases more complex than just a display controller and an 
encoder, and you'll realize how messy not being able to parse the whole graph 
at once will become. Let's try to improve things, not prevent support for
future devices.

-- 
Regards,

Laurent Pinchart


^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-20 18:16                             ` Laurent Pinchart
@ 2014-03-20 18:18                               ` Russell King - ARM Linux
  2014-03-20 18:50                                 ` Laurent Pinchart
  2014-03-20 19:27                                 ` Laurent Pinchart
  0 siblings, 2 replies; 74+ messages in thread
From: Russell King - ARM Linux @ 2014-03-20 18:18 UTC (permalink / raw)
  To: Laurent Pinchart
  Cc: Grant Likely, Tomi Valkeinen, Philipp Zabel, Sascha Hauer,
	Rob Herring, Mauro Carvalho Chehab, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

On Thu, Mar 20, 2014 at 07:16:29PM +0100, Laurent Pinchart wrote:
> On Thursday 20 March 2014 17:54:31 Grant Likely wrote:
> > On Wed, 12 Mar 2014 10:25:56 +0000, Russell King - ARM Linux wrote:
> > > On Mon, Mar 10, 2014 at 02:52:53PM +0100, Laurent Pinchart wrote:
> > > > In theory unidirectional links in DT are indeed enough. However, let's
> > > > not forget the following.
> > > > 
> > > > - There's no such thing as single start points for graphs. Sure, in some
> > > > simple cases the graph will have a single start point, but that's not a
> > > > generic rule. For instance the camera graphs
> > > > http://ideasonboard.org/media/omap3isp.ps and
> > > > http://ideasonboard.org/media/eyecam.ps have two camera sensors, and
> > > > thus two starting points from a data flow point of view.
> > > 
> > > I think we need to stop thinking of a graph linked in terms of data
> > > flow - that's really not useful.
> > > 
> > > Consider a display subsystem.  The CRTC is the primary interface for
> > > the CPU - this is the "most interesting" interface, it's the interface
> > > which provides access to the picture to be displayed for the CPU.  Other
> > > interfaces are secondary to that purpose - reading the I2C DDC bus for
> > > the display information is all secondary to the primary purpose of
> > > displaying a picture.
> > > 
> > > For a capture subsystem, the primary interface for the CPU is the frame
> > > grabber (whether it be an already encoded frame or not.)  The sensor
> > > devices are all secondary to that.
> > > 
> > > So, the primary software interface in each case is where the data for
> > > the primary purpose is transferred.  This is the point at which these
> > > graphs should commence since this is where we would normally start
> > > enumeration of the secondary interfaces.
> > > 
> > > V4L2 even provides interfaces for this: you open the capture device,
> > > which then allows you to enumerate the capture device's inputs, and
> > > this in turn allows you to enumerate their properties.  You don't open
> > > a particular sensor and work back up the tree.
> > > 
> > > I believe trying to do this according to the flow of data is just wrong.
> > > You should always describe things from the primary device for the CPU
> > > towards the peripheral devices and never the opposite direction.
> > 
> > Agreed.
> 
> Absolutely not agreed. The whole concept of CPU towards peripherals only makes 
> sense for very simple devices and breaks as soon as the hardware gets more 
> complex. There's no such thing as CPU towards peripherals when peripherals 
> communicate directly.
> 
> Please consider use cases more complex than just a display controller and an 
> encoder, and you'll realize how messy not being able to parse the whole graph 
> at once will become. Let's try to improve things, not to make sure to prevent 
> support for future devices.

That's odd, I did.

Please draw some (ascii) diagrams of the situations you're saying this
won't work for, because at the moment all I'm seeing is some vague
hand-waving rather than anything factual that I can relate to.  Help
us to actually _see_ the problem you have with this approach so we can
understand it.

-- 
FTTC broadband for 0.8mile line: now at 9.7Mbps down 460kbps up... slowly
improving, and getting towards what was expected from it.

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-20 17:54                           ` Grant Likely
  2014-03-20 18:16                             ` Laurent Pinchart
@ 2014-03-20 18:38                             ` Mauro Carvalho Chehab
  2014-03-20 18:43                               ` Russell King - ARM Linux
  2014-03-20 18:48                               ` Grant Likely
  1 sibling, 2 replies; 74+ messages in thread
From: Mauro Carvalho Chehab @ 2014-03-20 18:38 UTC (permalink / raw)
  To: Grant Likely
  Cc: Russell King - ARM Linux, Laurent Pinchart, Tomi Valkeinen,
	Philipp Zabel, Sascha Hauer, Rob Herring, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

Em Thu, 20 Mar 2014 17:54:31 +0000
Grant Likely <grant.likely@linaro.org> escreveu:

> On Wed, 12 Mar 2014 10:25:56 +0000, Russell King - ARM Linux <linux@arm.linux.org.uk> wrote:
> > On Mon, Mar 10, 2014 at 02:52:53PM +0100, Laurent Pinchart wrote:
> > > In theory unidirectional links in DT are indeed enough. However, let's not 
> > > forget the following.
> > > 
> > > - There's no such thing as single start points for graphs. Sure, in some 
> > > simple cases the graph will have a single start point, but that's not a 
> > > generic rule. For instance the camera graphs 
> > > http://ideasonboard.org/media/omap3isp.ps and 
> > > http://ideasonboard.org/media/eyecam.ps have two camera sensors, and thus two 
> > > starting points from a data flow point of view.
> > 
> > I think we need to stop thinking of a graph linked in terms of data
> > flow - that's really not useful.
> > 
> > Consider a display subsystem.  The CRTC is the primary interface for
> > the CPU - this is the "most interesting" interface, it's the interface
> > which provides access to the picture to be displayed for the CPU.  Other
> > interfaces are secondary to that purpose - reading the I2C DDC bus for
> > the display information is all secondary to the primary purpose of
> > displaying a picture.
> > 
> > For a capture subsystem, the primary interface for the CPU is the frame
> > grabber (whether it be an already encoded frame or not.)  The sensor
> > devices are all secondary to that.
> > 
> > So, the primary software interface in each case is where the data for
> > the primary purpose is transferred.  This is the point at which these
> > graphs should commence since this is where we would normally start
> > enumeration of the secondary interfaces.
> > 
> > V4L2 even provides interfaces for this: you open the capture device,
> > which then allows you to enumerate the capture device's inputs, and
> > this in turn allows you to enumerate their properties.  You don't open
> > a particular sensor and work back up the tree.
> > 
> > I believe trying to do this according to the flow of data is just wrong.
> > You should always describe things from the primary device for the CPU
> > towards the peripheral devices and never the opposite direction.
> 
> Agreed.

I don't agree, as what counts as the primary device is relative. 

Actually, in the case of a media data flow, the CPU is generally not
the primary device.

Even on general purpose computers, if the full data flow is taken into
the account, the CPU is a mere device that will just be used to copy
data either to the GPU and speakers or to disk, possibly doing format
conversions when the hardware is cheap and doesn't provide format
converters.

On more complex devices, like the ones we want to solve with the
media controller, like an embedded hardware like a TV or a STB, the CPU
is just an ancillary component that could even hang without stopping 
TV reception, as the data flow can be fully done inside the chipset.

Regards,
Mauro

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-20 18:38                             ` Mauro Carvalho Chehab
@ 2014-03-20 18:43                               ` Russell King - ARM Linux
  2014-03-20 18:49                                 ` Laurent Pinchart
  2014-03-20 18:48                               ` Grant Likely
  1 sibling, 1 reply; 74+ messages in thread
From: Russell King - ARM Linux @ 2014-03-20 18:43 UTC (permalink / raw)
  To: Mauro Carvalho Chehab
  Cc: Grant Likely, Laurent Pinchart, Tomi Valkeinen, Philipp Zabel,
	Sascha Hauer, Rob Herring, Rob Herring, Sylwester Nawrocki,
	Kyungmin Park, linux-kernel, linux-media, devicetree,
	Philipp Zabel

On Thu, Mar 20, 2014 at 03:38:04PM -0300, Mauro Carvalho Chehab wrote:
> Em Thu, 20 Mar 2014 17:54:31 +0000
> Grant Likely <grant.likely@linaro.org> escreveu:
> 
> > On Wed, 12 Mar 2014 10:25:56 +0000, Russell King - ARM Linux <linux@arm.linux.org.uk> wrote:
> > > On Mon, Mar 10, 2014 at 02:52:53PM +0100, Laurent Pinchart wrote:
> > > > In theory unidirectional links in DT are indeed enough. However, let's not 
> > > > forget the following.
> > > > 
> > > > - There's no such thing as single start points for graphs. Sure, in some 
> > > > simple cases the graph will have a single start point, but that's not a 
> > > > generic rule. For instance the camera graphs 
> > > > http://ideasonboard.org/media/omap3isp.ps and 
> > > > http://ideasonboard.org/media/eyecam.ps have two camera sensors, and thus two 
> > > > starting points from a data flow point of view.
> > > 
> > > I think we need to stop thinking of a graph linked in terms of data
> > > flow - that's really not useful.
> > > 
> > > Consider a display subsystem.  The CRTC is the primary interface for
> > > the CPU - this is the "most interesting" interface, it's the interface
> > > which provides access to the picture to be displayed for the CPU.  Other
> > > interfaces are secondary to that purpose - reading the I2C DDC bus for
> > > the display information is all secondary to the primary purpose of
> > > displaying a picture.
> > > 
> > > For a capture subsystem, the primary interface for the CPU is the frame
> > > grabber (whether it be an already encoded frame or not.)  The sensor
> > > devices are all secondary to that.
> > > 
> > > So, the primary software interface in each case is where the data for
> > > the primary purpose is transferred.  This is the point at which these
> > > graphs should commence since this is where we would normally start
> > > enumeration of the secondary interfaces.
> > > 
> > > V4L2 even provides interfaces for this: you open the capture device,
> > > which then allows you to enumerate the capture device's inputs, and
> > > this in turn allows you to enumerate their properties.  You don't open
> > > a particular sensor and work back up the tree.
> > > 
> > > I believe trying to do this according to the flow of data is just wrong.
> > > You should always describe things from the primary device for the CPU
> > > towards the peripheral devices and never the opposite direction.
> > 
> > Agreed.
> 
> I don't agree, as what's the primary device is relative. 
> 
> Actually, in the case of a media data flow, the CPU is generally not
> the primary device.
> 
> Even on general purpose computers, if the full data flow is taken into
> the account, the CPU is a mere device that will just be used to copy
> data either to GPU and speakers or to disk, eventually doing format
> conversions, when the hardware is cheap and don't provide format
> converters.
> 
> On more complex devices, like the ones we want to solve with the
> media controller, like an embedded hardware like a TV or a STB, the CPU
> is just an ancillary component that could even hang without stopping 
> TV reception, as the data flow can be fully done inside the chipset.

The CPU is the _controlling_ component - it's the component that has to
configure the peripherals so they all talk to each other in the right
way.  Therefore, the view of it needs to be CPU centric.

If we were providing a DT description for consumption by some other
device in the system, then the view should be as seen from that device
instead.

Think about this.  Would you describe a system starting at, say, the
system keyboard, and branching all the way through just because that's
how you interact with it, or would you describe it from the CPUs point
of view because that's what has to be in control of the system.

-- 
FTTC broadband for 0.8mile line: now at 9.7Mbps down 460kbps up... slowly
improving, and getting towards what was expected from it.

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-20 18:38                             ` Mauro Carvalho Chehab
  2014-03-20 18:43                               ` Russell King - ARM Linux
@ 2014-03-20 18:48                               ` Grant Likely
  2014-03-20 18:52                                 ` Laurent Pinchart
  1 sibling, 1 reply; 74+ messages in thread
From: Grant Likely @ 2014-03-20 18:48 UTC (permalink / raw)
  To: Mauro Carvalho Chehab
  Cc: Russell King - ARM Linux, Laurent Pinchart, Tomi Valkeinen,
	Philipp Zabel, Sascha Hauer, Rob Herring, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

On Thu, 20 Mar 2014 15:38:04 -0300, Mauro Carvalho Chehab <m.chehab@samsung.com> wrote:
> Em Thu, 20 Mar 2014 17:54:31 +0000
> Grant Likely <grant.likely@linaro.org> escreveu:
> 
> > On Wed, 12 Mar 2014 10:25:56 +0000, Russell King - ARM Linux <linux@arm.linux.org.uk> wrote:
> > > On Mon, Mar 10, 2014 at 02:52:53PM +0100, Laurent Pinchart wrote:
> > > > In theory unidirectional links in DT are indeed enough. However, let's not 
> > > > forget the following.
> > > > 
> > > > - There's no such thing as single start points for graphs. Sure, in some 
> > > > simple cases the graph will have a single start point, but that's not a 
> > > > generic rule. For instance the camera graphs 
> > > > http://ideasonboard.org/media/omap3isp.ps and 
> > > > http://ideasonboard.org/media/eyecam.ps have two camera sensors, and thus two 
> > > > starting points from a data flow point of view.
> > > 
> > > I think we need to stop thinking of a graph linked in terms of data
> > > flow - that's really not useful.
> > > 
> > > Consider a display subsystem.  The CRTC is the primary interface for
> > > the CPU - this is the "most interesting" interface, it's the interface
> > > which provides access to the picture to be displayed for the CPU.  Other
> > > interfaces are secondary to that purpose - reading the I2C DDC bus for
> > > the display information is all secondary to the primary purpose of
> > > displaying a picture.
> > > 
> > > For a capture subsystem, the primary interface for the CPU is the frame
> > > grabber (whether it be an already encoded frame or not.)  The sensor
> > > devices are all secondary to that.
> > > 
> > > So, the primary software interface in each case is where the data for
> > > the primary purpose is transferred.  This is the point at which these
> > > graphs should commence since this is where we would normally start
> > > enumeration of the secondary interfaces.
> > > 
> > > V4L2 even provides interfaces for this: you open the capture device,
> > > which then allows you to enumerate the capture device's inputs, and
> > > this in turn allows you to enumerate their properties.  You don't open
> > > a particular sensor and work back up the tree.
> > > 
> > > I believe trying to do this according to the flow of data is just wrong.
> > > You should always describe things from the primary device for the CPU
> > > towards the peripheral devices and never the opposite direction.
> > 
> > Agreed.
> 
> I don't agree, as what's the primary device is relative. 
> 
> Actually, in the case of a media data flow, the CPU is generally not
> the primary device.
> 
> Even on general purpose computers, if the full data flow is taken into
> the account, the CPU is a mere device that will just be used to copy
> data either to GPU and speakers or to disk, eventually doing format
> conversions, when the hardware is cheap and don't provide format
> converters.
> 
> On more complex devices, like the ones we want to solve with the
> media controller, like an embedded hardware like a TV or a STB, the CPU
> is just an ancillary component that could even hang without stopping 
> TV reception, as the data flow can be fully done inside the chipset.

We're talking about wiring up device drivers here, not data flow. Yes, I
completely understand that data flow is often not even remotely
cpu-centric. However, device drivers are, and the kernel needs to know
the dependency graph for choosing what devices depend on other devices.

g.


^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-20 18:43                               ` Russell King - ARM Linux
@ 2014-03-20 18:49                                 ` Laurent Pinchart
  2014-03-21  8:20                                   ` Tomi Valkeinen
  0 siblings, 1 reply; 74+ messages in thread
From: Laurent Pinchart @ 2014-03-20 18:49 UTC (permalink / raw)
  To: Russell King - ARM Linux
  Cc: Mauro Carvalho Chehab, Grant Likely, Tomi Valkeinen,
	Philipp Zabel, Sascha Hauer, Rob Herring, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

On Thursday 20 March 2014 18:43:16 Russell King - ARM Linux wrote:
> On Thu, Mar 20, 2014 at 03:38:04PM -0300, Mauro Carvalho Chehab wrote:
> > Em Thu, 20 Mar 2014 17:54:31 +0000 Grant Likely escreveu:
> > > On Wed, 12 Mar 2014 10:25:56 +0000, Russell King - ARM Linux wrote:
> > > > On Mon, Mar 10, 2014 at 02:52:53PM +0100, Laurent Pinchart wrote:
> > > > > In theory unidirectional links in DT are indeed enough. However,
> > > > > let's not forget the following.
> > > > > 
> > > > > - There's no such thing as single start points for graphs. Sure, in
> > > > > some simple cases the graph will have a single start point, but
> > > > > that's not a generic rule. For instance the camera graphs
> > > > > http://ideasonboard.org/media/omap3isp.ps and
> > > > > http://ideasonboard.org/media/eyecam.ps have two camera sensors, and
> > > > > thus two starting points from a data flow point of view.
> > > > 
> > > > I think we need to stop thinking of a graph linked in terms of data
> > > > flow - that's really not useful.
> > > > 
> > > > Consider a display subsystem.  The CRTC is the primary interface for
> > > > the CPU - this is the "most interesting" interface, it's the interface
> > > > which provides access to the picture to be displayed for the CPU. 
> > > > Other interfaces are secondary to that purpose - reading the I2C DDC
> > > > bus for the display information is all secondary to the primary
> > > > purpose of displaying a picture.
> > > > 
> > > > For a capture subsystem, the primary interface for the CPU is the
> > > > frame grabber (whether it be an already encoded frame or not.)  The
> > > > sensor devices are all secondary to that.
> > > > 
> > > > So, the primary software interface in each case is where the data for
> > > > the primary purpose is transferred.  This is the point at which these
> > > > graphs should commence since this is where we would normally start
> > > > enumeration of the secondary interfaces.
> > > > 
> > > > V4L2 even provides interfaces for this: you open the capture device,
> > > > which then allows you to enumerate the capture device's inputs, and
> > > > this in turn allows you to enumerate their properties.  You don't open
> > > > a particular sensor and work back up the tree.
> > > > 
> > > > I believe trying to do this according to the flow of data is just
> > > > wrong.
> > > > You should always describe things from the primary device for the CPU
> > > > towards the peripheral devices and never the opposite direction.
> > > 
> > > Agreed.
> > 
> > I don't agree, as what's the primary device is relative.
> > 
> > Actually, in the case of a media data flow, the CPU is generally not
> > the primary device.
> > 
> > Even on general purpose computers, if the full data flow is taken into
> > the account, the CPU is a mere device that will just be used to copy
> > data either to GPU and speakers or to disk, eventually doing format
> > conversions, when the hardware is cheap and don't provide format
> > converters.
> > 
> > On more complex devices, like the ones we want to solve with the
> > media controller, like an embedded hardware like a TV or a STB, the CPU
> > is just an ancillary component that could even hang without stopping
> > TV reception, as the data flow can be fully done inside the chipset.
> 
> The CPU is the _controlling_ component - it's the component that has to
> configure the peripherals so they all talk to each other in the right
> way.  Therefore, the view of it needs to be CPU centric.
> 
> If we were providing a DT description for consumption by some other
> device in the system, then the view should be as seen from that device
> instead.
> 
> Think about this.  Would you describe a system starting at, say, the
> system keyboard, and branching all the way through just because that's
> how you interact with it, or would you describe it from the CPUs point
> of view because that's what has to be in control of the system.

DT has been designed to represent a control-based view of the system. It does 
so pretty well using its tree-based model. However, it doesn't have a native 
way to represent a flow-based graph, hence the OF graph solution we're 
discussing. The whole point of this proposal is to represent the topology of 
the media device, not how each entity is controlled.

-- 
Regards,

Laurent Pinchart


^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-20 18:18                               ` Russell King - ARM Linux
@ 2014-03-20 18:50                                 ` Laurent Pinchart
  2014-03-20 19:27                                 ` Laurent Pinchart
  1 sibling, 0 replies; 74+ messages in thread
From: Laurent Pinchart @ 2014-03-20 18:50 UTC (permalink / raw)
  To: Russell King - ARM Linux
  Cc: Grant Likely, Tomi Valkeinen, Philipp Zabel, Sascha Hauer,
	Rob Herring, Mauro Carvalho Chehab, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

Hi Russell,

On Thursday 20 March 2014 18:18:20 Russell King - ARM Linux wrote:
> On Thu, Mar 20, 2014 at 07:16:29PM +0100, Laurent Pinchart wrote:
> > On Thursday 20 March 2014 17:54:31 Grant Likely wrote:
> > > On Wed, 12 Mar 2014 10:25:56 +0000, Russell King - ARM Linux wrote:

[snip]

> > > > I believe trying to do this according to the flow of data is just
> > > > wrong. You should always describe things from the primary device for
> > > > the CPU towards the peripheral devices and never the opposite
> > > > direction.
> > > 
> > > Agreed.
> > 
> > Absolutely not agreed. The whole concept of CPU towards peripherals only
> > makes sense for very simple devices and breaks as soon as the hardware
> > gets more complex. There's no such thing as CPU towards peripherals when
> > peripherals communicate directly.
> > 
> > Please consider use cases more complex than just a display controller and
> > an encoder, and you'll realize how messy not being able to parse the
> > whole graph at once will become. Let's try to improve things, not to make
> > sure to prevent support for future devices.
> 
> That's odd, I did.
> 
> Please draw some (ascii) diagrams of the situations you're saying this
> won't work for, because at the moment all I'm seeing is some vague
> hand-waving rather than anything factual that I can relate to.  Help
> us to actually _see_ the problem you have with this approach so we can
> understand it.

Working on it. Given my drawing skills this will take a bit of time.

-- 
Regards,

Laurent Pinchart


^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-20 18:48                               ` Grant Likely
@ 2014-03-20 18:52                                 ` Laurent Pinchart
  2014-03-20 23:12                                   ` Grant Likely
  0 siblings, 1 reply; 74+ messages in thread
From: Laurent Pinchart @ 2014-03-20 18:52 UTC (permalink / raw)
  To: Grant Likely
  Cc: Mauro Carvalho Chehab, Russell King - ARM Linux, Tomi Valkeinen,
	Philipp Zabel, Sascha Hauer, Rob Herring, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

On Thursday 20 March 2014 18:48:16 Grant Likely wrote:
> On Thu, 20 Mar 2014 15:38:04 -0300, Mauro Carvalho Chehab wrote:
> > Em Thu, 20 Mar 2014 17:54:31 +0000 Grant Likely escreveu:
> > > On Wed, 12 Mar 2014 10:25:56 +0000, Russell King - ARM Linux wrote:
> > > > On Mon, Mar 10, 2014 at 02:52:53PM +0100, Laurent Pinchart wrote:
> > > > > In theory unidirectional links in DT are indeed enough. However,
> > > > > let's not forget the following.
> > > > > 
> > > > > - There's no such thing as single start points for graphs. Sure, in
> > > > > some simple cases the graph will have a single start point, but
> > > > > that's not a generic rule. For instance the camera graphs
> > > > > http://ideasonboard.org/media/omap3isp.ps and
> > > > > http://ideasonboard.org/media/eyecam.ps have two camera sensors, and
> > > > > thus two starting points from a data flow point of view.
> > > > 
> > > > I think we need to stop thinking of a graph linked in terms of data
> > > > flow - that's really not useful.
> > > > 
> > > > Consider a display subsystem.  The CRTC is the primary interface for
> > > > the CPU - this is the "most interesting" interface, it's the interface
> > > > which provides access to the picture to be displayed for the CPU. 
> > > > Other interfaces are secondary to that purpose - reading the I2C DDC
> > > > bus for the display information is all secondary to the primary
> > > > purpose of displaying a picture.
> > > > 
> > > > For a capture subsystem, the primary interface for the CPU is the
> > > > frame grabber (whether it be an already encoded frame or not.)  The
> > > > sensor devices are all secondary to that.
> > > > 
> > > > So, the primary software interface in each case is where the data for
> > > > the primary purpose is transferred.  This is the point at which these
> > > > graphs should commence since this is where we would normally start
> > > > enumeration of the secondary interfaces.
> > > > 
> > > > V4L2 even provides interfaces for this: you open the capture device,
> > > > which then allows you to enumerate the capture device's inputs, and
> > > > this in turn allows you to enumerate their properties.  You don't open
> > > > a particular sensor and work back up the tree.
> > > > 
> > > > I believe trying to do this according to the flow of data is just
> > > > wrong. You should always describe things from the primary device for
> > > > the CPU towards the peripheral devices and never the opposite
> > > > direction.
> > > 
> > > Agreed.
> > 
> > I don't agree, as what's the primary device is relative.
> > 
> > Actually, in the case of a media data flow, the CPU is generally not
> > the primary device.
> > 
> > Even on general purpose computers, if the full data flow is taken into
> > the account, the CPU is a mere device that will just be used to copy
> > data either to GPU and speakers or to disk, eventually doing format
> > conversions, when the hardware is cheap and don't provide format
> > converters.
> > 
> > On more complex devices, like the ones we want to solve with the
> > media controller, like an embedded hardware like a TV or a STB, the CPU
> > is just an ancillary component that could even hang without stopping
> > TV reception, as the data flow can be fully done inside the chipset.
> 
> We're talking about wiring up device drivers here, not data flow. Yes, I
> completely understand that data flow is often not even remotely
> cpu-centric. However, device drivers are, and the kernel needs to know
> the dependency graph for choosing what devices depend on other devices.

Then we might not be talking about the same thing. I'm talking about DT 
bindings to represent the topology of the device, not how drivers are wired 
together. The latter isn't a property of the hardware.

-- 
Regards,

Laurent Pinchart


^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-20 18:18                               ` Russell King - ARM Linux
  2014-03-20 18:50                                 ` Laurent Pinchart
@ 2014-03-20 19:27                                 ` Laurent Pinchart
  1 sibling, 0 replies; 74+ messages in thread
From: Laurent Pinchart @ 2014-03-20 19:27 UTC (permalink / raw)
  To: Russell King - ARM Linux
  Cc: Grant Likely, Tomi Valkeinen, Philipp Zabel, Sascha Hauer,
	Rob Herring, Mauro Carvalho Chehab, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

Hi Russell,

On Thursday 20 March 2014 18:18:20 Russell King - ARM Linux wrote:
> On Thu, Mar 20, 2014 at 07:16:29PM +0100, Laurent Pinchart wrote:
> > On Thursday 20 March 2014 17:54:31 Grant Likely wrote:
> > > On Wed, 12 Mar 2014 10:25:56 +0000, Russell King - ARM Linux wrote:
> > > > On Mon, Mar 10, 2014 at 02:52:53PM +0100, Laurent Pinchart wrote:
> > > > > In theory unidirectional links in DT are indeed enough. However,
> > > > > let's not forget the following.
> > > > > 
> > > > > - There's no such thing as single start points for graphs. Sure, in
> > > > > some simple cases the graph will have a single start point, but
> > > > > that's not a generic rule. For instance the camera graphs
> > > > > http://ideasonboard.org/media/omap3isp.ps and
> > > > > http://ideasonboard.org/media/eyecam.ps have two camera sensors, and
> > > > > thus two starting points from a data flow point of view.
> > > > 
> > > > I think we need to stop thinking of a graph linked in terms of data
> > > > flow - that's really not useful.
> > > > 
> > > > Consider a display subsystem.  The CRTC is the primary interface for
> > > > the CPU - this is the "most interesting" interface, it's the interface
> > > > which provides access to the picture to be displayed for the CPU. 
> > > > Other interfaces are secondary to that purpose - reading the I2C DDC
> > > > bus for the display information is all secondary to the primary
> > > > purpose of displaying a picture.
> > > > 
> > > > For a capture subsystem, the primary interface for the CPU is the
> > > > frame grabber (whether it be an already encoded frame or not.)  The
> > > > sensor devices are all secondary to that.
> > > > 
> > > > So, the primary software interface in each case is where the data for
> > > > the primary purpose is transferred.  This is the point at which these
> > > > graphs should commence since this is where we would normally start
> > > > enumeration of the secondary interfaces.
> > > > 
> > > > V4L2 even provides interfaces for this: you open the capture device,
> > > > which then allows you to enumerate the capture device's inputs, and
> > > > this in turn allows you to enumerate their properties.  You don't open
> > > > a particular sensor and work back up the tree.
> > > > 
> > > > I believe trying to do this according to the flow of data is just
> > > > wrong. You should always describe things from the primary device for
> > > > the CPU towards the peripheral devices and never the opposite
> > > > direction.
> > > 
> > > Agreed.
> > 
> > Absolutely not agreed. The whole concept of CPU towards peripherals only
> > makes sense for very simple devices and breaks as soon as the hardware
> > gets more complex. There's no such thing as CPU towards peripherals when
> > peripherals communicate directly.
> > 
> > Please consider use cases more complex than just a display controller and
> > an encoder, and you'll realize how messy not being able to parse the
> > whole graph at once will become. Let's try to improve things, not to make
> > sure to prevent support for future devices.
> 
> That's odd, I did.
> 
> Please draw some (ascii) diagrams of the situations you're saying this
> won't work for, because at the moment all I'm seeing is some vague
> hand-waving rather than anything factual that I can relate to.  Help
> us to actually _see_ the problem you have with this approach so we can
> understand it.

I've broken the diagram into pieces as it won't fit in 80 columns. Ports named 
by a capital letter enclosed in parentheses are connected together. 

Let's start with the on-board input paths, with an HDMI input connector and a 
camera sensor.

 ,-----------.      ,----------.
 | HDMI      |      | ADV7611  |
 | Connector | ---> | HDMI     | ---> (A)
 |           |      | Decoder  |
 `-----------'      `----------'

 ,-----------.
 | AR0331    |
 | Camera    | ---> (B)
 | Sensor    |
 `-----------'

We then get to the SoC.

      |
      |    ,-----------.      ,------------.
      |    | BT.656    |      | Test       |
(A) -[0]-> | Decoder   | ---> | Pattern    | ---> (C)
      |    |           |      | Generator  |
      |    `-----------'      `------------'
      |
      |    ,-----------.      ,------------.      ,------------.
      |    | CSI-2     |      |            |      | Test       |
(B) -[1]-> | Receiver  | ---> | Processing | ---> | Pattern    |  ---> (D)
      |    |           |  `   |            |      | Generator  |
      |    `-----------'  |   `------------'      `------------'
      |                   |   ,------------.
      |                   |   |            |      ,-----.
      | --SOC--           `-> | Statistics | ---> | DMA |
      |                       |            |      `-----'
      |                       `------------'

The blocks named Processing is made of

      ,------------.      ,------------.      ,-------------.
      | Faulty     |      | Bayer      |      | White       |
 ---> | Pixels     | ---> | to RGB     | ---> | Balance     | ---.
      | Correction |      | Conversion |      | Correction  |    |
      `------------'      `------------'      `-------------'    |
                                                                 |
 ,---------------------------------------------------------------'
 |
 |    ,------------.      ,------------.      ,-------------.
 |    | Gamma      |      | RGB to YUV |      | Image       |
 `--> | Correction | ---> | Conversion | ---> | Enhancement | --->
      |            |      |            |      |             |
      `------------'      `------------'      `-------------'

The two flows then go through a crossbar switch.

                                ,--------.
                                |        |      ,-----.
           ,-----------.     -> | Scaler | ---> | DMA |
           |           |   ,'   |        |      `-----'
           |     ,----[2] -     `--------'
 (C) ---> [0]---'      |
           |   `,-----[3] ----> (E)
 (D) ---> [1]--'       |
           |    `-----[4] -
           |           |   `--> (F)
           `-----------'

One of the video streams went to memory through DMA. The second one enters the 
compositor through a scaler, with two additional streams coming from memory.

                  ,------------.       ,--------------.
     ,-----.      | RGB to YUV |       |              |
     | DMA | ---> | Conversion | ---> [0]             |
     `-----'      |            |       |              |
                  `------------'       |              |
                  ,--------.           |              |
     ,-----.      |        |           |  Compositor  |
     | DMA | ---> | Scaler | -------> [1]            [3] ---> (G)
     `-----'      |        |           |              |
                  `--------'           |              |
                  ,--------.           |              |
                  |        |           |              |
     (E) -------> | Scaler | -------> [2]             |
                  |        |           |              |
                  `--------'           `--------------'

The remaining video capture stream and the video display stream exit the SoC 
and are output to HDMI connectors through HDMI encoders.

          |
          |    ,---------.      ,-----------.
          |    | ADV7511 |      | HDMI      |
    (G) -[2]-> | HDMI    | ---> | Output    |
          |    | Encoder |      | Connector |
          |    `---------'      `-----------'
          |
          |    ,---------.      ,-----------.
          |    | ADV7511 |      | HDMI      |
    (F) -[3]-> | HDMI    | ---> | Output    |
          |    | Encoder |      | Connector |
  --SOC-- |    `---------'      `-----------'
          |

Every block in this diagram (including the DMA engines) corresponds to a 
device instance and has a device node in the device tree.

Note how the same RGB to YUV conversion and scaler blocks are used in both 
capture and display pipelines, how the capture and display pipelines are 
merged at the hardware level, and how the same HDMI encoder is used for one 
video output that comes from the display device (G) and one video output that 
comes straight from the capture device (F).

I wish this was an example I made up instead of real hardware I need to 
support.

-- 
Regards,

Laurent Pinchart

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-20 18:52                                 ` Laurent Pinchart
@ 2014-03-20 23:12                                   ` Grant Likely
  2014-03-20 23:26                                     ` Laurent Pinchart
  0 siblings, 1 reply; 74+ messages in thread
From: Grant Likely @ 2014-03-20 23:12 UTC (permalink / raw)
  To: Laurent Pinchart
  Cc: Mauro Carvalho Chehab, Russell King - ARM Linux, Tomi Valkeinen,
	Philipp Zabel, Sascha Hauer, Rob Herring, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

On Thu, 20 Mar 2014 19:52:53 +0100, Laurent Pinchart <laurent.pinchart@ideasonboard.com> wrote:
> On Thursday 20 March 2014 18:48:16 Grant Likely wrote:
> > On Thu, 20 Mar 2014 15:38:04 -0300, Mauro Carvalho Chehab wrote:
> > > Em Thu, 20 Mar 2014 17:54:31 +0000 Grant Likely escreveu:
> > > > On Wed, 12 Mar 2014 10:25:56 +0000, Russell King - ARM Linux wrote:
> > > > > On Mon, Mar 10, 2014 at 02:52:53PM +0100, Laurent Pinchart wrote:
> > > > > > In theory unidirectional links in DT are indeed enough. However,
> > > > > > let's not forget the following.
> > > > > > 
> > > > > > - There's no such thing as single start points for graphs. Sure, in
> > > > > > some simple cases the graph will have a single start point, but
> > > > > > that's not a generic rule. For instance the camera graphs
> > > > > > http://ideasonboard.org/media/omap3isp.ps and
> > > > > > http://ideasonboard.org/media/eyecam.ps have two camera sensors, and
> > > > > > thus two starting points from a data flow point of view.
> > > > > 
> > > > > I think we need to stop thinking of a graph linked in terms of data
> > > > > flow - that's really not useful.
> > > > > 
> > > > > Consider a display subsystem.  The CRTC is the primary interface for
> > > > > the CPU - this is the "most interesting" interface, it's the interface
> > > > > which provides access to the picture to be displayed for the CPU. 
> > > > > Other interfaces are secondary to that purpose - reading the I2C DDC
> > > > > bus for the display information is all secondary to the primary
> > > > > purpose of displaying a picture.
> > > > > 
> > > > > For a capture subsystem, the primary interface for the CPU is the
> > > > > frame grabber (whether it be an already encoded frame or not.)  The
> > > > > sensor devices are all secondary to that.
> > > > > 
> > > > > So, the primary software interface in each case is where the data for
> > > > > the primary purpose is transferred.  This is the point at which these
> > > > > graphs should commence since this is where we would normally start
> > > > > enumeration of the secondary interfaces.
> > > > > 
> > > > > V4L2 even provides interfaces for this: you open the capture device,
> > > > > which then allows you to enumerate the capture device's inputs, and
> > > > > this in turn allows you to enumerate their properties.  You don't open
> > > > > a particular sensor and work back up the tree.
> > > > > 
> > > > > I believe trying to do this according to the flow of data is just
> > > > > wrong. You should always describe things from the primary device for
> > > > > the CPU towards the peripheral devices and never the opposite
> > > > > direction.
> > > > 
> > > > Agreed.
> > > 
> > > I don't agree, as which device counts as the primary one is relative.
> > > 
> > > Actually, in the case of a media data flow, the CPU is generally not
> > > the primary device.
> > > 
> > > Even on general purpose computers, if the full data flow is taken into
> > > the account, the CPU is a mere device that will just be used to copy
> > > data either to GPU and speakers or to disk, eventually doing format
> > > conversions, when the hardware is cheap and doesn't provide format
> > > converters.
> > > 
> > > On more complex devices, like the ones we want to solve with the
> > > media controller, like an embedded hardware like a TV or a STB, the CPU
> > > is just an ancillary component that could even hang without stopping
> > > TV reception, as the data flow can be fully done inside the chipset.
> > 
> > We're talking about wiring up device drivers here, not data flow. Yes, I
> > completely understand that data flow is often not even remotely
> > cpu-centric. However, device drivers are, and the kernel needs to know
> > the dependency graph for choosing what devices depend on other devices.
> 
> Then we might not be talking about the same thing. I'm talking about DT 
> bindings to represent the topology of the device, not how drivers are wired 
> together.

Possibly. I'm certainly confused now. You brought up the component
helpers in drivers/base/component.c, so I thought working out
dependencies is part of the purpose of this binding. Everything I've
heard so far has given me the impression that the graph binding is tied
up with knowing when all of the devices exist.

How device drivers get connected together may not strictly be a property
of hardware, but it absolutely is informed by hardware topology.

g.


^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-20 23:12                                   ` Grant Likely
@ 2014-03-20 23:26                                     ` Laurent Pinchart
  2014-03-21  8:15                                       ` Grant Likely
  0 siblings, 1 reply; 74+ messages in thread
From: Laurent Pinchart @ 2014-03-20 23:26 UTC (permalink / raw)
  To: Grant Likely
  Cc: Mauro Carvalho Chehab, Russell King - ARM Linux, Tomi Valkeinen,
	Philipp Zabel, Sascha Hauer, Rob Herring, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

Hi Grant,

On Thursday 20 March 2014 23:12:50 Grant Likely wrote:
> On Thu, 20 Mar 2014 19:52:53 +0100, Laurent Pinchart wrote:
> > On Thursday 20 March 2014 18:48:16 Grant Likely wrote:
> > > On Thu, 20 Mar 2014 15:38:04 -0300, Mauro Carvalho Chehab wrote:
> > > > Em Thu, 20 Mar 2014 17:54:31 +0000 Grant Likely escreveu:
> > > > > On Wed, 12 Mar 2014 10:25:56 +0000, Russell King - ARM Linux wrote:

[snip]

> > > > > > I believe trying to do this according to the flow of data is just
> > > > > > wrong. You should always describe things from the primary device
> > > > > > for the CPU towards the peripheral devices and never the opposite
> > > > > > direction.
> > > > > 
> > > > > Agreed.
> > > > 
> > > > I don't agree, as which device counts as the primary one is relative.
> > > > 
> > > > Actually, in the case of a media data flow, the CPU is generally not
> > > > the primary device.
> > > > 
> > > > Even on general purpose computers, if the full data flow is taken into
> > > > the account, the CPU is a mere device that will just be used to copy
> > > > data either to GPU and speakers or to disk, eventually doing format
> > > > conversions, when the hardware is cheap and doesn't provide format
> > > > converters.
> > > > 
> > > > On more complex devices, like the ones we want to solve with the
> > > > media controller, like an embedded hardware like a TV or a STB, the
> > > > CPU is just an ancillary component that could even hang without
> > > > stopping TV reception, as the data flow can be fully done inside the
> > > > chipset.
> > > 
> > > We're talking about wiring up device drivers here, not data flow. Yes, I
> > > completely understand that data flow is often not even remotely
> > > cpu-centric. However, device drivers are, and the kernel needs to know
> > > the dependency graph for choosing what devices depend on other devices.
> > 
> > Then we might not be talking about the same thing. I'm talking about DT
> > bindings to represent the topology of the device, not how drivers are
> > wired together.
> 
> Possibly. I'm certainly confused now. You brought up the component helpers
> in drivers/base/component.c, so I thought working out dependencies is part
> of the purpose of this binding. Everything I've heard so far has given me
> the impression that the graph binding is tied up with knowing when all of
> the devices exist.

The two are related, you're of course right about that.

We're not really moving forward here. Part of our disagreement comes in my 
opinion from having different requirements and different views of the problem, 
caused by experiences with different kind of devices. This is much easier to 
solve by sitting around the same table than discussing on mailing lists. I 
would propose a meeting at the ELC but that's still a bit far away and would 
delay progress by more than one month, which is probably not acceptable.

I can reply to the e-mail where I've drawn one use case I have to deal with to 
detail my needs if that can help.

Alternatively the UK isn't that far away and I could jump in a train if you 
can provide tea for the discussion :-)

> How device drivers get connected together may not strictly be a property
> of hardware, but it absolutely is informed by hardware topology.

-- 
Regards,

Laurent Pinchart


^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-20 23:26                                     ` Laurent Pinchart
@ 2014-03-21  8:15                                       ` Grant Likely
  2014-03-21 12:44                                         ` Laurent Pinchart
  0 siblings, 1 reply; 74+ messages in thread
From: Grant Likely @ 2014-03-21  8:15 UTC (permalink / raw)
  To: Laurent Pinchart
  Cc: Mauro Carvalho Chehab, Russell King - ARM Linux, Tomi Valkeinen,
	Philipp Zabel, Sascha Hauer, Rob Herring, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

On Fri, 21 Mar 2014 00:26:12 +0100, Laurent Pinchart <laurent.pinchart@ideasonboard.com> wrote:
> On Thursday 20 March 2014 23:12:50 Grant Likely wrote:
> > On Thu, 20 Mar 2014 19:52:53 +0100, Laurent Pinchart wrote:
> > > Then we might not be talking about the same thing. I'm talking about DT
> > > bindings to represent the topology of the device, not how drivers are
> > > wired together.
> > 
> > Possibly. I'm certainly confused now. You brought up the component helpers
> > in drivers/base/component.c, so I thought working out dependencies is part
> > of the purpose of this binding. Everything I've heard so far has given me
> > the impression that the graph binding is tied up with knowing when all of
> > the devices exist.
> 
> The two are related, you're of course right about that.
> 
> We're not really moving forward here. Part of our disagreement comes in my 
> opinion from having different requirements and different views of the problem, 
> caused by experiences with different kind of devices. This is much easier to 
> solve by sitting around the same table than discussing on mailing lists. I 
> would propose a meeting at the ELC but that's still a bit far away and would 
> delay progress by more than one month, which is probably not acceptable.
> 
> I can reply to the e-mail where I've drawn one use case I have to deal with to 
> detail my needs if that can help.
> 
> Alternatively the UK isn't that far away and I could jump in a train if you 
> can provide tea for the discussion :-)

I'm game for that, but it is a long train ride. I'm up in Aberdeen which
is 8 hours from London by train. Also, I'm travelling next week to
California (Collaboration summit), so it will have to be in 2 weeks
time.

Why don't we instead try a Google Hangout or a phone call today.
Anywhere between 11:30 and 14:00 GMT would work for me. I'd offer to
provide the tea, but I haven't quite perfected transporter technology
yet.

g.


^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-20 18:49                                 ` Laurent Pinchart
@ 2014-03-21  8:20                                   ` Tomi Valkeinen
  0 siblings, 0 replies; 74+ messages in thread
From: Tomi Valkeinen @ 2014-03-21  8:20 UTC (permalink / raw)
  To: Laurent Pinchart, Russell King - ARM Linux
  Cc: Mauro Carvalho Chehab, Grant Likely, Philipp Zabel, Sascha Hauer,
	Rob Herring, Rob Herring, Sylwester Nawrocki, Kyungmin Park,
	linux-kernel, linux-media, devicetree, Philipp Zabel

[-- Attachment #1: Type: text/plain, Size: 1367 bytes --]

On 20/03/14 20:49, Laurent Pinchart wrote:

>> The CPU is the _controlling_ component - it's the component that has to
>> configure the peripherals so they all talk to each other in the right
>> way.  Therefore, the view of it needs to be CPU centric.
>>
>> If we were providing a DT description for consumption by some other
>> device in the system, then the view should be as seen from that device
>> instead.
>>
>> Think about this.  Would you describe a system starting at, say, the
>> system keyboard, and branching all the way through just because that's
>> how you interact with it, or would you describe it from the CPU's point
>> of view because that's what has to be in control of the system.
> 
> DT has been designed to represent a control-based view of the system. It does 
> so pretty well using its tree-based model. However, it doesn't have a native 
> way to represent a flow-based graph, hence the OF graph solution we're 
> discussing. The whole point of this proposal is to represent the topology of 
> the media device, not how each entity is controlled.

I agree with Laurent here. I think this is an important point to keep in
mind. We already describe the control graph in the DT via the
parent-child relationships. There's no point in describing the same
thing again with the graph links being discussed.

 Tomi



[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 901 bytes --]

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-20 17:23                                     ` Grant Likely
@ 2014-03-21 10:44                                       ` Andrzej Hajda
  2014-03-21 11:47                                         ` Grant Likely
  0 siblings, 1 reply; 74+ messages in thread
From: Andrzej Hajda @ 2014-03-21 10:44 UTC (permalink / raw)
  To: Grant Likely, Laurent Pinchart, Tomi Valkeinen
  Cc: Philipp Zabel, Sascha Hauer, Rob Herring,
	Russell King - ARM Linux, Mauro Carvalho Chehab, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

On 03/20/2014 06:23 PM, Grant Likely wrote:
> On Tue, 11 Mar 2014 14:16:37 +0100, Laurent Pinchart <laurent.pinchart@ideasonboard.com> wrote:
>> On Tuesday 11 March 2014 14:59:20 Tomi Valkeinen wrote:
>>> So depending on the use case, the endpoints would point to opposite
>>> direction from the encoder's point of view.
>>>
>>> And if I gathered Grant's opinion correctly (correct me if I'm wrong),
>>> he thinks things should be explicit, i.e. the bindings for, say, an
>>> encoder should state that the encoder's output endpoint _must_ contain a
>>> remote-endpoint property, whereas the encoder's input endpoint _must
>>> not_ contain a remote-endpoint property.
>>
>> Actually my understanding was that DT links would have the same direction as the 
>> data flow. There would be no ambiguity in that case as the direction of the 
>> data flow is known. What happens for bidirectional data flows still need to be 
>> discussed though. And if we want to use the of-graph bindings to describe 
>> graphs without a data flow, a decision will need to be taken there too.
> 
> On further thinking, I would say linkage direction should be in the
> direction that would be considered the dependency order... I'm going to
> soften my position though. I think the generic pattern should still
> recommend unidirection links in direction of device dependency, but

I am not sure what you mean by 'device dependency' but I am sure it will
not be difficult to present problematic cases, maybe circular
dependencies, two-way dependencies, etc.

The only problem of unidirectional links from programming point of view
is that destination port/interface should be exposed using some
framework and driver of source link should grab it using the same
framework, using port/endpoint node for identification. In case of
bi-directional links the same process should happen but DT does not
dictate who should expose and who grabs.

So from programming point of view it should be easy to handle
unidirectional links regardless of the direction. So I guess the best
is to use data flow direction as it seems to be the most natural.


> I'm okay with allowing the bidirection option if the helper functions
> are modified to validate the target endpoint. I think it needs to test
> for the following:
> - Make sure the endpoint either:
>   - does not have a backlink, or
>   - the backlink points back to the origin node
> - If the target is an endpoint node, then make sure the parent doesn't
>   have a link of any kind
> - If the target is a port node, make sure it doesn't have any endpoint
>   children nodes at all.

I think link validation can be done at dts compile time.

Regards
Andrzej

> 
> g.
> 
> 


^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-21 10:44                                       ` Andrzej Hajda
@ 2014-03-21 11:47                                         ` Grant Likely
  2014-03-21 12:16                                           ` Tomi Valkeinen
  0 siblings, 1 reply; 74+ messages in thread
From: Grant Likely @ 2014-03-21 11:47 UTC (permalink / raw)
  To: Andrzej Hajda, Laurent Pinchart, Tomi Valkeinen
  Cc: Philipp Zabel, Sascha Hauer, Rob Herring,
	Russell King - ARM Linux, Mauro Carvalho Chehab, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

On Fri, 21 Mar 2014 11:44:24 +0100, Andrzej Hajda <a.hajda@samsung.com> wrote:
> On 03/20/2014 06:23 PM, Grant Likely wrote:
> > On Tue, 11 Mar 2014 14:16:37 +0100, Laurent Pinchart <laurent.pinchart@ideasonboard.com> wrote:
> >> On Tuesday 11 March 2014 14:59:20 Tomi Valkeinen wrote:
> >>> So depending on the use case, the endpoints would point to opposite
> >>> direction from the encoder's point of view.
> >>>
> >>> And if I gathered Grant's opinion correctly (correct me if I'm wrong),
> >>> he thinks things should be explicit, i.e. the bindings for, say, an
> >>> encoder should state that the encoder's output endpoint _must_ contain a
> >>> remote-endpoint property, whereas the encoder's input endpoint _must
> >>> not_ contain a remote-endpoint property.
> >>
> >> Actually my understanding was that DT links would have the same direction as the 
> >> data flow. There would be no ambiguity in that case as the direction of the 
> >> data flow is known. What happens for bidirectional data flows still need to be 
> >> discussed though. And if we want to use the of-graph bindings to describe 
> >> graphs without a data flow, a decision will need to be taken there too.
> > 
> > On further thinking, I would say linkage direction should be in the
> > direction that would be considered the dependency order... I'm going to
> > soften my position though. I think the generic pattern should still
> > recommend unidirection links in direction of device dependency, but
> 
> I am not sure what you mean by 'device dependency' but I am sure it will
> not be difficult to present problematic cases, maybe circular
> dependencies, two-way dependencies, etc.

My understanding has been that the link data would be used to determine
when the controller driver can be brought up and active. Certainly both
sides of a link need to be 'live' before the link can be used. The
kernel must have a way to resolve the question of "who starts first?",
whether it be the situation of the consumer starts before the producer,
or the situation of two components need to start before the controller
driver can start. That is the dependency chain I'm talking about.

> The only problem of unidirectional links from programming point of view
> is that destination port/interface should be exposed using some
> framework and driver of source link should grab it using the same
> framework, using port/endpoint node for identification. In case of
> bi-directional links the same process should happen but DT does not
> dictate who should expose and who grabs.
> 
> So from programming point of view it should be easy to handle
> unidirectional links regardless of the direction. So I guess the best
> is to use data flow direction as it seems to be the most natural.

right.

> 
> 
> > I'm okay with allowing the bidirection option if the helper functions
> > are modified to validate the target endpoint. I think it needs to test
> > for the following:
> > - Make sure the endpoint either:
> >   - does not have a backlink, or
> >   - the backlink points back to the origin node
> > - If the target is an endpoint node, then make sure the parent doesn't
> >   have a link of any kind
> > - If the target is a port node, make sure it doesn't have any endpoint
> >   children nodes at all.
> 
> I think link validation can be done at dts compile time.

I'm firm on the opinion that the checking must also happen at runtime.
The biggest part of my objection has been how easy it would be to get a
linkage out of sync, and dtc is not necessarily the last tool to touch
the dtb before the kernel gets booted. I want the kernel to flat out
reject any linkage that is improperly formed.

g.

> 
> Regards
> Andrzej
> 
> > 
> > g.
> > 
> > 
> 


^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-20 17:01                             ` Grant Likely
@ 2014-03-21 12:10                               ` Tomi Valkeinen
  0 siblings, 0 replies; 74+ messages in thread
From: Tomi Valkeinen @ 2014-03-21 12:10 UTC (permalink / raw)
  To: Grant Likely, Laurent Pinchart
  Cc: Philipp Zabel, Sascha Hauer, Rob Herring,
	Russell King - ARM Linux, Mauro Carvalho Chehab, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

[-- Attachment #1: Type: text/plain, Size: 6638 bytes --]

On 20/03/14 19:01, Grant Likely wrote:

> I think depending on a generic graph walk is where I have the biggest
> concern about the design. I don't think it is a good idea for the master
> device to try a generic walk over the graph looking for other devices
> that might be components because it cannot know whether or not further
> links are relevant, or even if other endpoint nodes in the target
> hierarchy actually conform to the graph binding in the same way.
> 
> Consider the example of a system with two video controllers (one
> embedded and one discrete), a display mux, and a panel. The display
> controller depends on the mux, and the mux depends on the panel. It
> would be entirely reasonable to start up the display subsystem with the
> embedded controller without the discrete adapter being available, but
> the way the current graph pattern is proposed there is no dependency
> information between the devices.

For some reason I don't understand this master and dependency way of
thinking. I just can't twist my brain to it, please help me =).

With the setup you describe above, why does the video controller depend
on the mux, and why it is the master? Why the DMA engine does not depend
on the embedded video controller, and why is the DMA engine not the master?

With the setup above, what are we really interested in? It's the
display, right? We want to have the display working, with resolution and
video timings that work for the display. The mux and the display
controllers are just necessary evils to make the display work. The
display depends on the mux to provide it a video stream. The mux depends
on the two video controllers to provide the mux two video streams. The
video controllers depend (possibly) on SoC's DMA, or PCI bus to provide
them video data.

And if we consider the same setup as above, but the mux has two
exclusive outputs, it again works fine with the dependency I described.
If you want to enable panel 1, you'll depend on mux and video
controllers, but not on panel 2. So you can leave the panel 2 driver out
and things still work ok.

But note that I don't think this dependency has strict relation to the
DT graph representation, see below.

> I really do think the dependency direction needs to be explicit so that
> a driver knows whether or not a given link is relevant for it to start,

I think that comes implicitly from the driver, it doesn't need to be
described in the DT. If a device has an input port, and the device is
configured to use that input port, the device depends on whatever is on
the other side of the input port. The device driver must know that.

Somehow a device driver needs to find out whether the drivers behind its input
ports are ready. It could use the links in DT directly, if they are
supplied in that direction, or it could rely on someone else parsing the
DT, and exposing the information via some API.

I think it's simpler for the SW to follow the links directly, but that
would mean having the links in the opposite direction than the data
flow, which feels a bit odd to me.

> and there must be a driver that knows how to interpret the target
> node. A device that is a master needs to know which links are
> dependencies, and which are not.

Well, again, I may not quite understand what the master means here. But
for me, the display is the master of the pipeline. The driver for the
display is the one that has to decide what kind of video signal is
acceptable, how the signal must be enabled, and disabled, etc.

When someone (the master's master =) tells the panel to enable itself,
the panel needs to use an API to configure and enable its input ports.
The devices on the other end of the input ports then configure and
enable their inputs. And so on.

Anyway, I do think this is more of a SW organization topic than how we
should describe the hardware. As I see it, the parent-child
relationships in the DT describe the control paths and the graph
describes the data paths. Having the data paths described in the
direction of data flow (or double-linked in case of bi-dir link) sounds
logical to me, but I think the inverse could work fine too.

But using some kind of CPU centric direction doesn't sound very usable,
it makes no sense for cases with peripheral-to-peripheral links, and the
control bus already describes the CPU centric direction in cases where
there exists a clear CPU-to-peripheral direction.

> I'm not even talking about the bi-directional link issue. This issue
> remains regardless of whether or not bidirectional links are used.
> 
> I would solve it in one of the following two ways:
> 
> 1) Make masters only look at one level of dependency. Make the component
> driver responsible for checking /its/ dependencies. If its dependencies
> aren't yet met, then don't register the component as ready. We could
> probably make the drivers/base/component code allow components to pull
> in additional components as required. This approach shouldn't even
> require a change to the binding and eliminates any need for walking the
> full graph.
> 
> 2) Give the master node an explicit list of all devices it depends on. I
> don't like this solution as much, but it does the job.

I'd suggest the first one (without the "masters" part, which I don't
quite follow, and presuming the component's inputs are its
dependencies...). Each device driver should only check its dependencies.
If this happens by using the DT data, then the easiest way is to have
the graph links go opposite to data-flow.

But I think it should work fine with data-flow direction also. Each
driver would register itself, including its outputs. After that, the
components behind the outputs can find their inputs via an API. However,
that prevents us from using EPROBE_DEFER to handle missing inputs.

However, I do think it could be useful in some cases to observe the
whole pipeline, even the components that have not been loaded. It could
be used to, say, decide which modules to load when a particular output
needs to be enabled. I'm not sure how important such feature is, though.

And I don't really see why we should forbid that. If the graph bindings
are standard, we can reliably follow the links to form an image of the
whole graph. Of course, the component doing that should not look at the
properties of the ports or endpoints, as those are private to the node
in question, but it could follow the graph and find out if a node has a
driver available, and in those cases maybe get more information via an API.

 Tomi



[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 901 bytes --]

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-21 11:47                                         ` Grant Likely
@ 2014-03-21 12:16                                           ` Tomi Valkeinen
  2014-03-21 14:33                                             ` Grant Likely
  0 siblings, 1 reply; 74+ messages in thread
From: Tomi Valkeinen @ 2014-03-21 12:16 UTC (permalink / raw)
  To: Grant Likely, Andrzej Hajda, Laurent Pinchart
  Cc: Philipp Zabel, Sascha Hauer, Rob Herring,
	Russell King - ARM Linux, Mauro Carvalho Chehab, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

[-- Attachment #1: Type: text/plain, Size: 798 bytes --]

On 21/03/14 13:47, Grant Likely wrote:

> I'm firm on the opinion that the checking must also happen at runtime.
> The biggest part of my objection has been how easy it would be to get a
> linkage out of sync, and dtc is not necessarily the last tool to touch
> the dtb before the kernel gets booted. I want the kernel to flat out
> reject any linkage that is improperly formed.

Isn't it trivial to verify it with the current v4l2 bindings? An
endpoint must have a 'remote-endpoint' property, and the endpoint on the
other end must have a similar property, pointing to the first endpoint.
Anything else is an error.

I agree that it's easier to write bad links in the dts with
double-linking than with single-linking, but it's still trivial to
verify it in the kernel.

 Tomi



[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 901 bytes --]

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-21  8:15                                       ` Grant Likely
@ 2014-03-21 12:44                                         ` Laurent Pinchart
  2014-03-21 13:26                                           ` Grant Likely
  0 siblings, 1 reply; 74+ messages in thread
From: Laurent Pinchart @ 2014-03-21 12:44 UTC (permalink / raw)
  To: Grant Likely
  Cc: Mauro Carvalho Chehab, Russell King - ARM Linux, Tomi Valkeinen,
	Philipp Zabel, Sascha Hauer, Rob Herring, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

Hi Grant,

On Friday 21 March 2014 08:15:34 Grant Likely wrote:
> On Fri, 21 Mar 2014 00:26:12 +0100, Laurent Pinchart wrote:
> > On Thursday 20 March 2014 23:12:50 Grant Likely wrote:
> > > On Thu, 20 Mar 2014 19:52:53 +0100, Laurent Pinchart wrote:
> > > > Then we might not be talking about the same thing. I'm talking about
> > > > DT bindings to represent the topology of the device, not how drivers
> > > > are wired together.
> > > 
> > > Possibly. I'm certainly confused now. You brought up the component
> > > helpers in drivers/base/component.c, so I thought working out
> > > dependencies is part of the purpose of this binding. Everything I've
> > > heard so far has given me the impression that the graph binding is tied
> > > up with knowing when all of the devices exist.
> > 
> > The two are related, you're of course right about that.
> > 
> > We're not really moving forward here. Part of our disagreement comes in my
> > opinion from having different requirements and different views of the
> > problem, caused by experiences with different kind of devices. This is
> > much easier to solve by sitting around the same table than discussing on
> > mailing lists. I would propose a meeting at the ELC but that's still a
> > bit far away and would delay progress by more than one month, which is
> > probably not acceptable.
> > 
> > I can reply to the e-mail where I've drawn one use case I have to deal
> > with to detail my needs if that can help.
> > 
> > Alternatively the UK isn't that far away and I could jump in a train if
> > you can provide tea for the discussion :-)
> 
> I'm game for that, but it is a long train ride. I'm up in Aberdeen which
> is 8 hours from London by train. Also, I'm travelling next week to
> California (Collaboration summit), so it will have to be in 2 weeks
> time.
> 
> Why don't we instead try a Google Hangout or a phone call today.
> Anywhere between 11:30 and 14:00 GMT would work for me. I'd offer to
> provide the tea, but I haven't quite perfected transporter technology
> yet.

That would work for me, but not today I'm afraid. Will you already be in 
California on Monday?

-- 
Regards,

Laurent Pinchart


^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-21 12:44                                         ` Laurent Pinchart
@ 2014-03-21 13:26                                           ` Grant Likely
  0 siblings, 0 replies; 74+ messages in thread
From: Grant Likely @ 2014-03-21 13:26 UTC (permalink / raw)
  To: Laurent Pinchart
  Cc: Mauro Carvalho Chehab, Russell King - ARM Linux, Tomi Valkeinen,
	Philipp Zabel, Sascha Hauer, Rob Herring, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

On Fri, Mar 21, 2014 at 12:44 PM, Laurent Pinchart
<laurent.pinchart@ideasonboard.com> wrote:
> Hi Grant,
>
> On Friday 21 March 2014 08:15:34 Grant Likely wrote:
>> Why don't we instead try a Google Hangout or a phone call today.
>> Anywhere between 11:30 and 14:00 GMT would work for me. I'd offer to
>> provide the tea, but I haven't quite perfected transporter technology
>> yet.
>
> That would work for me, but not today I'm afraid. Will you already be in
> California on Monday?

Yes. Let's try the week after.

g.

^ permalink raw reply	[flat|nested] 74+ messages in thread

* Re: [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of
  2014-03-21 12:16                                           ` Tomi Valkeinen
@ 2014-03-21 14:33                                             ` Grant Likely
  0 siblings, 0 replies; 74+ messages in thread
From: Grant Likely @ 2014-03-21 14:33 UTC (permalink / raw)
  To: Tomi Valkeinen, Andrzej Hajda, Laurent Pinchart
  Cc: Philipp Zabel, Sascha Hauer, Rob Herring,
	Russell King - ARM Linux, Mauro Carvalho Chehab, Rob Herring,
	Sylwester Nawrocki, Kyungmin Park, linux-kernel, linux-media,
	devicetree, Philipp Zabel

On Fri, 21 Mar 2014 14:16:20 +0200, Tomi Valkeinen <tomi.valkeinen@ti.com> wrote:
> On 21/03/14 13:47, Grant Likely wrote:
> 
> > I'm firm on the opinion that the checking must also happen at runtime.
> > The biggest part of my objection has been how easy it would be to get a
> > linkage out of sync, and dtc is not necessarily the last tool to touch
> > the dtb before the kernel gets booted. I want the kernel to flat out
> > reject any linkage that is improperly formed.
> 
> Isn't it trivial to verify it with the current v4l2 bindings? An
> endpoint must have a 'remote-endpoint' property, and the endpoint on the
> other end must have a similar property, pointing to the first endpoint.
> Anything else is an error.
> 
> I agree that it's easier to write bad links in the dts with
> double-linking than with single-linking, but it's still trivial to
> verify it in the kernel.

Right, which is exactly what I'm asking for.

g.


^ permalink raw reply	[flat|nested] 74+ messages in thread

end of thread, other threads:[~2014-03-21 14:33 UTC | newest]

Thread overview: 74+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2014-02-11 11:45 [RFC PATCH] [media]: of: move graph helpers from drivers/media/v4l2-core to drivers/of Philipp Zabel
     [not found] ` <20140320153804.35d5b835@samsung.com>
     [not found] ` <4339286.FzhQ2m6hoA@avalon>
     [not found] ` <CAL_Jsq+U9zU1i+STLHMBjY5BeEP6djYnJVE5X1ix-D2q_zWztQ@mail.gmail.com>
     [not found]   ` <20140217181451.7EB7FC4044D@trevor.secretlab.ca>
     [not found] ` <20140310145815.17595C405FA@trevor.secretlab.ca>
2014-02-11 13:56 ` Rob Herring
2014-02-11 14:52   ` Russell King - ARM Linux
2014-02-11 15:23     ` Laurent Pinchart
2014-02-11 16:36       ` Philipp Zabel
2014-02-11 17:22         ` Sylwester Nawrocki
2014-02-11 17:41           ` Mauro Carvalho Chehab
2014-02-11 21:00             ` Guennadi Liakhovetski
2014-02-11 21:46           ` Philipp Zabel
2014-02-11 17:24         ` Laurent Pinchart
2014-02-11 16:30     ` Rob Herring
2014-02-11 15:27   ` Philipp Zabel
2014-02-17 18:14   ` Grant Likely
2014-02-18  7:06     ` Sascha Hauer
2014-02-18 16:26       ` Grant Likely
     [not found]         ` <1393263389.3091.82.camel@pizza.hi.pengutronix.de>
     [not found]           ` <20140226110114.CF2C7C40A89@trevor.secretlab.ca>
2014-02-24 17:36         ` Philipp Zabel
2014-02-26 11:01           ` Grant Likely
2014-02-26 14:48             ` Philipp Zabel
2014-02-27  8:36               ` Tomi Valkeinen
2014-03-07 17:06                 ` Grant Likely
2014-03-08 10:57                   ` Tomi Valkeinen
2014-03-07 17:05               ` Grant Likely
2014-03-08 10:33                 ` Tomi Valkeinen
2014-03-08 11:41                   ` Grant Likely
2014-03-08 23:25                     ` Sylwester Nawrocki
2014-03-10 10:18                     ` Tomi Valkeinen
2014-03-10 13:52                       ` Laurent Pinchart
2014-03-10 14:10                         ` Tomi Valkeinen
2014-03-10 14:58                         ` Grant Likely
2014-03-10 15:15                           ` Laurent Pinchart
2014-03-10 15:40                             ` Philipp Zabel
2014-03-11 11:43                               ` Laurent Pinchart
2014-03-11 12:59                                 ` Tomi Valkeinen
     [not found]                                   ` <1883687.VdfitvQEN3@samsung.com>
     [not found]                                     ` <avalon@samsung.com>
2014-03-11 13:16                                   ` Laurent Pinchart
2014-03-11 13:27                                     ` Tomi Valkeinen
2014-03-11 13:44                                       ` Philipp Zabel
2014-03-20 17:23                                     ` Grant Likely
2014-03-21 10:44                                       ` Andrzej Hajda
2014-03-21 11:47                                         ` Grant Likely
2014-03-21 12:16                                           ` Tomi Valkeinen
2014-03-21 14:33                                             ` Grant Likely
2014-03-20 17:09                                 ` Grant Likely
2014-03-11 13:04                             ` Andrzej Hajda
2014-03-20 17:33                               ` Grant Likely
2014-03-20 17:01                             ` Grant Likely
2014-03-21 12:10                               ` Tomi Valkeinen
2014-03-11 15:07                           ` Philipp Zabel
2014-03-11 15:21                             ` Laurent Pinchart
2014-03-11 18:52                               ` Philipp Zabel
2014-03-20 17:53                               ` Grant Likely
2014-03-20 17:36                             ` Grant Likely
2014-03-12 10:25                         ` Russell King - ARM Linux
2014-03-12 10:47                           ` Tomi Valkeinen
2014-03-12 11:21                             ` Laurent Pinchart
2014-03-20 17:54                           ` Grant Likely
2014-03-20 18:16                             ` Laurent Pinchart
2014-03-20 18:18                               ` Russell King - ARM Linux
2014-03-20 18:50                                 ` Laurent Pinchart
2014-03-20 19:27                                 ` Laurent Pinchart
2014-03-20 18:38                             ` Mauro Carvalho Chehab
2014-03-20 18:43                               ` Russell King - ARM Linux
2014-03-20 18:49                                 ` Laurent Pinchart
2014-03-21  8:20                                   ` Tomi Valkeinen
2014-03-20 18:48                               ` Grant Likely
2014-03-20 18:52                                 ` Laurent Pinchart
2014-03-20 23:12                                   ` Grant Likely
2014-03-20 23:26                                     ` Laurent Pinchart
2014-03-21  8:15                                       ` Grant Likely
2014-03-21 12:44                                         ` Laurent Pinchart
2014-03-21 13:26                                           ` Grant Likely
2014-03-10 14:41                       ` Grant Likely
2014-03-08 11:54                 ` Russell King - ARM Linux
2014-02-18 10:43     ` Sylwester Nawrocki
2014-02-18 13:41     ` Philipp Zabel

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).