linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [RFC PATCH v2 0/7] media: introduce object detection(OD) driver
@ 2011-12-14 14:00 Ming Lei
  2011-12-14 14:00 ` [RFC PATCH v2 1/8] omap4: introduce fdif(face detect module) hwmod Ming Lei
                   ` (7 more replies)
  0 siblings, 8 replies; 24+ messages in thread
From: Ming Lei @ 2011-12-14 14:00 UTC (permalink / raw)
  To: Mauro Carvalho Chehab, Tony Lindgren
  Cc: Sylwester Nawrocki, Alan Cox, linux-omap, linux-arm-kernel,
	linux-kernel, linux-media

Hi,

These v2 patches (against the -next tree) introduce a v4l2 based object
detection(OD) device driver, and enable the face detection hardware[1]
on the omap4 SoC. The idea of implementing it on v4l2 is from
Alan Cox, Sylwester and Greg KH.

For verification purposes, I wrote one user space utility[2] to
test the module and driver; its basic functions follow:

	- detect faces in an input grayscale picture (PGM raw, 320 by 240)
	- detect faces in an input y8 format video stream
	- plot a rectangle to mark the detected faces, and save the result
	as another picture or video stream of the same format

The performance of the module looks reasonable; see some detection
results at the links[3][4].

Face detection can be used to implement some interesting applications
(camera, face unlock, baby monitor, ...).

v2<-v1:
	- extend face detection API to object detection API
	- extend face detection generic module to object detection module
	- introduce subdevice and media entity to object detection module
	- some minor fixes

TODO:
	- implement OD setting interfaces with v4l2 controls or
	ext controls

 arch/arm/mach-omap2/devices.c              |   33 +
 arch/arm/mach-omap2/omap_hwmod_44xx_data.c |   81 +++
 drivers/media/video/Kconfig                |    6 +
 drivers/media/video/Makefile               |    2 +
 drivers/media/video/odif/Kconfig           |   13 +
 drivers/media/video/odif/Makefile          |    2 +
 drivers/media/video/odif/fdif_omap4.c      |  685 +++++++++++++++++++++
 drivers/media/video/odif/odif.c            |  890 ++++++++++++++++++++++++++++
 drivers/media/video/odif/odif.h            |  157 +++++
 drivers/media/video/v4l2-ioctl.c           |   72 +++-
 drivers/media/video/videobuf2-dma-contig.c |    1 +
 drivers/media/video/videobuf2-memops.c     |    1 -
 drivers/media/video/videobuf2-page.c       |  117 ++++
 include/linux/videodev2.h                  |  124 ++++
 include/media/v4l2-ioctl.h                 |    6 +
 include/media/videobuf2-page.h             |   20 +
 16 files changed, 2207 insertions(+), 3 deletions(-)



thanks,
--
Ming Lei

[1], Ch9 of OMAP4 Technical Reference Manual
[2], http://kernel.ubuntu.com/git?p=ming/fdif.git;a=shortlog;h=refs/heads/v4l2-fdif
[3], http://kernel.ubuntu.com/~ming/dev/fdif/output
[4], All pictures are taken from http://www.google.com/imghp
and converted to pnm from jpeg format, only for test purpose.


^ permalink raw reply	[flat|nested] 24+ messages in thread

* [RFC PATCH v2 1/8] omap4: introduce fdif(face detect module) hwmod
  2011-12-14 14:00 [RFC PATCH v2 0/7] media: introduce object detection(OD) driver Ming Lei
@ 2011-12-14 14:00 ` Ming Lei
  2011-12-16  5:53   ` Paul Walmsley
  2011-12-14 14:00 ` [RFC PATCH v2 2/8] omap4: build fdif omap device from hwmod Ming Lei
                   ` (6 subsequent siblings)
  7 siblings, 1 reply; 24+ messages in thread
From: Ming Lei @ 2011-12-14 14:00 UTC (permalink / raw)
  To: Mauro Carvalho Chehab, Tony Lindgren
  Cc: Sylwester Nawrocki, Alan Cox, linux-omap, linux-arm-kernel,
	linux-kernel, linux-media, Ming Lei

Signed-off-by: Ming Lei <ming.lei@canonical.com>
---
 arch/arm/mach-omap2/omap_hwmod_44xx_data.c |   81 ++++++++++++++++++++++++++++
 1 files changed, 81 insertions(+), 0 deletions(-)

diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 6cf21ee..30db754 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -53,6 +53,7 @@ static struct omap_hwmod omap44xx_dmm_hwmod;
 static struct omap_hwmod omap44xx_dsp_hwmod;
 static struct omap_hwmod omap44xx_dss_hwmod;
 static struct omap_hwmod omap44xx_emif_fw_hwmod;
+static struct omap_hwmod omap44xx_fdif_hwmod;
 static struct omap_hwmod omap44xx_hsi_hwmod;
 static struct omap_hwmod omap44xx_ipu_hwmod;
 static struct omap_hwmod omap44xx_iss_hwmod;
@@ -354,6 +355,14 @@ static struct omap_hwmod_ocp_if omap44xx_dma_system__l3_main_2 = {
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
+/* fdif -> l3_main_2 */
+static struct omap_hwmod_ocp_if omap44xx_fdif__l3_main_2 = {
+	.master		= &omap44xx_fdif_hwmod,
+	.slave		= &omap44xx_l3_main_2_hwmod,
+	.clk		= "l3_div_ck",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
 /* hsi -> l3_main_2 */
 static struct omap_hwmod_ocp_if omap44xx_hsi__l3_main_2 = {
 	.master		= &omap44xx_hsi_hwmod,
@@ -5444,6 +5453,75 @@ static struct omap_hwmod omap44xx_wd_timer3_hwmod = {
 	.slaves_cnt	= ARRAY_SIZE(omap44xx_wd_timer3_slaves),
 };
 
+/* 'fdif' class */
+static struct omap_hwmod_class_sysconfig omap44xx_fdif_sysc = {
+	.rev_offs	= 0x0000,
+	.sysc_offs	= 0x0010,
+	.sysc_flags	= (SYSC_HAS_MIDLEMODE | SYSC_HAS_RESET_STATUS |
+			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+			   MSTANDBY_FORCE | MSTANDBY_NO |
+			   MSTANDBY_SMART),
+	.sysc_fields	= &omap_hwmod_sysc_type2,
+};
+
+static struct omap_hwmod_class omap44xx_fdif_hwmod_class = {
+	.name	= "fdif",
+	.sysc	= &omap44xx_fdif_sysc,
+};
+
+/*fdif*/
+static struct omap_hwmod_addr_space omap44xx_fdif_addrs[] = {
+	{
+		.pa_start	= 0x4a10a000,
+		.pa_end		= 0x4a10afff,
+		.flags		= ADDR_TYPE_RT
+	},
+	{ }
+};
+
+/* l4_cfg -> fdif */
+static struct omap_hwmod_ocp_if omap44xx_l4_cfg__fdif = {
+	.master		= &omap44xx_l4_cfg_hwmod,
+	.slave		= &omap44xx_fdif_hwmod,
+	.clk		= "l4_div_ck",
+	.addr		= omap44xx_fdif_addrs,
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* fdif slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_fdif_slaves[] = {
+	&omap44xx_l4_cfg__fdif,
+};
+static struct omap_hwmod_irq_info omap44xx_fdif_irqs[] = {
+	{ .irq = 69 + OMAP44XX_IRQ_GIC_START },
+	{ .irq = -1 }
+};
+
+/* fdif master ports */
+static struct omap_hwmod_ocp_if *omap44xx_fdif_masters[] = {
+	&omap44xx_fdif__l3_main_2,
+};
+
+static struct omap_hwmod omap44xx_fdif_hwmod = {
+	.name		= "fdif",
+	.class		= &omap44xx_fdif_hwmod_class,
+	.clkdm_name	= "iss_clkdm",
+	.mpu_irqs	= omap44xx_fdif_irqs,
+	.main_clk	= "fdif_fck",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = OMAP4_CM_CAM_FDIF_CLKCTRL_OFFSET,
+			.context_offs = OMAP4_RM_CAM_FDIF_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+	.slaves		= omap44xx_fdif_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap44xx_fdif_slaves),
+	.masters	= omap44xx_fdif_masters,
+	.masters_cnt	= ARRAY_SIZE(omap44xx_fdif_masters),
+};
+
 static __initdata struct omap_hwmod *omap44xx_hwmods[] = {
 
 	/* dmm class */
@@ -5593,6 +5671,9 @@ static __initdata struct omap_hwmod *omap44xx_hwmods[] = {
 	&omap44xx_wd_timer2_hwmod,
 	&omap44xx_wd_timer3_hwmod,
 
+	/* fdif class */
+	&omap44xx_fdif_hwmod,
+
 	NULL,
 };
 
-- 
1.7.5.4


^ permalink raw reply related	[flat|nested] 24+ messages in thread

* [RFC PATCH v2 2/8] omap4: build fdif omap device from hwmod
  2011-12-14 14:00 [RFC PATCH v2 0/7] media: introduce object detection(OD) driver Ming Lei
  2011-12-14 14:00 ` [RFC PATCH v2 1/8] omap4: introduce fdif(face detect module) hwmod Ming Lei
@ 2011-12-14 14:00 ` Ming Lei
  2011-12-14 14:00 ` [RFC PATCH v2 3/8] media: videobuf2: move out of setting pgprot_noncached from vb2_mmap_pfn_range Ming Lei
                   ` (5 subsequent siblings)
  7 siblings, 0 replies; 24+ messages in thread
From: Ming Lei @ 2011-12-14 14:00 UTC (permalink / raw)
  To: Mauro Carvalho Chehab, Tony Lindgren
  Cc: Sylwester Nawrocki, Alan Cox, linux-omap, linux-arm-kernel,
	linux-kernel, linux-media, Ming Lei

Signed-off-by: Ming Lei <ming.lei@canonical.com>
---
 arch/arm/mach-omap2/devices.c |   33 +++++++++++++++++++++++++++++++++
 1 files changed, 33 insertions(+), 0 deletions(-)

diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 1166bdc..bd7f9b3 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -728,6 +728,38 @@ void __init omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data)
 
 #endif
 
+static __init struct platform_device *omap4_init_fdif(void)
+{
+	struct platform_device *pd;
+	struct omap_hwmod *oh;
+	const char *dev_name = "omap-fdif";
+
+	oh = omap_hwmod_lookup("fdif");
+	if (!oh) {
+		pr_err("Could not look up fdif hwmod\n");
+		return NULL;
+	}
+
+	pd = omap_device_build(dev_name, -1, oh, NULL, 0, NULL, 0, 0);
+	WARN(IS_ERR(pd), "Can't build omap_device for %s.\n",
+				dev_name);
+	return pd;
+}
+
+static void __init omap_init_fdif(void)
+{
+	struct platform_device *pd;
+
+	if (!cpu_is_omap44xx())
+		return;
+
+	pd = omap4_init_fdif();
+	if (!pd)
+		return;
+
+	pm_runtime_enable(&pd->dev);
+}
+
 /*-------------------------------------------------------------------------*/
 
 #if defined(CONFIG_HDQ_MASTER_OMAP) || defined(CONFIG_HDQ_MASTER_OMAP_MODULE)
@@ -808,6 +840,7 @@ static int __init omap2_init_devices(void)
 	omap_init_sham();
 	omap_init_aes();
 	omap_init_vout();
+	omap_init_fdif();
 
 	return 0;
 }
-- 
1.7.5.4


^ permalink raw reply related	[flat|nested] 24+ messages in thread

* [RFC PATCH v2 3/8] media: videobuf2: move out of setting pgprot_noncached from vb2_mmap_pfn_range
  2011-12-14 14:00 [RFC PATCH v2 0/7] media: introduce object detection(OD) driver Ming Lei
  2011-12-14 14:00 ` [RFC PATCH v2 1/8] omap4: introduce fdif(face detect module) hwmod Ming Lei
  2011-12-14 14:00 ` [RFC PATCH v2 2/8] omap4: build fdif omap device from hwmod Ming Lei
@ 2011-12-14 14:00 ` Ming Lei
  2011-12-14 14:00 ` [RFC PATCH v2 4/8] media: videobuf2: introduce VIDEOBUF2_PAGE memops Ming Lei
                   ` (4 subsequent siblings)
  7 siblings, 0 replies; 24+ messages in thread
From: Ming Lei @ 2011-12-14 14:00 UTC (permalink / raw)
  To: Mauro Carvalho Chehab, Tony Lindgren
  Cc: Sylwester Nawrocki, Alan Cox, linux-omap, linux-arm-kernel,
	linux-kernel, linux-media, Ming Lei

So that we can reuse vb2_mmap_pfn_range for the coming videobuf2_page
memops.

Signed-off-by: Ming Lei <ming.lei@canonical.com>
---
 drivers/media/video/videobuf2-dma-contig.c |    1 +
 drivers/media/video/videobuf2-memops.c     |    1 -
 2 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/drivers/media/video/videobuf2-dma-contig.c b/drivers/media/video/videobuf2-dma-contig.c
index f17ad98..0ea8866 100644
--- a/drivers/media/video/videobuf2-dma-contig.c
+++ b/drivers/media/video/videobuf2-dma-contig.c
@@ -106,6 +106,7 @@ static int vb2_dma_contig_mmap(void *buf_priv, struct vm_area_struct *vma)
 		return -EINVAL;
 	}
 
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 	return vb2_mmap_pfn_range(vma, buf->dma_addr, buf->size,
 				  &vb2_common_vm_ops, &buf->handler);
 }
diff --git a/drivers/media/video/videobuf2-memops.c b/drivers/media/video/videobuf2-memops.c
index 71a7a78..77e0def 100644
--- a/drivers/media/video/videobuf2-memops.c
+++ b/drivers/media/video/videobuf2-memops.c
@@ -162,7 +162,6 @@ int vb2_mmap_pfn_range(struct vm_area_struct *vma, unsigned long paddr,
 
 	size = min_t(unsigned long, vma->vm_end - vma->vm_start, size);
 
-	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 	ret = remap_pfn_range(vma, vma->vm_start, paddr >> PAGE_SHIFT,
 				size, vma->vm_page_prot);
 	if (ret) {
-- 
1.7.5.4


^ permalink raw reply related	[flat|nested] 24+ messages in thread

* [RFC PATCH v2 4/8] media: videobuf2: introduce VIDEOBUF2_PAGE memops
  2011-12-14 14:00 [RFC PATCH v2 0/7] media: introduce object detection(OD) driver Ming Lei
                   ` (2 preceding siblings ...)
  2011-12-14 14:00 ` [RFC PATCH v2 3/8] media: videobuf2: move out of setting pgprot_noncached from vb2_mmap_pfn_range Ming Lei
@ 2011-12-14 14:00 ` Ming Lei
  2011-12-22  9:28   ` Marek Szyprowski
  2011-12-14 14:00 ` [RFC PATCH v2 5/8] media: v4l2-ioctl: support 64/32 compatible array parameter Ming Lei
                   ` (3 subsequent siblings)
  7 siblings, 1 reply; 24+ messages in thread
From: Ming Lei @ 2011-12-14 14:00 UTC (permalink / raw)
  To: Mauro Carvalho Chehab, Tony Lindgren
  Cc: Sylwester Nawrocki, Alan Cox, linux-omap, linux-arm-kernel,
	linux-kernel, linux-media, Ming Lei

DMA contig memory is a very limited and precious resource, and
accessing it from the CPU is very slow on some platforms.

For some cases (such as the coming face detection driver), a DMA
streaming buffer is enough, so introduce VIDEOBUF2_PAGE to allocate
continuous physical memory but let the video device driver handle the
DMA buffer mapping and unmapping itself.

Signed-off-by: Ming Lei <ming.lei@canonical.com>
---
 drivers/media/video/Kconfig          |    4 +
 drivers/media/video/Makefile         |    1 +
 drivers/media/video/videobuf2-page.c |  117 ++++++++++++++++++++++++++++++++++
 include/media/videobuf2-page.h       |   20 ++++++
 4 files changed, 142 insertions(+), 0 deletions(-)
 create mode 100644 drivers/media/video/videobuf2-page.c
 create mode 100644 include/media/videobuf2-page.h

diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 4e8a0c4..5684a00 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -60,6 +60,10 @@ config VIDEOBUF2_VMALLOC
 	select VIDEOBUF2_MEMOPS
 	tristate
 
+config VIDEOBUF2_PAGE
+	select VIDEOBUF2_CORE
+	select VIDEOBUF2_MEMOPS
+	tristate
 
 config VIDEOBUF2_DMA_SG
 	#depends on HAS_DMA
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index ddeaa6c..bc797f2 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -125,6 +125,7 @@ obj-$(CONFIG_VIDEO_BTCX)  += btcx-risc.o
 obj-$(CONFIG_VIDEOBUF2_CORE)		+= videobuf2-core.o
 obj-$(CONFIG_VIDEOBUF2_MEMOPS)		+= videobuf2-memops.o
 obj-$(CONFIG_VIDEOBUF2_VMALLOC)		+= videobuf2-vmalloc.o
+obj-$(CONFIG_VIDEOBUF2_PAGE)		+= videobuf2-page.o
 obj-$(CONFIG_VIDEOBUF2_DMA_CONTIG)	+= videobuf2-dma-contig.o
 obj-$(CONFIG_VIDEOBUF2_DMA_SG)		+= videobuf2-dma-sg.o
 
diff --git a/drivers/media/video/videobuf2-page.c b/drivers/media/video/videobuf2-page.c
new file mode 100644
index 0000000..6a24a34
--- /dev/null
+++ b/drivers/media/video/videobuf2-page.c
@@ -0,0 +1,117 @@
+/*
+ * videobuf2-page.c - page memory allocator for videobuf2
+ *
+ * Copyright (C) 2011 Canonical Ltd.
+ *
+ * Author: Ming Lei <ming.lei@canonical.com>
+ *
+ * This file is based on videobuf2-vmalloc.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-memops.h>
+
+struct vb2_page_buf {
+	void				*vaddr;
+	unsigned long			size;
+	atomic_t			refcount;
+	struct vb2_vmarea_handler	handler;
+};
+
+static void vb2_page_put(void *buf_priv);
+
+static void *vb2_page_alloc(void *alloc_ctx, unsigned long size)
+{
+	struct vb2_page_buf *buf;
+
+	buf = kzalloc(sizeof *buf, GFP_KERNEL);
+	if (!buf)
+		return NULL;
+
+	buf->size = size;
+	buf->vaddr = (void *)__get_free_pages(GFP_KERNEL,
+			get_order(buf->size));
+	buf->handler.refcount = &buf->refcount;
+	buf->handler.put = vb2_page_put;
+	buf->handler.arg = buf;
+
+	if (!buf->vaddr) {
+		printk(KERN_ERR "page of size %ld failed\n", buf->size);
+		kfree(buf);
+		return NULL;
+	}
+
+	atomic_inc(&buf->refcount);
+	printk(KERN_DEBUG "Allocated page buffer of size %ld at vaddr=%p\n",
+			buf->size, buf->vaddr);
+
+	return buf;
+}
+
+static void vb2_page_put(void *buf_priv)
+{
+	struct vb2_page_buf *buf = buf_priv;
+
+	if (atomic_dec_and_test(&buf->refcount)) {
+		printk(KERN_DEBUG "%s: Freeing page mem at vaddr=%p\n",
+			__func__, buf->vaddr);
+		free_pages((unsigned long)buf->vaddr, get_order(buf->size));
+		kfree(buf);
+	}
+}
+
+static void *vb2_page_vaddr(void *buf_priv)
+{
+	struct vb2_page_buf *buf = buf_priv;
+
+	BUG_ON(!buf);
+
+	if (!buf->vaddr) {
+		printk(KERN_ERR "Address of an unallocated plane requested\n");
+		return NULL;
+	}
+
+	return buf->vaddr;
+}
+
+static unsigned int vb2_page_num_users(void *buf_priv)
+{
+	struct vb2_page_buf *buf = buf_priv;
+	return atomic_read(&buf->refcount);
+}
+
+static int vb2_page_mmap(void *buf_priv, struct vm_area_struct *vma)
+{
+	struct vb2_page_buf *buf = buf_priv;
+
+	if (!buf) {
+		printk(KERN_ERR "No memory to map\n");
+		return -EINVAL;
+	}
+
+	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+	return vb2_mmap_pfn_range(vma, virt_to_phys(buf->vaddr),
+			buf->size, &vb2_common_vm_ops,
+			&buf->handler);
+}
+
+const struct vb2_mem_ops vb2_page_memops = {
+	.alloc		= vb2_page_alloc,
+	.put		= vb2_page_put,
+	.vaddr		= vb2_page_vaddr,
+	.mmap		= vb2_page_mmap,
+	.num_users	= vb2_page_num_users,
+};
+EXPORT_SYMBOL_GPL(vb2_page_memops);
+
+MODULE_DESCRIPTION("page memory handling routines for videobuf2");
+MODULE_AUTHOR("Ming Lei");
+MODULE_LICENSE("GPL");
diff --git a/include/media/videobuf2-page.h b/include/media/videobuf2-page.h
new file mode 100644
index 0000000..c837456
--- /dev/null
+++ b/include/media/videobuf2-page.h
@@ -0,0 +1,20 @@
+/*
+ * videobuf2-page.h - page memory allocator for videobuf2
+ *
+ * Copyright (C) 2011 Canonical Ltd.
+ *
+ * Author: Ming Lei <ming.lei@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _MEDIA_VIDEOBUF2_PAGE_H
+#define _MEDIA_VIDEOBUF2_PAGE_H
+
+#include <media/videobuf2-core.h>
+
+extern const struct vb2_mem_ops vb2_page_memops;
+
+#endif
-- 
1.7.5.4


^ permalink raw reply related	[flat|nested] 24+ messages in thread

* [RFC PATCH v2 5/8] media: v4l2-ioctl: support 64/32 compatible array parameter
  2011-12-14 14:00 [RFC PATCH v2 0/7] media: introduce object detection(OD) driver Ming Lei
                   ` (3 preceding siblings ...)
  2011-12-14 14:00 ` [RFC PATCH v2 4/8] media: videobuf2: introduce VIDEOBUF2_PAGE memops Ming Lei
@ 2011-12-14 14:00 ` Ming Lei
  2011-12-14 14:00 ` [RFC PATCH v2 6/8] media: v4l2: introduce two IOCTLs for object detection Ming Lei
                   ` (2 subsequent siblings)
  7 siblings, 0 replies; 24+ messages in thread
From: Ming Lei @ 2011-12-14 14:00 UTC (permalink / raw)
  To: Mauro Carvalho Chehab, Tony Lindgren
  Cc: Sylwester Nawrocki, Alan Cox, linux-omap, linux-arm-kernel,
	linux-kernel, linux-media, Ming Lei, Arnd Bergmann

This patch supports to handle 64/32 compatible array parameter,
such as below:

	struct v4l2_fd_result {
	       __u32   buf_index;
	       __u32   face_cnt;
	       __u32   reserved[6];
	       struct v4l2_fd_detection fd[];
	};

With this patch, the pointer to user space array needn't be passed
to kernel any more.

Cc: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Ming Lei <ming.lei@canonical.com>
---
 drivers/media/video/v4l2-ioctl.c |   33 +++++++++++++++++++++++++++++++--
 1 files changed, 31 insertions(+), 2 deletions(-)

diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index e1da8fc..ded8b72 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -2239,6 +2239,11 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
 	return ret;
 }
 
+static int is_64_32_array_args(unsigned int cmd, void *parg, int *extra_len)
+{
+	return 0;
+}
+
 long
 video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
 	       v4l2_kioctl func)
@@ -2251,6 +2256,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
 	size_t  array_size = 0;
 	void __user *user_ptr = NULL;
 	void	**kernel_ptr = NULL;
+	int	extra = 0;
 
 	/*  Copy arguments into temp kernel buffer  */
 	if (_IOC_DIR(cmd) != _IOC_NONE) {
@@ -2280,9 +2286,32 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
 		}
 	}
 
-	err = check_array_args(cmd, parg, &array_size, &user_ptr, &kernel_ptr);
+	if (is_64_32_array_args(cmd, parg, &extra)) {
+		int size;
+		void *old_mbuf;
+
+		err = 0;
+		if (!extra)
+			goto handle_array_args;
+		old_mbuf = mbuf;
+		size = extra + _IOC_SIZE(cmd);
+		mbuf = kmalloc(size, GFP_KERNEL);
+		if (NULL == mbuf) {
+			mbuf = old_mbuf;
+			err = -ENOMEM;
+			goto out;
+		}
+		memcpy(mbuf, parg, _IOC_SIZE(cmd));
+		parg = mbuf;
+		kfree(old_mbuf);
+	} else {
+		err = check_array_args(cmd, parg, &array_size,
+				&user_ptr, &kernel_ptr);
+	}
+
 	if (err < 0)
 		goto out;
+handle_array_args:
 	has_array_args = err;
 
 	if (has_array_args) {
@@ -2321,7 +2350,7 @@ out_array_args:
 	switch (_IOC_DIR(cmd)) {
 	case _IOC_READ:
 	case (_IOC_WRITE | _IOC_READ):
-		if (copy_to_user((void __user *)arg, parg, _IOC_SIZE(cmd)))
+		if (copy_to_user((void __user *)arg, parg, _IOC_SIZE(cmd) + extra))
 			err = -EFAULT;
 		break;
 	}
-- 
1.7.5.4


^ permalink raw reply related	[flat|nested] 24+ messages in thread

* [RFC PATCH v2 6/8] media: v4l2: introduce two IOCTLs for object detection
  2011-12-14 14:00 [RFC PATCH v2 0/7] media: introduce object detection(OD) driver Ming Lei
                   ` (4 preceding siblings ...)
  2011-12-14 14:00 ` [RFC PATCH v2 5/8] media: v4l2-ioctl: support 64/32 compatible array parameter Ming Lei
@ 2011-12-14 14:00 ` Ming Lei
  2012-01-13 21:16   ` Sylwester Nawrocki
  2011-12-14 14:00 ` [RFC PATCH v2 7/8] media: video: introduce object detection driver module Ming Lei
  2011-12-14 14:00 ` [RFC PATCH v2 8/8] media: video: introduce omap4 face detection module driver Ming Lei
  7 siblings, 1 reply; 24+ messages in thread
From: Ming Lei @ 2011-12-14 14:00 UTC (permalink / raw)
  To: Mauro Carvalho Chehab, Tony Lindgren
  Cc: Sylwester Nawrocki, Alan Cox, linux-omap, linux-arm-kernel,
	linux-kernel, linux-media, Ming Lei

This patch introduces two new IOCTLs and related data
structure which will be used by the coming video device
with object detect capability.

The two IOCTLs and related data structure will be used by
user space application to retrieve the results of object
detection.

The utility fdif[1] is using the two IOCTLs to find
objects (faces) detected in raw images or video streams.

[1],http://kernel.ubuntu.com/git?p=ming/fdif.git;a=shortlog;h=refs/heads/v4l2-fdif

Signed-off-by: Ming Lei <ming.lei@canonical.com>
---
v2:
	- extend face detection API to object detection API
	- introduce capability of V4L2_CAP_OBJ_DETECTION for object detection
	- 32/64 safe array parameter
---
 drivers/media/video/v4l2-ioctl.c |   41 ++++++++++++-
 include/linux/videodev2.h        |  124 ++++++++++++++++++++++++++++++++++++++
 include/media/v4l2-ioctl.h       |    6 ++
 3 files changed, 170 insertions(+), 1 deletions(-)

diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index ded8b72..575d445 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -2140,6 +2140,30 @@ static long __video_do_ioctl(struct file *file,
 		dbgarg(cmd, "index=%d", b->index);
 		break;
 	}
+	case VIDIOC_G_OD_RESULT:
+	{
+		struct v4l2_od_result *or = arg;
+
+		if (!ops->vidioc_g_od_result)
+			break;
+
+		ret = ops->vidioc_g_od_result(file, fh, or);
+
+		dbgarg(cmd, "index=%d", or->frm_seq);
+		break;
+	}
+	case VIDIOC_G_OD_COUNT:
+	{
+		struct v4l2_od_count *oc = arg;
+
+		if (!ops->vidioc_g_od_count)
+			break;
+
+		ret = ops->vidioc_g_od_count(file, fh, oc);
+
+		dbgarg(cmd, "index=%d", oc->frm_seq);
+		break;
+	}
 	default:
 		if (!ops->vidioc_default)
 			break;
@@ -2241,7 +2265,22 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
 
 static int is_64_32_array_args(unsigned int cmd, void *parg, int *extra_len)
 {
-	return 0;
+	int ret = 0;
+
+	switch (cmd) {
+	case VIDIOC_G_OD_RESULT: {
+		struct v4l2_od_result *or = parg;
+
+		*extra_len = or->obj_cnt *
+			sizeof(struct v4l2_od_object);
+		ret = 1;
+		break;
+	}
+	default:
+		break;
+	}
+
+	return ret;
 }
 
 long
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 4b752d5..c08ceaf 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -270,6 +270,9 @@ struct v4l2_capability {
 #define V4L2_CAP_RADIO			0x00040000  /* is a radio device */
 #define V4L2_CAP_MODULATOR		0x00080000  /* has a modulator */
 
+/* The device has capability of object detection */
+#define V4L2_CAP_OBJ_DETECTION		0x00100000
+
 #define V4L2_CAP_READWRITE              0x01000000  /* read/write systemcalls */
 #define V4L2_CAP_ASYNCIO                0x02000000  /* async I/O */
 #define V4L2_CAP_STREAMING              0x04000000  /* streaming I/O ioctls */
@@ -2160,6 +2163,125 @@ struct v4l2_create_buffers {
 	__u32			reserved[8];
 };
 
+/**
+ * struct v4l2_od_obj_desc
+ * @centerx:	return, position in x direction of detected object
+ * @centery:	return, position in y direction of detected object
+ * @sizex:	return, size in x direction of detected object
+ * @sizey:	return, size in y direction of detected object
+ * @angle:	return, angle of detected object
+ * 		0 deg ~ 359 deg, vertical is 0 deg, clockwise
+ * @reserved:	future extensions
+ */
+struct v4l2_od_obj_desc {
+	__u16		centerx;
+	__u16		centery;
+	__u16		sizex;
+	__u16		sizey;
+	__u16		angle;
+	__u16		reserved[5];
+};
+
+/**
+ * struct v4l2_od_face_desc
+ * @id:		return, used to be associated with detected eyes, mouth,
+ * 		and other objects inside this face, and each face in one
+ * 		frame has a unique id, start from 1
+ * @smile_level:return, smile level of the face
+ * @f:		return, face description
+ */
+struct v4l2_od_face_desc {
+	__u16	id;
+	__u8	smile_level;
+	__u8    reserved[15];
+
+	struct v4l2_od_obj_desc	f;
+};
+
+/**
+ * struct v4l2_od_eye_desc
+ * @face_id:	return, used to associate with which face, 0 means
+ * 		no face associated with the eye
+ * @blink_level:return, blink level of the eye
+ * @e:		return, eye description
+ */
+struct v4l2_od_eye_desc {
+	__u16	face_id;
+	__u8	blink_level;
+	__u8    reserved[15];
+
+	struct v4l2_od_obj_desc	e;
+};
+
+/**
+ * struct v4l2_od_mouth_desc
+ * @face_id:	return, used to associate with which face, 0 means
+ * 		no face associated with the mouth
+ * @m:		return, mouth description
+ */
+struct v4l2_od_mouth_desc {
+	__u16	face_id;
+	__u8    reserved[16];
+
+	struct v4l2_od_obj_desc	m;
+};
+
+enum v4l2_od_type {
+	V4L2_OD_TYPE_FACE		= 1,
+	V4L2_OD_TYPE_LEFT_EYE		= 2,
+	V4L2_OD_TYPE_RIGHT_EYE		= 3,
+	V4L2_OD_TYPE_MOUTH		= 4,
+	V4L2_OD_TYPE_USER_DEFINED	= 255,
+	V4L2_OD_TYPE_MAX_CNT		= 256,
+};
+
+/**
+ * struct v4l2_od_object
+ * @type:	return, type of detected object
+ * @confidence:	return, confidence level of detection result
+ * 		0: the highest level, 100: the lowest level
+ * @face:	return, detected face object description
+ * @eye:	return, detected eye object description
+ * @mouth:	return, detected mouth object description
+ * @rawdata:	return, user defined data
+ */
+struct v4l2_od_object {
+	enum v4l2_od_type	type;
+	__u16			confidence;
+	union {
+		struct v4l2_od_face_desc face;
+		struct v4l2_od_eye_desc eye;
+		struct v4l2_od_mouth_desc mouth;
+		__u8	rawdata[60];
+	} o;
+};
+
+/**
+ * struct v4l2_od_result - VIDIOC_G_OD_RESULT argument
+ * @frm_seq:	entry, frame sequence No.
+ * @obj_cnt:	return, how many objects detected in frame @frame_seq
+ * @reserved:	reserved for future use
+ * @od:		return, result of detected objects in frame @frame_seq
+ */
+struct v4l2_od_result {
+	__u32			frm_seq;
+	__u32			obj_cnt;
+	__u32			reserved[6];
+	struct v4l2_od_object	od[0];
+};
+
+/**
+ * struct v4l2_od_count - VIDIOC_G_OD_COUNT argument
+ * @frm_seq:	entry, frame sequence No. for object detection
+ * @obj_cnt:	return, how many objects detected from the @frm_seq
+ * @reserved:	reserved for future usage.
+ */
+struct v4l2_od_count {
+	__u32	frm_seq;
+	__u32	obj_cnt;
+	__u32	reserved[6];
+};
+
 /*
  *	I O C T L   C O D E S   F O R   V I D E O   D E V I C E S
  *
@@ -2254,6 +2376,8 @@ struct v4l2_create_buffers {
    versions */
 #define VIDIOC_CREATE_BUFS	_IOWR('V', 92, struct v4l2_create_buffers)
 #define VIDIOC_PREPARE_BUF	_IOWR('V', 93, struct v4l2_buffer)
+#define VIDIOC_G_OD_COUNT	_IOWR('V', 94, struct v4l2_od_count)
+#define VIDIOC_G_OD_RESULT	_IOWR('V', 95, struct v4l2_od_result)
 
 /* Reminder: when adding new ioctls please add support for them to
    drivers/media/video/v4l2-compat-ioctl32.c as well! */
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index 4d1c74a..81a32a3 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -270,6 +270,12 @@ struct v4l2_ioctl_ops {
 	int (*vidioc_unsubscribe_event)(struct v4l2_fh *fh,
 					struct v4l2_event_subscription *sub);
 
+	/* object detect IOCTLs */
+	int (*vidioc_g_od_count) (struct file *file, void *fh,
+					struct v4l2_od_count *arg);
+	int (*vidioc_g_od_result) (struct file *file, void *fh,
+					struct v4l2_od_result *arg);
+
 	/* For other private ioctls */
 	long (*vidioc_default)	       (struct file *file, void *fh,
 					bool valid_prio, int cmd, void *arg);
-- 
1.7.5.4


^ permalink raw reply related	[flat|nested] 24+ messages in thread

* [RFC PATCH v2 7/8] media: video: introduce object detection driver module
  2011-12-14 14:00 [RFC PATCH v2 0/7] media: introduce object detection(OD) driver Ming Lei
                   ` (5 preceding siblings ...)
  2011-12-14 14:00 ` [RFC PATCH v2 6/8] media: v4l2: introduce two IOCTLs for object detection Ming Lei
@ 2011-12-14 14:00 ` Ming Lei
  2011-12-29 17:16   ` Sylwester Nawrocki
  2011-12-14 14:00 ` [RFC PATCH v2 8/8] media: video: introduce omap4 face detection module driver Ming Lei
  7 siblings, 1 reply; 24+ messages in thread
From: Ming Lei @ 2011-12-14 14:00 UTC (permalink / raw)
  To: Mauro Carvalho Chehab, Tony Lindgren
  Cc: Sylwester Nawrocki, Alan Cox, linux-omap, linux-arm-kernel,
	linux-kernel, linux-media, Ming Lei

This patch introduces object detection generic driver.

The driver is responsible for all v4l2 stuff, buffer management
and other general things, and doesn't touch object detection hardware
directly. Several interfaces are exported to low level drivers
(such as the coming omap4 FD driver) which will communicate with
object detection hw module.

So the driver will make it easier to drive object detection hw
modules.

TODO:
	- implement object detection setting interfaces with v4l2
	controls or ext controls

Signed-off-by: Ming Lei <ming.lei@canonical.com>
---
v2:
	- extend face detection driver to object detection driver
	- introduce subdevice and media entity
	- provide support to detect object from media HW
---
 drivers/media/video/Kconfig       |    2 +
 drivers/media/video/Makefile      |    1 +
 drivers/media/video/odif/Kconfig  |    7 +
 drivers/media/video/odif/Makefile |    1 +
 drivers/media/video/odif/odif.c   |  890 +++++++++++++++++++++++++++++++++++++
 drivers/media/video/odif/odif.h   |  157 +++++++
 6 files changed, 1058 insertions(+), 0 deletions(-)
 create mode 100644 drivers/media/video/odif/Kconfig
 create mode 100644 drivers/media/video/odif/Makefile
 create mode 100644 drivers/media/video/odif/odif.c
 create mode 100644 drivers/media/video/odif/odif.h

diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 5684a00..8740ee9 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -1166,3 +1166,5 @@ config VIDEO_SAMSUNG_S5P_MFC
 	    MFC 5.1 driver for V4L2.
 
 endif # V4L_MEM2MEM_DRIVERS
+
+source "drivers/media/video/odif/Kconfig"
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index bc797f2..259c8d8 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -197,6 +197,7 @@ obj-$(CONFIG_VIDEO_IR_I2C)  += ir-kbd-i2c.o
 obj-y	+= davinci/
 
 obj-$(CONFIG_ARCH_OMAP)	+= omap/
+obj-$(CONFIG_ODIF)	+= odif/
 
 ccflags-y += -Idrivers/media/dvb/dvb-core
 ccflags-y += -Idrivers/media/dvb/frontends
diff --git a/drivers/media/video/odif/Kconfig b/drivers/media/video/odif/Kconfig
new file mode 100644
index 0000000..5090bd6
--- /dev/null
+++ b/drivers/media/video/odif/Kconfig
@@ -0,0 +1,7 @@
+config ODIF
+	depends on VIDEO_DEV && VIDEO_V4L2
+	select VIDEOBUF2_PAGE
+	tristate "Object Detection module"
+	help
+	  The ODIF is a object detection module, which can be integrated into
+	  some SoCs to detect objects in images or video.
diff --git a/drivers/media/video/odif/Makefile b/drivers/media/video/odif/Makefile
new file mode 100644
index 0000000..a55ff66
--- /dev/null
+++ b/drivers/media/video/odif/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_ODIF)		+= odif.o
diff --git a/drivers/media/video/odif/odif.c b/drivers/media/video/odif/odif.c
new file mode 100644
index 0000000..381ab9d
--- /dev/null
+++ b/drivers/media/video/odif/odif.c
@@ -0,0 +1,890 @@
+/*
+ *      odif.c  --  object detection module driver
+ *
+ *      Copyright (C) 2011  Ming Lei (ming.lei@canonical.com)
+ *
+ *	This file is based on drivers/media/video/vivi.c.
+ *
+ *      This program is free software; you can redistribute it and/or modify
+ *      it under the terms of the GNU General Public License as published by
+ *      the Free Software Foundation; either version 2 of the License, or
+ *      (at your option) any later version.
+ *
+ *      This program is distributed in the hope that it will be useful,
+ *      but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *      MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *      GNU General Public License for more details.
+ *
+ *      You should have received a copy of the GNU General Public License
+ *      along with this program; if not, write to the Free Software
+ *      Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+/*****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/signal.h>
+#include <linux/wait.h>
+#include <linux/poll.h>
+#include <linux/mman.h>
+#include <linux/pm_runtime.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <asm/uaccess.h>
+#include <asm/byteorder.h>
+#include <asm/io.h>
+#include "odif.h"
+
+#define	input_from_user(dev) \
+	(dev->input == OD_INPUT_FROM_USER_SPACE)
+
+#define	DEFAULT_PENDING_RESULT_CNT	8
+
+static unsigned debug = 0;
+module_param(debug, uint, 0644);
+MODULE_PARM_DESC(debug, "activates debug info");
+
+static unsigned result_cnt_threshold = DEFAULT_PENDING_RESULT_CNT;
+module_param(result_cnt_threshold, uint, 0644);
+/* the description must name the actual parameter for modinfo to show it */
+MODULE_PARM_DESC(result_cnt_threshold, "max pending result count");
+
+static LIST_HEAD(odif_devlist);
+static unsigned video_nr = -1;
+
+/* open: take a device reference for this file handle.  The reference
+ * must be dropped again if the v4l2 file-handle setup fails, otherwise
+ * the kref taken here can never be balanced by odif_close(). */
+int odif_open(struct file *file)
+{
+	struct odif_dev *dev = video_drvdata(file);
+	int ret;
+
+	kref_get(&dev->ref);
+	ret = v4l2_fh_open(file);
+	if (ret)
+		kref_put(&dev->ref, odif_release);
+	return ret;
+}
+
+/* poll: a reader becomes ready as soon as at least one completed
+ * detection result is queued on the complete list */
+static unsigned int
+odif_poll(struct file *file, struct poll_table_struct *wait)
+{
+	struct odif_dev *dev = video_drvdata(file);
+	unsigned long flags;
+	unsigned int events = 0;
+
+	poll_wait(file, &dev->odif_dq.wq, wait);
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (!list_empty(&dev->odif_dq.complete) &&
+	    (file->f_mode & FMODE_READ))
+		events = POLLIN | POLLWRNORM;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	return events;
+}
+
+/* release a file handle; the last handle also tears down the vb2 queue */
+static int odif_close(struct file *file)
+{
+	struct video_device  *vdev = video_devdata(file);
+	struct odif_dev *dev = video_drvdata(file);
+	int ret;
+
+	dprintk(dev, 1, "close called (dev=%s), file %p\n",
+		video_device_node_name(vdev), file);
+
+	/* only the last remaining file handle releases the buffer queue */
+	if (v4l2_fh_is_singular_file(file))
+		vb2_queue_release(&dev->vbq);
+
+	ret = v4l2_fh_release(file);
+	/* drop the reference taken in odif_open() */
+	kref_put(&dev->ref, odif_release);
+
+	return ret;
+}
+
+/* mmap the vb2 buffers; only meaningful when frames come from user space */
+static int odif_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct odif_dev *dev = video_drvdata(file);
+	int ret;
+
+	dprintk(dev, 1, "mmap called, vma=0x%08lx\n", (unsigned long)vma);
+
+	/* no buffers exist unless the input is user-space video */
+	if (!input_from_user(dev))
+		return -EBUSY;
+
+	ret = vb2_mmap(&dev->vbq, vma);
+	dprintk(dev, 1, "vma start=0x%08lx, size=%ld, ret=%d\n",
+		(unsigned long)vma->vm_start,
+		(unsigned long)vma->vm_end - (unsigned long)vma->vm_start,
+		ret);
+	return ret;
+}
+
+/* file operations; ioctls are dispatched through the v4l2 core helper */
+static const struct v4l2_file_operations odif_fops = {
+	.owner		= THIS_MODULE,
+	.open           = odif_open,
+	.release        = odif_close,
+	.poll		= odif_poll,
+	.unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */
+	.mmap           = odif_mmap,
+};
+
+/* ------------------------------------------------------------------
+	IOCTL vidioc handling
+   ------------------------------------------------------------------*/
+/* counterpart of odif_allocate_detection(): releases one result */
+static void free_result(struct v4l2_odif_result *r)
+{
+	kfree(r);
+}
+
+/* forward the start request to the low level hw driver */
+static int odif_start_detect(struct odif_dev *dev)
+{
+	int ret;
+
+	dprintk(dev, 1, "%s\n", __func__);
+
+	ret = dev->ops->start_detect(dev);
+
+	dprintk(dev, 1, "returning from %s, ret is %d\n",
+			__func__, ret);
+	return ret;
+}
+
+/* stop detection and flush both queues: active buffers go back to vb2
+ * with an error state, unread results are discarded */
+static void odif_stop_detect(struct odif_dev *dev)
+{
+	struct odif_dmaqueue *dma_q = &dev->odif_dq;
+	unsigned long flags;
+
+	dprintk(dev, 1, "%s\n", __func__);
+
+	/*stop hardware first*/
+	dev->ops->stop_detect(dev);
+
+	spin_lock_irqsave(&dev->lock, flags);
+	/* Release all active buffers */
+	while (!list_empty(&dma_q->active)) {
+		struct odif_buffer *buf;
+		buf = list_entry(dma_q->active.next, struct odif_buffer, list);
+		list_del(&buf->list);
+		/* drop the lock around the vb2 callback */
+		spin_unlock_irqrestore(&dev->lock, flags);
+		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+		dprintk(dev, 2, "[%p/%d] done\n", buf, buf->vb.v4l2_buf.index);
+		spin_lock_irqsave(&dev->lock, flags);
+	}
+
+	/* Release all complete detect result, so user space __must__ read
+	 * the results before stream off*/
+	while (!list_empty(&dma_q->complete)) {
+		struct v4l2_odif_result *result;
+		result = list_entry(dma_q->complete.next, struct v4l2_odif_result, list);
+		list_del(&result->list);
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		/* log before freeing: the previous code dereferenced
+		 * 'result' after free_result(), a use-after-free */
+		dprintk(dev, 2, "[frm_seq:%d] result removed\n", result->frm_seq);
+		free_result(result);
+		spin_lock_irqsave(&dev->lock, flags);
+	}
+	atomic_set(&dma_q->complete_cnt, 0);
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+/* VIDIOC_QUERYCAP: report driver identity plus the hw driver's extra
+ * capability bits */
+static int vidioc_querycap(struct file *file, void  *priv,
+					struct v4l2_capability *cap)
+{
+	struct odif_dev *dev = video_drvdata(file);
+
+	/* use bounded copies for all fixed-size fields, like bus_info did */
+	strlcpy(cap->driver, "odif", sizeof(cap->driver));
+	strlcpy(cap->card, "odif", sizeof(cap->card));
+	strlcpy(cap->bus_info, dev->v4l2_dev.name, sizeof(cap->bus_info));
+	cap->capabilities = dev->ops->capability | V4L2_CAP_OBJ_DETECTION;
+
+	return 0;
+}
+
+/* VIDIOC_ENUM_FMT: enumerate the low level driver's format table */
+static int vidioc_enum_fmt_vid_out(struct file *file, void  *priv,
+					struct v4l2_fmtdesc *f)
+{
+	struct odif_dev *dev = video_drvdata(file);
+	struct odif_fmt *entry;
+
+	if (f->index >= dev->ops->fmt_cnt)
+		return -EINVAL;
+
+	entry = &dev->ops->table[f->index];
+	f->pixelformat = entry->fourcc;
+	strlcpy(f->description, entry->name, sizeof(f->description));
+
+	return 0;
+}
+
+/* VIDIOC_G_FMT: report the current output format; bytesperline and
+ * sizeimage are derived from the frame size and the format depth */
+static int vidioc_g_fmt_vid_out(struct file *file, void *priv,
+					struct v4l2_format *f)
+{
+	struct odif_dev *dev = video_drvdata(file);
+
+	f->fmt.pix.width        = dev->s.width;
+	f->fmt.pix.height       = dev->s.height;
+	f->fmt.pix.field        = dev->s.field;
+	f->fmt.pix.pixelformat  = dev->s.fmt->fourcc;
+	/* depth is in bits, hence the >> 3 */
+	f->fmt.pix.bytesperline =
+		(f->fmt.pix.width * dev->s.fmt->depth) >> 3;
+	f->fmt.pix.sizeimage =
+		f->fmt.pix.height * f->fmt.pix.bytesperline;
+	return 0;
+}
+
+/* VIDIOC_REQBUFS: only valid when frames are fed from user space */
+static int vidioc_reqbufs(struct file *file, void *priv,
+			  struct v4l2_requestbuffers *p)
+{
+	struct odif_dev *dev = video_drvdata(file);
+	dprintk(dev, 1, "%s\n", __func__);
+
+	if (!input_from_user(dev))
+		return -EBUSY;
+
+	return vb2_reqbufs(&dev->vbq, p);
+}
+
+/* VIDIOC_QUERYBUF: only valid when frames are fed from user space */
+static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+	struct odif_dev *dev = video_drvdata(file);
+	dprintk(dev, 1, "%s\n", __func__);
+
+	if (!input_from_user(dev))
+		return -EBUSY;
+
+	return vb2_querybuf(&dev->vbq, p);
+}
+
+/* VIDIOC_QBUF: only valid when frames are fed from user space */
+static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+	struct odif_dev *dev = video_drvdata(file);
+	dprintk(dev, 1, "%s\n", __func__);
+
+	if (!input_from_user(dev))
+		return -EBUSY;
+
+	return vb2_qbuf(&dev->vbq, p);
+}
+
+/* VIDIOC_DQBUF: only valid when frames are fed from user space */
+static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+	struct odif_dev *dev = video_drvdata(file);
+	dprintk(dev, 1, "%s\n", __func__);
+
+	if (!input_from_user(dev))
+		return -EBUSY;
+
+	return vb2_dqbuf(&dev->vbq, p, file->f_flags & O_NONBLOCK);
+}
+
+/* VIDIOC_STREAMON: start streaming/detection for the current input.
+ * The busy flag (used by odif_switch_link() to refuse reconfiguration)
+ * is only set when streaming actually started. */
+static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+	struct odif_dev *dev = video_drvdata(file);
+	int ret;
+
+	dprintk(dev, 1, "%s\n", __func__);
+
+	if (input_from_user(dev))
+		ret = vb2_streamon(&dev->vbq, i);
+	else
+		ret = odif_start_detect(dev);
+
+	/* don't mark the device busy if streamon failed */
+	if (!ret)
+		dev->working = 1;
+
+	return ret;
+}
+
+/* VIDIOC_STREAMOFF: stop streaming/detection for the current input */
+static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+	struct odif_dev *dev = video_drvdata(file);
+	dprintk(dev, 1, "%s\n", __func__);
+
+	/* clear the busy flag so the input link may be switched again */
+	dev->working = 0;
+
+	if (input_from_user(dev))
+		return vb2_streamoff(&dev->vbq, i);
+
+	odif_stop_detect(dev);
+
+	return 0;
+}
+
+/* VIDIOC_G_OD_COUNT: report how many objects were detected in the
+ * frame with the given sequence number; -EINVAL when no result for
+ * that frame is pending */
+static int vidioc_g_od_count(struct file *file, void *priv,
+					struct v4l2_od_count *f)
+{
+	struct odif_dev *dev = video_drvdata(file);
+	unsigned long flags;
+	struct v4l2_odif_result *tmp;
+	int ret = -EINVAL;
+
+	/* the complete list is only walked under dev->lock */
+	spin_lock_irqsave(&dev->lock, flags);
+	list_for_each_entry(tmp, &dev->odif_dq.complete, list)
+		if (tmp->frm_seq == f->frm_seq) {
+			f->obj_cnt = tmp->obj_cnt;
+			ret = 0;
+			break;
+		}
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	return ret;
+}
+
+/* VIDIOC_G_OD_RESULT: detach the result for the given frame sequence
+ * and copy up to f->obj_cnt objects into the caller's array.  The
+ * result is consumed (freed) on success; -EINVAL when none pending. */
+static int vidioc_g_od_result(struct file *file, void *priv,
+					struct v4l2_od_result *f)
+{
+	struct odif_dev *dev = video_drvdata(file);
+	unsigned long flags;
+	struct v4l2_odif_result *tmp;
+	struct v4l2_odif_result *fr = NULL;
+	unsigned int cnt = 0;
+	int ret = -EINVAL;
+
+	/* detach the matching result under the lock ... */
+	spin_lock_irqsave(&dev->lock, flags);
+	list_for_each_entry(tmp, &dev->odif_dq.complete, list)
+		if (tmp->frm_seq == f->frm_seq) {
+			fr = tmp;
+			list_del(&tmp->list);
+			break;
+		}
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	/* ... then copy and free it without holding the lock */
+	if (fr) {
+		ret = 0;
+		/* never copy more objects than the caller asked for */
+		cnt = min(f->obj_cnt, fr->obj_cnt);
+		if (cnt)
+			memcpy(f->od, fr->objs,
+				sizeof(struct v4l2_od_object) * cnt);
+		f->obj_cnt = cnt;
+
+		free_result(fr);
+
+		atomic_dec(&dev->odif_dq.complete_cnt);
+	}
+	return ret;
+}
+
+/* ioctl table plugged into the video_device template below */
+static const struct v4l2_ioctl_ops odif_ioctl_ops = {
+	.vidioc_querycap      = vidioc_querycap,
+	.vidioc_enum_fmt_vid_out  = vidioc_enum_fmt_vid_out,
+	.vidioc_g_fmt_vid_out     = vidioc_g_fmt_vid_out,
+	.vidioc_reqbufs       = vidioc_reqbufs,
+	.vidioc_querybuf      = vidioc_querybuf,
+	.vidioc_qbuf          = vidioc_qbuf,
+	.vidioc_dqbuf         = vidioc_dqbuf,
+	.vidioc_streamon      = vidioc_streamon,
+	.vidioc_streamoff     = vidioc_streamoff,
+	.vidioc_g_od_count    = vidioc_g_od_count,
+	.vidioc_g_od_result   = vidioc_g_od_result,
+};
+
+/* video_device release: also frees the mutex that odif_create_instance()
+ * allocated for vdev->lock */
+static void odif_vdev_release(struct video_device *vdev)
+{
+	kfree(vdev->lock);
+	video_device_release(vdev);
+}
+
+/* template copied into each instance's video_device in odif_create_instance() */
+static struct video_device odif_template = {
+	.name		= "odif",
+	.fops           = &odif_fops,
+	.ioctl_ops 	= &odif_ioctl_ops,
+	.release	= odif_vdev_release,
+};
+
+
+/* ------------------------------------------------------------------
+	Videobuf operations
+   ------------------------------------------------------------------*/
+/* vb2 queue_setup: single-plane buffers sized from the current format */
+static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+				unsigned int *nbuffers, unsigned int *nplanes,
+				unsigned int sizes[], void *alloc_ctxs[])
+{
+	struct odif_dev *dev = vb2_get_drv_priv(vq);
+	unsigned long size;
+
+	dprintk(dev, 1, "%s\n", __func__);
+
+	BUG_ON(!dev->s.fmt);
+	/* bytes per frame = width * height * bits-per-pixel / 8 */
+	size = (dev->s.width * dev->s.height * dev->s.fmt->depth) >> 3;
+
+	/* default to two buffers when the caller did not ask for a count */
+	if (0 == *nbuffers)
+		*nbuffers = 2;
+	*nplanes = 1;
+	sizes[0] = size;
+
+	dprintk(dev, 1, "%s, count=%d, size=%ld\n", __func__,
+		*nbuffers, size);
+
+	return 0;
+}
+
+static int buffer_init(struct vb2_buffer *vb)
+{
+	struct odif_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+
+	/*
+	 * This callback is called once per buffer, after its allocation.
+	 *
+	 * (Comment inherited from vivi.c, on which this file is based.)
+	 * Format changes are not allowed during streaming but may happen
+	 * while streaming is paused (streamoff state).  Buffers are not
+	 * freed on streamoff, so buffer size verification has to be done
+	 * in buffer_prepare, on each qbuf.
+	 * It would be best to move verification code here to buf_init and
+	 * s_fmt though.
+	 */
+	dprintk(dev, 1, "%s vaddr=%p\n", __func__,
+			vb2_plane_vaddr(vb, 0));
+
+	return 0;
+}
+
+/* vb2 buf_prepare: verify the plane can hold one frame of the current
+ * format and set the payload size accordingly */
+static int buffer_prepare(struct vb2_buffer *vb)
+{
+	struct odif_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+	struct odif_buffer *buf = container_of(vb, struct odif_buffer, vb);
+	unsigned long size;
+
+	dprintk(dev, 1, "%s, field=%d\n", __func__, vb->v4l2_buf.field);
+
+	BUG_ON(!dev->s.fmt);
+	/* same frame-size formula as queue_setup() */
+	size = (dev->s.width * dev->s.height * dev->s.fmt->depth) >> 3;
+	if (vb2_plane_size(vb, 0) < size) {
+		dprintk(dev, 1, "%s data will not fit into plane (%lu < %lu)\n",
+				__func__, vb2_plane_size(vb, 0), size);
+		return -EINVAL;
+	}
+
+	vb2_set_plane_payload(&buf->vb, 0, size);
+
+	return 0;
+}
+
+/* vb2 buf_finish: no per-buffer post-processing needed; trace only */
+static int buffer_finish(struct vb2_buffer *vb)
+{
+	struct odif_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+	dprintk(dev, 1, "%s\n", __func__);
+	return 0;
+}
+
+/* vb2 buf_cleanup: nothing allocated per-buffer, so trace only */
+static void buffer_cleanup(struct vb2_buffer *vb)
+{
+	struct odif_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+	dprintk(dev, 1, "%s\n", __func__);
+}
+
+/* vb2 buf_queue: append the buffer to the active list and, if already
+ * streaming, kick the hw driver to process it */
+static void buffer_queue(struct vb2_buffer *vb)
+{
+	struct odif_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+	struct odif_buffer *buf = container_of(vb, struct odif_buffer, vb);
+	struct odif_dmaqueue *dq = &dev->odif_dq;
+	unsigned long flags = 0;
+
+	dprintk(dev, 1, "%s vaddr:%p\n", __func__,
+			vb2_plane_vaddr(vb, 0));
+
+	spin_lock_irqsave(&dev->lock, flags);
+	list_add_tail(&buf->list, &dq->active);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	/* buffers queued before streamon are picked up by start_streaming */
+	if (vb->vb2_queue->streaming)
+		dev->ops->submit_detect(dev);
+}
+
+/* vb2 start_streaming: delegate to the hw driver via odif_start_detect() */
+static int start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+	struct odif_dev *dev = vb2_get_drv_priv(vq);
+	dprintk(dev, 1, "%s\n", __func__);
+	return odif_start_detect(dev);
+}
+
+/* abort streaming and wait for last buffer */
+static int stop_streaming(struct vb2_queue *vq)
+{
+	struct odif_dev *dev = vb2_get_drv_priv(vq);
+	dprintk(dev, 1, "%s\n", __func__);
+	/* flushes active buffers and pending results, see odif_stop_detect */
+	odif_stop_detect(dev);
+	return 0;
+}
+
+/* vb2 wait_finish callback: reacquire the driver mutex after sleeping */
+static void odif_lock(struct vb2_queue *vq)
+{
+	struct odif_dev *dev = vb2_get_drv_priv(vq);
+
+	mutex_lock(&dev->mutex);
+}
+
+/* vb2 wait_prepare callback: drop the driver mutex while the core
+ * sleeps waiting for a buffer */
+static void odif_unlock(struct vb2_queue *vq)
+{
+	struct odif_dev *dev = vb2_get_drv_priv(vq);
+	mutex_unlock(&dev->mutex);
+}
+static struct vb2_ops odif_video_qops = {
+	.queue_setup		= queue_setup,
+	.buf_init		= buffer_init,
+	.buf_prepare		= buffer_prepare,
+	.buf_finish		= buffer_finish,
+	.buf_cleanup		= buffer_cleanup,
+	.buf_queue		= buffer_queue,
+	.start_streaming	= start_streaming,
+	.stop_streaming		= stop_streaming,
+	.wait_prepare		= odif_unlock,
+	.wait_finish		= odif_lock,
+};
+
+/*only store one detection result for one frame*/
+void odif_add_detection(struct odif_dev *dev,
+		struct v4l2_odif_result *v4l2_fr)
+{
+	unsigned long flags;
+	struct v4l2_odif_result *old = NULL;
+	struct v4l2_odif_result *tmp;
+
+	/* if a result for this frame already exists, replace it */
+	spin_lock_irqsave(&dev->lock, flags);
+	list_for_each_entry(tmp, &dev->odif_dq.complete, list)
+		if (tmp->frm_seq == v4l2_fr->frm_seq) {
+			old = tmp;
+			list_del(&tmp->list);
+			break;
+		}
+	list_add_tail(&v4l2_fr->list, &dev->odif_dq.complete);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	/* the pending count only grows when a new frame's result arrives */
+	if (old)
+		free_result(old);
+	else
+		atomic_inc(&dev->odif_dq.complete_cnt);
+	/* NOTE(review): no wake_up() on odif_dq.wq is visible in this
+	 * file, yet odif_poll() sleeps on it -- verify the hw driver
+	 * wakes pollers, otherwise POLLIN may be delayed indefinitely */
+}
+EXPORT_SYMBOL_GPL(odif_add_detection);
+
+/* allocate a result able to hold @cnt objects; when too many unread
+ * results are pending, the oldest one is recycled instead */
+struct v4l2_odif_result *odif_allocate_detection(struct odif_dev *dev,
+		int cnt)
+{
+	struct v4l2_odif_result *result = NULL;
+	unsigned long flags;
+
+	if (atomic_read(&dev->odif_dq.complete_cnt) >=
+			result_cnt_threshold) {
+		spin_lock_irqsave(&dev->lock, flags);
+		/* re-check under the lock: a reader may have drained the
+		 * list since the unlocked count check above, and
+		 * list_entry() on an empty list yields a bogus pointer */
+		if (!list_empty(&dev->odif_dq.complete)) {
+			result = list_entry(dev->odif_dq.complete.next,
+						struct v4l2_odif_result, list);
+			list_del(&result->list);
+			atomic_dec(&dev->odif_dq.complete_cnt);
+		}
+		spin_unlock_irqrestore(&dev->lock, flags);
+	}
+
+	if (!result)
+		goto allocate_result;
+
+	/* reuse the old one if count is matched */
+	if (result->obj_cnt == cnt)
+		goto out;
+
+	/*free the old result*/
+	free_result(result);
+
+allocate_result:
+	result = kzalloc(sizeof(*result) +
+			cnt * sizeof(struct v4l2_od_object), GFP_ATOMIC);
+	if (result)
+		result->obj_cnt = cnt;
+out:
+	return result;
+}
+EXPORT_SYMBOL_GPL(odif_allocate_detection);
+
+/* hand the oldest active user-space buffer to the hw driver, or NULL
+ * if none is queued; only meaningful in user-space input mode */
+struct odif_buffer *odif_pick_up_buffer(struct odif_dev *dev)
+{
+	struct odif_buffer *buf = NULL;
+	unsigned long flags;
+
+	/* hw drivers must not call this in media-hw input mode */
+	WARN_ON(!input_from_user(dev));
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (list_empty(&dev->odif_dq.active))
+		goto out;
+	buf = list_entry(dev->odif_dq.active.next,
+				struct odif_buffer, list);
+	list_del(&buf->list);
+out:
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	return buf;
+}
+EXPORT_SYMBOL_GPL(odif_pick_up_buffer);
+
+/* no subdev operations are needed yet; the table exists only so the
+ * entities can be registered as v4l2 subdevices */
+static struct v4l2_subdev_ops odif_subdev_ops = {
+};
+
+/* switch the detection input between user-space video and media hw;
+ * rejected while streaming (dev->working) or when the hw driver lacks
+ * the matching capability bit */
+static int odif_switch_link(struct odif_dev *dev, int next,
+		const struct media_pad *remote)
+{
+	int ret = 0;
+
+	/* nothing to do when the input is unchanged */
+	if (dev->input == next)
+		return ret;
+
+	/* user-space input requires V4L2_CAP_VIDEO_OUTPUT support */
+	if (!(dev->ops->capability & V4L2_CAP_VIDEO_OUTPUT) &&
+			next == OD_INPUT_FROM_USER_SPACE)
+		return -EINVAL;
+
+	/* media-hw input requires V4L2_CAP_VIDEO_CAPTURE support */
+	if (!(dev->ops->capability & V4L2_CAP_VIDEO_CAPTURE) &&
+			next == OD_INPUT_FROM_MEDIA_HW)
+		return -EINVAL;
+
+	/* vfd->lock serializes against fops/ioctls that may stream */
+	mutex_lock(dev->vfd->lock);
+	if (dev->working) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (dev->ops->set_input)
+		dev->ops->set_input(dev, next, remote);
+	else
+		ret = -EINVAL;
+out:
+	mutex_unlock(dev->vfd->lock);
+	return ret;
+}
+
+/* media controller link_setup: derive the requested input from the
+ * remote pad and ask odif_switch_link() to apply it */
+static int odif_entity_link_setup(struct media_entity *entity,
+			  const struct media_pad *local,
+			  const struct media_pad *remote, u32 flags)
+{
+	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+	struct odif_dev *dev = v4l2_get_subdevdata(sd);
+	u32    type = media_entity_type(remote->entity);
+	int next;
+
+	/*link user space video to object detection*/
+	if (remote == &dev->entity[1].pads[0])
+		next = OD_INPUT_FROM_USER_SPACE;
+	else if (type == MEDIA_ENT_T_V4L2_SUBDEV)
+		next = OD_INPUT_FROM_MEDIA_HW;
+	else
+		next = dev->input;
+
+	/* was a raw printk() without a KERN_* level; use the driver's
+	 * level-controlled debug helper instead */
+	dprintk(dev, 1, "%s: next=%d\n", __func__, next);
+
+	return odif_switch_link(dev, next, remote);
+}
+
+/* only referenced via sink->ops in odif_init_entities(), so keep it
+ * out of the global namespace */
+static struct media_entity_operations odif_entity_ops = {
+	.link_setup = odif_entity_link_setup,
+};
+
+/* unregister both subdevs, then release their media entities */
+static void odif_cleanup_entities(struct odif_dev *odif)
+{
+	int i;
+
+	for (i = 0; i < 2; i++)
+		v4l2_device_unregister_subdev(&odif->entity[i].subdev);
+
+	for (i = 0; i < 2; i++)
+		media_entity_cleanup(&odif->entity[i].subdev.entity);
+}
+
+/* build the media graph: entity[0] is the detection hw (sink pad),
+ * entity[1] sources user-space video; a dynamic link connects them
+ * and is enabled as the default input */
+static int odif_init_entities(struct odif_dev *odif)
+{
+	const u32 flags = MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_DYNAMIC;
+	int ret;
+	struct odif_entity *entity;
+	struct media_entity *source;
+	struct media_entity *sink;
+
+	/*entity[0] is the entity for object detection hw*/
+	entity = &odif->entity[0];
+	entity->num_links = 2;
+	entity->num_pads = 1;
+	entity->pads[0].flags = MEDIA_PAD_FL_SINK;
+
+	v4l2_subdev_init(&entity->subdev, &odif_subdev_ops);
+	v4l2_set_subdevdata(&entity->subdev, odif);
+	strlcpy(entity->subdev.name, "object detect",
+			sizeof(entity->subdev.name));
+	ret = media_entity_init(&entity->subdev.entity,
+		entity->num_pads, entity->pads, 1);
+	if (ret)
+		goto out;
+
+	/*entity[1] is the video entity which sources the user space video*/
+	entity = &odif->entity[1];
+	entity->num_links = 1;
+	entity->num_pads = 1;
+	entity->pads[0].flags = MEDIA_PAD_FL_SOURCE;
+
+	v4l2_subdev_init(&entity->subdev, &odif_subdev_ops);
+	v4l2_set_subdevdata(&entity->subdev, odif);
+	strlcpy(entity->subdev.name, "user space video",
+			sizeof(entity->subdev.name));
+	ret = media_entity_init(&entity->subdev.entity,
+		entity->num_pads, entity->pads, 0);
+	/* this result used to be silently overwritten below */
+	if (ret)
+		goto out;
+
+	/* create the default link */
+	source = &odif->entity[1].subdev.entity;
+	sink = &odif->entity[0].subdev.entity;
+	sink->ops = &odif_entity_ops;
+	ret = media_entity_create_link(source, 0,
+				       sink, 0, flags);
+	if (ret)
+		goto out;
+
+	/* init the default hw link*/
+	if (odif->ops->set_input)
+		ret = odif->ops->set_input(odif,
+			OD_INPUT_FROM_USER_SPACE,
+			&odif->entity[1].pads[0]);
+	if (ret)
+		goto out;
+
+	odif->input = OD_INPUT_FROM_USER_SPACE;
+
+	/* register the two subdevices */
+	ret = v4l2_device_register_subdev(&odif->v4l2_dev,
+			&odif->entity[0].subdev);
+	if (ret)
+		goto out;
+
+	ret = v4l2_device_register_subdev(&odif->v4l2_dev,
+			&odif->entity[1].subdev);
+out:
+	return ret;
+}
+
+/* kref release: tear down and free the instance once the last
+ * reference (taken in odif_create_instance/odif_open) is dropped */
+void odif_release(struct kref *ref)
+{
+	struct odif_dev *dev = container_of(ref, struct odif_dev, ref);
+
+	v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
+		video_device_node_name(dev->vfd));
+
+	/* NOTE(review): odif_devlist is modified without a lock here
+	 * and in odif_create_instance() -- verify against concurrent
+	 * instance creation/teardown */
+	list_del(&dev->odif_devlist);
+
+	odif_cleanup_entities(dev);
+	video_unregister_device(dev->vfd);
+	v4l2_device_unregister(&dev->v4l2_dev);
+	kfree(dev);
+}
+EXPORT_SYMBOL_GPL(odif_release);
+
+/**
+ * odif_create_instance - create and register one odif device
+ * @parent:	parent device for the new v4l2 device
+ * @priv_size:	size of the hw driver's private area appended to odif_dev
+ * @ops:	callbacks into the low level hw driver
+ * @odif_dev:	returns the new instance on success
+ *
+ * Returns 0 on success or a negative errno; on failure everything
+ * allocated here is unwound via the goto chain at the bottom.
+ */
+int odif_create_instance(struct device *parent, int priv_size,
+		struct odif_ops *ops, struct odif_dev **odif_dev)
+{
+	struct odif_dev *dev;
+	struct video_device *vfd;
+	struct vb2_queue *q;
+	int ret, len;
+	struct mutex	*vmutex;
+
+	/* the hw driver's private area lives right after the struct */
+	dev = kzalloc(sizeof(*dev) + priv_size, GFP_KERNEL);
+	if (!dev) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	kref_init(&dev->ref);
+	dev->ops = ops;
+	dev->dev = parent;
+
+	len = snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name),
+			"%s", "odif");
+	dev->v4l2_dev.name[len] = '\0';
+
+	ret = v4l2_device_register(dev->dev, &dev->v4l2_dev);
+	if (ret)
+		goto free_dev;
+
+	/* initialize locks */
+	spin_lock_init(&dev->lock);
+
+	/* initialize queue (user-space video input path) */
+	q = &dev->vbq;
+	memset(q, 0, sizeof(dev->vbq));
+	q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+	q->io_modes = VB2_MMAP;
+	q->drv_priv = dev;
+	q->buf_struct_size = sizeof(struct odif_buffer);
+	q->ops = &odif_video_qops;
+	q->mem_ops = &vb2_page_memops;
+
+	/* NOTE(review): vb2_queue_init() return value is ignored */
+	vb2_queue_init(q);
+
+	mutex_init(&dev->mutex);
+
+	/* init video dma queues */
+	atomic_set(&dev->odif_dq.complete_cnt, 0);
+	INIT_LIST_HEAD(&dev->odif_dq.active);
+	INIT_LIST_HEAD(&dev->odif_dq.complete);
+	init_waitqueue_head(&dev->odif_dq.wq);
+
+	ret = -ENOMEM;
+	vfd = video_device_alloc();
+	if (!vfd)
+		goto unreg_dev;
+
+	*vfd = odif_template;
+	vfd->debug = debug;
+	vfd->v4l2_dev = &dev->v4l2_dev;
+	set_bit(V4L2_FL_USE_FH_PRIO, &vfd->flags);
+
+	vmutex = kzalloc(sizeof(struct mutex), GFP_KERNEL);
+	if (!vmutex)
+		goto err_alloc_mutex;
+
+	mutex_init(vmutex);
+	/*
+	 * Provide a mutex to v4l2 core. It will be used to protect
+	 * all fops and v4l2 ioctls.
+	 */
+	vfd->lock = vmutex;
+
+	ret = video_register_device(vfd, VFL_TYPE_GRABBER, video_nr);
+	if (ret < 0)
+		goto rel_vdev;
+
+	/* NOTE(review): this update of the module-wide video_nr is not
+	 * serialized against concurrent instance creation */
+	if (video_nr != -1)
+		video_nr++;
+
+	dev->vfd = vfd;
+	video_set_drvdata(vfd, dev);
+
+	ret = odif_init_entities(dev);
+	if (ret)
+		goto unreg_video;
+
+	/* Now that everything is fine, let's add it to device list */
+	list_add_tail(&dev->odif_devlist, &odif_devlist);
+
+	v4l2_info(&dev->v4l2_dev, "V4L2 device registered as %s\n",
+		  video_device_node_name(vfd));
+
+	*odif_dev = dev;
+	return 0;
+
+/* NOTE(review): unreg_video may double-free -- video_unregister_device()
+ * can invoke odif_vdev_release(), which already frees vfd->lock and
+ * vfd, yet the fall-through frees them again; verify refcounting */
+unreg_video:
+	video_unregister_device(vfd);
+rel_vdev:
+	kfree(vmutex);
+err_alloc_mutex:
+	video_device_release(vfd);
+unreg_dev:
+	v4l2_device_unregister(&dev->v4l2_dev);
+free_dev:
+	kfree(dev);
+out:
+	pr_err("%s: error, ret=%d", __func__, ret);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(odif_create_instance);
+
+/* nothing to do at module load/unload: instances are created on demand
+ * by hw drivers through odif_create_instance() */
+static int __init odif_init(void)
+{
+	return 0;
+}
+
+static void __exit odif_exit(void)
+{
+}
+
+module_init(odif_init);
+module_exit(odif_exit);
+
+MODULE_DESCRIPTION("object detection interface function module")
+MODULE_AUTHOR("Ming Lei");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/odif/odif.h b/drivers/media/video/odif/odif.h
new file mode 100644
index 0000000..25c4788
--- /dev/null
+++ b/drivers/media/video/odif/odif.h
@@ -0,0 +1,157 @@
+#ifndef _LINUX_MEDIA_ODIF_H
+#define _LINUX_MEDIA_ODIF_H
+
+#include <linux/types.h>
+#include <linux/magic.h>
+#include <linux/errno.h>
+#include <linux/kref.h>
+#include <linux/kernel.h>
+#include <media/v4l2-common.h>
+#include <linux/videodev2.h>
+#include <media/media-entity.h>
+#include <media/videobuf2-page.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+
+/* upper bound on objects reported per frame */
+#define MAX_OBJ_COUNT		40
+
+/* object orientations (OBJ_DIR_LIFT: sic, presumably "left") */
+#define OBJ_DIR_UP		0
+#define OBJ_DIR_RIGHT		1
+#define OBJ_DIR_LIFT		2
+#define OBJ_DIR_DOWN		3
+
+
+/* frame source: user space buffers, or an in-SoC media hw path */
+#define	OD_INPUT_FROM_USER_SPACE	1
+#define	OD_INPUT_FROM_MEDIA_HW		2
+
+/* one pixel format supported by the hw driver's format table */
+struct odif_fmt {
+	char  *name;
+	u32   fourcc;          /* v4l2 format id */
+	u32   depth;           /* bits per pixel */
+	u32   width, height;
+};
+
+/* detection parameters currently programmed on the device */
+struct odif_setting {
+	struct odif_fmt            *fmt;	/* active pixel format */
+	enum v4l2_field            field;
+
+	/* minimize object size to be detected, unit: pixel */
+	u32 			min_obj_size;
+	u32			obj_dir;	/* OBJ_DIR_* orientation */
+
+	/* presumably the detection window inside the frame -- verify */
+	u32			startx, starty;
+	u32			sizex, sizey;
+
+	int			lhit;
+
+	u32			width, height;	/* frame size, pixels */
+};
+
+/* buffer for one video frame */
+struct odif_buffer {
+	/* common v4l buffer stuff -- must be first */
+	struct vb2_buffer	vb;
+	struct list_head	list;	/* entry in odif_dmaqueue.active */
+};
+
+
+/* detection outcome for one frame; objs[] holds obj_cnt entries */
+struct v4l2_odif_result {
+	struct list_head	list;	/* entry in odif_dmaqueue.complete */
+
+	/*frame sequence*/
+	u32			frm_seq;
+	u32			obj_cnt;
+	struct v4l2_od_object	objs[0];
+};
+
+/* queues connecting the hw driver (producer) and the ioctls (consumer) */
+struct odif_dmaqueue {
+	atomic_t		complete_cnt;	/* pending result count */
+	struct list_head	complete;	/* results awaiting readout */
+	struct list_head	active;		/* buffers queued for detection */
+	wait_queue_head_t	wq;		/* poll() waiters */
+};
+
+/* one node of the media graph: a v4l2 subdev plus its single pad */
+struct odif_entity {
+	/* Media controller-related fields. */
+	struct v4l2_subdev subdev;
+	unsigned int num_pads;
+	unsigned int num_links;
+
+	/*only one sink pad*/
+	struct media_pad pads[1];
+
+	/* TODO: odif controls */
+};
+
+/* one object detection device instance */
+struct odif_dev {
+	struct kref		ref;	/* lifetime; freed via odif_release() */
+	struct device		*dev;	/* parent device */
+
+	struct list_head        odif_devlist;	/* entry in global list */
+	struct v4l2_device	v4l2_dev;
+	struct vb2_queue        vbq;	/* buffers for user-space input */
+	struct mutex            mutex;	/* used by vb2 wait_prepare/finish */
+	spinlock_t		lock;	/* protects odif_dq lists */
+
+	/* media controller entity*/
+	/* [0] = detection hw (sink), [1] = user space video (source) */
+	struct odif_entity	entity[2];
+
+	struct video_device	*vfd;
+	struct odif_dmaqueue	odif_dq;
+	int	working;	/* non-zero between streamon and streamoff */
+
+	int	input;		/* OD_INPUT_FROM_USER_SPACE or _MEDIA_HW */
+
+	/* setting */
+	struct odif_setting	s;
+
+	struct odif_ops	*ops;	/* low level hw driver callbacks */
+
+	unsigned long	priv[0];	/* hw driver private area */
+};
+
+/**
+ * struct odif_ops - interface between odif and low level hw driver
+ * @capability:	extra capability except for V4L2_CAP_OBJ_DETECTION
+ * 	V4L2_CAP_STREAMING: mandatory, start/stop detect is triggered
+ * 		by streaming on/off
+ * 	V4L2_CAP_VIDEO_OUTPUT: hw can support to detect objects from
+ * 		user space video input
+ *	V4L2_CAP_VIDEO_CAPTURE: hw can support to detect objects from
+ *		internal bus, this doesn't mean capture is capable
+ *
+ * @table:	supported format table
+ * @fmt_cnt:	supported format count
+ * @start_detect:	start_detect callback
+ * @stop_detect:	stop_detect callback
+ * @submit_detect:	submit_detect callback
+ * @set_input:	change video input source
+ */
+struct odif_ops {
+	u32	capability;
+	struct odif_fmt *table;
+	int fmt_cnt;
+	int (*start_detect)(struct odif_dev *odif);
+	int (*stop_detect)(struct odif_dev *odif);
+	int (*submit_detect)(struct odif_dev *odif);
+	/* @remote is a pad of the new source; see odif_switch_link() */
+	int (*set_input)(struct odif_dev *odif, int next,
+			const struct media_pad *remote);
+};
+
+/* parenthesize the macro argument so any expression works as 'dev' */
+#define dprintk(dev, level, fmt, arg...) \
+	v4l2_dbg(level, debug, &(dev)->v4l2_dev, fmt, ## arg)
+
+
+/* interface exported to low level object detection hw drivers */
+extern int odif_create_instance(struct device *parent, int priv_size,
+		struct odif_ops *ops, struct odif_dev **dev);
+extern void odif_release(struct kref *ref);
+extern void odif_add_detection(struct odif_dev *dev,
+		struct v4l2_odif_result *v4l2_fr);
+extern struct v4l2_odif_result *odif_allocate_detection(
+		struct odif_dev *dev, int cnt);
+extern struct odif_buffer *odif_pick_up_buffer(struct odif_dev *dev);
+
+#endif /* _LINUX_MEDIA_ODIF_H */
-- 
1.7.5.4


^ permalink raw reply related	[flat|nested] 24+ messages in thread

* [RFC PATCH v2 8/8] media: video: introduce omap4 face detection module driver
  2011-12-14 14:00 [RFC PATCH v2 0/7] media: introduce object detection(OD) driver Ming Lei
                   ` (6 preceding siblings ...)
  2011-12-14 14:00 ` [RFC PATCH v2 7/8] media: video: introduce object detection driver module Ming Lei
@ 2011-12-14 14:00 ` Ming Lei
  7 siblings, 0 replies; 24+ messages in thread
From: Ming Lei @ 2011-12-14 14:00 UTC (permalink / raw)
  To: Mauro Carvalho Chehab, Tony Lindgren
  Cc: Sylwester Nawrocki, Alan Cox, linux-omap, linux-arm-kernel,
	linux-kernel, linux-media, Ming Lei

The patch introduces one face detection device driver for
driving face detection hardware on omap4[1].

Most things of the driver are dealing with omap4 face detection
hardware.

This driver is platform independent, so in theory it can
be used to drive same IP module on other platforms.

[1], Ch9 of OMAP4 Technical Reference Manual

Signed-off-by: Ming Lei <ming.lei@canonical.com>
---
v2:
	- based on odif module
	- use new object detection API
---
 drivers/media/video/odif/Kconfig      |    6 +
 drivers/media/video/odif/Makefile     |    1 +
 drivers/media/video/odif/fdif_omap4.c |  685 +++++++++++++++++++++++++++++++++
 3 files changed, 692 insertions(+), 0 deletions(-)
 create mode 100644 drivers/media/video/odif/fdif_omap4.c

diff --git a/drivers/media/video/odif/Kconfig b/drivers/media/video/odif/Kconfig
index 5090bd6..2a8c545 100644
--- a/drivers/media/video/odif/Kconfig
+++ b/drivers/media/video/odif/Kconfig
@@ -5,3 +5,9 @@ config ODIF
 	help
 	  The ODIF is a object detection module, which can be integrated into
 	  some SoCs to detect objects in images or video.
+
+config ODIF_OMAP4
+	depends on ODIF
+	tristate "OMAP4 Face Detection module"
+	help
+	  OMAP4 face detection support
diff --git a/drivers/media/video/odif/Makefile b/drivers/media/video/odif/Makefile
index a55ff66..0eb844f 100644
--- a/drivers/media/video/odif/Makefile
+++ b/drivers/media/video/odif/Makefile
@@ -1 +1,2 @@
 obj-$(CONFIG_ODIF)		+= odif.o
+obj-$(CONFIG_ODIF_OMAP4)	+= fdif_omap4.o
diff --git a/drivers/media/video/odif/fdif_omap4.c b/drivers/media/video/odif/fdif_omap4.c
new file mode 100644
index 0000000..d7953d8
--- /dev/null
+++ b/drivers/media/video/odif/fdif_omap4.c
@@ -0,0 +1,685 @@
+/*
+ *      fdif_omap4.c  --  face detection module driver
+ *
+ *      Copyright (C) 2011  Ming Lei (ming.lei@canonical.com)
+ *
+ *      This program is free software; you can redistribute it and/or modify
+ *      it under the terms of the GNU General Public License as published by
+ *      the Free Software Foundation; either version 2 of the License, or
+ *      (at your option) any later version.
+ *
+ *      This program is distributed in the hope that it will be useful,
+ *      but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *      MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *      GNU General Public License for more details.
+ *
+ *      You should have received a copy of the GNU General Public License
+ *      along with this program; if not, write to the Free Software
+ *      Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+/*****************************************************************************/
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/signal.h>
+#include <linux/wait.h>
+#include <linux/poll.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/delay.h>
+#include <linux/user_namespace.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include "odif.h"
+#include <asm/uaccess.h>
+#include <asm/byteorder.h>
+#include <asm/io.h>
+
+#undef DEBUG
+
+/* dimensions of the QVGA picture the detection engine operates on */
+#define PICT_SIZE_X     320
+#define PICT_SIZE_Y     240
+
+/* size of the scratch (work) area the hardware requires during a run */
+#define	WORK_MEM_SIZE	(52*1024)
+
+/* 9.5 FDIF Register Manual of TI OMAP4 TRM */
+#define FDIF_REVISION		0x0
+#define FDIF_HWINFO		0x4
+#define FDIF_SYSCONFIG		0x10
+#define SOFTRESET		(1 << 0)
+
+/* interrupt bank j = 2 (offset stride 0x10 per bank) */
+#define FDIF_IRQSTATUS_RAW_j	(0x24 + 2*0x10)
+#define FDIF_IRQSTATUS_j	(0x28 + 2*0x10)
+#define FDIF_IRQENABLE_SET_j	(0x2c + 2*0x10)
+#define FDIF_IRQENABLE_CLR_j	(0x30 + 2*0x10)
+#define FINISH_IRQ		(1 << 8)
+#define ERR_IRQ			(1 << 0)
+
+#define FDIF_PICADDR		0x60
+#define FDIF_CTRL		0x64
+#define CTRL_MAX_TAGS		0x0A
+
+#define FDIF_WKADDR		0x68
+#define FD_CTRL			0x80
+#define CTRL_FINISH		(1 << 2)
+#define CTRL_RUN		(1 << 1)
+#define CTRL_SRST		(1 << 0)
+
+
+#define FD_DNUM			0x84
+#define FD_DCOND		0x88
+#define FD_STARTX		0x8c
+#define FD_STARTY		0x90
+#define FD_SIZEX		0x94
+#define FD_SIZEY		0x98
+#define FD_LHIT			0x9c
+/* per-result registers; read_faces() indexes them with a 0x10 stride */
+#define FD_CENTERX_i		0x160
+#define FD_CENTERY_i		0x164
+#define FD_CONFSIZE_i		0x168
+#define FD_ANGLE_i		0x16c
+
+/* write a 32-bit FDIF register at byte offset @reg from @base */
+static inline void fd_writel(void __iomem *base, u32 reg, u32 val)
+{
+	__raw_writel(val, base + reg);
+}
+
+/* read a 32-bit FDIF register at byte offset @reg from @base */
+static inline u32 fd_readl(void __iomem *base, u32 reg)
+{
+	return __raw_readl(base + reg);
+}
+
+/* per-device state of the OMAP4 face detection engine */
+struct fdif_qvga {
+	struct odif_dev *dev;	/* odif core instance backing this device */
+
+	/*should be removed*/
+	struct platform_device	*pdev;
+	int			irq;
+	void __iomem		*base;	/* mapped FDIF register window */
+
+	void			*work_mem_addr;	/* hw work area, CPU address */
+	dma_addr_t 		work_dma;	/* hw work area, DMA address */
+	dma_addr_t 		pict_dma;	/* current picture, DMA address */
+	unsigned long 		pict_mem_len;	/* mapped length of the picture */
+
+	struct odif_buffer	*pending;	/* buffer currently owned by the hw */
+	spinlock_t		lock;		/* protects pending and hw start/stop */
+};
+
+/*
+ * The only format the engine accepts: 8-bit greyscale QVGA.
+ * Fix: made static -- this table is private to the driver and must not
+ * pollute the kernel's global namespace.
+ */
+static struct odif_fmt qvga_fmt[] = {
+	{
+		.name		= "8  Greyscale",
+		.fourcc		= V4L2_PIX_FMT_GREY,
+		.depth		= 8,
+		.width		= PICT_SIZE_X,
+		.height		= PICT_SIZE_Y,
+	},
+};
+
+
+#ifdef DEBUG
+/* debug helper: dump the current detection settings (DEBUG builds only) */
+static void dump_fdif_setting(struct fdif_qvga *fdif, const char *func)
+{
+	/* fix: give every printk an explicit log level */
+	printk(KERN_DEBUG "%s: %s\n", func, __func__);
+	printk(KERN_DEBUG "work mem addr:%8p\n", fdif->work_mem_addr);
+	printk(KERN_DEBUG "face size=%2d, face dir=%2d, lhit=%d\n",
+			fdif->dev->s.min_obj_size, fdif->dev->s.obj_dir,
+			fdif->dev->s.lhit);
+	printk(KERN_DEBUG "startx =%4d starty=%4d sizex=%4d sizey=%4d\n",
+			fdif->dev->s.startx, fdif->dev->s.starty,
+			fdif->dev->s.sizex, fdif->dev->s.sizey);
+}
+
+/*
+ * Debug helper: dump one detection result (DEBUG builds only).
+ *
+ * NOTE(review): this function still uses the v1 API field names
+ * (face_cnt, index, faces[], fr->face.*) while read_faces() fills the
+ * v2 layout (frm_seq, objs[], fr->o.face.f.*).  As written it cannot
+ * compile when DEBUG is defined -- confirm against odif.h and update
+ * to the v2 field names.  The printks also lack KERN_* levels.
+ */
+static void dump_fdif_results(struct v4l2_odif_result *fdif, const char *func)
+{
+	int idx;
+
+	printk("%s: %s\n", func, __func__);
+
+	printk("found %d faces, but index:%d\n", fdif->face_cnt,
+			fdif->index);
+	for(idx=0; idx < fdif->face_cnt; idx++) {
+		struct v4l2_fd_detection *fr = &fdif->faces[idx];
+		printk("	No.%d x=%3d y=%2d sz=%2d ang=%3d conf=%2d\n",
+				idx, fr->face.centerx, fr->face.centery,
+				fr->face.sizex, fr->face.angle,
+				fr->face.confidence);
+	}
+}
+
+/* debug helper: dump the key FDIF registers (DEBUG builds only) */
+static void dump_fdif_regs(struct fdif_qvga *fdif, const char *func)
+{
+	/* fix: give every printk an explicit log level */
+	printk(KERN_DEBUG "%s:%s\n", __func__, func);
+	printk(KERN_DEBUG "FDIF_CTRL=%08x FDIF_SYSCONFIG=%08x\n",
+			fd_readl(fdif->base, FDIF_CTRL),
+			fd_readl(fdif->base, FDIF_SYSCONFIG));
+	printk(KERN_DEBUG "FDIF_IRQSTATUS_RAW_j=%08x FDIF_IRQSTATUS_j=%08x\n",
+			fd_readl(fdif->base, FDIF_IRQSTATUS_RAW_j),
+			fd_readl(fdif->base, FDIF_IRQSTATUS_j));
+	printk(KERN_DEBUG "FDIF_PICADDR=%08x FDIF_WKADDR=%08x\n",
+			fd_readl(fdif->base, FDIF_PICADDR),
+			fd_readl(fdif->base, FDIF_WKADDR));
+	printk(KERN_DEBUG "FD_CTRL=%04x, FDIF_IRQENABLE_SET_j=%04x\n",
+			fd_readl(fdif->base, FD_CTRL),
+			fd_readl(fdif->base, FDIF_IRQENABLE_SET_j));
+}
+
+#else
+/* no-op stubs so callers need no #ifdef when DEBUG is not defined */
+static inline void dump_fdif_setting(struct fdif_qvga *fdif, const char *func)
+{
+}
+static inline void dump_fdif_results(struct v4l2_odif_result *fdif, const char *func)
+{
+}
+static inline void dump_fdif_regs(struct fdif_qvga *fdif, const char *func)
+{
+}
+#endif
+
+/**
+ * pixels2hw() - map a minimum face size in pixels to the FD_DCOND code
+ *
+ * 0x0: min face size 20 pixels
+ * 0x1: min face size 25 pixels
+ * 0x2: min face size 32 pixels
+ * 0x3: min face size 40 pixels
+ *
+ * Sizes between the supported steps round up to the next step; anything
+ * above 32 uses the 40-pixel code.  (Simplified from the original
+ * two-stage pixels->step->switch mapping; behavior is identical.)
+ */
+static int pixels2hw(u32 pixels)
+{
+	if (pixels <= 20)
+		return 0;
+	if (pixels <= 25)
+		return 1;
+	if (pixels <= 32)
+		return 2;
+	return 3;
+}
+
+
+/*
+ * Install the power-on defaults: full-frame QVGA search window,
+ * 25-pixel minimum face size, upward face direction.
+ */
+static void install_default_setting(struct fdif_qvga *fdif)
+{
+	fdif->dev->s.fmt		= &qvga_fmt[0];
+	fdif->dev->s.field		= V4L2_FIELD_NONE;
+
+	fdif->dev->s.min_obj_size	= 25;
+	fdif->dev->s.obj_dir		= OBJ_DIR_UP;
+	fdif->dev->s.startx		= 0;
+	fdif->dev->s.starty		= 0;
+	/* fix: 0x140/0xf0 were magic copies of the picture dimensions */
+	fdif->dev->s.sizex		= PICT_SIZE_X;
+	fdif->dev->s.sizey		= PICT_SIZE_Y;
+	fdif->dev->s.lhit		= 0x5;
+
+	fdif->dev->s.width		= PICT_SIZE_X;
+	fdif->dev->s.height		= PICT_SIZE_Y;
+}
+
+/*
+ * commit_image_setting() - program the per-frame detection parameters
+ *
+ * Maps the picture buffer for device DMA, then writes the picture
+ * address, detection condition (min face size, bits 1:0; face
+ * direction, bits 3:2) and the search window registers.
+ */
+static void commit_image_setting(struct fdif_qvga *fdif)
+{
+	unsigned long conf;
+	struct vb2_buffer *vb = &fdif->pending->vb;
+	void *pict_vaddr = vb2_plane_vaddr(vb, 0);
+
+	fdif->pict_mem_len = vb2_plane_size(vb, 0);
+	/* NOTE(review): dma_map_single() is not checked with
+	 * dma_mapping_error() -- worth fixing in a follow-up. */
+	fdif->pict_dma = dma_map_single(&fdif->pdev->dev,
+				pict_vaddr,
+				fdif->pict_mem_len,
+				DMA_TO_DEVICE);
+
+	fd_writel(fdif->base, FDIF_PICADDR, fdif->pict_dma);
+
+	/*
+	 * Fix: this used the logical '||', which collapsed the whole
+	 * expression to 0/1; FD_DCOND needs the bitwise OR of the two
+	 * bit-fields.
+	 */
+	conf = (pixels2hw(fdif->dev->s.min_obj_size) & 0x3) |
+		((fdif->dev->s.obj_dir & 0x3) << 2);
+	fd_writel(fdif->base, FD_DCOND, conf);
+
+	fd_writel(fdif->base, FD_STARTX, fdif->dev->s.startx);
+	fd_writel(fdif->base, FD_STARTY, fdif->dev->s.starty);
+	fd_writel(fdif->base, FD_SIZEX, fdif->dev->s.sizex);
+	fd_writel(fdif->base, FD_SIZEY, fdif->dev->s.sizey);
+	fd_writel(fdif->base, FD_LHIT, fdif->dev->s.lhit);
+}
+
+
+/*
+ * Soft-reset the FDIF module and poll for completion.
+ * Returns 0 on success, non-zero if the reset bit never clears.
+ *
+ * Fix: the original loop's post-increment left 'to' at 2001 on
+ * timeout, so the 'to == 2000' check never fired and a failed reset
+ * went unreported.
+ */
+static int softreset_fdif(struct fdif_qvga *fdif)
+{
+	unsigned long conf;
+	int to = 0;
+
+	conf = fd_readl(fdif->base, FDIF_SYSCONFIG);
+	conf |= SOFTRESET;
+	fd_writel(fdif->base, FDIF_SYSCONFIG, conf);
+
+	do {
+		conf = fd_readl(fdif->base, FDIF_SYSCONFIG);
+		if (!(conf & SOFTRESET))
+			return 0;
+		udelay(2);
+	} while (++to < 2000);
+
+	dev_err(&fdif->pdev->dev, "%s: reset failed\n", __func__);
+	return -ETIMEDOUT;
+}
+
+/*
+ * Kick one detection run for the pending buffer: program the frame
+ * parameters, enable the FINISH interrupt, then set the RUN bit.
+ * Called with fdif->lock held (see __submit_detection()).
+ */
+static void __start_detect(struct fdif_qvga *fdif)
+{
+	unsigned long conf;
+
+	dump_fdif_setting(fdif, __func__);
+
+	commit_image_setting(fdif);
+
+	/*enable finish irq*/
+	conf = FINISH_IRQ;
+	fd_writel(fdif->base, FDIF_IRQENABLE_SET_j, conf);
+
+	/*set RUN flag*/
+	conf = CTRL_RUN;
+	fd_writel(fdif->base, FD_CTRL, conf);
+
+	dump_fdif_regs(fdif, __func__);
+}
+
+/*
+ * Stop the current run: unmap the picture DMA, mask the FINISH
+ * interrupt and write the FINISH bit to FD_CTRL.
+ * Called with fdif->lock held (see read_faces() and stop_detect()).
+ */
+static void __stop_detect(struct fdif_qvga *fdif)
+{
+	unsigned long conf;
+
+	dump_fdif_regs(fdif, __func__);
+
+	dma_unmap_single(&fdif->pdev->dev, fdif->pict_dma,
+				fdif->pict_mem_len,
+				DMA_TO_DEVICE);
+	/*disable finish irq*/
+	conf = FINISH_IRQ;
+	fd_writel(fdif->base, FDIF_IRQENABLE_CLR_j, conf);
+
+	/*mark FINISH flag*/
+	conf = CTRL_FINISH;
+	fd_writel(fdif->base, FD_CTRL, conf);
+}
+
+/*
+ * read_faces() - harvest detection results from the hardware
+ * @is_err: non-zero when the run ended with ERR_IRQ (no valid results)
+ *
+ * Reads the per-face result registers into a freshly allocated
+ * v4l2_odif_result, queues it on the completed list, stops the hw and
+ * completes the pending vb2 buffer.  Returns the face count or a
+ * negative errno.
+ *
+ * Fix: on odif_allocate_detection() failure the original returned with
+ * fdif->pending still set and the buffer never completed, stalling the
+ * queue; now the run is stopped and the buffer finished with an error.
+ */
+static int read_faces(struct fdif_qvga *fdif, int is_err)
+{
+	int cnt, idx = 0;
+	struct v4l2_odif_result *v4l2_fr;
+	struct odif_dev *dev = fdif->dev;
+	struct odif_buffer *buf;
+	unsigned long flags;
+
+	spin_lock_irqsave(&fdif->lock, flags);
+
+	buf = fdif->pending;
+	if (!buf) {
+		WARN_ON(1);
+		spin_unlock_irqrestore(&fdif->lock, flags);
+		return -EIO;
+	}
+
+	buf->vb.v4l2_buf.sequence++;
+
+	/* the result-count register is meaningless after an error */
+	if (!is_err)
+		cnt = fd_readl(fdif->base, FD_DNUM) & 0x3f;
+	else
+		cnt = 0;
+
+	v4l2_fr = odif_allocate_detection(dev, cnt);
+	if (!v4l2_fr) {
+		__stop_detect(fdif);
+		fdif->pending = NULL;
+		spin_unlock_irqrestore(&fdif->lock, flags);
+		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+		wake_up(&dev->odif_dq.wq);
+		return -ENOMEM;
+	}
+
+	v4l2_fr->frm_seq = buf->vb.v4l2_buf.sequence;
+
+	while(idx < cnt) {
+		struct v4l2_od_object *fr = &v4l2_fr->objs[idx];
+
+		fr->type = V4L2_OD_TYPE_FACE;
+
+		fr->o.face.id = idx + 1;
+		fr->o.face.f.centerx = fd_readl(fdif->base,
+				FD_CENTERX_i + idx * 0x10) & 0x1ff;
+		fr->o.face.f.centery = fd_readl(fdif->base,
+				FD_CENTERY_i + idx * 0x10) & 0xff;
+		fr->o.face.f.angle = fd_readl(fdif->base,
+				FD_ANGLE_i + idx * 0x10) & 0x1ff;
+		fr->o.face.f.sizex = fd_readl(fdif->base,
+				FD_CONFSIZE_i + idx * 0x10);
+		/* FD_CONFSIZE packs confidence in bits 11:8, size in 7:0 */
+		fr->confidence = ((fr->o.face.f.sizex >> 8) & 0xf) * 10;
+		fr->o.face.f.sizey = fr->o.face.f.sizex = fr->o.face.f.sizex & 0xff;
+
+		idx++;
+	}
+
+	__stop_detect(fdif);
+	fdif->pending = NULL;
+	spin_unlock_irqrestore(&fdif->lock, flags);
+
+	dump_fdif_results(v4l2_fr, __func__);
+
+	/*queue the detection result to complete queue*/
+	odif_add_detection(dev, v4l2_fr);
+
+	if (is_err)
+		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+	else
+		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
+
+	wake_up(&dev->odif_dq.wq);
+
+	return cnt;
+}
+
+/*
+ * __submit_detection() - feed the next queued buffer to the hardware
+ *
+ * Returns 0 on success or when no buffer is queued, -EBUSY when a
+ * detection is already in flight.
+ *
+ * Fix: 'ret' was declared unsigned int while holding -EBUSY.
+ * NOTE(review): on -EBUSY the picked-up buffer appears to be dropped --
+ * confirm odif_pick_up_buffer() semantics with the odif core.
+ */
+static int __submit_detection(struct fdif_qvga *fdif)
+{
+	struct odif_dev *dev = fdif->dev;
+	struct odif_buffer *buf;
+	unsigned long flags;
+	int ret = 0;
+
+	buf = odif_pick_up_buffer(dev);
+	if (!buf)
+		return 0;
+
+	spin_lock_irqsave(&fdif->lock, flags);
+	if (fdif->pending) {
+		ret = -EBUSY;
+	} else {
+		fdif->pending = buf;
+		__start_detect(fdif);
+	}
+	spin_unlock_irqrestore(&fdif->lock, flags);
+
+	return ret;
+}
+
+/*
+ * FDIF interrupt handler.
+ *
+ * On FINISH or ERR: acknowledge the status bits, harvest results (zero
+ * faces on error), soft-reset the engine after an error, then submit
+ * the next queued buffer.
+ */
+static irqreturn_t handle_detection(int irq, void *__fdif)
+{
+	unsigned long irqsts;
+	struct fdif_qvga *fdif = __fdif;
+	irqreturn_t ret = IRQ_HANDLED;
+
+	/*read the pending irq status*/
+	irqsts = fd_readl(fdif->base, FDIF_IRQSTATUS_j);
+
+	if (irqsts & (FINISH_IRQ | ERR_IRQ)) {
+		int is_err = irqsts & ERR_IRQ;
+
+		/*write back to clear the handled bits*/
+		fd_writel(fdif->base, FDIF_IRQSTATUS_j, irqsts);
+
+		read_faces(fdif, is_err);
+		if (is_err)
+			softreset_fdif(fdif);
+
+		__submit_detection(fdif);
+	} else {
+		ret = IRQ_NONE;
+	}
+
+	return ret;
+}
+
+/*
+ * One-time hardware setup for a detection session: soft-reset the
+ * engine, program the tag budget, enable the error interrupt and map
+ * the work area for device DMA.
+ */
+static void fdif_global_init(struct fdif_qvga *fdif)
+{
+	unsigned long conf;
+	struct device *dev = &fdif->pdev->dev;
+
+	/*softreset fdif*/
+	softreset_fdif(fdif);
+
+	/*set max tags (bits 4:1 of FDIF_CTRL)*/
+	conf = fd_readl(fdif->base, FDIF_CTRL);
+	conf &= ~0x1e;
+	conf |= (CTRL_MAX_TAGS << 1);
+	fd_writel(fdif->base, FDIF_CTRL, conf);
+
+	/*enable error irq*/
+	conf = ERR_IRQ;
+	fd_writel(fdif->base, FDIF_IRQENABLE_SET_j, conf);
+
+	/* NOTE(review): dma_map_single() is not checked with
+	 * dma_mapping_error() -- worth fixing. */
+	fdif->work_dma = dma_map_single(dev,
+				fdif->work_mem_addr,
+		                WORK_MEM_SIZE,
+				DMA_TO_DEVICE);
+	fd_writel(fdif->base, FDIF_WKADDR, fdif->work_dma);
+}
+
+/* undo fdif_global_init(): mask the error irq, unmap the work area */
+static void fdif_global_deinit(struct fdif_qvga *fdif)
+{
+	unsigned long conf;
+	struct device *dev = &fdif->pdev->dev;
+
+	/*disable error irq (comment fixed: this is the CLR register)*/
+	conf = ERR_IRQ;
+	fd_writel(fdif->base, FDIF_IRQENABLE_CLR_j, conf);
+
+	dma_unmap_single(dev, fdif->work_dma,
+			WORK_MEM_SIZE, DMA_TO_DEVICE);
+}
+
+
+/*
+ * odif_ops.start_detect: take a runtime-PM reference, initialize the
+ * hardware and push the first queued buffer.
+ */
+static int start_detect(struct odif_dev *dev)
+{
+	struct fdif_qvga *fdif = dev_get_drvdata(dev->dev);
+
+	pm_runtime_get_sync(dev->dev);
+	fdif_global_init(fdif);
+
+	return  __submit_detection(fdif);
+}
+
+/*
+ * odif_ops.stop_detect: abort any in-flight run, complete the pending
+ * buffer as an error, quiesce the hardware and drop the PM reference.
+ */
+static int stop_detect(struct odif_dev *dev)
+{
+	struct fdif_qvga *fdif = dev_get_drvdata(dev->dev);
+	unsigned long flags, irqsts;
+	struct odif_buffer	*buf;
+
+	spin_lock_irqsave(&fdif->lock, flags);
+
+	/*stop current transfer first*/
+	__stop_detect(fdif);
+
+	buf = fdif->pending;
+	fdif->pending = NULL;
+
+	/*clear irq status in case that it is set*/
+	irqsts = fd_readl(fdif->base, FDIF_IRQSTATUS_j);
+	fd_writel(fdif->base, FDIF_IRQSTATUS_j, irqsts);
+
+	spin_unlock_irqrestore(&fdif->lock, flags);
+
+	/* complete the buffer outside the spinlock */
+	if (buf)
+		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+
+	fdif_global_deinit(fdif);
+	pm_runtime_put(dev->dev);
+	return 0;
+}
+
+/*
+ * odif_ops.submit_detect: try to start the next queued buffer.
+ *
+ * NOTE(review): __submit_detection()'s -EBUSY return is dropped here --
+ * presumably a run already in flight picks the buffer up from the irq
+ * handler; confirm with the odif core whether errors should propagate.
+ */
+static int submit_detect(struct odif_dev *dev)
+{
+	struct fdif_qvga *fdif = dev_get_drvdata(dev->dev);
+
+	__submit_detection(fdif);
+
+	return 0;
+}
+
+/*
+ * Callbacks and capabilities exported to the odif core.
+ * Fix: made static -- private to this driver.
+ */
+static struct odif_ops qvga_ops = {
+	.capability = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING,
+	.table	= qvga_fmt,
+	.fmt_cnt = 1,
+	.start_detect = start_detect,
+	.stop_detect = stop_detect,
+	.submit_detect = submit_detect,
+};
+
+/*
+ * fdif_probe() - probe the OMAP4 FDIF platform device
+ *
+ * Creates an odif core instance, maps the register window, installs
+ * the interrupt handler, allocates the contiguous hardware work area
+ * and installs the default detection settings.
+ */
+static int fdif_probe(struct platform_device *pdev)
+{
+	struct device	*dev = &pdev->dev;
+	struct fdif_qvga *fdif;
+	struct odif_dev *fdev;
+	struct resource *res;
+	int ret, order;
+
+	ret = odif_create_instance(dev, sizeof(struct fdif_qvga),
+			&qvga_ops, &fdev);
+	if (ret) {
+		dev_err(dev, "odif_create_instance failed:%d\n", ret);
+		goto end_probe;
+	}
+
+	fdif = (struct fdif_qvga *)fdev->priv;
+	fdif->dev = fdev;
+
+	spin_lock_init(&fdif->lock);
+	fdif->pdev = pdev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev, "fdif get resource failed\n");
+		ret = -ENODEV;
+		goto err_iomap;
+	}
+
+	fdif->base = ioremap(res->start, resource_size(res));
+	if (!fdif->base) {
+		dev_err(dev, "fdif ioremap failed\n");
+		ret = -ENOMEM;
+		goto err_iomap;
+	}
+
+	fdif->irq = platform_get_irq(pdev, 0);
+	if (fdif->irq < 0) {
+		dev_err(dev, "fdif get irq failed\n");
+		/* fix: propagate the real error instead of -ENODEV */
+		ret = fdif->irq;
+		goto err_get_irq;
+	}
+
+	ret = request_irq(fdif->irq, handle_detection, 0, "fdif-qvga",
+			fdif);
+	if (ret) {
+		dev_err(dev, "request_irq failed:%d\n", ret);
+		goto err_get_irq;
+	}
+
+	/* the hw work area must be physically contiguous */
+	order = get_order(WORK_MEM_SIZE);
+	fdif->work_mem_addr = (void *)__get_free_pages(GFP_KERNEL, order);
+	if (!fdif->work_mem_addr) {
+		dev_err(dev, "fdif buffer allocation(%d) failed\n", order);
+		ret = -ENOMEM;
+		goto err_work_mem;
+	}
+
+	install_default_setting(fdif);
+
+	platform_set_drvdata(pdev, fdif);
+
+	pm_suspend_ignore_children(dev, true);
+	pm_runtime_get_sync(dev);
+	dev_info(dev, "fdif version=%8x hwinfo=%08x\n",
+			fd_readl(fdif->base, FDIF_REVISION),
+			fd_readl(fdif->base, FDIF_HWINFO));
+	pm_runtime_put(dev);
+
+	return 0;
+
+err_work_mem:
+	free_irq(fdif->irq, fdif);
+err_get_irq:
+	iounmap(fdif->base);
+err_iomap:
+	kref_put(&fdif->dev->ref, odif_release);
+end_probe:
+	return ret;
+}
+
+/* tear down everything fdif_probe() set up, in reverse order */
+static int fdif_remove(struct platform_device *pdev)
+{
+	struct fdif_qvga *fdif = platform_get_drvdata(pdev);
+	int order;
+
+	platform_set_drvdata(pdev, NULL);
+
+	free_irq(fdif->irq, fdif);
+
+	order = get_order(WORK_MEM_SIZE);
+	free_pages((unsigned long)fdif->work_mem_addr, order);
+
+	iounmap(fdif->base);
+
+	/* drops the odif core's reference; frees fdif when it hits zero */
+	kref_put(&fdif->dev->ref, odif_release);
+
+	return 0;
+}
+
+/*
+ * Empty legacy PM hooks.
+ * NOTE(review): presumably detection is expected to be restarted from
+ * scratch after resume (see start_detect()); confirm nothing needs to
+ * be saved/restored here.
+ */
+static int fdif_suspend(struct platform_device *pdev, pm_message_t msg)
+{
+	return 0;
+}
+
+static int fdif_resume(struct platform_device *pdev)
+{
+	return 0;
+}
+
+/* platform device id table; matches the "omap-fdif" device */
+static struct platform_device_id odif_device_ids[] = {
+	{.name = "omap-fdif"},
+	{},
+};
+
+/*
+ * Platform driver glue.
+ * Fix: made static -- registered/unregistered only from this file.
+ */
+static struct platform_driver fdif_driver = {
+	.probe =	fdif_probe,
+	.remove =	fdif_remove,
+	.suspend =	fdif_suspend,
+	.resume =	fdif_resume,
+	.driver = {
+		.name  =	"omap-fdif",
+		.owner =	THIS_MODULE,
+	},
+	.id_table =	odif_device_ids,
+};
+
+/* module entry point: register the platform driver */
+static int __init omap4_fdif_init(void)
+{
+	int ret = platform_driver_register(&fdif_driver);
+
+	if (ret)
+		printk(KERN_ERR "Unable to register fdif driver\n");
+
+	return ret;
+}
+
+/*
+ * Module exit point.
+ * Fix: annotated __exit so the function can be discarded when the
+ * driver is built in (it is only referenced via module_exit()).
+ */
+static void __exit omap4_fdif_cleanup(void)
+{
+	platform_driver_unregister(&fdif_driver);
+}
+
+module_init(omap4_fdif_init);
+module_exit(omap4_fdif_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:omap-fdif");
+MODULE_AUTHOR("Ming Lei");
-- 
1.7.5.4


^ permalink raw reply related	[flat|nested] 24+ messages in thread

* Re: [RFC PATCH v2 1/8] omap4: introduce fdif(face detect module) hwmod
  2011-12-14 14:00 ` [RFC PATCH v2 1/8] omap4: introduce fdif(face detect module) hwmod Ming Lei
@ 2011-12-16  5:53   ` Paul Walmsley
  2011-12-16 14:54     ` Cousson, Benoit
  0 siblings, 1 reply; 24+ messages in thread
From: Paul Walmsley @ 2011-12-16  5:53 UTC (permalink / raw)
  To: b-cousson
  Cc: Ming Lei, Mauro Carvalho Chehab, Tony Lindgren,
	Sylwester Nawrocki, Alan Cox, linux-omap, linux-arm-kernel,
	linux-kernel, linux-media

[-- Attachment #1: Type: TEXT/PLAIN, Size: 4317 bytes --]

Hi Benoît

On Wed, 14 Dec 2011, Ming Lei wrote:

> Signed-off-by: Ming Lei <ming.lei@canonical.com>
> ---
>  arch/arm/mach-omap2/omap_hwmod_44xx_data.c |   81 ++++++++++++++++++++++++++++
>  1 files changed, 81 insertions(+), 0 deletions(-)

any comments on this patch?  I'd like to queue it if it looks good to you.

- Paul

> 
> diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
> index 6cf21ee..30db754 100644
> --- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
> +++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
> @@ -53,6 +53,7 @@ static struct omap_hwmod omap44xx_dmm_hwmod;
>  static struct omap_hwmod omap44xx_dsp_hwmod;
>  static struct omap_hwmod omap44xx_dss_hwmod;
>  static struct omap_hwmod omap44xx_emif_fw_hwmod;
> +static struct omap_hwmod omap44xx_fdif_hwmod;
>  static struct omap_hwmod omap44xx_hsi_hwmod;
>  static struct omap_hwmod omap44xx_ipu_hwmod;
>  static struct omap_hwmod omap44xx_iss_hwmod;
> @@ -354,6 +355,14 @@ static struct omap_hwmod_ocp_if omap44xx_dma_system__l3_main_2 = {
>  	.user		= OCP_USER_MPU | OCP_USER_SDMA,
>  };
>  
> +/* fdif -> l3_main_2 */
> +static struct omap_hwmod_ocp_if omap44xx_fdif__l3_main_2 = {
> +	.master		= &omap44xx_fdif_hwmod,
> +	.slave		= &omap44xx_l3_main_2_hwmod,
> +	.clk		= "l3_div_ck",
> +	.user		= OCP_USER_MPU | OCP_USER_SDMA,
> +};
> +
>  /* hsi -> l3_main_2 */
>  static struct omap_hwmod_ocp_if omap44xx_hsi__l3_main_2 = {
>  	.master		= &omap44xx_hsi_hwmod,
> @@ -5444,6 +5453,75 @@ static struct omap_hwmod omap44xx_wd_timer3_hwmod = {
>  	.slaves_cnt	= ARRAY_SIZE(omap44xx_wd_timer3_slaves),
>  };
>  
> +/* 'fdif' class */
> +static struct omap_hwmod_class_sysconfig omap44xx_fdif_sysc = {
> +	.rev_offs	= 0x0000,
> +	.sysc_offs	= 0x0010,
> +	.sysc_flags	= (SYSC_HAS_MIDLEMODE | SYSC_HAS_RESET_STATUS |
> +			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
> +	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
> +			   MSTANDBY_FORCE | MSTANDBY_NO |
> +			   MSTANDBY_SMART),
> +	.sysc_fields	= &omap_hwmod_sysc_type2,
> +};
> +
> +static struct omap_hwmod_class omap44xx_fdif_hwmod_class = {
> +	.name	= "fdif",
> +	.sysc	= &omap44xx_fdif_sysc,
> +};
> +
> +/*fdif*/
> +static struct omap_hwmod_addr_space omap44xx_fdif_addrs[] = {
> +	{
> +		.pa_start	= 0x4a10a000,
> +		.pa_end		= 0x4a10afff,
> +		.flags		= ADDR_TYPE_RT
> +	},
> +	{ }
> +};
> +
> +/* l4_cfg -> fdif */
> +static struct omap_hwmod_ocp_if omap44xx_l4_cfg__fdif = {
> +	.master		= &omap44xx_l4_cfg_hwmod,
> +	.slave		= &omap44xx_fdif_hwmod,
> +	.clk		= "l4_div_ck",
> +	.addr		= omap44xx_fdif_addrs,
> +	.user		= OCP_USER_MPU | OCP_USER_SDMA,
> +};
> +
> +/* fdif slave ports */
> +static struct omap_hwmod_ocp_if *omap44xx_fdif_slaves[] = {
> +	&omap44xx_l4_cfg__fdif,
> +};
> +static struct omap_hwmod_irq_info omap44xx_fdif_irqs[] = {
> +	{ .irq = 69 + OMAP44XX_IRQ_GIC_START },
> +	{ .irq = -1 }
> +};
> +
> +/* fdif master ports */
> +static struct omap_hwmod_ocp_if *omap44xx_fdif_masters[] = {
> +	&omap44xx_fdif__l3_main_2,
> +};
> +
> +static struct omap_hwmod omap44xx_fdif_hwmod = {
> +	.name		= "fdif",
> +	.class		= &omap44xx_fdif_hwmod_class,
> +	.clkdm_name	= "iss_clkdm",
> +	.mpu_irqs	= omap44xx_fdif_irqs,
> +	.main_clk	= "fdif_fck",
> +	.prcm = {
> +		.omap4 = {
> +			.clkctrl_offs = OMAP4_CM_CAM_FDIF_CLKCTRL_OFFSET,
> +			.context_offs = OMAP4_RM_CAM_FDIF_CONTEXT_OFFSET,
> +			.modulemode   = MODULEMODE_SWCTRL,
> +		},
> +	},
> +	.slaves		= omap44xx_fdif_slaves,
> +	.slaves_cnt	= ARRAY_SIZE(omap44xx_fdif_slaves),
> +	.masters	= omap44xx_fdif_masters,
> +	.masters_cnt	= ARRAY_SIZE(omap44xx_fdif_masters),
> +};
> +
>  static __initdata struct omap_hwmod *omap44xx_hwmods[] = {
>  
>  	/* dmm class */
> @@ -5593,6 +5671,9 @@ static __initdata struct omap_hwmod *omap44xx_hwmods[] = {
>  	&omap44xx_wd_timer2_hwmod,
>  	&omap44xx_wd_timer3_hwmod,
>  
> +	/* fdif class */
> +	&omap44xx_fdif_hwmod,
> +
>  	NULL,
>  };
>  
> -- 
> 1.7.5.4
> 
> --
> To unsubscribe from this list: send the line "unsubscribe linux-omap" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html
> 


- Paul

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [RFC PATCH v2 1/8] omap4: introduce fdif(face detect module) hwmod
  2011-12-16  5:53   ` Paul Walmsley
@ 2011-12-16 14:54     ` Cousson, Benoit
  2011-12-19 21:48       ` Paul Walmsley
  0 siblings, 1 reply; 24+ messages in thread
From: Cousson, Benoit @ 2011-12-16 14:54 UTC (permalink / raw)
  To: Paul Walmsley
  Cc: Ming Lei, Mauro Carvalho Chehab, Tony Lindgren,
	Sylwester Nawrocki, Alan Cox, linux-omap, linux-arm-kernel,
	linux-kernel, linux-media

Hi Paul,

On 12/16/2011 6:53 AM, Paul Walmsley wrote:
> Hi Benoît
>
> On Wed, 14 Dec 2011, Ming Lei wrote:
>
>> Signed-off-by: Ming Lei<ming.lei@canonical.com>

Acked-by: Benoit Cousson <b-cousson@ti.com>

>> ---
>>   arch/arm/mach-omap2/omap_hwmod_44xx_data.c |   81 ++++++++++++++++++++++++++++
>>   1 files changed, 81 insertions(+), 0 deletions(-)
>
> any comments on this patch?  I'd like to queue it if it looks good to you.

It looks good to me. The only minor comment is about fdif location in 
the list that should be sorted and thus cannot be after wd_timer2.

Regards,
Benoit

>
> - Paul
>
>>
>> diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
>> index 6cf21ee..30db754 100644
>> --- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
>> +++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
>> @@ -53,6 +53,7 @@ static struct omap_hwmod omap44xx_dmm_hwmod;
>>   static struct omap_hwmod omap44xx_dsp_hwmod;
>>   static struct omap_hwmod omap44xx_dss_hwmod;
>>   static struct omap_hwmod omap44xx_emif_fw_hwmod;
>> +static struct omap_hwmod omap44xx_fdif_hwmod;
>>   static struct omap_hwmod omap44xx_hsi_hwmod;
>>   static struct omap_hwmod omap44xx_ipu_hwmod;
>>   static struct omap_hwmod omap44xx_iss_hwmod;
>> @@ -354,6 +355,14 @@ static struct omap_hwmod_ocp_if omap44xx_dma_system__l3_main_2 = {
>>   	.user		= OCP_USER_MPU | OCP_USER_SDMA,
>>   };
>>
>> +/* fdif ->  l3_main_2 */
>> +static struct omap_hwmod_ocp_if omap44xx_fdif__l3_main_2 = {
>> +	.master		=&omap44xx_fdif_hwmod,
>> +	.slave		=&omap44xx_l3_main_2_hwmod,
>> +	.clk		= "l3_div_ck",
>> +	.user		= OCP_USER_MPU | OCP_USER_SDMA,
>> +};
>> +
>>   /* hsi ->  l3_main_2 */
>>   static struct omap_hwmod_ocp_if omap44xx_hsi__l3_main_2 = {
>>   	.master		=&omap44xx_hsi_hwmod,
>> @@ -5444,6 +5453,75 @@ static struct omap_hwmod omap44xx_wd_timer3_hwmod = {
>>   	.slaves_cnt	= ARRAY_SIZE(omap44xx_wd_timer3_slaves),
>>   };
>>
>> +/* 'fdif' class */
>> +static struct omap_hwmod_class_sysconfig omap44xx_fdif_sysc = {
>> +	.rev_offs	= 0x0000,
>> +	.sysc_offs	= 0x0010,
>> +	.sysc_flags	= (SYSC_HAS_MIDLEMODE | SYSC_HAS_RESET_STATUS |
>> +			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
>> +	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
>> +			   MSTANDBY_FORCE | MSTANDBY_NO |
>> +			   MSTANDBY_SMART),
>> +	.sysc_fields	=&omap_hwmod_sysc_type2,
>> +};
>> +
>> +static struct omap_hwmod_class omap44xx_fdif_hwmod_class = {
>> +	.name	= "fdif",
>> +	.sysc	=&omap44xx_fdif_sysc,
>> +};
>> +
>> +/*fdif*/
>> +static struct omap_hwmod_addr_space omap44xx_fdif_addrs[] = {
>> +	{
>> +		.pa_start	= 0x4a10a000,
>> +		.pa_end		= 0x4a10afff,
>> +		.flags		= ADDR_TYPE_RT
>> +	},
>> +	{ }
>> +};
>> +
>> +/* l4_cfg ->  fdif */
>> +static struct omap_hwmod_ocp_if omap44xx_l4_cfg__fdif = {
>> +	.master		=&omap44xx_l4_cfg_hwmod,
>> +	.slave		=&omap44xx_fdif_hwmod,
>> +	.clk		= "l4_div_ck",
>> +	.addr		= omap44xx_fdif_addrs,
>> +	.user		= OCP_USER_MPU | OCP_USER_SDMA,
>> +};
>> +
>> +/* fdif slave ports */
>> +static struct omap_hwmod_ocp_if *omap44xx_fdif_slaves[] = {
>> +	&omap44xx_l4_cfg__fdif,
>> +};
>> +static struct omap_hwmod_irq_info omap44xx_fdif_irqs[] = {
>> +	{ .irq = 69 + OMAP44XX_IRQ_GIC_START },
>> +	{ .irq = -1 }
>> +};
>> +
>> +/* fdif master ports */
>> +static struct omap_hwmod_ocp_if *omap44xx_fdif_masters[] = {
>> +	&omap44xx_fdif__l3_main_2,
>> +};
>> +
>> +static struct omap_hwmod omap44xx_fdif_hwmod = {
>> +	.name		= "fdif",
>> +	.class		=&omap44xx_fdif_hwmod_class,
>> +	.clkdm_name	= "iss_clkdm",
>> +	.mpu_irqs	= omap44xx_fdif_irqs,
>> +	.main_clk	= "fdif_fck",
>> +	.prcm = {
>> +		.omap4 = {
>> +			.clkctrl_offs = OMAP4_CM_CAM_FDIF_CLKCTRL_OFFSET,
>> +			.context_offs = OMAP4_RM_CAM_FDIF_CONTEXT_OFFSET,
>> +			.modulemode   = MODULEMODE_SWCTRL,
>> +		},
>> +	},
>> +	.slaves		= omap44xx_fdif_slaves,
>> +	.slaves_cnt	= ARRAY_SIZE(omap44xx_fdif_slaves),
>> +	.masters	= omap44xx_fdif_masters,
>> +	.masters_cnt	= ARRAY_SIZE(omap44xx_fdif_masters),
>> +};
>> +
>>   static __initdata struct omap_hwmod *omap44xx_hwmods[] = {
>>
>>   	/* dmm class */
>> @@ -5593,6 +5671,9 @@ static __initdata struct omap_hwmod *omap44xx_hwmods[] = {
>>   	&omap44xx_wd_timer2_hwmod,
>>   	&omap44xx_wd_timer3_hwmod,
>>
>> +	/* fdif class */
>> +	&omap44xx_fdif_hwmod,
>> +
>>   	NULL,
>>   };
>>
>> --
>> 1.7.5.4
>>
>> --
>> To unsubscribe from this list: send the line "unsubscribe linux-omap" in
>> the body of a message to majordomo@vger.kernel.org
>> More majordomo info at  http://vger.kernel.org/majordomo-info.html
>>
>
>
> - Paul


^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [RFC PATCH v2 1/8] omap4: introduce fdif(face detect module) hwmod
  2011-12-16 14:54     ` Cousson, Benoit
@ 2011-12-19 21:48       ` Paul Walmsley
  0 siblings, 0 replies; 24+ messages in thread
From: Paul Walmsley @ 2011-12-19 21:48 UTC (permalink / raw)
  To: Cousson, Benoit
  Cc: Ming Lei, Mauro Carvalho Chehab, Tony Lindgren,
	Sylwester Nawrocki, Alan Cox, linux-omap, linux-arm-kernel,
	linux-kernel, linux-media

[-- Attachment #1: Type: TEXT/PLAIN, Size: 4954 bytes --]

Hi Benoît,

On Fri, 16 Dec 2011, Cousson, Benoit wrote:

> On 12/16/2011 6:53 AM, Paul Walmsley wrote:
> > Hi Benoît
> > 
> > On Wed, 14 Dec 2011, Ming Lei wrote:
> > 
> > > Signed-off-by: Ming Lei<ming.lei@canonical.com>
> 
> Acked-by: Benoit Cousson <b-cousson@ti.com>
> 
> > > ---
> > >   arch/arm/mach-omap2/omap_hwmod_44xx_data.c |   81
> > > ++++++++++++++++++++++++++++
> > >   1 files changed, 81 insertions(+), 0 deletions(-)
> > 
> > any comments on this patch?  I'd like to queue it if it looks good to you.
> 
> It looks good to me. The only minor comment is about fdif location in the list
> that should be sorted and thus cannot be after wd_timer2.

Thanks, patch updated to match the script output and queued.  Modified 
patch follows.


- Paul

From: Ming Lei <ming.lei@canonical.com>
Date: Mon, 19 Dec 2011 14:34:06 -0700
Subject: [PATCH] ARM: OMAP4: hwmod data: introduce fdif(face detect module)
 hwmod

Add hwmod data for the OMAP4 FDIF IP block.

Signed-off-by: Ming Lei <ming.lei@canonical.com>
Acked-by: Benoît Cousson <b-cousson@ti.com>
[paul@pwsan.com: rearranged to match script output; fixed FDIF end address to
 match script data; wrote trivial changelog]
Signed-off-by: Paul Walmsley <paul@pwsan.com>
---
 arch/arm/mach-omap2/omap_hwmod_44xx_data.c |   85 ++++++++++++++++++++++++++++
 1 files changed, 85 insertions(+), 0 deletions(-)

diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index daaf165..3ac4bf6 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -53,6 +53,7 @@ static struct omap_hwmod omap44xx_dmm_hwmod;
 static struct omap_hwmod omap44xx_dsp_hwmod;
 static struct omap_hwmod omap44xx_dss_hwmod;
 static struct omap_hwmod omap44xx_emif_fw_hwmod;
+static struct omap_hwmod omap44xx_fdif_hwmod;
 static struct omap_hwmod omap44xx_hsi_hwmod;
 static struct omap_hwmod omap44xx_ipu_hwmod;
 static struct omap_hwmod omap44xx_iss_hwmod;
@@ -346,6 +347,14 @@ static struct omap_hwmod_ocp_if omap44xx_dma_system__l3_main_2 = {
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
+/* fdif -> l3_main_2 */
+static struct omap_hwmod_ocp_if omap44xx_fdif__l3_main_2 = {
+	.master		= &omap44xx_fdif_hwmod,
+	.slave		= &omap44xx_l3_main_2_hwmod,
+	.clk		= "l3_div_ck",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
 /* hsi -> l3_main_2 */
 static struct omap_hwmod_ocp_if omap44xx_hsi__l3_main_2 = {
 	.master		= &omap44xx_hsi_hwmod,
@@ -1797,6 +1806,79 @@ static struct omap_hwmod omap44xx_dss_venc_hwmod = {
 };
 
 /*
+ * 'fdif' class
+ * face detection hw accelerator module
+ */
+
+static struct omap_hwmod_class_sysconfig omap44xx_fdif_sysc = {
+	.rev_offs	= 0x0000,
+	.sysc_offs	= 0x0010,
+	.sysc_flags	= (SYSC_HAS_MIDLEMODE | SYSC_HAS_RESET_STATUS |
+			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+			   MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
+	.sysc_fields	= &omap_hwmod_sysc_type2,
+};
+
+static struct omap_hwmod_class omap44xx_fdif_hwmod_class = {
+	.name	= "fdif",
+	.sysc	= &omap44xx_fdif_sysc,
+};
+
+/* fdif */
+static struct omap_hwmod_irq_info omap44xx_fdif_irqs[] = {
+	{ .irq = 69 + OMAP44XX_IRQ_GIC_START },
+	{ .irq = -1 }
+};
+
+/* fdif master ports */
+static struct omap_hwmod_ocp_if *omap44xx_fdif_masters[] = {
+	&omap44xx_fdif__l3_main_2,
+};
+
+static struct omap_hwmod_addr_space omap44xx_fdif_addrs[] = {
+	{
+		.pa_start	= 0x4a10a000,
+		.pa_end		= 0x4a10a1ff,
+		.flags		= ADDR_TYPE_RT
+	},
+	{ }
+};
+
+/* l4_cfg -> fdif */
+static struct omap_hwmod_ocp_if omap44xx_l4_cfg__fdif = {
+	.master		= &omap44xx_l4_cfg_hwmod,
+	.slave		= &omap44xx_fdif_hwmod,
+	.clk		= "l4_div_ck",
+	.addr		= omap44xx_fdif_addrs,
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* fdif slave ports */
+static struct omap_hwmod_ocp_if *omap44xx_fdif_slaves[] = {
+	&omap44xx_l4_cfg__fdif,
+};
+
+static struct omap_hwmod omap44xx_fdif_hwmod = {
+	.name		= "fdif",
+	.class		= &omap44xx_fdif_hwmod_class,
+	.clkdm_name	= "iss_clkdm",
+	.mpu_irqs	= omap44xx_fdif_irqs,
+	.main_clk	= "fdif_fck",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = OMAP4_CM_CAM_FDIF_CLKCTRL_OFFSET,
+			.context_offs = OMAP4_RM_CAM_FDIF_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+	.slaves		= omap44xx_fdif_slaves,
+	.slaves_cnt	= ARRAY_SIZE(omap44xx_fdif_slaves),
+	.masters	= omap44xx_fdif_masters,
+	.masters_cnt	= ARRAY_SIZE(omap44xx_fdif_masters),
+};
+
+/*
  * 'gpio' class
  * general purpose io module
  */
@@ -5327,6 +5409,9 @@ static __initdata struct omap_hwmod *omap44xx_hwmods[] = {
 	&omap44xx_dss_rfbi_hwmod,
 	&omap44xx_dss_venc_hwmod,
 
+	/* fdif class */
+	&omap44xx_fdif_hwmod,
+
 	/* gpio class */
 	&omap44xx_gpio1_hwmod,
 	&omap44xx_gpio2_hwmod,
-- 
1.7.7.3

^ permalink raw reply related	[flat|nested] 24+ messages in thread

* RE: [RFC PATCH v2 4/8] media: videobuf2: introduce VIDEOBUF2_PAGE memops
  2011-12-14 14:00 ` [RFC PATCH v2 4/8] media: videobuf2: introduce VIDEOBUF2_PAGE memops Ming Lei
@ 2011-12-22  9:28   ` Marek Szyprowski
  2011-12-23  9:22     ` Ming Lei
  0 siblings, 1 reply; 24+ messages in thread
From: Marek Szyprowski @ 2011-12-22  9:28 UTC (permalink / raw)
  To: 'Ming Lei', 'Mauro Carvalho Chehab',
	'Tony Lindgren'
  Cc: 'Sylwester Nawrocki', 'Alan Cox',
	linux-omap, linux-arm-kernel, linux-kernel, linux-media,
	'Pawel Osciak'

Hello,

On Wednesday, December 14, 2011 3:00 PM Ming Lei wrote:

> DMA contig memory resource is very limited and precious, also
> accessing to it from CPU is very slow on some platform.
> 
> For some cases(such as the comming face detection driver), DMA Streaming
> buffer is enough, so introduce VIDEOBUF2_PAGE to allocate continuous
> physical memory but letting video device driver to handle DMA buffer mapping
> and unmapping things.
> 
> Signed-off-by: Ming Lei <ming.lei@canonical.com>

Could you elaborate a bit why do you think that DMA contig memory resource
is so limited? If dma_alloc_coherent fails because of the memory fragmentation,
the alloc_pages() call with order > 0 will also fail.

I understand that there might be some speed issues with coherent (uncached)
userspace mappings, but I would solve it in completely different way. The interface
for both coherent/uncached and non-coherent/cached contig allocator should be the
same, so exchanging them is easy and will not require changes in the driver.
I'm planning to introduce some design changes in memory allocator api and introduce
prepare and finish callbacks in allocator ops. I hope to post the rfc after
Christmas. For your face detection driver using standard dma-contig allocator
shouldn't be a big issue.

Your current implementation also abuses the design and api of videobuf2 memory
allocators. If the allocator needs to return a custom structure to the driver
you should use cookie method. vaddr is intended to provide only a pointer to
kernel virtual mapping, but you pass a struct page * there.

(snipped)

Best regards
-- 
Marek Szyprowski
Samsung Poland R&D Center




^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [RFC PATCH v2 4/8] media: videobuf2: introduce VIDEOBUF2_PAGE memops
  2011-12-22  9:28   ` Marek Szyprowski
@ 2011-12-23  9:22     ` Ming Lei
  2011-12-23  9:34       ` Marek Szyprowski
  0 siblings, 1 reply; 24+ messages in thread
From: Ming Lei @ 2011-12-23  9:22 UTC (permalink / raw)
  To: Marek Szyprowski
  Cc: Mauro Carvalho Chehab, Tony Lindgren, Sylwester Nawrocki,
	Alan Cox, linux-arm-kernel, linux-kernel, linux-media,
	Pawel Osciak

On Thu, Dec 22, 2011 at 5:28 PM, Marek Szyprowski
<m.szyprowski@samsung.com> wrote:
> Hello,
>
> On Wednesday, December 14, 2011 3:00 PM Ming Lei wrote:
>
>> DMA contig memory resource is very limited and precious, also
>> accessing to it from CPU is very slow on some platform.
>>
>> For some cases(such as the comming face detection driver), DMA Streaming
>> buffer is enough, so introduce VIDEOBUF2_PAGE to allocate continuous
>> physical memory but letting video device driver to handle DMA buffer mapping
>> and unmapping things.
>>
>> Signed-off-by: Ming Lei <ming.lei@canonical.com>
>
> Could you elaborate a bit why do you think that DMA contig memory resource
> is so limited? If dma_alloc_coherent fails because of the memory fragmentation,
> the alloc_pages() call with order > 0 will also fail.

For example, on ARM, there is very limited kernel virtual address space reserved
for DMA coherent buffer mapping; the default size is about 2M, if I
remember correctly.

>
> I understand that there might be some speed issues with coherent (uncached)
> userspace mappings, but I would solve it in completely different way. The interface

Also there is poor performance inside kernel space, see [1]

> for both coherent/uncached and non-coherent/cached contig allocator should be the
> same, so exchanging them is easy and will not require changes in the driver.
> I'm planning to introduce some design changes in memory allocator api and introduce
> prepare and finish callbacks in allocator ops. I hope to post the rfc after
> Christmas. For your face detection driver using standard dma-contig allocator
> shouldn't be a big issue.
>
> Your current implementation also abuses the design and api of videobuf2 memory
> allocators. If the allocator needs to return a custom structure to the driver

I think returning vaddr is enough.

> you should use cookie method. vaddr is intended to provide only a pointer to
> kernel virtual mapping, but you pass a struct page * there.

No, __get_free_pages returns virtual address instead of 'struct page *'.


thanks,
--
Ming Lei

[1], http://marc.info/?t=131198148500001&r=1&w=2

^ permalink raw reply	[flat|nested] 24+ messages in thread

* RE: [RFC PATCH v2 4/8] media: videobuf2: introduce VIDEOBUF2_PAGE memops
  2011-12-23  9:22     ` Ming Lei
@ 2011-12-23  9:34       ` Marek Szyprowski
  2011-12-23  9:51         ` Ming Lei
  0 siblings, 1 reply; 24+ messages in thread
From: Marek Szyprowski @ 2011-12-23  9:34 UTC (permalink / raw)
  To: 'Ming Lei'
  Cc: 'Mauro Carvalho Chehab', 'Tony Lindgren',
	'Sylwester Nawrocki', 'Alan Cox',
	linux-arm-kernel, linux-kernel, linux-media,
	'Pawel Osciak'

Hello,

On Friday, December 23, 2011 10:22 AM Ming Lei wrote:

> On Thu, Dec 22, 2011 at 5:28 PM, Marek Szyprowski
> <m.szyprowski@samsung.com> wrote:
> >> DMA contig memory resource is very limited and precious, also
> >> accessing to it from CPU is very slow on some platform.
> >>
> >> For some cases(such as the comming face detection driver), DMA Streaming
> >> buffer is enough, so introduce VIDEOBUF2_PAGE to allocate continuous
> >> physical memory but letting video device driver to handle DMA buffer mapping
> >> and unmapping things.
> >>
> >> Signed-off-by: Ming Lei <ming.lei@canonical.com>
> >
> > Could you elaborate a bit why do you think that DMA contig memory resource
> > is so limited? If dma_alloc_coherent fails because of the memory fragmentation,
> > the alloc_pages() call with order > 0 will also fail.
> 
> For example, on ARM, there is very limited kernel virtual address space reserved
> for DMA coherent buffer mapping, the default size is about 2M if I
> don't remember mistakenly.

It can be easily increased for particular boards, there is no problem with this.

> > I understand that there might be some speed issues with coherent (uncached)
> > userspace mappings, but I would solve it in completely different way. The interface
> 
> Also there is poor performance inside kernel space, see [1]

Your driver doesn't access video data inside kernel space, so this is also not an issue.
 
> > for both coherent/uncached and non-coherent/cached contig allocator should be the
> > same, so exchanging them is easy and will not require changes in the driver.
> > I'm planning to introduce some design changes in memory allocator api and introduce
> > prepare and finish callbacks in allocator ops. I hope to post the rfc after
> > Christmas. For your face detection driver using standard dma-contig allocator
> > shouldn't be a big issue.
> >
> > Your current implementation also abuses the design and api of videobuf2 memory
> > allocators. If the allocator needs to return a custom structure to the driver
> 
> I think returning vaddr is enough.
> 
> > you should use cookie method. vaddr is intended to provide only a pointer to
> > kernel virtual mapping, but you pass a struct page * there.
> 
> No, __get_free_pages returns virtual address instead of 'struct page *'.

Then you MUST use cookie for it. vaddr method should return kernel virtual address 
to the buffer video data. Some parts of videobuf2 relies on this - it is used by file
io emulator (read(), write() calls) and mmap equivalent for non-mmu systems.

Manual casting in the driver is also a bad idea, that's why there are helper functions
defined for both dma_contig and dma_sg allocators: vb2_dma_contig_plane_dma_addr() and
vb2_dma_sg_plane_desc().

Best regards
-- 
Marek Szyprowski
Samsung Poland R&D Center



^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [RFC PATCH v2 4/8] media: videobuf2: introduce VIDEOBUF2_PAGE memops
  2011-12-23  9:34       ` Marek Szyprowski
@ 2011-12-23  9:51         ` Ming Lei
  2011-12-23 10:38           ` Marek Szyprowski
  0 siblings, 1 reply; 24+ messages in thread
From: Ming Lei @ 2011-12-23  9:51 UTC (permalink / raw)
  To: Marek Szyprowski
  Cc: Mauro Carvalho Chehab, Tony Lindgren, Sylwester Nawrocki,
	Alan Cox, linux-arm-kernel, linux-kernel, linux-media,
	Pawel Osciak

Hi,

On Fri, Dec 23, 2011 at 5:34 PM, Marek Szyprowski
<m.szyprowski@samsung.com> wrote:

>> For example, on ARM, there is very limited kernel virtual address space reserved
>> for DMA coherent buffer mapping, the default size is about 2M if I
>> don't remember mistakenly.
>
> It can be easily increased for particular boards, there is no problem with this.

It is not easy to increase it because there is very limited space reserved for
this purpose, see Documentation/arm/memory.txt. Also it looks like it is
not configurable.

>
>> > I understand that there might be some speed issues with coherent (uncached)
>> > userspace mappings, but I would solve it in completely different way. The interface
>>
>> Also there is poor performance inside kernel space, see [1]
>
> Your driver doesn't access video data inside kernel space, so this is also not an issue.

Why not introduce it so that other drivers (including face detection) can
benefit from it? :-)

>> >
>> > Your current implementation also abuses the design and api of videobuf2 memory
>> > allocators. If the allocator needs to return a custom structure to the driver
>>
>> I think returning vaddr is enough.
>>
>> > you should use cookie method. vaddr is intended to provide only a pointer to
>> > kernel virtual mapping, but you pass a struct page * there.
>>
>> No, __get_free_pages returns virtual address instead of 'struct page *'.
>
> Then you MUST use cookie for it. vaddr method should return kernel virtual address
> to the buffer video data. Some parts of videobuf2 relies on this - it is used by file
> io emulator (read(), write() calls) and mmap equivalent for non-mmu systems.
>
> Manual casting in the driver is also a bad idea, that's why there are helper functions

I don't see any casts are needed. The dma address can be got from vaddr with
dma_map_* easily in drivers, see the usage on patch 8/8(media: video: introduce
omap4 face detection module driver).

> defined for both dma_contig and dma_sg allocators: vb2_dma_contig_plane_dma_addr() and
> vb2_dma_sg_plane_desc().

These two helpers are not needed and won't be provided by VIDEOBUF2_PAGE memops.

thanks,
--
Ming Lei

^ permalink raw reply	[flat|nested] 24+ messages in thread

* RE: [RFC PATCH v2 4/8] media: videobuf2: introduce VIDEOBUF2_PAGE memops
  2011-12-23  9:51         ` Ming Lei
@ 2011-12-23 10:38           ` Marek Szyprowski
  2011-12-23 12:20             ` Ming Lei
  0 siblings, 1 reply; 24+ messages in thread
From: Marek Szyprowski @ 2011-12-23 10:38 UTC (permalink / raw)
  To: 'Ming Lei'
  Cc: 'Mauro Carvalho Chehab', 'Tony Lindgren',
	'Sylwester Nawrocki', 'Alan Cox',
	linux-arm-kernel, linux-kernel, linux-media,
	'Pawel Osciak'

Hello,

On Friday, December 23, 2011 10:51 AM Ming Lei wrote:

> On Fri, Dec 23, 2011 at 5:34 PM, Marek Szyprowski
> <m.szyprowski@samsung.com> wrote:
> 
> >> For example, on ARM, there is very limited kernel virtual address space reserved
> >> for DMA coherent buffer mapping, the default size is about 2M if I
> >> don't remember mistakenly.
> >
> > It can be easily increased for particular boards, there is no problem with this.
> 
> It is not easily to increase it because there is very limited space reserved for
> this purpose, see Documentation/arm/memory.txt. Also looks like it is
> not configurable.

It is really not a big issue to increase it by a few MBytes.
 
> >> > I understand that there might be some speed issues with coherent (uncached)
> >> > userspace mappings, but I would solve it in completely different way. The interface
> >>
> >> Also there is poor performance inside kernel space, see [1]
> >
> > Your driver doesn't access video data inside kernel space, so this is also not an issue.
> 
> Why not introduce it so that other drivers(include face detection) can
> benefit with it? :-)

We can get back into this once a driver which really benefits from comes.
 
> >> >
> >> > Your current implementation also abuses the design and api of videobuf2 memory
> >> > allocators. If the allocator needs to return a custom structure to the driver
> >>
> >> I think returning vaddr is enough.
> >>
> >> > you should use cookie method. vaddr is intended to provide only a pointer to
> >> > kernel virtual mapping, but you pass a struct page * there.
> >>
> >> No, __get_free_pages returns virtual address instead of 'struct page *'.
> >
> > Then you MUST use cookie for it. vaddr method should return kernel virtual address
> > to the buffer video data. Some parts of videobuf2 relies on this - it is used by file
> > io emulator (read(), write() calls) and mmap equivalent for non-mmu systems.
> >
> > Manual casting in the driver is also a bad idea, that's why there are helper functions
> 
> I don't see any casts are needed. The dma address can be got from vaddr with
> dma_map_* easily in drivers, see the usage on patch 8/8(media: video: introduce
> omap4 face detection module driver).

Sorry, but I won't accept such driver/allocator which abuses the defined API. I've already
pointed what vaddr method is used for.

> > defined for both dma_contig and dma_sg allocators: vb2_dma_contig_plane_dma_addr() and
> > vb2_dma_sg_plane_desc().
> 
> These two helpers are not needed and won't be provided by VIDEOBUF2_PAGE memops.

I gave the example. Your allocator should have something similar.

Best regards
-- 
Marek Szyprowski
Samsung Poland R&D Center




^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [RFC PATCH v2 4/8] media: videobuf2: introduce VIDEOBUF2_PAGE memops
  2011-12-23 10:38           ` Marek Szyprowski
@ 2011-12-23 12:20             ` Ming Lei
  2012-01-10 10:20               ` Marek Szyprowski
  0 siblings, 1 reply; 24+ messages in thread
From: Ming Lei @ 2011-12-23 12:20 UTC (permalink / raw)
  To: Marek Szyprowski
  Cc: Mauro Carvalho Chehab, Tony Lindgren, Sylwester Nawrocki,
	Alan Cox, linux-arm-kernel, linux-kernel, linux-media,
	Pawel Osciak

Hi,

On Fri, Dec 23, 2011 at 6:38 PM, Marek Szyprowski
<m.szyprowski@samsung.com> wrote:
> Hello,
>
> On Friday, December 23, 2011 10:51 AM Ming Lei wrote:
>
>> On Fri, Dec 23, 2011 at 5:34 PM, Marek Szyprowski
>> <m.szyprowski@samsung.com> wrote:
>>
>> >> For example, on ARM, there is very limited kernel virtual address space reserved
>> >> for DMA coherent buffer mapping, the default size is about 2M if I
>> >> don't remember mistakenly.
>> >
>> > It can be easily increased for particular boards, there is no problem with this.
>>
>> It is not easily to increase it because there is very limited space reserved for
>> this purpose, see Documentation/arm/memory.txt. Also looks like it is
>> not configurable.
>
> It is really not a big issue to increase it by a few MBytes.

IMO, the extra few MBytes are not helpful for the issue at all, considering the
kernel virtual memory address fragmentation problem; you know there are only
a few MBytes of DMA coherent virtual address space on ARM.

>
>> >> > I understand that there might be some speed issues with coherent (uncached)
>> >> > userspace mappings, but I would solve it in completely different way. The interface
>> >>
>> >> Also there is poor performance inside kernel space, see [1]
>> >
>> > Your driver doesn't access video data inside kernel space, so this is also not an issue.
>>
>> Why not introduce it so that other drivers(include face detection) can
>> benefit with it? :-)
>
> We can get back into this once a driver which really benefits from comes.

In fact, streaming DMA is more widely used than coherent DMA in the
kernel, so why can't videobuf2 support streaming DMA? :-)

You know, in some situations a coherent DMA buffer is much slower to
access from the CPU than other kinds of buffers.

>
>> >> >
>> >> > Your current implementation also abuses the design and api of videobuf2 memory
>> >> > allocators. If the allocator needs to return a custom structure to the driver
>> >>
>> >> I think returning vaddr is enough.
>> >>
>> >> > you should use cookie method. vaddr is intended to provide only a pointer to
>> >> > kernel virtual mapping, but you pass a struct page * there.
>> >>
>> >> No, __get_free_pages returns virtual address instead of 'struct page *'.
>> >
>> > Then you MUST use cookie for it. vaddr method should return kernel virtual address
>> > to the buffer video data. Some parts of videobuf2 relies on this - it is used by file
>> > io emulator (read(), write() calls) and mmap equivalent for non-mmu systems.
>> >
>> > Manual casting in the driver is also a bad idea, that's why there are helper functions
>>
>> I don't see any casts are needed. The dma address can be got from vaddr with
>> dma_map_* easily in drivers, see the usage on patch 8/8(media: video: introduce
>> omap4 face detection module driver).
>
> Sorry, but I won't accept such driver/allocator which abuses the defined API. I've already
> pointed what vaddr method is used for.

Sorry, could you describe the abuse problem in a bit detail?

>
>> > defined for both dma_contig and dma_sg allocators: vb2_dma_contig_plane_dma_addr() and
>> > vb2_dma_sg_plane_desc().
>>
>> These two helpers are not needed and won't be provided by VIDEOBUF2_PAGE memops.
>
> I gave the example. Your allocator should have something similar.

I don't think the two helpers are required for the VIDEOBUF2_PAGE memops; why
can't the driver handle DMA mapping directly?


thanks,
--
Ming Lei

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [RFC PATCH v2 7/8] media: video: introduce object detection driver module
  2011-12-14 14:00 ` [RFC PATCH v2 7/8] media: video: introduce object detection driver module Ming Lei
@ 2011-12-29 17:16   ` Sylwester Nawrocki
  2012-01-04  8:13     ` Ming Lei
  0 siblings, 1 reply; 24+ messages in thread
From: Sylwester Nawrocki @ 2011-12-29 17:16 UTC (permalink / raw)
  To: Ming Lei
  Cc: Mauro Carvalho Chehab, Tony Lindgren, Alan Cox, linux-omap,
	linux-arm-kernel, linux-kernel, linux-media

Hi Ming,

On 12/14/2011 03:00 PM, Ming Lei wrote:
> This patch introduces object detection generic driver.
> 
> The driver is responsible for all v4l2 stuff, buffer management
> and other general things, and doesn't touch object detection hardware
> directly. Several interfaces are exported to low level drivers
> (such as the coming omap4 FD driver) which will communicate with
> object detection hw module.
> 
> So the driver will make driving object detection hw modules more
> easy.
> TODO:
> 	- implement object detection setting interfaces with v4l2
> 	controls or ext controls
> 
> Signed-off-by: Ming Lei <ming.lei@canonical.com>
> ---
> v2:
> 	- extend face detection driver to object detection driver
> 	- introduce subdevice and media entity
> 	- provide support to detect object from media HW
> ---
>  drivers/media/video/Kconfig       |    2 +
>  drivers/media/video/Makefile      |    1 +
>  drivers/media/video/odif/Kconfig  |    7 +
>  drivers/media/video/odif/Makefile |    1 +
>  drivers/media/video/odif/odif.c   |  890 +++++++++++++++++++++++++++++++++++++
>  drivers/media/video/odif/odif.h   |  157 +++++++
>  6 files changed, 1058 insertions(+), 0 deletions(-)
>  create mode 100644 drivers/media/video/odif/Kconfig
>  create mode 100644 drivers/media/video/odif/Makefile
>  create mode 100644 drivers/media/video/odif/odif.c
>  create mode 100644 drivers/media/video/odif/odif.h
> 
> diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
> index 5684a00..8740ee9 100644
> --- a/drivers/media/video/Kconfig
> +++ b/drivers/media/video/Kconfig
> @@ -1166,3 +1166,5 @@ config VIDEO_SAMSUNG_S5P_MFC
>  	    MFC 5.1 driver for V4L2.
>  
>  endif # V4L_MEM2MEM_DRIVERS
> +
> +source "drivers/media/video/odif/Kconfig"
> diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
> index bc797f2..259c8d8 100644
> --- a/drivers/media/video/Makefile
> +++ b/drivers/media/video/Makefile
> @@ -197,6 +197,7 @@ obj-$(CONFIG_VIDEO_IR_I2C)  += ir-kbd-i2c.o
>  obj-y	+= davinci/
>  
>  obj-$(CONFIG_ARCH_OMAP)	+= omap/
> +obj-$(CONFIG_ODIF)	+= odif/
>  
>  ccflags-y += -Idrivers/media/dvb/dvb-core
>  ccflags-y += -Idrivers/media/dvb/frontends
> diff --git a/drivers/media/video/odif/Kconfig b/drivers/media/video/odif/Kconfig
> new file mode 100644
> index 0000000..5090bd6
> --- /dev/null
> +++ b/drivers/media/video/odif/Kconfig
> @@ -0,0 +1,7 @@
> +config ODIF
> +	depends on VIDEO_DEV && VIDEO_V4L2
> +	select VIDEOBUF2_PAGE
> +	tristate "Object Detection module"
> +	help
> +	  The ODIF is a object detection module, which can be integrated into
> +	  some SoCs to detect objects in images or video.
> diff --git a/drivers/media/video/odif/Makefile b/drivers/media/video/odif/Makefile
> new file mode 100644
> index 0000000..a55ff66
> --- /dev/null
> +++ b/drivers/media/video/odif/Makefile
> @@ -0,0 +1 @@
> +obj-$(CONFIG_ODIF)		+= odif.o
> diff --git a/drivers/media/video/odif/odif.c b/drivers/media/video/odif/odif.c
> new file mode 100644
> index 0000000..381ab9d
> --- /dev/null
> +++ b/drivers/media/video/odif/odif.c
> @@ -0,0 +1,890 @@
> +/*
> + *      odif.c  --  object detection module driver
> + *
> + *      Copyright (C) 2011  Ming Lei (ming.lei@canonical.com)
> + *
> + *	This file is based on drivers/media/video/vivi.c.
> + *
> + *      This program is free software; you can redistribute it and/or modify
> + *      it under the terms of the GNU General Public License as published by
> + *      the Free Software Foundation; either version 2 of the License, or
> + *      (at your option) any later version.
> + *
> + *      This program is distributed in the hope that it will be useful,
> + *      but WITHOUT ANY WARRANTY; without even the implied warranty of
> + *      MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
> + *      GNU General Public License for more details.
> + *
> + *      You should have received a copy of the GNU General Public License
> + *      along with this program; if not, write to the Free Software
> + *      Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
> + *
> + */
> +
> +/*****************************************************************************/
> +
> +#include <linux/module.h>
> +#include <linux/fs.h>
> +#include <linux/mm.h>
> +#include <linux/signal.h>
> +#include <linux/wait.h>
> +#include <linux/poll.h>
> +#include <linux/mman.h>
> +#include <linux/pm_runtime.h>
> +#include <linux/delay.h>
> +#include <linux/platform_device.h>
> +#include <linux/interrupt.h>
> +#include <asm/uaccess.h>
> +#include <asm/byteorder.h>
> +#include <asm/io.h>
> +#include "odif.h"
> +
> +#define	input_from_user(dev) \
> +	(dev->input == OD_INPUT_FROM_USER_SPACE)
> +
> +#define	DEFAULT_PENDING_RESULT_CNT	8
> +
> +static unsigned debug = 0;
> +module_param(debug, uint, 0644);
> +MODULE_PARM_DESC(debug, "activates debug info");
> +
> +static unsigned result_cnt_threshold = DEFAULT_PENDING_RESULT_CNT;
> +module_param(result_cnt_threshold, uint, 0644);
> +MODULE_PARM_DESC(result_cnt, "max pending result count");
> +
> +static LIST_HEAD(odif_devlist);
> +static unsigned video_nr = -1;
> +
> +int odif_open(struct file *file)
> +{
> +	struct odif_dev *dev = video_drvdata(file);
> +
> +	kref_get(&dev->ref);
> +	return v4l2_fh_open(file);
> +}

Individual drivers may need to perform some initialization when
device node is opened. So I consider taking this possibility away
from them not a good idea.

> +
> +static unsigned int
> +odif_poll(struct file *file, struct poll_table_struct *wait)
> +{
> +	struct odif_dev *dev = video_drvdata(file);
> +	unsigned int mask = 0;
> +	unsigned long flags;
> +
> +	poll_wait(file, &dev->odif_dq.wq, wait);
> +
> +	spin_lock_irqsave(&dev->lock, flags);
> +	if ((file->f_mode & FMODE_READ) &&
> +		!list_empty(&dev->odif_dq.complete))
> +		mask |= POLLIN | POLLWRNORM;
> +	spin_unlock_irqrestore(&dev->lock, flags);
> +	return mask;
> +}
> +
> +static int odif_close(struct file *file)
> +{
> +	struct video_device  *vdev = video_devdata(file);
> +	struct odif_dev *dev = video_drvdata(file);
> +	int ret;
> +
> +	dprintk(dev, 1, "close called (dev=%s), file %p\n",
> +		video_device_node_name(vdev), file);
> +
> +	if (v4l2_fh_is_singular_file(file))
> +		vb2_queue_release(&dev->vbq);
> +
> +	ret = v4l2_fh_release(file);
> +	kref_put(&dev->ref, odif_release);
> +
> +	return ret;
> +}

Same comments as for open().

> +
> +static int odif_mmap(struct file *file, struct vm_area_struct *vma)
> +{
> +	struct odif_dev *dev = video_drvdata(file);
> +	int ret;
> +
> +	dprintk(dev, 1, "mmap called, vma=0x%08lx\n", (unsigned long)vma);
> +
> +	if (!input_from_user(dev))
> +		return -EBUSY;
> +
> +	ret = vb2_mmap(&dev->vbq, vma);
> +	dprintk(dev, 1, "vma start=0x%08lx, size=%ld, ret=%d\n",
> +		(unsigned long)vma->vm_start,
> +		(unsigned long)vma->vm_end - (unsigned long)vma->vm_start,
> +		ret);
> +	return ret;
> +}
> +
> +static const struct v4l2_file_operations odif_fops = {
> +	.owner		= THIS_MODULE,
> +	.open           = odif_open,
> +	.release        = odif_close,
> +	.poll		= odif_poll,
> +	.unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */
> +	.mmap           = odif_mmap,
> +};
> +
> +/* ------------------------------------------------------------------
> +	IOCTL vidioc handling
> +   ------------------------------------------------------------------*/
> +static void free_result(struct v4l2_odif_result *result)
> +{
> +	kfree(result);
> +}
> +
> +static int odif_start_detect(struct odif_dev *dev)
> +{
> +	int ret;
> +
> +	dprintk(dev, 1, "%s\n", __func__);
> +
> +	ret = dev->ops->start_detect(dev);
> +
> +	dprintk(dev, 1, "returning from %s, ret is %d\n",
> +			__func__, ret);
> +	return ret;
> +}
> +
> +static void odif_stop_detect(struct odif_dev *dev)
> +{
> +	struct odif_dmaqueue *dma_q = &dev->odif_dq;
> +	unsigned long flags;
> +
> +	dprintk(dev, 1, "%s\n", __func__);
> +
> +	/*stop hardware first*/
> +	dev->ops->stop_detect(dev);
> +
> +	spin_lock_irqsave(&dev->lock, flags);
> +	/* Release all active buffers */
> +	while (!list_empty(&dma_q->active)) {
> +		struct odif_buffer *buf;
> +		buf = list_entry(dma_q->active.next, struct odif_buffer, list);
> +		list_del(&buf->list);
> +		spin_unlock_irqrestore(&dev->lock, flags);
> +		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
> +		dprintk(dev, 2, "[%p/%d] done\n", buf, buf->vb.v4l2_buf.index);
> +		spin_lock_irqsave(&dev->lock, flags);
> +	}
> +
> +	/* Release all complete detect result, so user space __must__ read
> +	 * the results before stream off*/
> +	while (!list_empty(&dma_q->complete)) {
> +		struct v4l2_odif_result *result;
> +		result = list_entry(dma_q->complete.next, struct v4l2_odif_result, list);
> +		list_del(&result->list);
> +		spin_unlock_irqrestore(&dev->lock, flags);
> +
> +		free_result(result);
> +		dprintk(dev, 2, "[frm_seq:%d] result removed\n", result->frm_seq);
> +		spin_lock_irqsave(&dev->lock, flags);
> +	}
> +	atomic_set(&dma_q->complete_cnt, 0);
> +
> +	spin_unlock_irqrestore(&dev->lock, flags);
> +}
> +static int vidioc_querycap(struct file *file, void  *priv,
> +					struct v4l2_capability *cap)
> +{
> +	struct odif_dev *dev = video_drvdata(file);
> +
> +	strcpy(cap->driver, "odif");
> +	strcpy(cap->card, "odif");
> +	strlcpy(cap->bus_info, dev->v4l2_dev.name, sizeof(cap->bus_info));
> +	cap->capabilities = dev->ops->capability | V4L2_CAP_OBJ_DETECTION;

The reported capabilities are wrong, please see below.

> +
> +	return 0;
> +}
> +
> +static int vidioc_enum_fmt_vid_out(struct file *file, void  *priv,
> +					struct v4l2_fmtdesc *f)
> +{
> +	struct odif_fmt *fmt;
> +	struct odif_dev *dev = video_drvdata(file);
> +
> +	if (f->index >= dev->ops->fmt_cnt) {
> +		return -EINVAL;
> +	}

Single statement shouldn't have brackets around it.

> +
> +	fmt = &dev->ops->table[f->index];
> +
> +	strlcpy(f->description, fmt->name, sizeof(f->description));
> +	f->pixelformat = fmt->fourcc;
> +	return 0;
> +}
> +
> +static int vidioc_g_fmt_vid_out(struct file *file, void *priv,
> +					struct v4l2_format *f)
> +{
> +	struct odif_dev *dev = video_drvdata(file);
> +
> +	f->fmt.pix.width        = dev->s.width;
> +	f->fmt.pix.height       = dev->s.height;
> +	f->fmt.pix.field        = dev->s.field;
> +	f->fmt.pix.pixelformat  = dev->s.fmt->fourcc;
> +	f->fmt.pix.bytesperline =
> +		(f->fmt.pix.width * dev->s.fmt->depth) >> 3;
> +	f->fmt.pix.sizeimage =
> +		f->fmt.pix.height * f->fmt.pix.bytesperline;
> +	return 0;
> +}
> +
> +static int vidioc_reqbufs(struct file *file, void *priv,
> +			  struct v4l2_requestbuffers *p)
> +{
> +	struct odif_dev *dev = video_drvdata(file);
> +	dprintk(dev, 1, "%s\n", __func__);
> +
> +	if (!input_from_user(dev))
> +		return -EBUSY;
> +
> +	return vb2_reqbufs(&dev->vbq, p);
> +}
> +
> +static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
> +{
> +	struct odif_dev *dev = video_drvdata(file);
> +	dprintk(dev, 1, "%s\n", __func__);
> +
> +	if (!input_from_user(dev))
> +		return -EBUSY;
> +
> +	return vb2_querybuf(&dev->vbq, p);
> +}
> +
> +static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
> +{
> +	struct odif_dev *dev = video_drvdata(file);
> +	dprintk(dev, 1, "%s\n", __func__);
> +
> +	if (!input_from_user(dev))
> +		return -EBUSY;
> +
> +	return vb2_qbuf(&dev->vbq, p);
> +}
> +
> +static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
> +{
> +	struct odif_dev *dev = video_drvdata(file);
> +	dprintk(dev, 1, "%s\n", __func__);
> +
> +	if (!input_from_user(dev))
> +		return -EBUSY;
> +
> +	return vb2_dqbuf(&dev->vbq, p, file->f_flags & O_NONBLOCK);
> +}
> +
> +static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
> +{
> +	struct odif_dev *dev = video_drvdata(file);
> +	dprintk(dev, 1, "%s\n", __func__);
> +
> +	dev->working = 1;
> +
> +	if (input_from_user(dev))
> +		return vb2_streamon(&dev->vbq, i);
> +
> +	return odif_start_detect(dev);
> +}
> +
> +static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
> +{
> +	struct odif_dev *dev = video_drvdata(file);
> +	dprintk(dev, 1, "%s\n", __func__);
> +
> +	dev->working = 0;
> +
> +	if (input_from_user(dev))
> +		return vb2_streamoff(&dev->vbq, i);
> +
> +	odif_stop_detect(dev);
> +
> +	return 0;
> +}
> +
> +static int vidioc_g_od_count(struct file *file, void *priv,
> +					struct v4l2_od_count *f)
> +{
> +	struct odif_dev *dev = video_drvdata(file);
> +	unsigned long flags;
> +	struct v4l2_odif_result *tmp;
> +	int ret = -EINVAL;
> +
> +	spin_lock_irqsave(&dev->lock, flags);
> +	list_for_each_entry(tmp, &dev->odif_dq.complete, list)
> +		if (tmp->frm_seq == f->frm_seq) {
> +			f->obj_cnt = tmp->obj_cnt;
> +			ret = 0;
> +			break;
> +		}
> +	spin_unlock_irqrestore(&dev->lock, flags);
> +
> +	return ret;
> +}
> +
> +static int vidioc_g_od_result(struct file *file, void *priv,
> +					struct v4l2_od_result *f)
> +{
> +	struct odif_dev *dev = video_drvdata(file);
> +	unsigned long flags;
> +	struct v4l2_odif_result *tmp;
> +	struct v4l2_odif_result *fr = NULL;
> +	unsigned int cnt = 0;
> +	int ret = -EINVAL;
> +
> +	spin_lock_irqsave(&dev->lock, flags);
> +	list_for_each_entry(tmp, &dev->odif_dq.complete, list)
> +		if (tmp->frm_seq == f->frm_seq) {
> +			fr = tmp;
> +			list_del(&tmp->list);
> +			break;
> +		}
> +	spin_unlock_irqrestore(&dev->lock, flags);
> +
> +	if (fr) {
> +		ret = 0;
> +		cnt = min(f->obj_cnt, fr->obj_cnt);
> +		if (cnt)
> +			memcpy(f->od, fr->objs,
> +				sizeof(struct v4l2_od_object) * cnt);
> +		f->obj_cnt = cnt;
> +
> +		free_result(fr);

Some drivers may be designed so that they do not discard the result data
automatically once it is read by a user application. AFAICS this module
doesn't allow such behaviour.

For instance, it might be required to discard the oldest results data
when the ring buffer gets filled in.

> +
> +		atomic_dec(&dev->odif_dq.complete_cnt);
> +	}
> +	return ret;
> +}
> +
> +static const struct v4l2_ioctl_ops odif_ioctl_ops = {
> +	.vidioc_querycap      = vidioc_querycap,
> +	.vidioc_enum_fmt_vid_out  = vidioc_enum_fmt_vid_out,
> +	.vidioc_g_fmt_vid_out     = vidioc_g_fmt_vid_out,
> +	.vidioc_reqbufs       = vidioc_reqbufs,
> +	.vidioc_querybuf      = vidioc_querybuf,
> +	.vidioc_qbuf          = vidioc_qbuf,
> +	.vidioc_dqbuf         = vidioc_dqbuf,
> +	.vidioc_streamon      = vidioc_streamon,
> +	.vidioc_streamoff     = vidioc_streamoff,
> +	.vidioc_g_od_count    = vidioc_g_od_count,
> +	.vidioc_g_od_result   = vidioc_g_od_result,
> +};
> +
> +static void odif_vdev_release(struct video_device *vdev)
> +{
> +	kfree(vdev->lock);
> +	video_device_release(vdev);
> +}
> +
> +static struct video_device odif_template = {
> +	.name		= "odif",
> +	.fops           = &odif_fops,
> +	.ioctl_ops 	= &odif_ioctl_ops,
> +	.release	= odif_vdev_release,
> +};
> +
> +
> +/* ------------------------------------------------------------------
> +	Videobuf operations
> +   ------------------------------------------------------------------*/
> +static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
> +				unsigned int *nbuffers, unsigned int *nplanes,
> +				unsigned int sizes[], void *alloc_ctxs[])
> +{
> +	struct odif_dev *dev = vb2_get_drv_priv(vq);
> +	unsigned long size;
> +
> +	dprintk(dev, 1, "%s\n", __func__);
> +
> +	BUG_ON(!dev->s.fmt);
> +	size = (dev->s.width * dev->s.height * dev->s.fmt->depth) >> 3;
> +
> +	if (0 == *nbuffers)
> +		*nbuffers = 2;
> +	*nplanes = 1;
> +	sizes[0] = size;
> +
> +	dprintk(dev, 1, "%s, count=%d, size=%ld\n", __func__,
> +		*nbuffers, size);
> +
> +	return 0;
> +}
> +
> +static int buffer_init(struct vb2_buffer *vb)
> +{
> +	struct odif_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
> +
> +	/*
> +	 * This callback is called once per buffer, after its allocation.
> +	 *
> +	 * Vivi does not allow changing format during streaming, but it is
> +	 * possible to do so when streaming is paused (i.e. in streamoff state).
> +	 * Buffers however are not freed when going into streamoff and so
> +	 * buffer size verification has to be done in buffer_prepare, on each
> +	 * qbuf.
> +	 * It would be best to move verification code here to buf_init and
> +	 * s_fmt though.
> +	 */
> +	dprintk(dev, 1, "%s vaddr=%p\n", __func__,
> +			vb2_plane_vaddr(vb, 0));
> +
> +	return 0;
> +}

As I already mentioned, this empty callback can be removed. Anyway, IMO the
implementation of the buffer queue operations should be left to individual
drivers. Having them in a generic module doesn't sound flexible enough.

> +
> +static int buffer_prepare(struct vb2_buffer *vb)
> +{
> +	struct odif_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
> +	struct odif_buffer *buf = container_of(vb, struct odif_buffer, vb);
> +	unsigned long size;
> +
> +	dprintk(dev, 1, "%s, field=%d\n", __func__, vb->v4l2_buf.field);
> +
> +	BUG_ON(!dev->s.fmt);
> +	size = (dev->s.width * dev->s.height * dev->s.fmt->depth) >> 3;
> +	if (vb2_plane_size(vb, 0) < size) {
> +		dprintk(dev, 1, "%s data will not fit into plane (%lu < %lu)\n",
> +				__func__, vb2_plane_size(vb, 0), size);
> +		return -EINVAL;
> +	}
> +
> +	vb2_set_plane_payload(&buf->vb, 0, size);
> +
> +	return 0;
> +}
> +
> +static int buffer_finish(struct vb2_buffer *vb)
> +{
> +	struct odif_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
> +	dprintk(dev, 1, "%s\n", __func__);
> +	return 0;
> +}
> +
> +static void buffer_cleanup(struct vb2_buffer *vb)
> +{
> +	struct odif_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
> +	dprintk(dev, 1, "%s\n", __func__);
> +}
> +
> +static void buffer_queue(struct vb2_buffer *vb)
> +{
> +	struct odif_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
> +	struct odif_buffer *buf = container_of(vb, struct odif_buffer, vb);
> +	struct odif_dmaqueue *dq = &dev->odif_dq;
> +	unsigned long flags = 0;
> +
> +	dprintk(dev, 1, "%s vaddr:%p\n", __func__,
> +			vb2_plane_vaddr(vb, 0));
> +
> +	spin_lock_irqsave(&dev->lock, flags);
> +	list_add_tail(&buf->list, &dq->active);
> +	spin_unlock_irqrestore(&dev->lock, flags);
> +
> +	if (vb->vb2_queue->streaming)
> +		dev->ops->submit_detect(dev);
> +}
> +
> +static int start_streaming(struct vb2_queue *vq, unsigned int count)
> +{
> +	struct odif_dev *dev = vb2_get_drv_priv(vq);
> +	dprintk(dev, 1, "%s\n", __func__);
> +	return odif_start_detect(dev);
> +}
> +
> +/* abort streaming and wait for last buffer */
> +static int stop_streaming(struct vb2_queue *vq)
> +{
> +	struct odif_dev *dev = vb2_get_drv_priv(vq);
> +	dprintk(dev, 1, "%s\n", __func__);
> +	odif_stop_detect(dev);
> +	return 0;
> +}
> +
> +static void odif_lock(struct vb2_queue *vq)
> +{
> +	struct odif_dev *dev = vb2_get_drv_priv(vq);
> +
> +	mutex_lock(&dev->mutex);
> +}
> +
> +static void odif_unlock(struct vb2_queue *vq)
> +{
> +	struct odif_dev *dev = vb2_get_drv_priv(vq);
> +	mutex_unlock(&dev->mutex);
> +}
> +static struct vb2_ops odif_video_qops = {
> +	.queue_setup		= queue_setup,
> +	.buf_init		= buffer_init,
> +	.buf_prepare		= buffer_prepare,
> +	.buf_finish		= buffer_finish,
> +	.buf_cleanup		= buffer_cleanup,
> +	.buf_queue		= buffer_queue,
> +	.start_streaming	= start_streaming,
> +	.stop_streaming		= stop_streaming,
> +	.wait_prepare		= odif_unlock,
> +	.wait_finish		= odif_lock,

Not all of these ops will be always needed by every driver. Things would
be a bit simpler if we had left their implementation to the individual
drivers.

> +};
> +
> +/*only store one detection result for one frame*/

Could you do me a favour and in the future add whitespace after each "/*"
and before "*/" in the comments? Sorry, I'm getting nervous every time
I look at something like /*blah blah*/ ;) I also prefer starting comments
with a capital letter. Of course these are just my personal preferences ;)

> +void odif_add_detection(struct odif_dev *dev,
> +		struct v4l2_odif_result *v4l2_fr)
> +{
> +	unsigned long flags;
> +	struct v4l2_odif_result *old = NULL;
> +	struct v4l2_odif_result *tmp;
> +
> +	spin_lock_irqsave(&dev->lock, flags);
> +	list_for_each_entry(tmp, &dev->odif_dq.complete, list)
> +		if (tmp->frm_seq == v4l2_fr->frm_seq) {
> +			old = tmp;
> +			list_del(&tmp->list);
> +			break;
> +		}
> +	list_add_tail(&v4l2_fr->list, &dev->odif_dq.complete);
> +	spin_unlock_irqrestore(&dev->lock, flags);
> +
> +	if (old)
> +		free_result(old);
> +	else
> +		atomic_inc(&dev->odif_dq.complete_cnt);
> +}
> +EXPORT_SYMBOL_GPL(odif_add_detection);
> +
> +struct v4l2_odif_result *odif_allocate_detection(struct odif_dev *dev,
> +		int cnt)
> +{
> +	struct v4l2_odif_result *result = NULL;
> +	unsigned long flags;
> +
> +	if (atomic_read(&dev->odif_dq.complete_cnt) >=
> +			result_cnt_threshold) {
> +		spin_lock_irqsave(&dev->lock, flags);
> +		result = list_entry(dev->odif_dq.complete.next,
> +					struct v4l2_odif_result, list);
> +		list_del(&result->list);
> +		spin_unlock_irqrestore(&dev->lock, flags);
> +
> +		atomic_dec(&dev->odif_dq.complete_cnt);
> +	}
> +
> +	if (!result)	goto allocate_result;
> +
> +	/* reuse the old one if count is matched */
> +	if (result->obj_cnt == cnt)
> +		goto out;
> +
> +	/*free the old result*/
> +	free_result(result);
> +
> +allocate_result:
> +	result = kzalloc(sizeof(*result) +
> +			cnt * sizeof(struct v4l2_od_object), GFP_ATOMIC);

I prefer not allocating memory in interrupt context like this, especially
as this can be avoided without significant effort and resources waste.

> +	if (result)
> +		result->obj_cnt = cnt;
> +out:
> +	return result;
> +}
> +EXPORT_SYMBOL_GPL(odif_allocate_detection);
> +
> +struct odif_buffer *odif_pick_up_buffer(struct odif_dev *dev)
> +{
> +	struct odif_buffer *buf = NULL;
> +	unsigned long flags;
> +
> +	WARN_ON(!input_from_user(dev));
> +
> +	spin_lock_irqsave(&dev->lock, flags);
> +	if (list_empty(&dev->odif_dq.active))
> +		goto out;
> +	buf = list_entry(dev->odif_dq.active.next,
> +				struct odif_buffer, list);
> +	list_del(&buf->list);
> +out:
> +	spin_unlock_irqrestore(&dev->lock, flags);
> +
> +	return buf;
> +}
> +EXPORT_SYMBOL_GPL(odif_pick_up_buffer);
> +
> +static struct v4l2_subdev_ops odif_subdev_ops = {
> +};
> +
> +static int odif_switch_link(struct odif_dev *dev, int next,
> +		const struct media_pad *remote)
> +{
> +	int ret = 0;
> +
> +	if (dev->input == next)
> +		return ret;
> +
> +	if (!(dev->ops->capability & V4L2_CAP_VIDEO_OUTPUT) &&
> +			next == OD_INPUT_FROM_USER_SPACE)
> +		return -EINVAL;
> +
> +	if (!(dev->ops->capability & V4L2_CAP_VIDEO_CAPTURE) &&
> +			next == OD_INPUT_FROM_MEDIA_HW)
> +		return -EINVAL;
> +
> +	mutex_lock(dev->vfd->lock);
> +	if (dev->working) {
> +		ret = -EBUSY;
> +		goto out;
> +	}
> +
> +	if (dev->ops->set_input)
> +		dev->ops->set_input(dev, next, remote);
> +	else
> +		ret = -EINVAL;
> +out:
> +	mutex_unlock(dev->vfd->lock);
> +	return ret;
> +}
> +
> +static int odif_entity_link_setup(struct media_entity *entity,
> +			  const struct media_pad *local,
> +			  const struct media_pad *remote, u32 flags)
> +{
> +	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
> +	struct odif_dev *dev = v4l2_get_subdevdata(sd);
> +	u32    type = media_entity_type(remote->entity);
> +	int ret, next;
> +
> +	/*link user space video to object detection*/
> +	if (remote == &dev->entity[1].pads[0])
> +		next = OD_INPUT_FROM_USER_SPACE;
> +	else if (type == MEDIA_ENT_T_V4L2_SUBDEV)
> +		next = OD_INPUT_FROM_MEDIA_HW;
> +	else
> +		next = dev->input;
> +
> +	printk("%s:%d next=%d\n", __func__, __LINE__, next);
> +	ret = odif_switch_link(dev, next, remote);
> +
> +	return ret;
> +}
> +
> +struct media_entity_operations odif_entity_ops = {
> +	.link_setup = odif_entity_link_setup,
> +};
> +
> +static void odif_cleanup_entities(struct odif_dev *odif)
> +{
> +
> +	v4l2_device_unregister_subdev(&odif->entity[0].subdev);
> +	v4l2_device_unregister_subdev(&odif->entity[1].subdev);
> +
> +	media_entity_cleanup(&odif->entity[0].subdev.entity);
> +	media_entity_cleanup(&odif->entity[1].subdev.entity);
> +}
> +
> +static int odif_init_entities(struct odif_dev *odif)
> +{
> +	const u32 flags = MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_DYNAMIC;
> +	int ret;
> +	struct odif_entity *entity;
> +	struct media_entity *source;
> +	struct media_entity *sink;
> +
> +	/*entity[0] is the entity for object detection hw*/
> +	entity = &odif->entity[0];
> +	entity->num_links = 2;
> +	entity->num_pads = 1;
> +	entity->pads[0].flags = MEDIA_PAD_FL_SINK;
> +
> +	v4l2_subdev_init(&entity->subdev, &odif_subdev_ops);
> +	v4l2_set_subdevdata(&entity->subdev, odif);
> +	strlcpy(entity->subdev.name, "object detect",
> +			sizeof(entity->subdev.name));
> +	ret = media_entity_init(&entity->subdev.entity,
> +		entity->num_pads, entity->pads, 1);
> +
> +	if (ret)
> +		goto out;
> +
> +	/*entity[1] is the video entity which sources the user space video*/
> +	entity = &odif->entity[1];
> +	entity->num_links = 1;
> +	entity->num_pads = 1;
> +	entity->pads[0].flags = MEDIA_PAD_FL_SOURCE;
> +
> +	v4l2_subdev_init(&entity->subdev, &odif_subdev_ops);
> +	v4l2_set_subdevdata(&entity->subdev, odif);
> +	strlcpy(entity->subdev.name, "user space video",
> +			sizeof(entity->subdev.name));
> +	ret = media_entity_init(&entity->subdev.entity,
> +		entity->num_pads, entity->pads, 0);
> +
> +	/* create the default link */
> +	source = &odif->entity[1].subdev.entity;
> +	sink = &odif->entity[0].subdev.entity;
> +	sink->ops = &odif_entity_ops;
> +	ret = media_entity_create_link(source, 0,
> +				       sink, 0, flags);
> +	if (ret)
> +		goto out;
> +
> +	/* init the default hw link*/
> +	if (odif->ops->set_input)
> +		ret = odif->ops->set_input(odif,
> +			OD_INPUT_FROM_USER_SPACE,
> +			&odif->entity[1].pads[0]);
> +	if (ret)
> +		goto out;
> +
> +	odif->input = OD_INPUT_FROM_USER_SPACE;
> +
> +	/* register the two subdevices */
> +	ret = v4l2_device_register_subdev(&odif->v4l2_dev,
> +			&odif->entity[0].subdev);
> +	if (ret)
> +		goto out;
> +
> +	ret = v4l2_device_register_subdev(&odif->v4l2_dev,
> +			&odif->entity[1].subdev);

You're creating a v4l2 subdevice, but not all drivers would require it.
Also, individual drivers will usually manage connections between the media
entities on their own. So IMHO this module goes a little too far in fixing
up the driver's architecture.

Also, what are the two subdevs needed for?

> +	if (ret)
> +		goto out;
> +out:
> +	return ret;
> +}
> +
> +void odif_release(struct kref *ref)
> +{
> +	struct odif_dev *dev = container_of(ref, struct odif_dev, ref);
> +
> +	v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
> +		video_device_node_name(dev->vfd));
> +
> +	list_del(&dev->odif_devlist);
> +
> +	odif_cleanup_entities(dev);
> +	video_unregister_device(dev->vfd);
> +	v4l2_device_unregister(&dev->v4l2_dev);
> +	kfree(dev);
> +}
> +EXPORT_SYMBOL_GPL(odif_release);
> +
> +int odif_create_instance(struct device *parent, int priv_size,
> +		struct odif_ops *ops, struct odif_dev **odif_dev)
> +{
> +	struct odif_dev *dev;
> +	struct video_device *vfd;
> +	struct vb2_queue *q;
> +	int ret, len;
> +	struct mutex	*vmutex;
> +
> +	dev = kzalloc(sizeof(*dev) + priv_size, GFP_KERNEL);
> +	if (!dev) {
> +		ret = -ENOMEM;
> +		goto out;
> +	}
> +
> +	kref_init(&dev->ref);
> +	dev->ops = ops;
> +	dev->dev = parent;
> +
> +	len = snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name),
> +			"%s", "odif");
> +	dev->v4l2_dev.name[len] = '\0';

There is no need for tricks like that, snprintf() already ensures the string
has a trailing '\0' appended to it.

> +
> +	ret = v4l2_device_register(dev->dev, &dev->v4l2_dev);
> +	if (ret)
> +		goto free_dev;
> +
> +	/* initialize locks */
> +	spin_lock_init(&dev->lock);
> +
> +	/* initialize queue */
> +	q = &dev->vbq;
> +	memset(q, 0, sizeof(dev->vbq));
> +	q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
> +	q->io_modes = VB2_MMAP;
> +	q->drv_priv = dev;
> +	q->buf_struct_size = sizeof(struct odif_buffer);
> +	q->ops = &odif_video_qops;
> +	q->mem_ops = &vb2_page_memops;
> +
> +	vb2_queue_init(q);
> +
> +	mutex_init(&dev->mutex);
> +
> +	/* init video dma queues */
> +	atomic_set(&dev->odif_dq.complete_cnt, 0);
> +	INIT_LIST_HEAD(&dev->odif_dq.active);
> +	INIT_LIST_HEAD(&dev->odif_dq.complete);
> +	init_waitqueue_head(&dev->odif_dq.wq);
> +
> +	ret = -ENOMEM;
> +	vfd = video_device_alloc();
> +	if (!vfd)
> +		goto unreg_dev;
> +
> +	*vfd = odif_template;
> +	vfd->debug = debug;
> +	vfd->v4l2_dev = &dev->v4l2_dev;
> +	set_bit(V4L2_FL_USE_FH_PRIO, &vfd->flags);
> +
> +	vmutex = kzalloc(sizeof(struct mutex), GFP_KERNEL);
> +	if (!vmutex)
> +		goto err_alloc_mutex;
> +
> +	mutex_init(vmutex);
> +	/*
> +	 * Provide a mutex to v4l2 core. It will be used to protect
> +	 * all fops and v4l2 ioctls.
> +	 */
> +	vfd->lock = vmutex;
> +
> +	ret = video_register_device(vfd, VFL_TYPE_GRABBER, video_nr);
> +	if (ret < 0)
> +		goto rel_vdev;
> +
> +	if (video_nr != -1)
> +		video_nr++;
> +
> +	dev->vfd = vfd;
> +	video_set_drvdata(vfd, dev);
> +
> +	ret = odif_init_entities(dev);
> +	if (ret)
> +		goto unreg_video;
> +
> +	/* Now that everything is fine, let's add it to device list */
> +	list_add_tail(&dev->odif_devlist, &odif_devlist);
> +
> +	v4l2_info(&dev->v4l2_dev, "V4L2 device registered as %s\n",
> +		  video_device_node_name(vfd));
> +
> +	*odif_dev = dev;
> +	return 0;
> +
> +unreg_video:
> +	video_unregister_device(vfd);
> +rel_vdev:
> +	kfree(vmutex);
> +err_alloc_mutex:
> +	video_device_release(vfd);
> +unreg_dev:
> +	v4l2_device_unregister(&dev->v4l2_dev);
> +free_dev:
> +	kfree(dev);
> +out:
> +	pr_err("%s: error, ret=%d", __func__, ret);
> +	return ret;
> +}
> +EXPORT_SYMBOL_GPL(odif_create_instance);
> +
> +static int __init odif_init(void)
> +{
> +	return 0;
> +}
> +
> +static void __exit odif_exit(void)
> +{
> +}
> +
> +module_init(odif_init);
> +module_exit(odif_exit);
> +
> +MODULE_DESCRIPTION("object detection interface function module");
> +MODULE_AUTHOR("Ming Lei");
> +MODULE_LICENSE("GPL");
> diff --git a/drivers/media/video/odif/odif.h b/drivers/media/video/odif/odif.h
> new file mode 100644
> index 0000000..25c4788
> --- /dev/null
> +++ b/drivers/media/video/odif/odif.h
> @@ -0,0 +1,157 @@
> +#ifndef _LINUX_MEDIA_ODIF_H
> +#define _LINUX_MEDIA_ODIF_H
> +
> +#include <linux/types.h>
> +#include <linux/magic.h>
> +#include <linux/errno.h>
> +#include <linux/kref.h>
> +#include <linux/kernel.h>
> +#include <media/v4l2-common.h>
> +#include <linux/videodev2.h>
> +#include <media/media-entity.h>
> +#include <media/videobuf2-page.h>
> +#include <media/v4l2-device.h>
> +#include <media/v4l2-ioctl.h>
> +#include <media/v4l2-ctrls.h>
> +#include <media/v4l2-fh.h>
> +#include <media/v4l2-event.h>
> +
> +#define MAX_OBJ_COUNT		40
> +
> +#define OBJ_DIR_UP		0
> +#define OBJ_DIR_RIGHT		1
> +#define OBJ_DIR_LIFT		2
> +#define OBJ_DIR_DOWN		3
> +
> +
> +#define	OD_INPUT_FROM_USER_SPACE	1
> +#define	OD_INPUT_FROM_MEDIA_HW		2
> +
> +struct odif_fmt {
> +	char  *name;
> +	u32   fourcc;          /* v4l2 format id */
> +	u32   depth;
> +	u32   width, height;
> +};
> +
> +struct odif_setting {
> +	struct odif_fmt            *fmt;
> +	enum v4l2_field            field;
> +
> +	/* minimize object size to be detected, unit: pixel */
> +	u32 			min_obj_size;
> +	u32			obj_dir;
> +
> +	u32			startx, starty;
> +	u32			sizex, sizey;
> +
> +	int			lhit;
> +
> +	u32			width, height;
> +};
> +
> +/* buffer for one video frame */
> +struct odif_buffer {
> +	/* common v4l buffer stuff -- must be first */
> +	struct vb2_buffer	vb;
> +	struct list_head	list;
> +};
> +
> +
> +struct v4l2_odif_result {
> +	struct list_head	list;
> +
> +	/*frame sequence*/
> +	u32			frm_seq;
> +	u32			obj_cnt;
> +	struct v4l2_od_object	objs[0];
> +};
> +
> +struct odif_dmaqueue {
> +	atomic_t		complete_cnt;
> +	struct list_head	complete;
> +	struct list_head	active;
> +	wait_queue_head_t	wq;
> +};
> +
> +struct odif_entity {
> +	/* Media controller-related fields. */
> +	struct v4l2_subdev subdev;
> +	unsigned int num_pads;
> +	unsigned int num_links;
> +
> +	/*only one sink pad*/
> +	struct media_pad pads[1];
> +
> +	/* TODO: odif controls */
> +};
> +
> +struct odif_dev {
> +	struct kref		ref;
> +	struct device		*dev;
> +
> +	struct list_head        odif_devlist;
> +	struct v4l2_device	v4l2_dev;
> +	struct vb2_queue        vbq;
> +	struct mutex            mutex;
> +	spinlock_t		lock;
> +
> +	/* media controller entity*/
> +	struct odif_entity	entity[2];
> +
> +	struct video_device	*vfd;
> +	struct odif_dmaqueue	odif_dq;
> +	int	working;
> +
> +	int	input;
> +
> +	/* setting */
> +	struct odif_setting	s;
> +
> +	struct odif_ops	*ops;
> +
> +	unsigned long	priv[0];
> +};
> +
> +/**
> + * struct odif_ops - interface between odif and low level hw driver
> + * @capability:	extra capability except for V4L2_CAP_OBJ_DETECTION
> + * 	V4L2_CAP_STREAMING: mandatory, start/stop detect is triggered
> + * 		by streaming on/off
> + * 	V4L2_CAP_VIDEO_OUTPUT: hw can support to detect objects from
> + * 		user space video input
> + *	V4L2_CAP_VIDEO_CAPTURE: hw can support to detect objects from
> + *		internal bus, this doesn't mean capture is capable

No, we can't be reinterpreting the V4L2_CAP_VIDEO_CAPTURE flag this way.
The information about how data can be fed to an object detection module
should be exposed at the media device level. The media device driver an
object detection device belongs to should create proper links that
applications may then activate.

Such an interpretation of V4L2_CAP_VIDEO_CAPTURE violates the spec and would
just confuse applications that query device nodes. Nack.

> + *
> + * @table:	supported format table
> + * @fmt_cnt:	supported format count
> + * @start_detect:	start_detect callback
> + * @stop_detect:	stop_detect callback
> + * @submit_detect:	submit_detect callback
> + * @set_input:	change video input source
> + */
> +struct odif_ops {
> +	u32	capability;
> +	struct odif_fmt *table;
> +	int fmt_cnt;
> +	int (*start_detect)(struct odif_dev *odif);
> +	int (*stop_detect)(struct odif_dev *odif);
> +	int (*submit_detect)(struct odif_dev *odif);
> +	int (*set_input)(struct odif_dev *odif, int next,
> +			const struct media_pad *remote);
> +};
> +
> +#define dprintk(dev, level, fmt, arg...) \
> +	v4l2_dbg(level, debug, &dev->v4l2_dev, fmt, ## arg)
> +
> +
> +extern int odif_create_instance(struct device *parent, int priv_size,
> +		struct odif_ops *ops, struct odif_dev **dev);
> +extern void odif_release(struct kref *ref);
> +extern void odif_add_detection(struct odif_dev *dev,
> +		struct v4l2_odif_result *v4l2_fr);
> +extern struct v4l2_odif_result *odif_allocate_detection(
> +		struct odif_dev *dev, int cnt);
> +extern struct odif_buffer *odif_pick_up_buffer(struct odif_dev *dev);
> +
> +#endif /* _LINUX_MEDIA_ODIF_H */

I suggest you merge this module with the next patch and remove what's
unnecessary for the OMAP4 FDIF. IMHO creating a generic object detection
module doesn't make sense, given the complexity of the hardware. We already
have the required building blocks in the v4l2 core; what we need is a
specification of the Face Detection interface semantics, which should
eventually be added to the V4L DocBook.
The object detection ioctls need to be adjusted to support camera sensors
with face detection capability; I'll comment on patch 6/8 about this.

-- 
Thanks,
Sylwester

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [RFC PATCH v2 7/8] media: video: introduce object detection driver module
  2011-12-29 17:16   ` Sylwester Nawrocki
@ 2012-01-04  8:13     ` Ming Lei
  2012-01-17 21:05       ` Sylwester Nawrocki
  0 siblings, 1 reply; 24+ messages in thread
From: Ming Lei @ 2012-01-04  8:13 UTC (permalink / raw)
  To: Sylwester Nawrocki
  Cc: Mauro Carvalho Chehab, Tony Lindgren, Alan Cox, linux-omap,
	linux-arm-kernel, linux-kernel, linux-media

Hi Sylwester,

Thanks for your review.

On Fri, Dec 30, 2011 at 1:16 AM, Sylwester Nawrocki <snjw23@gmail.com> wrote:
> Hi Ming,
>
> On 12/14/2011 03:00 PM, Ming Lei wrote:
>> This patch introduces object detection generic driver.
>>
>> The driver is responsible for all v4l2 stuff, buffer management
>> and other general things, and doesn't touch object detection hardware
>> directly. Several interfaces are exported to low level drivers
>> (such as the coming omap4 FD driver) which will communicate with
>> object detection hw module.
>>
>> So the driver will make driving object detection hw modules more
>> easy.
>> TODO:
>>       - implement object detection setting interfaces with v4l2
>>       controls or ext controls
>>
>> Signed-off-by: Ming Lei <ming.lei@canonical.com>
>> ---
>> v2:
>>       - extend face detection driver to object detection driver
>>       - introduce subdevice and media entity
>>       - provide support to detect object from media HW
>> ---
>>  drivers/media/video/Kconfig       |    2 +
>>  drivers/media/video/Makefile      |    1 +
>>  drivers/media/video/odif/Kconfig  |    7 +
>>  drivers/media/video/odif/Makefile |    1 +
>>  drivers/media/video/odif/odif.c   |  890 +++++++++++++++++++++++++++++++++++++
>>  drivers/media/video/odif/odif.h   |  157 +++++++
>>  6 files changed, 1058 insertions(+), 0 deletions(-)
>>  create mode 100644 drivers/media/video/odif/Kconfig
>>  create mode 100644 drivers/media/video/odif/Makefile
>>  create mode 100644 drivers/media/video/odif/odif.c
>>  create mode 100644 drivers/media/video/odif/odif.h
>>
>> diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
>> index 5684a00..8740ee9 100644
>> --- a/drivers/media/video/Kconfig
>> +++ b/drivers/media/video/Kconfig
>> @@ -1166,3 +1166,5 @@ config VIDEO_SAMSUNG_S5P_MFC
>>           MFC 5.1 driver for V4L2.
>>
>>  endif # V4L_MEM2MEM_DRIVERS
>> +
>> +source "drivers/media/video/odif/Kconfig"
>> diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
>> index bc797f2..259c8d8 100644
>> --- a/drivers/media/video/Makefile
>> +++ b/drivers/media/video/Makefile
>> @@ -197,6 +197,7 @@ obj-$(CONFIG_VIDEO_IR_I2C)  += ir-kbd-i2c.o
>>  obj-y        += davinci/
>>
>>  obj-$(CONFIG_ARCH_OMAP)      += omap/
>> +obj-$(CONFIG_ODIF)   += odif/
>>
>>  ccflags-y += -Idrivers/media/dvb/dvb-core
>>  ccflags-y += -Idrivers/media/dvb/frontends
>> diff --git a/drivers/media/video/odif/Kconfig b/drivers/media/video/odif/Kconfig
>> new file mode 100644
>> index 0000000..5090bd6
>> --- /dev/null
>> +++ b/drivers/media/video/odif/Kconfig
>> @@ -0,0 +1,7 @@
>> +config ODIF
>> +     depends on VIDEO_DEV && VIDEO_V4L2
>> +     select VIDEOBUF2_PAGE
>> +     tristate "Object Detection module"
>> +     help
>> +       The ODIF is a object detection module, which can be integrated into
>> +       some SoCs to detect objects in images or video.
>> diff --git a/drivers/media/video/odif/Makefile b/drivers/media/video/odif/Makefile
>> new file mode 100644
>> index 0000000..a55ff66
>> --- /dev/null
>> +++ b/drivers/media/video/odif/Makefile
>> @@ -0,0 +1 @@
>> +obj-$(CONFIG_ODIF)           += odif.o
>> diff --git a/drivers/media/video/odif/odif.c b/drivers/media/video/odif/odif.c
>> new file mode 100644
>> index 0000000..381ab9d
>> --- /dev/null
>> +++ b/drivers/media/video/odif/odif.c
>> @@ -0,0 +1,890 @@
>> +/*
>> + *      odif.c  --  object detection module driver
>> + *
>> + *      Copyright (C) 2011  Ming Lei (ming.lei@canonical.com)
>> + *
>> + *   This file is based on drivers/media/video/vivi.c.
>> + *
>> + *      This program is free software; you can redistribute it and/or modify
>> + *      it under the terms of the GNU General Public License as published by
>> + *      the Free Software Foundation; either version 2 of the License, or
>> + *      (at your option) any later version.
>> + *
>> + *      This program is distributed in the hope that it will be useful,
>> + *      but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + *      MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
>> + *      GNU General Public License for more details.
>> + *
>> + *      You should have received a copy of the GNU General Public License
>> + *      along with this program; if not, write to the Free Software
>> + *      Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
>> + *
>> + */
>> +
>> +/*****************************************************************************/
>> +
>> +#include <linux/module.h>
>> +#include <linux/fs.h>
>> +#include <linux/mm.h>
>> +#include <linux/signal.h>
>> +#include <linux/wait.h>
>> +#include <linux/poll.h>
>> +#include <linux/mman.h>
>> +#include <linux/pm_runtime.h>
>> +#include <linux/delay.h>
>> +#include <linux/platform_device.h>
>> +#include <linux/interrupt.h>
>> +#include <asm/uaccess.h>
>> +#include <asm/byteorder.h>
>> +#include <asm/io.h>
>> +#include "odif.h"
>> +
>> +#define      input_from_user(dev) \
>> +     (dev->input == OD_INPUT_FROM_USER_SPACE)
>> +
>> +#define      DEFAULT_PENDING_RESULT_CNT      8
>> +
>> +static unsigned debug = 0;
>> +module_param(debug, uint, 0644);
>> +MODULE_PARM_DESC(debug, "activates debug info");
>> +
>> +static unsigned result_cnt_threshold = DEFAULT_PENDING_RESULT_CNT;
>> +module_param(result_cnt_threshold, uint, 0644);
>> +MODULE_PARM_DESC(result_cnt, "max pending result count");
>> +
>> +static LIST_HEAD(odif_devlist);
>> +static unsigned video_nr = -1;
>> +
>> +int odif_open(struct file *file)
>> +{
>> +     struct odif_dev *dev = video_drvdata(file);
>> +
>> +     kref_get(&dev->ref);
>> +     return v4l2_fh_open(file);
>> +}
>
> Individual drivers may need to perform some initialization when
> device node is opened. So I consider taking this possibility away
> from them not a good idea.

OK, it can be handled easily by introducing new callbacks (such as
.open_detect, and .close_detect for close) in struct odif_ops.

>
>> +
>> +static unsigned int
>> +odif_poll(struct file *file, struct poll_table_struct *wait)
>> +{
>> +     struct odif_dev *dev = video_drvdata(file);
>> +     unsigned int mask = 0;
>> +     unsigned long flags;
>> +
>> +     poll_wait(file, &dev->odif_dq.wq, wait);
>> +
>> +     spin_lock_irqsave(&dev->lock, flags);
>> +     if ((file->f_mode & FMODE_READ) &&
>> +             !list_empty(&dev->odif_dq.complete))
>> +             mask |= POLLIN | POLLWRNORM;
>> +     spin_unlock_irqrestore(&dev->lock, flags);
>> +     return mask;
>> +}
>> +
>> +static int odif_close(struct file *file)
>> +{
>> +     struct video_device  *vdev = video_devdata(file);
>> +     struct odif_dev *dev = video_drvdata(file);
>> +     int ret;
>> +
>> +     dprintk(dev, 1, "close called (dev=%s), file %p\n",
>> +             video_device_node_name(vdev), file);
>> +
>> +     if (v4l2_fh_is_singular_file(file))
>> +             vb2_queue_release(&dev->vbq);
>> +
>> +     ret = v4l2_fh_release(file);
>> +     kref_put(&dev->ref, odif_release);
>> +
>> +     return ret;
>> +}
>
> Same comments as for open().

Same above.

>
>> +
>> +static int odif_mmap(struct file *file, struct vm_area_struct *vma)
>> +{
>> +     struct odif_dev *dev = video_drvdata(file);
>> +     int ret;
>> +
>> +     dprintk(dev, 1, "mmap called, vma=0x%08lx\n", (unsigned long)vma);
>> +
>> +     if (!input_from_user(dev))
>> +             return -EBUSY;
>> +
>> +     ret = vb2_mmap(&dev->vbq, vma);
>> +     dprintk(dev, 1, "vma start=0x%08lx, size=%ld, ret=%d\n",
>> +             (unsigned long)vma->vm_start,
>> +             (unsigned long)vma->vm_end - (unsigned long)vma->vm_start,
>> +             ret);
>> +     return ret;
>> +}
>> +
>> +static const struct v4l2_file_operations odif_fops = {
>> +     .owner          = THIS_MODULE,
>> +     .open           = odif_open,
>> +     .release        = odif_close,
>> +     .poll           = odif_poll,
>> +     .unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */
>> +     .mmap           = odif_mmap,
>> +};
>> +
>> +/* ------------------------------------------------------------------
>> +     IOCTL vidioc handling
>> +   ------------------------------------------------------------------*/
>> +static void free_result(struct v4l2_odif_result *result)
>> +{
>> +     kfree(result);
>> +}
>> +
>> +static int odif_start_detect(struct odif_dev *dev)
>> +{
>> +     int ret;
>> +
>> +     dprintk(dev, 1, "%s\n", __func__);
>> +
>> +     ret = dev->ops->start_detect(dev);
>> +
>> +     dprintk(dev, 1, "returning from %s, ret is %d\n",
>> +                     __func__, ret);
>> +     return ret;
>> +}
>> +
>> +static void odif_stop_detect(struct odif_dev *dev)
>> +{
>> +     struct odif_dmaqueue *dma_q = &dev->odif_dq;
>> +     unsigned long flags;
>> +
>> +     dprintk(dev, 1, "%s\n", __func__);
>> +
>> +     /*stop hardware first*/
>> +     dev->ops->stop_detect(dev);
>> +
>> +     spin_lock_irqsave(&dev->lock, flags);
>> +     /* Release all active buffers */
>> +     while (!list_empty(&dma_q->active)) {
>> +             struct odif_buffer *buf;
>> +             buf = list_entry(dma_q->active.next, struct odif_buffer, list);
>> +             list_del(&buf->list);
>> +             spin_unlock_irqrestore(&dev->lock, flags);
>> +             vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
>> +             dprintk(dev, 2, "[%p/%d] done\n", buf, buf->vb.v4l2_buf.index);
>> +             spin_lock_irqsave(&dev->lock, flags);
>> +     }
>> +
>> +     /* Release all complete detect result, so user space __must__ read
>> +      * the results before stream off*/
>> +     while (!list_empty(&dma_q->complete)) {
>> +             struct v4l2_odif_result *result;
>> +             result = list_entry(dma_q->complete.next, struct v4l2_odif_result, list);
>> +             list_del(&result->list);
>> +             spin_unlock_irqrestore(&dev->lock, flags);
>> +
>> +             free_result(result);
>> +             dprintk(dev, 2, "[frm_seq:%d] result removed\n", result->frm_seq);
>> +             spin_lock_irqsave(&dev->lock, flags);
>> +     }
>> +     atomic_set(&dma_q->complete_cnt, 0);
>> +
>> +     spin_unlock_irqrestore(&dev->lock, flags);
>> +}
>> +static int vidioc_querycap(struct file *file, void  *priv,
>> +                                     struct v4l2_capability *cap)
>> +{
>> +     struct odif_dev *dev = video_drvdata(file);
>> +
>> +     strcpy(cap->driver, "odif");
>> +     strcpy(cap->card, "odif");
>> +     strlcpy(cap->bus_info, dev->v4l2_dev.name, sizeof(cap->bus_info));
>> +     cap->capabilities = dev->ops->capability | V4L2_CAP_OBJ_DETECTION;
>
> The reported capabilities are wrong, please see below.
>
>> +
>> +     return 0;
>> +}
>> +
>> +static int vidioc_enum_fmt_vid_out(struct file *file, void  *priv,
>> +                                     struct v4l2_fmtdesc *f)
>> +{
>> +     struct odif_fmt *fmt;
>> +     struct odif_dev *dev = video_drvdata(file);
>> +
>> +     if (f->index >= dev->ops->fmt_cnt) {
>> +             return -EINVAL;
>> +     }
>
> Single statement shouldn't have brackets around it.
>
>> +
>> +     fmt = &dev->ops->table[f->index];
>> +
>> +     strlcpy(f->description, fmt->name, sizeof(f->description));
>> +     f->pixelformat = fmt->fourcc;
>> +     return 0;
>> +}
>> +
>> +static int vidioc_g_fmt_vid_out(struct file *file, void *priv,
>> +                                     struct v4l2_format *f)
>> +{
>> +     struct odif_dev *dev = video_drvdata(file);
>> +
>> +     f->fmt.pix.width        = dev->s.width;
>> +     f->fmt.pix.height       = dev->s.height;
>> +     f->fmt.pix.field        = dev->s.field;
>> +     f->fmt.pix.pixelformat  = dev->s.fmt->fourcc;
>> +     f->fmt.pix.bytesperline =
>> +             (f->fmt.pix.width * dev->s.fmt->depth) >> 3;
>> +     f->fmt.pix.sizeimage =
>> +             f->fmt.pix.height * f->fmt.pix.bytesperline;
>> +     return 0;
>> +}
>> +
>> +static int vidioc_reqbufs(struct file *file, void *priv,
>> +                       struct v4l2_requestbuffers *p)
>> +{
>> +     struct odif_dev *dev = video_drvdata(file);
>> +     dprintk(dev, 1, "%s\n", __func__);
>> +
>> +     if (!input_from_user(dev))
>> +             return -EBUSY;
>> +
>> +     return vb2_reqbufs(&dev->vbq, p);
>> +}
>> +
>> +static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
>> +{
>> +     struct odif_dev *dev = video_drvdata(file);
>> +     dprintk(dev, 1, "%s\n", __func__);
>> +
>> +     if (!input_from_user(dev))
>> +             return -EBUSY;
>> +
>> +     return vb2_querybuf(&dev->vbq, p);
>> +}
>> +
>> +static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
>> +{
>> +     struct odif_dev *dev = video_drvdata(file);
>> +     dprintk(dev, 1, "%s\n", __func__);
>> +
>> +     if (!input_from_user(dev))
>> +             return -EBUSY;
>> +
>> +     return vb2_qbuf(&dev->vbq, p);
>> +}
>> +
>> +static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
>> +{
>> +     struct odif_dev *dev = video_drvdata(file);
>> +     dprintk(dev, 1, "%s\n", __func__);
>> +
>> +     if (!input_from_user(dev))
>> +             return -EBUSY;
>> +
>> +     return vb2_dqbuf(&dev->vbq, p, file->f_flags & O_NONBLOCK);
>> +}
>> +
>> +static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
>> +{
>> +     struct odif_dev *dev = video_drvdata(file);
>> +     dprintk(dev, 1, "%s\n", __func__);
>> +
>> +     dev->working = 1;
>> +
>> +     if (input_from_user(dev))
>> +             return vb2_streamon(&dev->vbq, i);
>> +
>> +     return odif_start_detect(dev);
>> +}
>> +
>> +static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
>> +{
>> +     struct odif_dev *dev = video_drvdata(file);
>> +     dprintk(dev, 1, "%s\n", __func__);
>> +
>> +     dev->working = 0;
>> +
>> +     if (input_from_user(dev))
>> +             return vb2_streamoff(&dev->vbq, i);
>> +
>> +     odif_stop_detect(dev);
>> +
>> +     return 0;
>> +}
>> +
>> +static int vidioc_g_od_count(struct file *file, void *priv,
>> +                                     struct v4l2_od_count *f)
>> +{
>> +     struct odif_dev *dev = video_drvdata(file);
>> +     unsigned long flags;
>> +     struct v4l2_odif_result *tmp;
>> +     int ret = -EINVAL;
>> +
>> +     spin_lock_irqsave(&dev->lock, flags);
>> +     list_for_each_entry(tmp, &dev->odif_dq.complete, list)
>> +             if (tmp->frm_seq == f->frm_seq) {
>> +                     f->obj_cnt = tmp->obj_cnt;
>> +                     ret = 0;
>> +                     break;
>> +             }
>> +     spin_unlock_irqrestore(&dev->lock, flags);
>> +
>> +     return ret;
>> +}
>> +
>> +static int vidioc_g_od_result(struct file *file, void *priv,
>> +                                     struct v4l2_od_result *f)
>> +{
>> +     struct odif_dev *dev = video_drvdata(file);
>> +     unsigned long flags;
>> +     struct v4l2_odif_result *tmp;
>> +     struct v4l2_odif_result *fr = NULL;
>> +     unsigned int cnt = 0;
>> +     int ret = -EINVAL;
>> +
>> +     spin_lock_irqsave(&dev->lock, flags);
>> +     list_for_each_entry(tmp, &dev->odif_dq.complete, list)
>> +             if (tmp->frm_seq == f->frm_seq) {
>> +                     fr = tmp;
>> +                     list_del(&tmp->list);
>> +                     break;
>> +             }
>> +     spin_unlock_irqrestore(&dev->lock, flags);
>> +
>> +     if (fr) {
>> +             ret = 0;
>> +             cnt = min(f->obj_cnt, fr->obj_cnt);
>> +             if (cnt)
>> +                     memcpy(f->od, fr->objs,
>> +                             sizeof(struct v4l2_od_object) * cnt);
>> +             f->obj_cnt = cnt;
>> +
>> +             free_result(fr);
>
> Some drivers may be designed so they do not discard the result data
> automatically once it is read by user application. AFAICS this module
> doesn't allow such behaviour.
>
> For instance, it might be required to discard the oldest results data
> when the ring buffer gets filled in.

There doesn't seem to be any benefit to keeping the old result data; could
you give some use cases which require the old results to be kept for
some time?

>
>> +
>> +             atomic_dec(&dev->odif_dq.complete_cnt);
>> +     }
>> +     return ret;
>> +}
>> +
>> +static const struct v4l2_ioctl_ops odif_ioctl_ops = {
>> +     .vidioc_querycap      = vidioc_querycap,
>> +     .vidioc_enum_fmt_vid_out  = vidioc_enum_fmt_vid_out,
>> +     .vidioc_g_fmt_vid_out     = vidioc_g_fmt_vid_out,
>> +     .vidioc_reqbufs       = vidioc_reqbufs,
>> +     .vidioc_querybuf      = vidioc_querybuf,
>> +     .vidioc_qbuf          = vidioc_qbuf,
>> +     .vidioc_dqbuf         = vidioc_dqbuf,
>> +     .vidioc_streamon      = vidioc_streamon,
>> +     .vidioc_streamoff     = vidioc_streamoff,
>> +     .vidioc_g_od_count    = vidioc_g_od_count,
>> +     .vidioc_g_od_result   = vidioc_g_od_result,
>> +};
>> +
>> +static void odif_vdev_release(struct video_device *vdev)
>> +{
>> +     kfree(vdev->lock);
>> +     video_device_release(vdev);
>> +}
>> +
>> +static struct video_device odif_template = {
>> +     .name           = "odif",
>> +     .fops           = &odif_fops,
>> +     .ioctl_ops      = &odif_ioctl_ops,
>> +     .release        = odif_vdev_release,
>> +};
>> +
>> +
>> +/* ------------------------------------------------------------------
>> +     Videobuf operations
>> +   ------------------------------------------------------------------*/
>> +static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
>> +                             unsigned int *nbuffers, unsigned int *nplanes,
>> +                             unsigned int sizes[], void *alloc_ctxs[])
>> +{
>> +     struct odif_dev *dev = vb2_get_drv_priv(vq);
>> +     unsigned long size;
>> +
>> +     dprintk(dev, 1, "%s\n", __func__);
>> +
>> +     BUG_ON(!dev->s.fmt);
>> +     size = (dev->s.width * dev->s.height * dev->s.fmt->depth) >> 3;
>> +
>> +     if (0 == *nbuffers)
>> +             *nbuffers = 2;
>> +     *nplanes = 1;
>> +     sizes[0] = size;
>> +
>> +     dprintk(dev, 1, "%s, count=%d, size=%ld\n", __func__,
>> +             *nbuffers, size);
>> +
>> +     return 0;
>> +}
>> +
>> +static int buffer_init(struct vb2_buffer *vb)
>> +{
>> +     struct odif_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
>> +
>> +     /*
>> +      * This callback is called once per buffer, after its allocation.
>> +      *
>> +      * Vivi does not allow changing format during streaming, but it is
>> +      * possible to do so when streaming is paused (i.e. in streamoff state).
>> +      * Buffers however are not freed when going into streamoff and so
>> +      * buffer size verification has to be done in buffer_prepare, on each
>> +      * qbuf.
>> +      * It would be best to move verification code here to buf_init and
>> +      * s_fmt though.
>> +      */
>> +     dprintk(dev, 1, "%s vaddr=%p\n", __func__,
>> +                     vb2_plane_vaddr(vb, 0));
>> +
>> +     return 0;
>> +}
>
> As I already mentioned this empty callback can be removed. Anyway IMO the
> implementation of the buffer queue operations should be left to individual
> drivers. Having them in generic module doesn't sound flexible enough.

IMO, the buffer queue operations can be shared for the use case of
detecting objects from user space images, so they may be kept in the
generic driver.

>
>> +
>> +static int buffer_prepare(struct vb2_buffer *vb)
>> +{
>> +     struct odif_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
>> +     struct odif_buffer *buf = container_of(vb, struct odif_buffer, vb);
>> +     unsigned long size;
>> +
>> +     dprintk(dev, 1, "%s, field=%d\n", __func__, vb->v4l2_buf.field);
>> +
>> +     BUG_ON(!dev->s.fmt);
>> +     size = (dev->s.width * dev->s.height * dev->s.fmt->depth) >> 3;
>> +     if (vb2_plane_size(vb, 0) < size) {
>> +             dprintk(dev, 1, "%s data will not fit into plane (%lu < %lu)\n",
>> +                             __func__, vb2_plane_size(vb, 0), size);
>> +             return -EINVAL;
>> +     }
>> +
>> +     vb2_set_plane_payload(&buf->vb, 0, size);
>> +
>> +     return 0;
>> +}
>> +
>> +static int buffer_finish(struct vb2_buffer *vb)
>> +{
>> +     struct odif_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
>> +     dprintk(dev, 1, "%s\n", __func__);
>> +     return 0;
>> +}
>> +
>> +static void buffer_cleanup(struct vb2_buffer *vb)
>> +{
>> +     struct odif_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
>> +     dprintk(dev, 1, "%s\n", __func__);
>> +}
>> +
>> +static void buffer_queue(struct vb2_buffer *vb)
>> +{
>> +     struct odif_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
>> +     struct odif_buffer *buf = container_of(vb, struct odif_buffer, vb);
>> +     struct odif_dmaqueue *dq = &dev->odif_dq;
>> +     unsigned long flags = 0;
>> +
>> +     dprintk(dev, 1, "%s vaddr:%p\n", __func__,
>> +                     vb2_plane_vaddr(vb, 0));
>> +
>> +     spin_lock_irqsave(&dev->lock, flags);
>> +     list_add_tail(&buf->list, &dq->active);
>> +     spin_unlock_irqrestore(&dev->lock, flags);
>> +
>> +     if (vb->vb2_queue->streaming)
>> +             dev->ops->submit_detect(dev);
>> +}
>> +
>> +static int start_streaming(struct vb2_queue *vq, unsigned int count)
>> +{
>> +     struct odif_dev *dev = vb2_get_drv_priv(vq);
>> +     dprintk(dev, 1, "%s\n", __func__);
>> +     return odif_start_detect(dev);
>> +}
>> +
>> +/* abort streaming and wait for last buffer */
>> +static int stop_streaming(struct vb2_queue *vq)
>> +{
>> +     struct odif_dev *dev = vb2_get_drv_priv(vq);
>> +     dprintk(dev, 1, "%s\n", __func__);
>> +     odif_stop_detect(dev);
>> +     return 0;
>> +}
>> +
>> +static void odif_lock(struct vb2_queue *vq)
>> +{
>> +     struct odif_dev *dev = vb2_get_drv_priv(vq);
>> +
>> +     mutex_lock(&dev->mutex);
>> +}
>> +
>> +static void odif_unlock(struct vb2_queue *vq)
>> +{
>> +     struct odif_dev *dev = vb2_get_drv_priv(vq);
>> +     mutex_unlock(&dev->mutex);
>> +}
>> +static struct vb2_ops odif_video_qops = {
>> +     .queue_setup            = queue_setup,
>> +     .buf_init               = buffer_init,
>> +     .buf_prepare            = buffer_prepare,
>> +     .buf_finish             = buffer_finish,
>> +     .buf_cleanup            = buffer_cleanup,
>> +     .buf_queue              = buffer_queue,
>> +     .start_streaming        = start_streaming,
>> +     .stop_streaming         = stop_streaming,
>> +     .wait_prepare           = odif_unlock,
>> +     .wait_finish            = odif_lock,
>
> Not all of these ops will be always needed by every driver. Things would
> be a bit simpler if we had left their implementation to the individual
> drivers.

OK, I will remove some empty ops.

>
>> +};
>> +
>> +/*only store one detection result for one frame*/
>
> Could you do me a favour and in the future add whitespace after each "/*"
> and before  "*/" in the comments ? Sorry, I'm getting nervous every time
> I look at something like /*blah blah*/ ;) I also prefer starting comments with
> big letter. Of course these are just my personal preferences;)

Will fix it in v3, :-)

>
>> +void odif_add_detection(struct odif_dev *dev,
>> +             struct v4l2_odif_result *v4l2_fr)
>> +{
>> +     unsigned long flags;
>> +     struct v4l2_odif_result *old = NULL;
>> +     struct v4l2_odif_result *tmp;
>> +
>> +     spin_lock_irqsave(&dev->lock, flags);
>> +     list_for_each_entry(tmp, &dev->odif_dq.complete, list)
>> +             if (tmp->frm_seq == v4l2_fr->frm_seq) {
>> +                     old = tmp;
>> +                     list_del(&tmp->list);
>> +                     break;
>> +             }
>> +     list_add_tail(&v4l2_fr->list, &dev->odif_dq.complete);
>> +     spin_unlock_irqrestore(&dev->lock, flags);
>> +
>> +     if (old)
>> +             free_result(old);
>> +     else
>> +             atomic_inc(&dev->odif_dq.complete_cnt);
>> +}
>> +EXPORT_SYMBOL_GPL(odif_add_detection);
>> +
>> +struct v4l2_odif_result *odif_allocate_detection(struct odif_dev *dev,
>> +             int cnt)
>> +{
>> +     struct v4l2_odif_result *result = NULL;
>> +     unsigned long flags;
>> +
>> +     if (atomic_read(&dev->odif_dq.complete_cnt) >=
>> +                     result_cnt_threshold) {
>> +             spin_lock_irqsave(&dev->lock, flags);
>> +             result = list_entry(dev->odif_dq.complete.next,
>> +                                     struct v4l2_odif_result, list);
>> +             list_del(&result->list);
>> +             spin_unlock_irqrestore(&dev->lock, flags);
>> +
>> +             atomic_dec(&dev->odif_dq.complete_cnt);
>> +     }
>> +
>> +     if (!result)    goto allocate_result;
>> +
>> +     /* reuse the old one if count is matched */
>> +     if (result->obj_cnt == cnt)
>> +             goto out;
>> +
>> +     /*free the old result*/
>> +     free_result(result);
>> +
>> +allocate_result:
>> +     result = kzalloc(sizeof(*result) +
>> +                     cnt * sizeof(struct v4l2_od_object), GFP_ATOMIC);
>
> I prefer not allocating memory in interrupt context like this, especially
> as this can be avoided without significant effort and resources waste.

Considering that the allocated space is not very large, maybe it can be allocated
in interrupt context. The count of v4l2_od_object structures to be allocated varies,
so it is not easy to reserve buffers during driver init to handle variable-size
buffer allocation.  Also, this can be left for future optimization.

>
>> +     if (result)
>> +             result->obj_cnt = cnt;
>> +out:
>> +     return result;
>> +}
>> +EXPORT_SYMBOL_GPL(odif_allocate_detection);
>> +
>> +struct odif_buffer *odif_pick_up_buffer(struct odif_dev *dev)
>> +{
>> +     struct odif_buffer *buf = NULL;
>> +     unsigned long flags;
>> +
>> +     WARN_ON(!input_from_user(dev));
>> +
>> +     spin_lock_irqsave(&dev->lock, flags);
>> +     if (list_empty(&dev->odif_dq.active))
>> +             goto out;
>> +     buf = list_entry(dev->odif_dq.active.next,
>> +                             struct odif_buffer, list);
>> +     list_del(&buf->list);
>> +out:
>> +     spin_unlock_irqrestore(&dev->lock, flags);
>> +
>> +     return buf;
>> +}
>> +EXPORT_SYMBOL_GPL(odif_pick_up_buffer);
>> +
>> +static struct v4l2_subdev_ops odif_subdev_ops = {
>> +};
>> +
>> +static int odif_switch_link(struct odif_dev *dev, int next,
>> +             const struct media_pad *remote)
>> +{
>> +     int ret = 0;
>> +
>> +     if (dev->input == next)
>> +             return ret;
>> +
>> +     if (!(dev->ops->capability & V4L2_CAP_VIDEO_OUTPUT) &&
>> +                     next == OD_INPUT_FROM_USER_SPACE)
>> +             return -EINVAL;
>> +
>> +     if (!(dev->ops->capability & V4L2_CAP_VIDEO_CAPTURE) &&
>> +                     next == OD_INPUT_FROM_MEDIA_HW)
>> +             return -EINVAL;
>> +
>> +     mutex_lock(dev->vfd->lock);
>> +     if (dev->working) {
>> +             ret = -EBUSY;
>> +             goto out;
>> +     }
>> +
>> +     if (dev->ops->set_input)
>> +             dev->ops->set_input(dev, next, remote);
>> +     else
>> +             ret = -EINVAL;
>> +out:
>> +     mutex_unlock(dev->vfd->lock);
>> +     return ret;
>> +}
>> +
>> +static int odif_entity_link_setup(struct media_entity *entity,
>> +                       const struct media_pad *local,
>> +                       const struct media_pad *remote, u32 flags)
>> +{
>> +     struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
>> +     struct odif_dev *dev = v4l2_get_subdevdata(sd);
>> +     u32    type = media_entity_type(remote->entity);
>> +     int ret, next;
>> +
>> +     /*link user space video to object detection*/
>> +     if (remote == &dev->entity[1].pads[0])
>> +             next = OD_INPUT_FROM_USER_SPACE;
>> +     else if (type == MEDIA_ENT_T_V4L2_SUBDEV)
>> +             next = OD_INPUT_FROM_MEDIA_HW;
>> +     else
>> +             next = dev->input;
>> +
>> +     printk("%s:%d next=%d\n", __func__, __LINE__, next);
>> +     ret = odif_switch_link(dev, next, remote);
>> +
>> +     return ret;
>> +}
>> +
>> +struct media_entity_operations odif_entity_ops = {
>> +     .link_setup = odif_entity_link_setup,
>> +};
>> +
>> +static void odif_cleanup_entities(struct odif_dev *odif)
>> +{
>> +
>> +     v4l2_device_unregister_subdev(&odif->entity[0].subdev);
>> +     v4l2_device_unregister_subdev(&odif->entity[1].subdev);
>> +
>> +     media_entity_cleanup(&odif->entity[0].subdev.entity);
>> +     media_entity_cleanup(&odif->entity[1].subdev.entity);
>> +}
>> +
>> +static int odif_init_entities(struct odif_dev *odif)
>> +{
>> +     const u32 flags = MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_DYNAMIC;
>> +     int ret;
>> +     struct odif_entity *entity;
>> +     struct media_entity *source;
>> +     struct media_entity *sink;
>> +
>> +     /*entity[0] is the entity for object detection hw*/
>> +     entity = &odif->entity[0];
>> +     entity->num_links = 2;
>> +     entity->num_pads = 1;
>> +     entity->pads[0].flags = MEDIA_PAD_FL_SINK;
>> +
>> +     v4l2_subdev_init(&entity->subdev, &odif_subdev_ops);
>> +     v4l2_set_subdevdata(&entity->subdev, odif);
>> +     strlcpy(entity->subdev.name, "object detect",
>> +                     sizeof(entity->subdev.name));
>> +     ret = media_entity_init(&entity->subdev.entity,
>> +             entity->num_pads, entity->pads, 1);
>> +
>> +     if (ret)
>> +             goto out;
>> +
>> +     /*entity[1] is the video entity which sources the user space video*/
>> +     entity = &odif->entity[1];
>> +     entity->num_links = 1;
>> +     entity->num_pads = 1;
>> +     entity->pads[0].flags = MEDIA_PAD_FL_SOURCE;
>> +
>> +     v4l2_subdev_init(&entity->subdev, &odif_subdev_ops);
>> +     v4l2_set_subdevdata(&entity->subdev, odif);
>> +     strlcpy(entity->subdev.name, "user space video",
>> +                     sizeof(entity->subdev.name));
>> +     ret = media_entity_init(&entity->subdev.entity,
>> +             entity->num_pads, entity->pads, 0);
>> +
>> +     /* create the default link */
>> +     source = &odif->entity[1].subdev.entity;
>> +     sink = &odif->entity[0].subdev.entity;
>> +     sink->ops = &odif_entity_ops;
>> +     ret = media_entity_create_link(source, 0,
>> +                                    sink, 0, flags);
>> +     if (ret)
>> +             goto out;
>> +
>> +     /* init the default hw link*/
>> +     if (odif->ops->set_input)
>> +             ret = odif->ops->set_input(odif,
>> +                     OD_INPUT_FROM_USER_SPACE,
>> +                     &odif->entity[1].pads[0]);
>> +     if (ret)
>> +             goto out;
>> +
>> +     odif->input = OD_INPUT_FROM_USER_SPACE;
>> +
>> +     /* register the two subdevices */
>> +     ret = v4l2_device_register_subdev(&odif->v4l2_dev,
>> +                     &odif->entity[0].subdev);
>> +     if (ret)
>> +             goto out;
>> +
>> +     ret = v4l2_device_register_subdev(&odif->v4l2_dev,
>> +                     &odif->entity[1].subdev);
>
> You're creating v4l2 subdevice but not all drivers would require it.
> Also individual drivers will usually manage connections between the media
> entities on their own. So IMHO this module goes a little to far on fixing
> up driver's architecture.
>
> Also what are there two subdevs needed for ?

The two subdevs are used to describe two media entities, one of which
is the object detection hw, and the other is the video entity which sources
the user space video. Without the two entities, I don't know how the media
controller would connect these into the framework.

>
>> +     if (ret)
>> +             goto out;
>> +out:
>> +     return ret;
>> +}
>> +
>> +void odif_release(struct kref *ref)
>> +{
>> +     struct odif_dev *dev = container_of(ref, struct odif_dev, ref);
>> +
>> +     v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
>> +             video_device_node_name(dev->vfd));
>> +
>> +     list_del(&dev->odif_devlist);
>> +
>> +     odif_cleanup_entities(dev);
>> +     video_unregister_device(dev->vfd);
>> +     v4l2_device_unregister(&dev->v4l2_dev);
>> +     kfree(dev);
>> +}
>> +EXPORT_SYMBOL_GPL(odif_release);
>> +
>> +int odif_create_instance(struct device *parent, int priv_size,
>> +             struct odif_ops *ops, struct odif_dev **odif_dev)
>> +{
>> +     struct odif_dev *dev;
>> +     struct video_device *vfd;
>> +     struct vb2_queue *q;
>> +     int ret, len;
>> +     struct mutex    *vmutex;
>> +
>> +     dev = kzalloc(sizeof(*dev) + priv_size, GFP_KERNEL);
>> +     if (!dev) {
>> +             ret = -ENOMEM;
>> +             goto out;
>> +     }
>> +
>> +     kref_init(&dev->ref);
>> +     dev->ops = ops;
>> +     dev->dev = parent;
>> +
>> +     len = snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name),
>> +                     "%s", "odif");
>> +     dev->v4l2_dev.name[len] = '\0';
>
> There is no need for tricks like that, snprintf() already ensures the string
> has trailing '\0' appended to it.

Will remove the above line in v3.

>
>> +
>> +     ret = v4l2_device_register(dev->dev, &dev->v4l2_dev);
>> +     if (ret)
>> +             goto free_dev;
>> +
>> +     /* initialize locks */
>> +     spin_lock_init(&dev->lock);
>> +
>> +     /* initialize queue */
>> +     q = &dev->vbq;
>> +     memset(q, 0, sizeof(dev->vbq));
>> +     q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
>> +     q->io_modes = VB2_MMAP;
>> +     q->drv_priv = dev;
>> +     q->buf_struct_size = sizeof(struct odif_buffer);
>> +     q->ops = &odif_video_qops;
>> +     q->mem_ops = &vb2_page_memops;
>> +
>> +     vb2_queue_init(q);
>> +
>> +     mutex_init(&dev->mutex);
>> +
>> +     /* init video dma queues */
>> +     atomic_set(&dev->odif_dq.complete_cnt, 0);
>> +     INIT_LIST_HEAD(&dev->odif_dq.active);
>> +     INIT_LIST_HEAD(&dev->odif_dq.complete);
>> +     init_waitqueue_head(&dev->odif_dq.wq);
>> +
>> +     ret = -ENOMEM;
>> +     vfd = video_device_alloc();
>> +     if (!vfd)
>> +             goto unreg_dev;
>> +
>> +     *vfd = odif_template;
>> +     vfd->debug = debug;
>> +     vfd->v4l2_dev = &dev->v4l2_dev;
>> +     set_bit(V4L2_FL_USE_FH_PRIO, &vfd->flags);
>> +
>> +     vmutex = kzalloc(sizeof(struct mutex), GFP_KERNEL);
>> +     if (!vmutex)
>> +             goto err_alloc_mutex;
>> +
>> +     mutex_init(vmutex);
>> +     /*
>> +      * Provide a mutex to v4l2 core. It will be used to protect
>> +      * all fops and v4l2 ioctls.
>> +      */
>> +     vfd->lock = vmutex;
>> +
>> +     ret = video_register_device(vfd, VFL_TYPE_GRABBER, video_nr);
>> +     if (ret < 0)
>> +             goto rel_vdev;
>> +
>> +     if (video_nr != -1)
>> +             video_nr++;
>> +
>> +     dev->vfd = vfd;
>> +     video_set_drvdata(vfd, dev);
>> +
>> +     ret = odif_init_entities(dev);
>> +     if (ret)
>> +             goto unreg_video;
>> +
>> +     /* Now that everything is fine, let's add it to device list */
>> +     list_add_tail(&dev->odif_devlist, &odif_devlist);
>> +
>> +     v4l2_info(&dev->v4l2_dev, "V4L2 device registered as %s\n",
>> +               video_device_node_name(vfd));
>> +
>> +     *odif_dev = dev;
>> +     return 0;
>> +
>> +unreg_video:
>> +     video_unregister_device(vfd);
>> +rel_vdev:
>> +     kfree(vmutex);
>> +err_alloc_mutex:
>> +     video_device_release(vfd);
>> +unreg_dev:
>> +     v4l2_device_unregister(&dev->v4l2_dev);
>> +free_dev:
>> +     kfree(dev);
>> +out:
>> +     pr_err("%s: error, ret=%d", __func__, ret);
>> +     return ret;
>> +}
>> +EXPORT_SYMBOL_GPL(odif_create_instance);
>> +
>> +static int __init odif_init(void)
>> +{
>> +     return 0;
>> +}
>> +
>> +static void __exit odif_exit(void)
>> +{
>> +}
>> +
>> +module_init(odif_init);
>> +module_exit(odif_exit);
>> +
>> +MODULE_DESCRIPTION("object detection interface function module");
>> +MODULE_AUTHOR("Ming Lei");
>> +MODULE_LICENSE("GPL");
>> diff --git a/drivers/media/video/odif/odif.h b/drivers/media/video/odif/odif.h
>> new file mode 100644
>> index 0000000..25c4788
>> --- /dev/null
>> +++ b/drivers/media/video/odif/odif.h
>> @@ -0,0 +1,157 @@
>> +#ifndef _LINUX_MEDIA_ODIF_H
>> +#define _LINUX_MEDIA_ODIF_H
>> +
>> +#include <linux/types.h>
>> +#include <linux/magic.h>
>> +#include <linux/errno.h>
>> +#include <linux/kref.h>
>> +#include <linux/kernel.h>
>> +#include <media/v4l2-common.h>
>> +#include <linux/videodev2.h>
>> +#include <media/media-entity.h>
>> +#include <media/videobuf2-page.h>
>> +#include <media/v4l2-device.h>
>> +#include <media/v4l2-ioctl.h>
>> +#include <media/v4l2-ctrls.h>
>> +#include <media/v4l2-fh.h>
>> +#include <media/v4l2-event.h>
>> +
>> +#define MAX_OBJ_COUNT                40
>> +
>> +#define OBJ_DIR_UP           0
>> +#define OBJ_DIR_RIGHT                1
>> +#define OBJ_DIR_LIFT         2
>> +#define OBJ_DIR_DOWN         3
>> +
>> +
>> +#define      OD_INPUT_FROM_USER_SPACE        1
>> +#define      OD_INPUT_FROM_MEDIA_HW          2
>> +
>> +struct odif_fmt {
>> +     char  *name;
>> +     u32   fourcc;          /* v4l2 format id */
>> +     u32   depth;
>> +     u32   width, height;
>> +};
>> +
>> +struct odif_setting {
>> +     struct odif_fmt            *fmt;
>> +     enum v4l2_field            field;
>> +
>> +     /* minimize object size to be detected, unit: pixel */
>> +     u32                     min_obj_size;
>> +     u32                     obj_dir;
>> +
>> +     u32                     startx, starty;
>> +     u32                     sizex, sizey;
>> +
>> +     int                     lhit;
>> +
>> +     u32                     width, height;
>> +};
>> +
>> +/* buffer for one video frame */
>> +struct odif_buffer {
>> +     /* common v4l buffer stuff -- must be first */
>> +     struct vb2_buffer       vb;
>> +     struct list_head        list;
>> +};
>> +
>> +
>> +struct v4l2_odif_result {
>> +     struct list_head        list;
>> +
>> +     /*frame sequence*/
>> +     u32                     frm_seq;
>> +     u32                     obj_cnt;
>> +     struct v4l2_od_object   objs[0];
>> +};
>> +
>> +struct odif_dmaqueue {
>> +     atomic_t                complete_cnt;
>> +     struct list_head        complete;
>> +     struct list_head        active;
>> +     wait_queue_head_t       wq;
>> +};
>> +
>> +struct odif_entity {
>> +     /* Media controller-related fields. */
>> +     struct v4l2_subdev subdev;
>> +     unsigned int num_pads;
>> +     unsigned int num_links;
>> +
>> +     /*only one sink pad*/
>> +     struct media_pad pads[1];
>> +
>> +     /* TODO: odif controls */
>> +};
>> +
>> +struct odif_dev {
>> +     struct kref             ref;
>> +     struct device           *dev;
>> +
>> +     struct list_head        odif_devlist;
>> +     struct v4l2_device      v4l2_dev;
>> +     struct vb2_queue        vbq;
>> +     struct mutex            mutex;
>> +     spinlock_t              lock;
>> +
>> +     /* media controller entity*/
>> +     struct odif_entity      entity[2];
>> +
>> +     struct video_device     *vfd;
>> +     struct odif_dmaqueue    odif_dq;
>> +     int     working;
>> +
>> +     int     input;
>> +
>> +     /* setting */
>> +     struct odif_setting     s;
>> +
>> +     struct odif_ops *ops;
>> +
>> +     unsigned long   priv[0];
>> +};
>> +
>> +/**
>> + * struct odif_ops - interface between odif and low level hw driver
>> + * @capability:      extra capability except for V4L2_CAP_OBJ_DETECTION
>> + *   V4L2_CAP_STREAMING: mandatory, start/stop detect is triggered
>> + *           by streaming on/off
>> + *   V4L2_CAP_VIDEO_OUTPUT: hw can support to detect objects from
>> + *           user space video input
>> + *   V4L2_CAP_VIDEO_CAPTURE: hw can support to detect objects from
>> + *           internal bus, this doesn't mean capture is capable
>
> No, we can't be reinterpreting V4L2_CAP_VIDEO_CAPTURE flag this way.
> The information how data can be fed to an object detection module should
> be oxposed at a media device level. The media device driver an object
> detection device belongs to should create proper links that application
> may then activate.
>
> Such interpretation of V4L2_CAP_VIDEO_CAPTURE violates the spec and would
> just confuse applications that query device nodes. Nack.

OK, I will remove V4L2_CAP_VIDEO_CAPTURE here and let media device driver
to handle it.

>> + *
>> + * @table:   supported format table
>> + * @fmt_cnt: supported format count
>> + * @start_detect:    start_detect callback
>> + * @stop_detect:     stop_detect callback
>> + * @submit_detect:   submit_detect callback
>> + * @set_input:       change video input source
>> + */
>> +struct odif_ops {
>> +     u32     capability;
>> +     struct odif_fmt *table;
>> +     int fmt_cnt;
>> +     int (*start_detect)(struct odif_dev *odif);
>> +     int (*stop_detect)(struct odif_dev *odif);
>> +     int (*submit_detect)(struct odif_dev *odif);
>> +     int (*set_input)(struct odif_dev *odif, int next,
>> +                     const struct media_pad *remote);
>> +};
>> +
>> +#define dprintk(dev, level, fmt, arg...) \
>> +     v4l2_dbg(level, debug, &dev->v4l2_dev, fmt, ## arg)
>> +
>> +
>> +extern int odif_create_instance(struct device *parent, int priv_size,
>> +             struct odif_ops *ops, struct odif_dev **dev);
>> +extern void odif_release(struct kref *ref);
>> +extern void odif_add_detection(struct odif_dev *dev,
>> +             struct v4l2_odif_result *v4l2_fr);
>> +extern struct v4l2_odif_result *odif_allocate_detection(
>> +             struct odif_dev *dev, int cnt);
>> +extern struct odif_buffer *odif_pick_up_buffer(struct odif_dev *dev);
>> +
>> +#endif /* _LINUX_MEDIA_ODIF_H */
>
> I suggest you to merge this module with next patch and remove what's
> unnecessary for OMAP4 FDIF. IMHO creating generic object detection
> module doesn't make sense, given the complexity of hardware. We have

IMO, at least for now there are some functions which can be implemented
in a generic object detection module to avoid duplicated implementation in
each object detection hw driver: interfaces with user space, and handling object
detection from user space images.  We can let the object detection hw driver
handle the complexity of the hardware by defining appropriate callbacks.

> already the required building blocks in v4l2 core, what we need is
> specification of Face Detection interface semantics, which should be
> eventually added to the V4L DocBook.
> The object detection ioctls need to be adjusted to support camera sensors
> with face detection capability, I'll comment on patch 6/8 about this.

Expect your comments on it, :-)

thanks,
--
Ming Lei

^ permalink raw reply	[flat|nested] 24+ messages in thread

* RE: [RFC PATCH v2 4/8] media: videobuf2: introduce VIDEOBUF2_PAGE memops
  2011-12-23 12:20             ` Ming Lei
@ 2012-01-10 10:20               ` Marek Szyprowski
  2012-01-10 11:55                 ` Ming Lei
  0 siblings, 1 reply; 24+ messages in thread
From: Marek Szyprowski @ 2012-01-10 10:20 UTC (permalink / raw)
  To: 'Ming Lei'
  Cc: 'Mauro Carvalho Chehab', 'Tony Lindgren',
	'Sylwester Nawrocki', 'Alan Cox',
	linux-arm-kernel, linux-kernel, linux-media,
	'Pawel Osciak'

Hello,

On Friday, December 23, 2011 1:21 PM Ming Lei wrote:

> >> >> > Your current implementation also abuses the design and api of videobuf2 memory
> >> >> > allocators. If the allocator needs to return a custom structure to the driver
> >> >>
> >> >> I think returning vaddr is enough.
> >> >>
> >> >> > you should use cookie method. vaddr is intended to provide only a pointer to
> >> >> > kernel virtual mapping, but you pass a struct page * there.
> >> >>
> >> >> No, __get_free_pages returns virtual address instead of 'struct page *'.
> >> >
> >> > Then you MUST use cookie for it. vaddr method should return kernel virtual address
> >> > to the buffer video data. Some parts of videobuf2 relies on this - it is used by file
> >> > io emulator (read(), write() calls) and mmap equivalent for non-mmu systems.
> >> >
> >> > Manual casting in the driver is also a bad idea, that's why there are helper functions
> >>
> >> I don't see any casts are needed. The dma address can be got from vaddr with
> >> dma_map_* easily in drivers, see the usage on patch 8/8(media: video: introduce
> >> omap4 face detection module driver).
> >
> > Sorry, but I won't accept such driver/allocator which abuses the defined API. I've already
> > pointed what vaddr method is used for.
> 
> Sorry, could you describe the abuse problem in a bit detail?

Videobuf2 requires memory module handlers to provide vaddr method to provide a pointer in 
kernel virtual address space to video data (buffer content). It is used for example by 
read()/write() io method emulator. Memory allocator/handler should not be specific to any
particular use case in the device driver. That's the design. Simple.

In your case you want to give a pointer to struct page from the memory allocator to the 
driver. The cookie method has been introduced exactly for this purpose. Memory allocator
also provides a simple inline function to convert generic 'void *' return type from cookie
method to allocator specific structure/pointer. vb2_dma_contig_plane_dma_addr() and 
vb2_dma_sg_plane_desc() were examples of how passing an allocator specific type through the
cookie method works.

Best regards
-- 
Marek Szyprowski
Samsung Poland R&D Center




^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [RFC PATCH v2 4/8] media: videobuf2: introduce VIDEOBUF2_PAGE memops
  2012-01-10 10:20               ` Marek Szyprowski
@ 2012-01-10 11:55                 ` Ming Lei
  0 siblings, 0 replies; 24+ messages in thread
From: Ming Lei @ 2012-01-10 11:55 UTC (permalink / raw)
  To: Marek Szyprowski
  Cc: Mauro Carvalho Chehab, Tony Lindgren, Sylwester Nawrocki,
	Alan Cox, linux-arm-kernel, linux-kernel, linux-media,
	Pawel Osciak

Hi,

On Tue, Jan 10, 2012 at 6:20 PM, Marek Szyprowski
<m.szyprowski@samsung.com> wrote:

>> Sorry, could you describe the abuse problem in a bit detail?
>
> Videobuf2 requires memory module handlers to provide vaddr method to provide a pointer in
> kernel virtual address space to video data (buffer content). It is used for example by

Yes, this is what the patch is doing, __get_free_pages just  returns
the kernel virtual
address which will be passed to driver.

> read()/write() io method emulator. Memory allocator/handler should not be specific to any
> particular use case in the device driver. That's the design. Simple.

Most of the patch is copied from videobuf-vmalloc.c, and the
interfaces are totally same
with videobuf-vmalloc.c.

>
> In your case you want to give a pointer to struct page from the memory allocator to the

In my case, the pointer to struct page is not required to the driver
at all, so I think you
have misunderstood the patch, haven't you?

> driver. The cookie method has been introduced exactly for this purpose. Memory allocator
> also provides a simple inline function to convert generic 'void *' return type from cookie
> method to allocator specific structure/pointer. vb2_dma_contig_plane_dma_addr() and
> vb2_dma_sg_plane_desc() were examples of how passing an allocator specific type through the
> cookie method works.

thanks,
--
Ming Lei

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [RFC PATCH v2 6/8] media: v4l2: introduce two IOCTLs for object detection
  2011-12-14 14:00 ` [RFC PATCH v2 6/8] media: v4l2: introduce two IOCTLs for object detection Ming Lei
@ 2012-01-13 21:16   ` Sylwester Nawrocki
  0 siblings, 0 replies; 24+ messages in thread
From: Sylwester Nawrocki @ 2012-01-13 21:16 UTC (permalink / raw)
  To: Ming Lei
  Cc: Mauro Carvalho Chehab, Tony Lindgren, Alan Cox, linux-omap,
	linux-arm-kernel, linux-kernel, linux-media, riverful.kim

Hi Ming,

sorry for the late response. It's all looking better now, however there
is still a few things that could be improved.

On 12/14/2011 03:00 PM, Ming Lei wrote:
> This patch introduces two new IOCTLs and related data
> structure which will be used by the coming video device
> with object detect capability.
>
> The two IOCTLs and related data structure will be used by
> user space application to retrieve the results of object
> detection.
>
> The utility fdif[1] is using the two IOCTLs to find
> objects(faces) detected in raw images or video streams.
>
> [1],http://kernel.ubuntu.com/git?p=ming/fdif.git;a=shortlog;h=refs/heads/v4l2-fdif
>
> Signed-off-by: Ming Lei<ming.lei@canonical.com>
> ---
> v2:
> 	- extend face detection API to object detection API
> 	- introduce capability of V4L2_CAP_OBJ_DETECTION for object detection
> 	- 32/64 safe array parameter
> ---
>   drivers/media/video/v4l2-ioctl.c |   41 ++++++++++++-
>   include/linux/videodev2.h        |  124 ++++++++++++++++++++++++++++++++++++++
>   include/media/v4l2-ioctl.h       |    6 ++
>   3 files changed, 170 insertions(+), 1 deletions(-)
>
> diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
> index ded8b72..575d445 100644
> --- a/drivers/media/video/v4l2-ioctl.c
> +++ b/drivers/media/video/v4l2-ioctl.c
> @@ -2140,6 +2140,30 @@ static long __video_do_ioctl(struct file *file,
>   		dbgarg(cmd, "index=%d", b->index);
>   		break;
>   	}
> +	case VIDIOC_G_OD_RESULT:
> +	{
> +		struct v4l2_od_result *or = arg;
> +
> +		if (!ops->vidioc_g_od_result)
> +			break;
> +
> +		ret = ops->vidioc_g_od_result(file, fh, or);
> +
> +		dbgarg(cmd, "index=%d", or->frm_seq);
> +		break;
> +	}

> +	case VIDIOC_G_OD_COUNT:
> +	{
> +		struct v4l2_od_count *oc = arg;
> +
> +		if (!ops->vidioc_g_od_count)
> +			break;
> +
> +		ret = ops->vidioc_g_od_count(file, fh, oc);
> +
> +		dbgarg(cmd, "index=%d", oc->frm_seq);
> +		break;
> +	}

I'm uncertain if we need this ioctl at all. Now struct v4l2_od_result is:

struct v4l2_od_result {
	__u32			frame_sequence;
	__u32			object_count;
	__u32			reserved[6];
	struct v4l2_od_object	objects[0];
};

and

struct v4l2_od_object {
	__u16			type;
	__u16			confidence;
	union {
		struct v4l2_od_face_desc  face;
		struct v4l2_od_eye_desc   eye;
		struct v4l2_od_mouth_desc mouth;
		__u8	rawdata[60];
	} o;
};

If we had added a 'size' field to struct v4l2_od_result, i.e.

struct v4l2_od_result {
	__u32			size;
	__u32			frame_sequence;
	__u32			objects_count;
	__u32			reserved[5];
	struct v4l2_od_object	objects[0];
};

the application could have allocated memory for the objects array and
have the 'size' member set to the size of that allocation. Then it
would have called VIDIOC_G_OD_RESULT and the driver would have filled
the 'objects'  array, if it was big enough for the requested result
data. The driver would also update the 'objects_count'. If the size 
would be too small to fit the result data, i.e.

size < number_of_detected_objects * sizeof(struct v4l2_od_object)

the driver could return -ENOSPC error while also setting 'size' to 
the required value. Something similar is done with 
VIDIOC_G_EXT_CTRLS ioctl [3].

There is one more OD API requirement, for camera sensors with embedded
SoC ISP that support face detection, i.e. VIDIOC_G_OD_RESULT should 
allow to retrieve face detection result for the very last image frame,
i.e. current frame.

One solution to support this could be adding a 'flags' field, i.e.

struct v4l2_od_result {
	__u32			size;
	__u32			flags;
	__u32			frame_sequence;
	__u32			objects_count;
	__u16			group_index;
	__u16			group_count;
	__u16			reserved[7];
	struct v4l2_od_object	objects[0];
};

and additionally group_index to specify which face object the user is
interested in. I'm not saying we have to implement this now but it's
good to consider beforehand. The group_count would be used to return 
the number of detected faces. What do you think ?

/* flags */
#define V4L2_OD_FL_SEL_FRAME_SEQ	(0 << 0)
#define V4L2_OD_FL_SEL_FRAME_LAST	(1 << 0)
#define V4L2_OD_FL_SEL_GROUP		(1 << 1)

Or maybe we should just use "face_" instead of "group_" ?

>   	default:
>   		if (!ops->vidioc_default)
>   			break;
> @@ -2241,7 +2265,22 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
>
>   static int is_64_32_array_args(unsigned int cmd, void *parg, int *extra_len)
>   {
> -	return 0;
> +	int ret = 0;
> +
> +	switch (cmd) {
> +	case VIDIOC_G_OD_RESULT: {
> +		struct v4l2_od_result *or = parg;
> +
> +		*extra_len = or->obj_cnt *
> +			sizeof(struct v4l2_od_object);
> +		ret = 1;
> +		break;
> +	}
> +	default:
> +		break;
> +	}
> +
> +	return ret;
>   }
>
>   long
> diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
> index 4b752d5..c08ceaf 100644
> --- a/include/linux/videodev2.h
> +++ b/include/linux/videodev2.h
> @@ -270,6 +270,9 @@ struct v4l2_capability {
>   #define V4L2_CAP_RADIO			0x00040000  /* is a radio device */
>   #define V4L2_CAP_MODULATOR		0x00080000  /* has a modulator */
>
> +/* The device has capability of object detection */
> +#define V4L2_CAP_OBJ_DETECTION		0x00100000
> +
>   #define V4L2_CAP_READWRITE              0x01000000  /* read/write systemcalls */
>   #define V4L2_CAP_ASYNCIO                0x02000000  /* async I/O */
>   #define V4L2_CAP_STREAMING              0x04000000  /* streaming I/O ioctls */
> @@ -2160,6 +2163,125 @@ struct v4l2_create_buffers {
>   	__u32			reserved[8];
>   };
>
> +/**
> + * struct v4l2_od_obj_desc
> + * @centerx:	return, position in x direction of detected object

How about following convention:
    * @centerx:	[out] position of an object in horizontal direction

?
> + * @centery:	return, position in y direction of detected object
> + * @sizex:	return, size in x direction of detected object

    * @sizex:	[out] size of an object in horizontal direction

> + * @sizey:	return, size in y direction of detected object
> + * @angle:	return, angle of detected object
> + * 		0 deg ~ 359 deg, vertical is 0 deg, clockwise

So this is angle in Z axis as on figure [1], Roll [2], right ?
First let's make this number in 0.01 deg units, then our range would
be 0 ... 35999. Then we need a proper description, it's all going to
be described in the Docbook so we don't need to be that verbose at 
the comments.

 * @angle: [out] angle of object rotation in Z axis (depth) in 0.01 deg units

Of course any better definitions are welcome :)

> + * @reserved:	future extensions
> + */
> +struct v4l2_od_obj_desc {
> +	__u16		centerx;
> +	__u16		centery;
> +	__u16		sizex;
> +	__u16		sizey;
> +	__u16		angle;
> +	__u16		reserved[5];

I would prefer to avoid repeating myself again - for all pixel position
and size in v4l2 we normally use __u32, so let's follow this. Sending
same  patch over and over isn't helpful, even if by some miracle you've
had convinced me, there will be other people that won't accept that :-)

And let's not be afraid of adding new data types to v4l2, which likely
are anyway going to be needed in the future. How about:

struct v4l2_pix_position {
	__s32	x;
	__s32	y;
};

struct v4l2_pix_size {
	__u32	width;
	__u32	height;
};

Alternatively we might reuse the v4l2_frmsize_discrete structure,
however new structure has my preference.

struct v4l2_od_obj_desc {
	struct v4l2_pix_position	center;
	struct v4l2_pix_size		size;
	__u16				angle;
	__u16				reserved[5];
};

OR

struct v4l2_od_obj_desc {
	struct v4l2_pix_position	center;
	struct v4l2_frmsize_discrete	size;
	__u16				angle;
	__u16				reserved[3];
};

sizeof(struct v4l2_od_obj_desc) = 6 * 4

> +};
> +
> +/**
> + * struct v4l2_od_face_desc
> + * @id:		return, used to be associated with detected eyes, mouth,
> + * 		and other objects inside this face, and each face in one
> + * 		frame has a unique id, start from 1
> + * @smile_level:return, smile level of the face

For the smile_level it shouldn't hurt if we assume standard value range
of 0...99, but this would go to the DocBook, your comment above is fine,
except s/:return/: [out].

> + * @f:		return, face description
> + */
> +struct v4l2_od_face_desc {
> +	__u16	id;

Actually I was going to propose something like this 'id' member:) I think
we'll also need a method for user space to retrieve FD result by such sort
of a key, in addition to or instead of the frame_sequence key.

However, to be more generic, perhaps we could move it to the v4l2_od_obj_desc
structure ? And rename it to group_index, which would mean a positive non-zero
sequence number assigned to a group of objects, e.g. it would be one value
per face, eyes and mouth data set ? What do you think ?

> +	__u8	smile_level;
> +	__u8    reserved[15];
> +
> +	struct v4l2_od_obj_desc	f;
> +};

It might be good idea to align the data structures' size to at least
4 bytes. I would have changed your proposed structure to:

struct v4l2_od_face_desc {
	__u16	id;
	__u16	smile_level;
	__u16   reserved[10];
	struct v4l2_od_obj_desc	face;
};

sizeof(struct v4l2_od_face_desc) = 12 * 4

> +
> +/**
> + * struct v4l2_od_eye_desc
> + * @face_id:	return, used to associate with which face, 0 means
> + * 		no face associated with the eye
> + * @blink_level:return, blink level of the eye
> + * @e:		return, eye description
> + */
> +struct v4l2_od_eye_desc {
> +	__u16	face_id;
> +	__u8	blink_level;
> +	__u8    reserved[15];
> +
> +	struct v4l2_od_obj_desc	e;
> +};

How about:

struct v4l2_od_eye_desc {
	__u16	face_id;
	__u16	blink_level;
	__u16   reserved[10];
	struct v4l2_od_obj_desc	eye;
};
sizeof(struct v4l2_od_eye_desc) = 12 * 4

?
> +/**
> + * struct v4l2_od_mouth_desc
> + * @face_id:	return, used to associate with which face, 0 means
> + * 		no face associated with the mouth
> + * @m:		return, mouth description
> + */
> +struct v4l2_od_mouth_desc {
> +	__u16	face_id;
> +	__u8    reserved[16];
> +
> +	struct v4l2_od_obj_desc	m;
> +};
and

struct v4l2_od_mouth_desc {
	__u16	face_id;
	__u16   reserved[11];
	struct v4l2_od_obj_desc	mouth;
};
sizeof(struct v4l2_od_mouth_desc) = 12 * 4

> +
> +enum v4l2_od_type {
> +	V4L2_OD_TYPE_FACE		= 1,
> +	V4L2_OD_TYPE_LEFT_EYE		= 2,
> +	V4L2_OD_TYPE_RIGHT_EYE		= 3,
> +	V4L2_OD_TYPE_MOUTH		= 4,
> +	V4L2_OD_TYPE_USER_DEFINED	= 255,

Let's not add any "user defined" types, at the time anything more
is needed it should be added here explicitly.

> +	V4L2_OD_TYPE_MAX_CNT		= 256,

	V4L2_OD_TYPE_MAX		= 256, ?

But what do you think it is needed for ?

> +};
> +
> +/**
> + * struct v4l2_od_object
> + * @type:	return, type of detected object

How about

+ * @type:	[out] object type (from enum v4l2_od_type)

?
> + * @confidence:	return, confidence level of detection result
> + * 		0: the highest level, 100: the lowest level

    * @confidence: [out] confidence level of the detection result
?
Let's leave the range specification for the DocBook.

> + * @face:	return, detected face object description
> + * @eye:	return, detected eye object description
> + * @mouth:	return, detected mouth object description
> + * @rawdata:	return, user defined data

No user defined data please. How the applications are supposed to know what
rawdata means ? If any new structure is needed it should be added to the union.
Let's treat 'rawdata' as a place holder only. This is how the "__u8 data[64];" 
array is specified for struct v4l2_event:

"__u8	data[64]	Event data. Defined by the event type. The union
                        should be used to define easily accessible type
                        for events."

> + */
> +struct v4l2_od_object {
> +	enum v4l2_od_type	type;

	__u16			type;

to avoid having enumeration in the user space interface ?

> +	__u16			confidence;

	__u32	reserved[7];

> +	union {
> +		struct v4l2_od_face_desc face;

> +		struct v4l2_od_face_desc eye;
> +		struct v4l2_od_face_desc mouth;

I guess you meant
		struct v4l2_od_eye_desc   eye;
		struct v4l2_od_mouth_desc mouth;
?
> +		__u8	rawdata[60];
> +	} o;

won't probably hurt here and would allow future extensions.

> +};

I think being able to fit struct v4l2_od_object in the "u" union of
struct v4l2_event is a must have, as events seem crucial for the
whole object detection interface. For instance user application could
set thresholds for some parameters to get notified with an event when
any gets out of configured bounds. The event interface could be also
used for retrieving OD result instead of polling with VIDIOC_G_OD_RESULT.
Currently struct v4l2_event is:

struct v4l2_event {
	__u32				type;
	union {
		struct v4l2_event_vsync		vsync;
		struct v4l2_event_ctrl		ctrl;
		struct v4l2_event_frame_sync	frame_sync;
		__u8				data[64];
	} u;
	__u32				pending;
	__u32				sequence;
	struct timespec			timestamp;
	__u32				id;
	__u32				reserved[8];
};

Hence we have only 64 bytes for struct v4l2_event_od. It seems that
you kept that when designing the above data structures ?

With my corrections above  sizeof(struct v4l2_od_object) (without the
reserved field would be 13 * 4, which isn't bad. I'm just a bit 
concerned about the structures alignment.

> +/**
> + * struct v4l2_od_result - VIDIOC_G_OD_RESULT argument
> + * @frm_seq:	entry, frame sequence No.

    * @frame_sequence: [in] frame sequence number

> + * @obj_cnt:	return, how many objects detected in frame @frame_seq
    * @object_count: [out] number of object detected for @frame_sequence

> + * @reserved:	reserved for future use
> + * @od:		return, result of detected objects in frame @frame_seq

    * @od: [out] objects detected for @frame_sequence ?

> + */
> +struct v4l2_od_result {
> +	__u32			frm_seq;
> +	__u32			obj_cnt;
> +	__u32			reserved[6];
> +	struct v4l2_od_object	od[0];

Let's make this:

  	struct v4l2_od_object	objects[0];

> +};
> +
> +/**
> + * struct v4l2_od_count - VIDIOC_G_OD_COUNT argument
> + * @frm_seq:	entry, frame sequence No. for object detection
> + * @obj_cnt:	return, how many objects detected from the @frm_seq
> + * @reserved:	reserved for future usage.
> + */
> +struct v4l2_od_count {
> +	__u32	frm_seq;
> +	__u32	obj_cnt;
> +	__u32	reserved[6];
> +};

This structure can go away if we change the VIDIOC_G_OD_RESULT
semantics as I described above..

> +
>   /*
>    *	I O C T L   C O D E S   F O R   V I D E O   D E V I C E S
>    *
> @@ -2254,6 +2376,8 @@ struct v4l2_create_buffers {
>      versions */
>   #define VIDIOC_CREATE_BUFS	_IOWR('V', 92, struct v4l2_create_buffers)
>   #define VIDIOC_PREPARE_BUF	_IOWR('V', 93, struct v4l2_buffer)
> +#define VIDIOC_G_OD_COUNT	_IOWR('V', 94, struct v4l2_od_count)
> +#define VIDIOC_G_OD_RESULT	_IOWR('V', 95, struct v4l2_od_result)
>
>   /* Reminder: when adding new ioctls please add support for them to
>      drivers/media/video/v4l2-compat-ioctl32.c as well! */
> diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
> index 4d1c74a..81a32a3 100644
> --- a/include/media/v4l2-ioctl.h
> +++ b/include/media/v4l2-ioctl.h
> @@ -270,6 +270,12 @@ struct v4l2_ioctl_ops {
>   	int (*vidioc_unsubscribe_event)(struct v4l2_fh *fh,
>   					struct v4l2_event_subscription *sub);
>
> +	/* object detect IOCTLs */
> +	int (*vidioc_g_od_count) (struct file *file, void *fh,
> +					struct v4l2_od_count *arg);

..and this ioctl too.

> +	int (*vidioc_g_od_result) (struct file *file, void *fh,
> +					struct v4l2_od_result *arg);
> +
>   	/* For other private ioctls */
>   	long (*vidioc_default)	       (struct file *file, void *fh,
>   					bool valid_prio, int cmd, void *arg);

[1] http://i.stack.imgur.com/qb6hU.png
[2] http://www.dtic.mil/cgi-bin/GetTRDoc?AD=ADA434817
[3] http://linuxtv.org/downloads/v4l-dvb-apis/vidioc-g-ext-ctrls.html

--
Thanks,
Sylwester

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [RFC PATCH v2 7/8] media: video: introduce object detection driver module
  2012-01-04  8:13     ` Ming Lei
@ 2012-01-17 21:05       ` Sylwester Nawrocki
  0 siblings, 0 replies; 24+ messages in thread
From: Sylwester Nawrocki @ 2012-01-17 21:05 UTC (permalink / raw)
  To: Ming Lei
  Cc: Mauro Carvalho Chehab, Tony Lindgren, Alan Cox, linux-omap,
	linux-arm-kernel, linux-kernel, linux-media

On 01/04/2012 09:13 AM, Ming Lei wrote:
>>> +
>>> +int odif_open(struct file *file)
>>> +{
>>> +     struct odif_dev *dev = video_drvdata(file);
>>> +
>>> +     kref_get(&dev->ref);
>>> +     return v4l2_fh_open(file);
>>> +}
>>
>> Individual drivers may need to perform some initialization when
>> device node is opened. So I consider taking this possibility away
>> from them not a good idea.
> 
> OK, it can be handled easily by introducing new callbacks(such as.
> .open_detect and .close_detect for close) in struct odif_ops.

Certainly, we can introduce plenty of new callbacks, another level of 
indirection that would just obfuscate things. It should rather be done 
like v4l2-mem2mem does, where file operation handlers are implemented 
by each driver and from their open()/close() handlers relevant init
and release functions are called (v4l2_m2m_init/v4l2_m2m_release).

I think we can always change the kernel interfaces once there is more 
devices like OMAP4 FDIF. It may look easy for you when you refer to 
just one hardware implementation with your theoretically generic module. 
Or have you referred to other devices than OMAP4 FDIF when designing it ?

>>> +static int vidioc_g_od_result(struct file *file, void *priv,
>>> +                                     struct v4l2_od_result *f)
>>> +{
>>> +     struct odif_dev *dev = video_drvdata(file);
>>> +     unsigned long flags;
>>> +     struct v4l2_odif_result *tmp;
>>> +     struct v4l2_odif_result *fr = NULL;
>>> +     unsigned int cnt = 0;
>>> +     int ret = -EINVAL;
>>> +
>>> +     spin_lock_irqsave(&dev->lock, flags);
>>> +     list_for_each_entry(tmp,&dev->odif_dq.complete, list)
>>> +             if (tmp->frm_seq == f->frm_seq) {
>>> +                     fr = tmp;
>>> +                     list_del(&tmp->list);
>>> +                     break;
>>> +             }
>>> +     spin_unlock_irqrestore(&dev->lock, flags);
>>> +
>>> +     if (fr) {
>>> +             ret = 0;
>>> +             cnt = min(f->obj_cnt, fr->obj_cnt);
>>> +             if (cnt)
>>> +                     memcpy(f->od, fr->objs,
>>> +                             sizeof(struct v4l2_od_object) * cnt);
>>> +             f->obj_cnt = cnt;
>>> +
>>> +             free_result(fr);
>>
>> Some drivers may be designed so they do not discard the result data
>> automatically once it is read by user application. AFAICS this module
>> doesn't allow such behaviour.
>>
>> For instance, it might be required to discard the oldest results data
>> when the ring buffer gets filled in.
> 
> Looks like no any benefit about keeping the old result data, could you
> give some use cases which require the old results need to be kept for
> some time?

Consider scenario where one thread writes image data and multiple threads
read the FD results. Having all data discarded on first read only one 
thread could get the data. This is not what we want in a standard API.

Also in V4L2 it is always assumed that multiple applications can read
state of a device, and the application priority API [1] aims at
managing the devices state modifications among multiple processes.
 ...
>>> +static int buffer_init(struct vb2_buffer *vb)
>>> +{
>>> +     struct odif_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
>>> +
>>> +     /*
>>> +      * This callback is called once per buffer, after its allocation.
>>> +      *
>>> +      * Vivi does not allow changing format during streaming, but it is
>>> +      * possible to do so when streaming is paused (i.e. in streamoff state).
>>> +      * Buffers however are not freed when going into streamoff and so
>>> +      * buffer size verification has to be done in buffer_prepare, on each
>>> +      * qbuf.
>>> +      * It would be best to move verification code here to buf_init and
>>> +      * s_fmt though.
>>> +      */
>>> +     dprintk(dev, 1, "%s vaddr=%p\n", __func__,
>>> +                     vb2_plane_vaddr(vb, 0));
>>> +
>>> +     return 0;
>>> +}
>>
>> As I already mentioned this empty callback can be removed. Anyway IMO the
>> implementation of the buffer queue operations should be left to individual
>> drivers. Having them in generic module doesn't sound flexible enough.
> 
> IMO, the buffer queue operations can be shared for use case of
> detecting objects from user space images, so it may be kept it in
> generic driver.

Not necessarily, sometimes device specific logic needs to be implemented
in these callbacks. And the ioctl handlers (vidioc_*) should be just 
pass-through for the vb2 callbacks.

>>
>>> +void odif_add_detection(struct odif_dev *dev,
>>> +             struct v4l2_odif_result *v4l2_fr)
>>> +{
>>> +     unsigned long flags;
>>> +     struct v4l2_odif_result *old = NULL;
>>> +     struct v4l2_odif_result *tmp;
>>> +
>>> +     spin_lock_irqsave(&dev->lock, flags);
>>> +     list_for_each_entry(tmp,&dev->odif_dq.complete, list)
>>> +             if (tmp->frm_seq == v4l2_fr->frm_seq) {
>>> +                     old = tmp;
>>> +                     list_del(&tmp->list);
>>> +                     break;
>>> +             }
>>> +     list_add_tail(&v4l2_fr->list,&dev->odif_dq.complete);
>>> +     spin_unlock_irqrestore(&dev->lock, flags);
>>> +
>>> +     if (old)
>>> +             free_result(old);
>>> +     else
>>> +             atomic_inc(&dev->odif_dq.complete_cnt);
>>> +}
>>> +EXPORT_SYMBOL_GPL(odif_add_detection);
>>> +
>>> +struct v4l2_odif_result *odif_allocate_detection(struct odif_dev *dev,
>>> +             int cnt)
>>> +{
>>> +     struct v4l2_odif_result *result = NULL;
>>> +     unsigned long flags;
>>> +
>>> +     if (atomic_read(&dev->odif_dq.complete_cnt)>=
>>> +                     result_cnt_threshold) {
>>> +             spin_lock_irqsave(&dev->lock, flags);
>>> +             result = list_entry(dev->odif_dq.complete.next,
>>> +                                     struct v4l2_odif_result, list);
>>> +             list_del(&result->list);
>>> +             spin_unlock_irqrestore(&dev->lock, flags);
>>> +
>>> +             atomic_dec(&dev->odif_dq.complete_cnt);
>>> +     }
>>> +
>>> +     if (!result)    goto allocate_result;
>>> +
>>> +     /* reuse the old one if count is matched */
>>> +     if (result->obj_cnt == cnt)
>>> +             goto out;
>>> +
>>> +     /*free the old result*/
>>> +     free_result(result);
>>> +
>>> +allocate_result:
>>> +     result = kzalloc(sizeof(*result) +
>>> +                     cnt * sizeof(struct v4l2_od_object), GFP_ATOMIC);
>>
>> I prefer not allocating memory in interrupt context like this, especially
>> as this can be avoided without significant effort and resources waste.
> 
> Considered that the allocated space is not very large, maybe it can be allocated
> in interrupt context. The count of v4l2_od_object to be allocated is variant,
> so it is not easy to reserve buffers during driver init to handle variant buffer
> allocation.  Also this can be left for future optimization.

Right. This is a policy decision that this module makes for all future drivers
that would use it. Some hardware also allows to limit number of detected faces,
making it easy to allocate in advance the result data buffers.

...
>>> +static int odif_init_entities(struct odif_dev *odif)
>>> +{
>>> +     const u32 flags = MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_DYNAMIC;
>>> +     int ret;
>>> +     struct odif_entity *entity;
>>> +     struct media_entity *source;
>>> +     struct media_entity *sink;
>>> +
>>> +     /*entity[0] is the entity for object detection hw*/
>>> +     entity =&odif->entity[0];
>>> +     entity->num_links = 2;
>>> +     entity->num_pads = 1;
>>> +     entity->pads[0].flags = MEDIA_PAD_FL_SINK;
>>> +
>>> +     v4l2_subdev_init(&entity->subdev,&odif_subdev_ops);
>>> +     v4l2_set_subdevdata(&entity->subdev, odif);
>>> +     strlcpy(entity->subdev.name, "object detect",
>>> +                     sizeof(entity->subdev.name));
>>> +     ret = media_entity_init(&entity->subdev.entity,
>>> +             entity->num_pads, entity->pads, 1);
>>> +
>>> +     if (ret)
>>> +             goto out;
>>> +
>>> +     /*entity[1] is the video entity which sources the user space video*/
>>> +     entity =&odif->entity[1];
>>> +     entity->num_links = 1;
>>> +     entity->num_pads = 1;
>>> +     entity->pads[0].flags = MEDIA_PAD_FL_SOURCE;
>>> +
>>> +     v4l2_subdev_init(&entity->subdev,&odif_subdev_ops);
>>> +     v4l2_set_subdevdata(&entity->subdev, odif);
>>> +     strlcpy(entity->subdev.name, "user space video",
>>> +                     sizeof(entity->subdev.name));
>>> +     ret = media_entity_init(&entity->subdev.entity,
>>> +             entity->num_pads, entity->pads, 0);
>>> +
>>> +     /* create the default link */
>>> +     source =&odif->entity[1].subdev.entity;
>>> +     sink =&odif->entity[0].subdev.entity;
>>> +     sink->ops =&odif_entity_ops;
>>> +     ret = media_entity_create_link(source, 0,
>>> +                                    sink, 0, flags);
>>> +     if (ret)
>>> +             goto out;
>>> +
>>> +     /* init the default hw link*/
>>> +     if (odif->ops->set_input)
>>> +             ret = odif->ops->set_input(odif,
>>> +                     OD_INPUT_FROM_USER_SPACE,
>>> +&odif->entity[1].pads[0]);
>>> +     if (ret)
>>> +             goto out;
>>> +
>>> +     odif->input = OD_INPUT_FROM_USER_SPACE;
>>> +
>>> +     /* register the two subdevices */
>>> +     ret = v4l2_device_register_subdev(&odif->v4l2_dev,
>>> +&odif->entity[0].subdev);
>>> +     if (ret)
>>> +             goto out;
>>> +
>>> +     ret = v4l2_device_register_subdev(&odif->v4l2_dev,
>>> +&odif->entity[1].subdev);
>>
>> You're creating v4l2 subdevice but not all drivers would require it.
>> Also individual drivers will usually manage connections between the media
>> entities on their own. So IMHO this module goes a little to far on fixing
>> up driver's architecture.
>>
>> Also what are there two subdevs needed for ?
> 
> The two subsevs are used to describe two media entity, one of which
> is the object detection hw, and another is the video entity which sources
> the user space video. Without the two entities, I don't know how media
> controller connects these into framework.

The video device also represents a media entity. You would just have to
embed a media pad in the video device driver, so the first subdev can be 
linked to it. However the truth is that some devices might need a subdev 
instance to model FD engine, while for others it would be completely 
irrelevant. 

...
>> I suggest you to merge this module with next patch and remove what's
>> unnecessary for OMAP4 FDIF. IMHO creating generic object detection
>> module doesn't make sense, given the complexity of hardware. We have
> 
> IMO, at least now there are some functions which can be implemented
> in generic object detection module to avoid duplicated implementation in

I know from experience that inflexible frameworks are worse than no frameworks. 
And I feel we don't have enough experience with object detection devices 
right now to come up with a decent generic kernel framework. With only one 
device using your generic OD module, it just adds complexity and obfuscates 
things.

> object detection hw driver: interfaces with user space, handling object
> detection from user-space images.  We can let the object detection hw driver
> handle the complexity of the hardware by defining appropriate callbacks.

I'm still not convinced. I can't see the value in having a generic module
that is used by only one driver, given the diversity of today's media systems.

>> already the required building blocks in v4l2 core, what we need is
>> specification of Face Detection interface semantics, which should be
>> eventually added to the V4L DocBook.
>> The object detection ioctls need to be adjusted to support camera sensors
>> with face detection capability, I'll comment on patch 6/8 about this.
> 
> Expect your comments on it, :-)

I finally managed to find a time to review it. Sorry about the delay.

[1] http://linuxtv.org/downloads/v4l-dvb-apis/app-pri.html

--

Regards,
Sylwester

^ permalink raw reply	[flat|nested] 24+ messages in thread

end of thread, other threads:[~2012-01-17 21:05 UTC | newest]

Thread overview: 24+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2011-12-14 14:00 [RFC PATCH v2 0/7] media: introduce object detection(OD) driver Ming Lei
2011-12-14 14:00 ` [RFC PATCH v2 1/8] omap4: introduce fdif(face detect module) hwmod Ming Lei
2011-12-16  5:53   ` Paul Walmsley
2011-12-16 14:54     ` Cousson, Benoit
2011-12-19 21:48       ` Paul Walmsley
2011-12-14 14:00 ` [RFC PATCH v2 2/8] omap4: build fdif omap device from hwmod Ming Lei
2011-12-14 14:00 ` [RFC PATCH v2 3/8] media: videobuf2: move out of setting pgprot_noncached from vb2_mmap_pfn_range Ming Lei
2011-12-14 14:00 ` [RFC PATCH v2 4/8] media: videobuf2: introduce VIDEOBUF2_PAGE memops Ming Lei
2011-12-22  9:28   ` Marek Szyprowski
2011-12-23  9:22     ` Ming Lei
2011-12-23  9:34       ` Marek Szyprowski
2011-12-23  9:51         ` Ming Lei
2011-12-23 10:38           ` Marek Szyprowski
2011-12-23 12:20             ` Ming Lei
2012-01-10 10:20               ` Marek Szyprowski
2012-01-10 11:55                 ` Ming Lei
2011-12-14 14:00 ` [RFC PATCH v2 5/8] media: v4l2-ioctl: support 64/32 compatible array parameter Ming Lei
2011-12-14 14:00 ` [RFC PATCH v2 6/8] media: v4l2: introduce two IOCTLs for object detection Ming Lei
2012-01-13 21:16   ` Sylwester Nawrocki
2011-12-14 14:00 ` [RFC PATCH v2 7/8] media: video: introduce object detection driver module Ming Lei
2011-12-29 17:16   ` Sylwester Nawrocki
2012-01-04  8:13     ` Ming Lei
2012-01-17 21:05       ` Sylwester Nawrocki
2011-12-14 14:00 ` [RFC PATCH v2 8/8] media: video: introduce omap4 face detection module driver Ming Lei

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).