linux-media.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH v7] media: imx: add mem2mem device
@ 2019-01-17 15:50 Philipp Zabel
  2019-01-18  9:30 ` Hans Verkuil
  2019-02-12 19:01 ` Tim Harvey
  0 siblings, 2 replies; 15+ messages in thread
From: Philipp Zabel @ 2019-01-17 15:50 UTC (permalink / raw)
  To: linux-media; +Cc: Steve Longerbeam, Hans Verkuil, kernel

Add a single imx-media mem2mem video device that uses the IPU IC PP
(image converter post processing) task for scaling and colorspace
conversion.
On i.MX6Q/DL SoCs with two IPUs currently only the first IPU is used.

The hardware only supports writing to destination buffers up to
1024x1024 pixels in a single pass, arbitrary sizes can be achieved
by rendering multiple tiles per frame.

Signed-off-by: Philipp Zabel <p.zabel@pengutronix.de>
[slongerbeam@gmail.com: use ipu_image_convert_adjust(), fix
 device_run() error handling, add missing media-device header,
 unregister and remove the mem2mem device in error paths in
 imx_media_probe_complete() and in imx_media_remove()]
Signed-off-by: Steve Longerbeam <slongerbeam@gmail.com>
---
Changes since v6 [1]:
 - Change driver name in querycap to imx-media-csc-scaler
 - Drop V4L2_SEL_TGT_COMPOSE_PADDED from vidioc_g_selection
 - Simplify error handling in ipu_csc_scaler_init_controls

[1] https://patchwork.linuxtv.org/patch/53757/
---
 drivers/staging/media/imx/Kconfig             |   1 +
 drivers/staging/media/imx/Makefile            |   1 +
 .../staging/media/imx/imx-media-csc-scaler.c  | 856 ++++++++++++++++++
 drivers/staging/media/imx/imx-media-dev.c     |  34 +-
 drivers/staging/media/imx/imx-media.h         |  10 +
 5 files changed, 898 insertions(+), 4 deletions(-)
 create mode 100644 drivers/staging/media/imx/imx-media-csc-scaler.c

diff --git a/drivers/staging/media/imx/Kconfig b/drivers/staging/media/imx/Kconfig
index bfc17de56b17..07013cb3cb66 100644
--- a/drivers/staging/media/imx/Kconfig
+++ b/drivers/staging/media/imx/Kconfig
@@ -6,6 +6,7 @@ config VIDEO_IMX_MEDIA
 	depends on HAS_DMA
 	select VIDEOBUF2_DMA_CONTIG
 	select V4L2_FWNODE
+	select V4L2_MEM2MEM_DEV
 	---help---
 	  Say yes here to enable support for video4linux media controller
 	  driver for the i.MX5/6 SOC.
diff --git a/drivers/staging/media/imx/Makefile b/drivers/staging/media/imx/Makefile
index 698a4210316e..8f1ba788000b 100644
--- a/drivers/staging/media/imx/Makefile
+++ b/drivers/staging/media/imx/Makefile
@@ -6,6 +6,7 @@ imx-media-ic-objs := imx-ic-common.o imx-ic-prp.o imx-ic-prpencvf.o
 obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx-media.o
 obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx-media-common.o
 obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx-media-capture.o
+obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx-media-csc-scaler.o
 obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx-media-vdic.o
 obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx-media-ic.o
 
diff --git a/drivers/staging/media/imx/imx-media-csc-scaler.c b/drivers/staging/media/imx/imx-media-csc-scaler.c
new file mode 100644
index 000000000000..44f437da0a9a
--- /dev/null
+++ b/drivers/staging/media/imx/imx-media-csc-scaler.c
@@ -0,0 +1,856 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * i.MX IPUv3 IC PP mem2mem CSC/Scaler driver
+ *
+ * Copyright (C) 2011 Pengutronix, Sascha Hauer
+ * Copyright (C) 2018 Pengutronix, Philipp Zabel
+ */
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/version.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <video/imx-ipu-v3.h>
+#include <video/imx-ipu-image-convert.h>
+
+#include <media/media-device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "imx-media.h"
+
+#define fh_to_ctx(__fh)	container_of(__fh, struct ipu_csc_scaler_ctx, fh)
+
+enum {
+	V4L2_M2M_SRC = 0,
+	V4L2_M2M_DST = 1,
+};
+
+struct ipu_csc_scaler_priv {
+	struct imx_media_video_dev	vdev;
+
+	struct v4l2_m2m_dev		*m2m_dev;
+	struct device			*dev;
+
+	struct imx_media_dev		*md;
+
+	struct mutex			mutex;	/* mem2mem device mutex */
+};
+
+#define vdev_to_priv(v) container_of(v, struct ipu_csc_scaler_priv, vdev)
+
+/* Per-queue, driver-specific private data */
+struct ipu_csc_scaler_q_data {
+	struct v4l2_pix_format		cur_fmt;
+	struct v4l2_rect		rect;
+};
+
+struct ipu_csc_scaler_ctx {
+	struct ipu_csc_scaler_priv	*priv;
+
+	struct v4l2_fh			fh;
+	struct ipu_csc_scaler_q_data	q_data[2];
+	struct ipu_image_convert_ctx	*icc;
+
+	struct v4l2_ctrl_handler	ctrl_hdlr;
+	int				rotate;
+	bool				hflip;
+	bool				vflip;
+	enum ipu_rotate_mode		rot_mode;
+};
+
+static struct ipu_csc_scaler_q_data *get_q_data(struct ipu_csc_scaler_ctx *ctx,
+						enum v4l2_buf_type type)
+{
+	if (V4L2_TYPE_IS_OUTPUT(type))
+		return &ctx->q_data[V4L2_M2M_SRC];
+	else
+		return &ctx->q_data[V4L2_M2M_DST];
+}
+
+/*
+ * mem2mem callbacks
+ */
+
+static void job_abort(void *_ctx)
+{
+	struct ipu_csc_scaler_ctx *ctx = _ctx;
+
+	if (ctx->icc)
+		ipu_image_convert_abort(ctx->icc);
+}
+
+static void ipu_ic_pp_complete(struct ipu_image_convert_run *run, void *_ctx)
+{
+	struct ipu_csc_scaler_ctx *ctx = _ctx;
+	struct ipu_csc_scaler_priv *priv = ctx->priv;
+	struct vb2_v4l2_buffer *src_buf, *dst_buf;
+
+	src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+	dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+	dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp;
+	dst_buf->timecode = src_buf->timecode;
+
+	v4l2_m2m_buf_done(src_buf, run->status ? VB2_BUF_STATE_ERROR :
+						 VB2_BUF_STATE_DONE);
+	v4l2_m2m_buf_done(dst_buf, run->status ? VB2_BUF_STATE_ERROR :
+						 VB2_BUF_STATE_DONE);
+
+	v4l2_m2m_job_finish(priv->m2m_dev, ctx->fh.m2m_ctx);
+	kfree(run);
+}
+
+static void device_run(void *_ctx)
+{
+	struct ipu_csc_scaler_ctx *ctx = _ctx;
+	struct ipu_csc_scaler_priv *priv = ctx->priv;
+	struct vb2_v4l2_buffer *src_buf, *dst_buf;
+	struct ipu_image_convert_run *run;
+	int ret;
+
+	src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+	dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+	run = kzalloc(sizeof(*run), GFP_KERNEL);
+	if (!run)
+		goto err;
+
+	run->ctx = ctx->icc;
+	run->in_phys = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+	run->out_phys = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+
+	ret = ipu_image_convert_queue(run);
+	if (ret < 0) {
+		v4l2_err(ctx->priv->vdev.vfd->v4l2_dev,
+			 "%s: failed to queue: %d\n", __func__, ret);
+		goto err_free_run;
+	}
+
+	return;
+
+err_free_run:
+	/*
+	 * The run was never queued, so the ipu_ic_pp_complete() callback
+	 * will never be called to free it - free it here.
+	 */
+	kfree(run);
+err:
+	v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+	v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+	v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+	v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
+	v4l2_m2m_job_finish(priv->m2m_dev, ctx->fh.m2m_ctx);
+}
+
+/*
+ * Video ioctls
+ */
+static int ipu_csc_scaler_querycap(struct file *file, void *priv,
+				   struct v4l2_capability *cap)
+{
+	strscpy(cap->driver, "imx-media-csc-scaler", sizeof(cap->driver));
+	strscpy(cap->card, "imx-media-csc-scaler", sizeof(cap->card));
+	strscpy(cap->bus_info, "platform:imx-media-csc-scaler",
+		sizeof(cap->bus_info));
+
+	return 0;
+}
+
+static int ipu_csc_scaler_enum_fmt(struct file *file, void *fh,
+				   struct v4l2_fmtdesc *f)
+{
+	u32 fourcc;
+	int ret;
+
+	ret = imx_media_enum_format(&fourcc, f->index, CS_SEL_ANY);
+	if (ret)
+		return ret;
+
+	f->pixelformat = fourcc;
+
+	return 0;
+}
+
+static int ipu_csc_scaler_g_fmt(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	struct ipu_csc_scaler_ctx *ctx = fh_to_ctx(priv);
+	struct ipu_csc_scaler_q_data *q_data;
+
+	q_data = get_q_data(ctx, f->type);
+
+	f->fmt.pix = q_data->cur_fmt;
+
+	return 0;
+}
+
+static int ipu_csc_scaler_try_fmt(struct file *file, void *priv,
+				  struct v4l2_format *f)
+{
+	struct ipu_csc_scaler_ctx *ctx = fh_to_ctx(priv);
+	struct ipu_csc_scaler_q_data *q_data = get_q_data(ctx, f->type);
+	struct ipu_image test_in, test_out;
+
+	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+		struct ipu_csc_scaler_q_data *q_data_in =
+			get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+
+		test_out.pix = f->fmt.pix;
+		test_in.pix = q_data_in->cur_fmt;
+	} else {
+		struct ipu_csc_scaler_q_data *q_data_out =
+			get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+		test_in.pix = f->fmt.pix;
+		test_out.pix = q_data_out->cur_fmt;
+	}
+
+	ipu_image_convert_adjust(&test_in, &test_out, ctx->rot_mode);
+
+	f->fmt.pix = (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) ?
+		test_out.pix : test_in.pix;
+
+	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+		f->fmt.pix.colorspace = q_data->cur_fmt.colorspace;
+		f->fmt.pix.ycbcr_enc = q_data->cur_fmt.ycbcr_enc;
+		f->fmt.pix.xfer_func = q_data->cur_fmt.xfer_func;
+		f->fmt.pix.quantization = q_data->cur_fmt.quantization;
+	} else if (f->fmt.pix.colorspace == V4L2_COLORSPACE_DEFAULT) {
+		f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
+		f->fmt.pix.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+		f->fmt.pix.xfer_func = V4L2_XFER_FUNC_DEFAULT;
+		f->fmt.pix.quantization = V4L2_QUANTIZATION_DEFAULT;
+	}
+
+	return 0;
+}
+
+static int ipu_csc_scaler_s_fmt(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	struct ipu_csc_scaler_q_data *q_data;
+	struct ipu_csc_scaler_ctx *ctx = fh_to_ctx(priv);
+	struct vb2_queue *vq;
+	int ret;
+
+	vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+	if (vb2_is_busy(vq)) {
+		v4l2_err(ctx->priv->vdev.vfd->v4l2_dev, "%s queue busy\n",
+			 __func__);
+		return -EBUSY;
+	}
+
+	q_data = get_q_data(ctx, f->type);
+
+	ret = ipu_csc_scaler_try_fmt(file, priv, f);
+	if (ret < 0)
+		return ret;
+
+	q_data->cur_fmt.width = f->fmt.pix.width;
+	q_data->cur_fmt.height = f->fmt.pix.height;
+	q_data->cur_fmt.pixelformat = f->fmt.pix.pixelformat;
+	q_data->cur_fmt.field = f->fmt.pix.field;
+	q_data->cur_fmt.bytesperline = f->fmt.pix.bytesperline;
+	q_data->cur_fmt.sizeimage = f->fmt.pix.sizeimage;
+
+	/* Reset cropping/composing rectangle */
+	q_data->rect.left = 0;
+	q_data->rect.top = 0;
+	q_data->rect.width = q_data->cur_fmt.width;
+	q_data->rect.height = q_data->cur_fmt.height;
+
+	if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+		/* Set colorimetry on the output queue */
+		q_data->cur_fmt.colorspace = f->fmt.pix.colorspace;
+		q_data->cur_fmt.ycbcr_enc = f->fmt.pix.ycbcr_enc;
+		q_data->cur_fmt.xfer_func = f->fmt.pix.xfer_func;
+		q_data->cur_fmt.quantization = f->fmt.pix.quantization;
+		/* Propagate colorimetry to the capture queue */
+		q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+		q_data->cur_fmt.colorspace = f->fmt.pix.colorspace;
+		q_data->cur_fmt.ycbcr_enc = f->fmt.pix.ycbcr_enc;
+		q_data->cur_fmt.xfer_func = f->fmt.pix.xfer_func;
+		q_data->cur_fmt.quantization = f->fmt.pix.quantization;
+	}
+
+	/*
+	 * TODO: Setting colorimetry on the capture queue is currently not
+	 * supported by the V4L2 API
+	 */
+
+	return 0;
+}
+
+static int ipu_csc_scaler_g_selection(struct file *file, void *priv,
+				      struct v4l2_selection *s)
+{
+	struct ipu_csc_scaler_ctx *ctx = fh_to_ctx(priv);
+	struct ipu_csc_scaler_q_data *q_data;
+
+	switch (s->target) {
+	case V4L2_SEL_TGT_CROP:
+	case V4L2_SEL_TGT_CROP_DEFAULT:
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+		if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+			return -EINVAL;
+		q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+		break;
+	case V4L2_SEL_TGT_COMPOSE:
+	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+		if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+			return -EINVAL;
+		q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (s->target == V4L2_SEL_TGT_CROP ||
+	    s->target == V4L2_SEL_TGT_COMPOSE) {
+		s->r = q_data->rect;
+	} else {
+		s->r.left = 0;
+		s->r.top = 0;
+		s->r.width = q_data->cur_fmt.width;
+		s->r.height = q_data->cur_fmt.height;
+	}
+
+	return 0;
+}
+
+static int ipu_csc_scaler_s_selection(struct file *file, void *priv,
+				      struct v4l2_selection *s)
+{
+	struct ipu_csc_scaler_ctx *ctx = fh_to_ctx(priv);
+	struct ipu_csc_scaler_q_data *q_data;
+
+	switch (s->target) {
+	case V4L2_SEL_TGT_CROP:
+		if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+			return -EINVAL;
+		break;
+	case V4L2_SEL_TGT_COMPOSE:
+		if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+	    s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		return -EINVAL;
+
+	q_data = get_q_data(ctx, s->type);
+
+	/* The input's frame width to the IC must be a multiple of 8 pixels
+	 * When performing resizing the frame width must be multiple of burst
+	 * size - 8 or 16 pixels as defined by CB#_BURST_16 parameter.
+	 */
+	if (s->flags & V4L2_SEL_FLAG_GE)
+		s->r.width = round_up(s->r.width, 8);
+	if (s->flags & V4L2_SEL_FLAG_LE)
+		s->r.width = round_down(s->r.width, 8);
+	s->r.width = clamp_t(unsigned int, s->r.width, 8,
+			     round_down(q_data->cur_fmt.width, 8));
+	s->r.height = clamp_t(unsigned int, s->r.height, 1,
+			      q_data->cur_fmt.height);
+	s->r.left = clamp_t(unsigned int, s->r.left, 0,
+			    q_data->cur_fmt.width - s->r.width);
+	s->r.top = clamp_t(unsigned int, s->r.top, 0,
+			   q_data->cur_fmt.height - s->r.height);
+
+	/* V4L2_SEL_FLAG_KEEP_CONFIG is only valid for subdevices */
+	q_data->rect = s->r;
+
+	return 0;
+}
+
+static const struct v4l2_ioctl_ops ipu_csc_scaler_ioctl_ops = {
+	.vidioc_querycap		= ipu_csc_scaler_querycap,
+
+	.vidioc_enum_fmt_vid_cap	= ipu_csc_scaler_enum_fmt,
+	.vidioc_g_fmt_vid_cap		= ipu_csc_scaler_g_fmt,
+	.vidioc_try_fmt_vid_cap		= ipu_csc_scaler_try_fmt,
+	.vidioc_s_fmt_vid_cap		= ipu_csc_scaler_s_fmt,
+
+	.vidioc_enum_fmt_vid_out	= ipu_csc_scaler_enum_fmt,
+	.vidioc_g_fmt_vid_out		= ipu_csc_scaler_g_fmt,
+	.vidioc_try_fmt_vid_out		= ipu_csc_scaler_try_fmt,
+	.vidioc_s_fmt_vid_out		= ipu_csc_scaler_s_fmt,
+
+	.vidioc_g_selection		= ipu_csc_scaler_g_selection,
+	.vidioc_s_selection		= ipu_csc_scaler_s_selection,
+
+	.vidioc_reqbufs			= v4l2_m2m_ioctl_reqbufs,
+	.vidioc_querybuf		= v4l2_m2m_ioctl_querybuf,
+
+	.vidioc_qbuf			= v4l2_m2m_ioctl_qbuf,
+	.vidioc_expbuf			= v4l2_m2m_ioctl_expbuf,
+	.vidioc_dqbuf			= v4l2_m2m_ioctl_dqbuf,
+	.vidioc_create_bufs		= v4l2_m2m_ioctl_create_bufs,
+	.vidioc_prepare_buf		= v4l2_m2m_ioctl_prepare_buf,
+
+	.vidioc_streamon		= v4l2_m2m_ioctl_streamon,
+	.vidioc_streamoff		= v4l2_m2m_ioctl_streamoff,
+
+	.vidioc_subscribe_event		= v4l2_ctrl_subscribe_event,
+	.vidioc_unsubscribe_event	= v4l2_event_unsubscribe,
+};
+
+/*
+ * Queue operations
+ */
+
+static int ipu_csc_scaler_queue_setup(struct vb2_queue *vq,
+				      unsigned int *nbuffers,
+				      unsigned int *nplanes,
+				      unsigned int sizes[],
+				      struct device *alloc_devs[])
+{
+	struct ipu_csc_scaler_ctx *ctx = vb2_get_drv_priv(vq);
+	struct ipu_csc_scaler_q_data *q_data;
+	unsigned int size, count = *nbuffers;
+
+	q_data = get_q_data(ctx, vq->type);
+
+	size = q_data->cur_fmt.sizeimage;
+
+	*nbuffers = count;
+
+	if (*nplanes)
+		return sizes[0] < size ? -EINVAL : 0;
+
+	*nplanes = 1;
+	sizes[0] = size;
+
+	dev_dbg(ctx->priv->dev, "get %d buffer(s) of size %d each.\n",
+		count, size);
+
+	return 0;
+}
+
+static int ipu_csc_scaler_buf_prepare(struct vb2_buffer *vb)
+{
+	struct ipu_csc_scaler_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+	struct ipu_csc_scaler_q_data *q_data;
+	unsigned long size;
+
+	dev_dbg(ctx->priv->dev, "type: %d\n", vb->vb2_queue->type);
+
+	q_data = get_q_data(ctx, vb->vb2_queue->type);
+	size = q_data->cur_fmt.sizeimage;
+
+	if (vb2_plane_size(vb, 0) < size) {
+		dev_dbg(ctx->priv->dev,
+			"%s data will not fit into plane (%lu < %lu)\n",
+			__func__, vb2_plane_size(vb, 0), size);
+		return -EINVAL;
+	}
+
+	vb2_set_plane_payload(vb, 0, size);
+
+	return 0;
+}
+
+static void ipu_csc_scaler_buf_queue(struct vb2_buffer *vb)
+{
+	struct ipu_csc_scaler_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, to_vb2_v4l2_buffer(vb));
+}
+
+static void ipu_image_from_q_data(struct ipu_image *im,
+				  struct ipu_csc_scaler_q_data *q_data)
+{
+	im->pix.width = q_data->cur_fmt.width;
+	im->pix.height = q_data->cur_fmt.height;
+	im->pix.bytesperline = q_data->cur_fmt.bytesperline;
+	im->pix.pixelformat = q_data->cur_fmt.pixelformat;
+	im->rect = q_data->rect;
+}
+
+static int ipu_csc_scaler_start_streaming(struct vb2_queue *q,
+					  unsigned int count)
+{
+	const enum ipu_ic_task ic_task = IC_TASK_POST_PROCESSOR;
+	struct ipu_csc_scaler_ctx *ctx = vb2_get_drv_priv(q);
+	struct ipu_csc_scaler_priv *priv = ctx->priv;
+	struct ipu_soc *ipu = priv->md->ipu[0];
+	struct ipu_csc_scaler_q_data *q_data;
+	struct vb2_queue *other_q;
+	struct ipu_image in, out;
+
+	other_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+				  (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) ?
+				  V4L2_BUF_TYPE_VIDEO_OUTPUT :
+				  V4L2_BUF_TYPE_VIDEO_CAPTURE);
+	if (!vb2_is_streaming(other_q))
+		return 0;
+
+	if (ctx->icc) {
+		v4l2_warn(ctx->priv->vdev.vfd->v4l2_dev, "removing old ICC\n");
+		ipu_image_convert_unprepare(ctx->icc);
+	}
+
+	q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+	ipu_image_from_q_data(&in, q_data);
+
+	q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+	ipu_image_from_q_data(&out, q_data);
+
+	ctx->icc = ipu_image_convert_prepare(ipu, ic_task, &in, &out,
+					     ctx->rot_mode,
+					     ipu_ic_pp_complete, ctx);
+	if (IS_ERR(ctx->icc)) {
+		struct vb2_v4l2_buffer *buf;
+		int ret = PTR_ERR(ctx->icc);
+
+		ctx->icc = NULL;
+		v4l2_err(ctx->priv->vdev.vfd->v4l2_dev, "%s: error %d\n",
+			 __func__, ret);
+		while ((buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
+			v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED);
+		while ((buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
+			v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void ipu_csc_scaler_stop_streaming(struct vb2_queue *q)
+{
+	struct ipu_csc_scaler_ctx *ctx = vb2_get_drv_priv(q);
+	struct vb2_v4l2_buffer *buf;
+
+	if (ctx->icc) {
+		ipu_image_convert_unprepare(ctx->icc);
+		ctx->icc = NULL;
+	}
+
+	if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+		while ((buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
+			v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
+	} else {
+		while ((buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
+			v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
+	}
+}
+
+static const struct vb2_ops ipu_csc_scaler_qops = {
+	.queue_setup		= ipu_csc_scaler_queue_setup,
+	.buf_prepare		= ipu_csc_scaler_buf_prepare,
+	.buf_queue		= ipu_csc_scaler_buf_queue,
+	.wait_prepare		= vb2_ops_wait_prepare,
+	.wait_finish		= vb2_ops_wait_finish,
+	.start_streaming	= ipu_csc_scaler_start_streaming,
+	.stop_streaming		= ipu_csc_scaler_stop_streaming,
+};
+
+static int ipu_csc_scaler_queue_init(void *priv, struct vb2_queue *src_vq,
+				     struct vb2_queue *dst_vq)
+{
+	struct ipu_csc_scaler_ctx *ctx = priv;
+	int ret;
+
+	memset(src_vq, 0, sizeof(*src_vq));
+	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+	src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+	src_vq->drv_priv = ctx;
+	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+	src_vq->ops = &ipu_csc_scaler_qops;
+	src_vq->mem_ops = &vb2_dma_contig_memops;
+	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+	src_vq->lock = &ctx->priv->mutex;
+	src_vq->dev = ctx->priv->dev;
+
+	ret = vb2_queue_init(src_vq);
+	if (ret)
+		return ret;
+
+	memset(dst_vq, 0, sizeof(*dst_vq));
+	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+	dst_vq->drv_priv = ctx;
+	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+	dst_vq->ops = &ipu_csc_scaler_qops;
+	dst_vq->mem_ops = &vb2_dma_contig_memops;
+	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+	dst_vq->lock = &ctx->priv->mutex;
+	dst_vq->dev = ctx->priv->dev;
+
+	return vb2_queue_init(dst_vq);
+}
+
+static int ipu_csc_scaler_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct ipu_csc_scaler_ctx *ctx = container_of(ctrl->handler,
+						      struct ipu_csc_scaler_ctx,
+						      ctrl_hdlr);
+	enum ipu_rotate_mode rot_mode;
+	int rotate;
+	bool hflip, vflip;
+	int ret = 0;
+
+	rotate = ctx->rotate;
+	hflip = ctx->hflip;
+	vflip = ctx->vflip;
+
+	switch (ctrl->id) {
+	case V4L2_CID_HFLIP:
+		hflip = ctrl->val;
+		break;
+	case V4L2_CID_VFLIP:
+		vflip = ctrl->val;
+		break;
+	case V4L2_CID_ROTATE:
+		rotate = ctrl->val;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = ipu_degrees_to_rot_mode(&rot_mode, rotate, hflip, vflip);
+	if (ret)
+		return ret;
+
+	if (rot_mode != ctx->rot_mode) {
+		struct vb2_queue *cap_q;
+
+		cap_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+					V4L2_BUF_TYPE_VIDEO_CAPTURE);
+		if (vb2_is_busy(cap_q))
+			return -EBUSY;
+
+		ctx->rot_mode = rot_mode;
+		ctx->rotate = rotate;
+		ctx->hflip = hflip;
+		ctx->vflip = vflip;
+	}
+
+	return 0;
+}
+
+static const struct v4l2_ctrl_ops ipu_csc_scaler_ctrl_ops = {
+	.s_ctrl = ipu_csc_scaler_s_ctrl,
+};
+
+static int ipu_csc_scaler_init_controls(struct ipu_csc_scaler_ctx *ctx)
+{
+	struct v4l2_ctrl_handler *hdlr = &ctx->ctrl_hdlr;
+	int ret;
+
+	v4l2_ctrl_handler_init(hdlr, 3);
+
+	v4l2_ctrl_new_std(hdlr, &ipu_csc_scaler_ctrl_ops, V4L2_CID_HFLIP,
+			  0, 1, 1, 0);
+	v4l2_ctrl_new_std(hdlr, &ipu_csc_scaler_ctrl_ops, V4L2_CID_VFLIP,
+			  0, 1, 1, 0);
+	v4l2_ctrl_new_std(hdlr, &ipu_csc_scaler_ctrl_ops, V4L2_CID_ROTATE,
+			  0, 270, 90, 0);
+
+	if (hdlr->error) {
+		/*
+		 * Save the error code before freeing the handler:
+		 * v4l2_ctrl_handler_free() resets hdlr->error to 0, so
+		 * returning it after the free would report success and
+		 * leave the caller using a freed control handler.
+		 */
+		ret = hdlr->error;
+		v4l2_ctrl_handler_free(hdlr);
+		return ret;
+	}
+
+	v4l2_ctrl_handler_setup(hdlr);
+	return 0;
+}
+
+#define DEFAULT_WIDTH	720
+#define DEFAULT_HEIGHT	576
+static const struct ipu_csc_scaler_q_data ipu_csc_scaler_q_data_default = {
+	.cur_fmt = {
+		.width = DEFAULT_WIDTH,
+		.height = DEFAULT_HEIGHT,
+		.pixelformat = V4L2_PIX_FMT_YUV420,
+		.field = V4L2_FIELD_NONE,
+		.bytesperline = DEFAULT_WIDTH,
+		.sizeimage = DEFAULT_WIDTH * DEFAULT_HEIGHT * 3 / 2,
+		.colorspace = V4L2_COLORSPACE_SRGB,
+	},
+	.rect = {
+		.width = DEFAULT_WIDTH,
+		.height = DEFAULT_HEIGHT,
+	},
+};
+
+/*
+ * File operations
+ */
+static int ipu_csc_scaler_open(struct file *file)
+{
+	struct ipu_csc_scaler_priv *priv = video_drvdata(file);
+	struct ipu_csc_scaler_ctx *ctx = NULL;
+	int ret;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->rot_mode = IPU_ROTATE_NONE;
+
+	v4l2_fh_init(&ctx->fh, video_devdata(file));
+	file->private_data = &ctx->fh;
+	v4l2_fh_add(&ctx->fh);
+	ctx->priv = priv;
+
+	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(priv->m2m_dev, ctx,
+					    &ipu_csc_scaler_queue_init);
+	if (IS_ERR(ctx->fh.m2m_ctx)) {
+		ret = PTR_ERR(ctx->fh.m2m_ctx);
+		goto err_ctx;
+	}
+
+	ret = ipu_csc_scaler_init_controls(ctx);
+	if (ret)
+		goto err_ctrls;
+
+	ctx->fh.ctrl_handler = &ctx->ctrl_hdlr;
+
+	ctx->q_data[V4L2_M2M_SRC] = ipu_csc_scaler_q_data_default;
+	ctx->q_data[V4L2_M2M_DST] = ipu_csc_scaler_q_data_default;
+
+	dev_dbg(priv->dev, "Created instance %p, m2m_ctx: %p\n", ctx,
+		ctx->fh.m2m_ctx);
+
+	return 0;
+
+err_ctrls:
+	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+err_ctx:
+	v4l2_fh_del(&ctx->fh);
+	v4l2_fh_exit(&ctx->fh);
+	kfree(ctx);
+	return ret;
+}
+
+static int ipu_csc_scaler_release(struct file *file)
+{
+	struct ipu_csc_scaler_priv *priv = video_drvdata(file);
+	struct ipu_csc_scaler_ctx *ctx = fh_to_ctx(file->private_data);
+
+	dev_dbg(priv->dev, "Releasing instance %p\n", ctx);
+
+	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+	v4l2_fh_del(&ctx->fh);
+	v4l2_fh_exit(&ctx->fh);
+	kfree(ctx);
+
+	return 0;
+}
+
+static const struct v4l2_file_operations ipu_csc_scaler_fops = {
+	.owner		= THIS_MODULE,
+	.open		= ipu_csc_scaler_open,
+	.release	= ipu_csc_scaler_release,
+	.poll		= v4l2_m2m_fop_poll,
+	.unlocked_ioctl	= video_ioctl2,
+	.mmap		= v4l2_m2m_fop_mmap,
+};
+
+static struct v4l2_m2m_ops m2m_ops = {
+	.device_run	= device_run,
+	.job_abort	= job_abort,
+};
+
+static const struct video_device ipu_csc_scaler_videodev_template = {
+	.name		= "ipu0_ic_pp mem2mem",
+	.fops		= &ipu_csc_scaler_fops,
+	.ioctl_ops	= &ipu_csc_scaler_ioctl_ops,
+	.minor		= -1,
+	.release	= video_device_release,
+	.vfl_dir	= VFL_DIR_M2M,
+	.device_caps	= V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING,
+};
+
+int imx_media_csc_scaler_device_register(struct imx_media_video_dev *vdev)
+{
+	struct ipu_csc_scaler_priv *priv = vdev_to_priv(vdev);
+	struct video_device *vfd = vdev->vfd;
+	int ret;
+
+	vfd->v4l2_dev = &priv->md->v4l2_dev;
+
+	ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+	if (ret) {
+		v4l2_err(vfd->v4l2_dev, "Failed to register video device\n");
+		return ret;
+	}
+
+	v4l2_info(vfd->v4l2_dev, "Registered %s as /dev/%s\n", vfd->name,
+		  video_device_node_name(vfd));
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(imx_media_csc_scaler_device_register);
+
+void imx_media_csc_scaler_device_unregister(struct imx_media_video_dev *vdev)
+{
+	struct ipu_csc_scaler_priv *priv = vdev_to_priv(vdev);
+	struct video_device *vfd = priv->vdev.vfd;
+
+	mutex_lock(&priv->mutex);
+
+	if (video_is_registered(vfd))
+		video_unregister_device(vfd);
+
+	mutex_unlock(&priv->mutex);
+}
+EXPORT_SYMBOL_GPL(imx_media_csc_scaler_device_unregister);
+
+struct imx_media_video_dev *
+imx_media_csc_scaler_device_init(struct imx_media_dev *md)
+{
+	struct ipu_csc_scaler_priv *priv;
+	struct video_device *vfd;
+	int ret;
+
+	priv = devm_kzalloc(md->md.dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return ERR_PTR(-ENOMEM);
+
+	priv->md = md;
+	priv->dev = md->md.dev;
+
+	mutex_init(&priv->mutex);
+
+	vfd = video_device_alloc();
+	if (!vfd)
+		return ERR_PTR(-ENOMEM);
+
+	*vfd = ipu_csc_scaler_videodev_template;
+	snprintf(vfd->name, sizeof(vfd->name), "ipu_ic_pp mem2mem");
+	vfd->lock = &priv->mutex;
+	priv->vdev.vfd = vfd;
+
+	INIT_LIST_HEAD(&priv->vdev.list);
+
+	video_set_drvdata(vfd, priv);
+
+	priv->m2m_dev = v4l2_m2m_init(&m2m_ops);
+	if (IS_ERR(priv->m2m_dev)) {
+		ret = PTR_ERR(priv->m2m_dev);
+		v4l2_err(&md->v4l2_dev, "Failed to init mem2mem device: %d\n",
+			 ret);
+		/*
+		 * The video device was never registered, so
+		 * video_unregister_device() will not release it for us -
+		 * release it here to avoid leaking the allocation above.
+		 */
+		video_device_release(vfd);
+		return ERR_PTR(ret);
+	}
+
+	return &priv->vdev;
+}
+
+void imx_media_csc_scaler_device_remove(struct imx_media_video_dev *vdev)
+{
+	struct ipu_csc_scaler_priv *priv = vdev_to_priv(vdev);
+
+	v4l2_m2m_release(priv->m2m_dev);
+}
+EXPORT_SYMBOL_GPL(imx_media_csc_scaler_device_remove);
+
+MODULE_DESCRIPTION("i.MX IPUv3 mem2mem scaler/CSC driver");
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/imx/imx-media-dev.c b/drivers/staging/media/imx/imx-media-dev.c
index 4b344a4a3706..fee2ece0a6f8 100644
--- a/drivers/staging/media/imx/imx-media-dev.c
+++ b/drivers/staging/media/imx/imx-media-dev.c
@@ -318,12 +318,36 @@ static int imx_media_probe_complete(struct v4l2_async_notifier *notifier)
 		goto unlock;
 
 	ret = v4l2_device_register_subdev_nodes(&imxmd->v4l2_dev);
-unlock:
-	mutex_unlock(&imxmd->mutex);
 	if (ret)
-		return ret;
+		goto unlock;
+
+	imxmd->m2m_vdev = imx_media_csc_scaler_device_init(imxmd);
+	if (IS_ERR(imxmd->m2m_vdev)) {
+		ret = PTR_ERR(imxmd->m2m_vdev);
+		goto unlock;
+	}
 
-	return media_device_register(&imxmd->md);
+	ret = imx_media_csc_scaler_device_register(imxmd->m2m_vdev);
+	if (ret)
+		goto m2m_remove;
+
+	mutex_unlock(&imxmd->mutex);
+
+	ret = media_device_register(&imxmd->md);
+	if (ret) {
+		mutex_lock(&imxmd->mutex);
+		goto m2m_unreg;
+	}
+
+	return 0;
+
+m2m_unreg:
+	imx_media_csc_scaler_device_unregister(imxmd->m2m_vdev);
+m2m_remove:
+	imx_media_csc_scaler_device_remove(imxmd->m2m_vdev);
+unlock:
+	mutex_unlock(&imxmd->mutex);
+	return ret;
 }
 
 static const struct v4l2_async_notifier_operations imx_media_subdev_ops = {
@@ -532,6 +556,8 @@ static int imx_media_remove(struct platform_device *pdev)
 	v4l2_async_notifier_unregister(&imxmd->notifier);
 	imx_media_remove_internal_subdevs(imxmd);
 	v4l2_async_notifier_cleanup(&imxmd->notifier);
+	imx_media_csc_scaler_device_unregister(imxmd->m2m_vdev);
+	imx_media_csc_scaler_device_remove(imxmd->m2m_vdev);
 	v4l2_device_unregister(&imxmd->v4l2_dev);
 	media_device_unregister(&imxmd->md);
 	media_device_cleanup(&imxmd->md);
diff --git a/drivers/staging/media/imx/imx-media.h b/drivers/staging/media/imx/imx-media.h
index bc7feb81937c..d1c4df4445cf 100644
--- a/drivers/staging/media/imx/imx-media.h
+++ b/drivers/staging/media/imx/imx-media.h
@@ -149,6 +149,9 @@ struct imx_media_dev {
 
 	/* for async subdev registration */
 	struct v4l2_async_notifier notifier;
+
+	/* IC scaler/CSC mem2mem video device */
+	struct imx_media_video_dev *m2m_vdev;
 };
 
 enum codespace_sel {
@@ -262,6 +265,13 @@ void imx_media_capture_device_set_format(struct imx_media_video_dev *vdev,
 					 struct v4l2_pix_format *pix);
 void imx_media_capture_device_error(struct imx_media_video_dev *vdev);
 
+/* imx-media-csc-scaler.c */
+struct imx_media_video_dev *
+imx_media_csc_scaler_device_init(struct imx_media_dev *dev);
+void imx_media_csc_scaler_device_remove(struct imx_media_video_dev *vdev);
+int imx_media_csc_scaler_device_register(struct imx_media_video_dev *vdev);
+void imx_media_csc_scaler_device_unregister(struct imx_media_video_dev *vdev);
+
 /* subdev group ids */
 #define IMX_MEDIA_GRP_ID_CSI2      BIT(8)
 #define IMX_MEDIA_GRP_ID_CSI_BIT   9
-- 
2.20.1


^ permalink raw reply related	[flat|nested] 15+ messages in thread

* Re: [PATCH v7] media: imx: add mem2mem device
  2019-01-17 15:50 [PATCH v7] media: imx: add mem2mem device Philipp Zabel
@ 2019-01-18  9:30 ` Hans Verkuil
  2019-01-18 11:18   ` Philipp Zabel
  2019-02-12 19:01 ` Tim Harvey
  1 sibling, 1 reply; 15+ messages in thread
From: Hans Verkuil @ 2019-01-18  9:30 UTC (permalink / raw)
  To: Philipp Zabel, linux-media; +Cc: Steve Longerbeam, Hans Verkuil, kernel

On 1/17/19 4:50 PM, Philipp Zabel wrote:
> Add a single imx-media mem2mem video device that uses the IPU IC PP
> (image converter post processing) task for scaling and colorspace
> conversion.
> On i.MX6Q/DL SoCs with two IPUs currently only the first IPU is used.
> 
> The hardware only supports writing to destination buffers up to
> 1024x1024 pixels in a single pass, arbitrary sizes can be achieved
> by rendering multiple tiles per frame.
> 
> Signed-off-by: Philipp Zabel <p.zabel@pengutronix.de>
> [slongerbeam@gmail.com: use ipu_image_convert_adjust(), fix
>  device_run() error handling, add missing media-device header,
>  unregister and remove the mem2mem device in error paths in
>  imx_media_probe_complete() and in imx_media_remove()]
> Signed-off-by: Steve Longerbeam <slongerbeam@gmail.com>
> ---
> Changes since v6 [1]:
>  - Change driver name in querycap to imx-media-csc-scaler
>  - Drop V4L2_SEL_TGT_COMPOSE_PADDED from vidioc_g_selection
>  - Simplify error handling in ipu_csc_scaler_init_controls
> 
> [1] https://patchwork.linuxtv.org/patch/53757/
> ---
>  drivers/staging/media/imx/Kconfig             |   1 +
>  drivers/staging/media/imx/Makefile            |   1 +
>  .../staging/media/imx/imx-media-csc-scaler.c  | 856 ++++++++++++++++++
>  drivers/staging/media/imx/imx-media-dev.c     |  34 +-
>  drivers/staging/media/imx/imx-media.h         |  10 +
>  5 files changed, 898 insertions(+), 4 deletions(-)
>  create mode 100644 drivers/staging/media/imx/imx-media-csc-scaler.c
> 
> diff --git a/drivers/staging/media/imx/Kconfig b/drivers/staging/media/imx/Kconfig
> index bfc17de56b17..07013cb3cb66 100644
> --- a/drivers/staging/media/imx/Kconfig
> +++ b/drivers/staging/media/imx/Kconfig
> @@ -6,6 +6,7 @@ config VIDEO_IMX_MEDIA
>  	depends on HAS_DMA
>  	select VIDEOBUF2_DMA_CONTIG
>  	select V4L2_FWNODE
> +	select V4L2_MEM2MEM_DEV
>  	---help---
>  	  Say yes here to enable support for video4linux media controller
>  	  driver for the i.MX5/6 SOC.
> diff --git a/drivers/staging/media/imx/Makefile b/drivers/staging/media/imx/Makefile
> index 698a4210316e..8f1ba788000b 100644
> --- a/drivers/staging/media/imx/Makefile
> +++ b/drivers/staging/media/imx/Makefile
> @@ -6,6 +6,7 @@ imx-media-ic-objs := imx-ic-common.o imx-ic-prp.o imx-ic-prpencvf.o
>  obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx-media.o
>  obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx-media-common.o
>  obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx-media-capture.o
> +obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx-media-csc-scaler.o
>  obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx-media-vdic.o
>  obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx-media-ic.o
>  
> diff --git a/drivers/staging/media/imx/imx-media-csc-scaler.c b/drivers/staging/media/imx/imx-media-csc-scaler.c
> new file mode 100644
> index 000000000000..44f437da0a9a
> --- /dev/null
> +++ b/drivers/staging/media/imx/imx-media-csc-scaler.c
> @@ -0,0 +1,856 @@
> +// SPDX-License-Identifier: GPL-2.0+
> +/*
> + * i.MX IPUv3 IC PP mem2mem CSC/Scaler driver
> + *
> + * Copyright (C) 2011 Pengutronix, Sascha Hauer
> + * Copyright (C) 2018 Pengutronix, Philipp Zabel
> + */
> +#include <linux/module.h>
> +#include <linux/delay.h>
> +#include <linux/fs.h>
> +#include <linux/version.h>
> +#include <linux/sched.h>
> +#include <linux/slab.h>
> +#include <video/imx-ipu-v3.h>
> +#include <video/imx-ipu-image-convert.h>
> +
> +#include <media/media-device.h>
> +#include <media/v4l2-ctrls.h>
> +#include <media/v4l2-event.h>
> +#include <media/v4l2-mem2mem.h>
> +#include <media/v4l2-device.h>
> +#include <media/v4l2-ioctl.h>
> +#include <media/videobuf2-dma-contig.h>
> +
> +#include "imx-media.h"
> +
> +#define fh_to_ctx(__fh)	container_of(__fh, struct ipu_csc_scaler_ctx, fh)
> +
> +enum {
> +	V4L2_M2M_SRC = 0,
> +	V4L2_M2M_DST = 1,
> +};
> +
> +struct ipu_csc_scaler_priv {
> +	struct imx_media_video_dev	vdev;
> +
> +	struct v4l2_m2m_dev		*m2m_dev;
> +	struct device			*dev;
> +
> +	struct imx_media_dev		*md;
> +
> +	struct mutex			mutex;	/* mem2mem device mutex */
> +};
> +
> +#define vdev_to_priv(v) container_of(v, struct ipu_csc_scaler_priv, vdev)
> +
> +/* Per-queue, driver-specific private data */
> +struct ipu_csc_scaler_q_data {
> +	struct v4l2_pix_format		cur_fmt;
> +	struct v4l2_rect		rect;
> +};
> +
> +struct ipu_csc_scaler_ctx {
> +	struct ipu_csc_scaler_priv	*priv;
> +
> +	struct v4l2_fh			fh;
> +	struct ipu_csc_scaler_q_data	q_data[2];
> +	struct ipu_image_convert_ctx	*icc;
> +
> +	struct v4l2_ctrl_handler	ctrl_hdlr;
> +	int				rotate;
> +	bool				hflip;
> +	bool				vflip;
> +	enum ipu_rotate_mode		rot_mode;
> +};
> +
> +static struct ipu_csc_scaler_q_data *get_q_data(struct ipu_csc_scaler_ctx *ctx,
> +						enum v4l2_buf_type type)
> +{
> +	if (V4L2_TYPE_IS_OUTPUT(type))
> +		return &ctx->q_data[V4L2_M2M_SRC];
> +	else
> +		return &ctx->q_data[V4L2_M2M_DST];
> +}
> +
> +/*
> + * mem2mem callbacks
> + */
> +
> +static void job_abort(void *_ctx)
> +{
> +	struct ipu_csc_scaler_ctx *ctx = _ctx;
> +
> +	if (ctx->icc)
> +		ipu_image_convert_abort(ctx->icc);
> +}
> +
> +static void ipu_ic_pp_complete(struct ipu_image_convert_run *run, void *_ctx)
> +{
> +	struct ipu_csc_scaler_ctx *ctx = _ctx;
> +	struct ipu_csc_scaler_priv *priv = ctx->priv;
> +	struct vb2_v4l2_buffer *src_buf, *dst_buf;
> +
> +	src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
> +	dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
> +
> +	dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp;
> +	dst_buf->timecode = src_buf->timecode;
> +
> +	v4l2_m2m_buf_done(src_buf, run->status ? VB2_BUF_STATE_ERROR :
> +						 VB2_BUF_STATE_DONE);
> +	v4l2_m2m_buf_done(dst_buf, run->status ? VB2_BUF_STATE_ERROR :
> +						 VB2_BUF_STATE_DONE);
> +
> +	v4l2_m2m_job_finish(priv->m2m_dev, ctx->fh.m2m_ctx);
> +	kfree(run);
> +}
> +
> +static void device_run(void *_ctx)
> +{
> +	struct ipu_csc_scaler_ctx *ctx = _ctx;
> +	struct ipu_csc_scaler_priv *priv = ctx->priv;
> +	struct vb2_v4l2_buffer *src_buf, *dst_buf;
> +	struct ipu_image_convert_run *run;
> +	int ret;
> +
> +	src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
> +	dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
> +
> +	run = kzalloc(sizeof(*run), GFP_KERNEL);
> +	if (!run)
> +		goto err;
> +
> +	run->ctx = ctx->icc;
> +	run->in_phys = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
> +	run->out_phys = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
> +
> +	ret = ipu_image_convert_queue(run);
> +	if (ret < 0) {
> +		v4l2_err(ctx->priv->vdev.vfd->v4l2_dev,
> +			 "%s: failed to queue: %d\n", __func__, ret);
> +		goto err;
> +	}
> +
> +	return;
> +
> +err:
> +	v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
> +	v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
> +	v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
> +	v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
> +	v4l2_m2m_job_finish(priv->m2m_dev, ctx->fh.m2m_ctx);
> +}
> +
> +/*
> + * Video ioctls
> + */
> +static int ipu_csc_scaler_querycap(struct file *file, void *priv,
> +				   struct v4l2_capability *cap)
> +{
> +	strscpy(cap->driver, "imx-media-csc-scaler", sizeof(cap->driver));
> +	strscpy(cap->card, "imx-media-csc-scaler", sizeof(cap->card));
> +	strscpy(cap->bus_info, "platform:imx-media-csc-scaler",
> +		sizeof(cap->bus_info));
> +
> +	return 0;
> +}
> +
> +static int ipu_csc_scaler_enum_fmt(struct file *file, void *fh,
> +				   struct v4l2_fmtdesc *f)
> +{
> +	u32 fourcc;
> +	int ret;
> +
> +	ret = imx_media_enum_format(&fourcc, f->index, CS_SEL_ANY);
> +	if (ret)
> +		return ret;
> +
> +	f->pixelformat = fourcc;
> +
> +	return 0;
> +}
> +
> +static int ipu_csc_scaler_g_fmt(struct file *file, void *priv,
> +				struct v4l2_format *f)
> +{
> +	struct ipu_csc_scaler_ctx *ctx = fh_to_ctx(priv);
> +	struct ipu_csc_scaler_q_data *q_data;
> +
> +	q_data = get_q_data(ctx, f->type);
> +
> +	f->fmt.pix = q_data->cur_fmt;
> +
> +	return 0;
> +}
> +
> +static int ipu_csc_scaler_try_fmt(struct file *file, void *priv,
> +				  struct v4l2_format *f)
> +{
> +	struct ipu_csc_scaler_ctx *ctx = fh_to_ctx(priv);
> +	struct ipu_csc_scaler_q_data *q_data = get_q_data(ctx, f->type);
> +	struct ipu_image test_in, test_out;
> +
> +	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
> +		struct ipu_csc_scaler_q_data *q_data_in =
> +			get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
> +
> +		test_out.pix = f->fmt.pix;
> +		test_in.pix = q_data_in->cur_fmt;
> +	} else {
> +		struct ipu_csc_scaler_q_data *q_data_out =
> +			get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
> +
> +		test_in.pix = f->fmt.pix;
> +		test_out.pix = q_data_out->cur_fmt;
> +	}
> +
> +	ipu_image_convert_adjust(&test_in, &test_out, ctx->rot_mode);
> +
> +	f->fmt.pix = (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) ?
> +		test_out.pix : test_in.pix;
> +
> +	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
> +		f->fmt.pix.colorspace = q_data->cur_fmt.colorspace;
> +		f->fmt.pix.ycbcr_enc = q_data->cur_fmt.ycbcr_enc;
> +		f->fmt.pix.xfer_func = q_data->cur_fmt.xfer_func;
> +		f->fmt.pix.quantization = q_data->cur_fmt.quantization;
> +	} else if (f->fmt.pix.colorspace == V4L2_COLORSPACE_DEFAULT) {
> +		f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
> +		f->fmt.pix.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
> +		f->fmt.pix.xfer_func = V4L2_XFER_FUNC_DEFAULT;
> +		f->fmt.pix.quantization = V4L2_QUANTIZATION_DEFAULT;
> +	}
> +
> +	return 0;
> +}
> +
> +static int ipu_csc_scaler_s_fmt(struct file *file, void *priv,
> +				struct v4l2_format *f)
> +{
> +	struct ipu_csc_scaler_q_data *q_data;
> +	struct ipu_csc_scaler_ctx *ctx = fh_to_ctx(priv);
> +	struct vb2_queue *vq;
> +	int ret;
> +
> +	vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
> +	if (vb2_is_busy(vq)) {
> +		v4l2_err(ctx->priv->vdev.vfd->v4l2_dev, "%s queue busy\n",
> +			 __func__);
> +		return -EBUSY;
> +	}
> +
> +	q_data = get_q_data(ctx, f->type);
> +
> +	ret = ipu_csc_scaler_try_fmt(file, priv, f);
> +	if (ret < 0)
> +		return ret;
> +
> +	q_data->cur_fmt.width = f->fmt.pix.width;
> +	q_data->cur_fmt.height = f->fmt.pix.height;
> +	q_data->cur_fmt.pixelformat = f->fmt.pix.pixelformat;
> +	q_data->cur_fmt.field = f->fmt.pix.field;
> +	q_data->cur_fmt.bytesperline = f->fmt.pix.bytesperline;
> +	q_data->cur_fmt.sizeimage = f->fmt.pix.sizeimage;
> +
> +	/* Reset cropping/composing rectangle */
> +	q_data->rect.left = 0;
> +	q_data->rect.top = 0;
> +	q_data->rect.width = q_data->cur_fmt.width;
> +	q_data->rect.height = q_data->cur_fmt.height;
> +
> +	if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
> +		/* Set colorimetry on the output queue */
> +		q_data->cur_fmt.colorspace = f->fmt.pix.colorspace;
> +		q_data->cur_fmt.ycbcr_enc = f->fmt.pix.ycbcr_enc;
> +		q_data->cur_fmt.xfer_func = f->fmt.pix.xfer_func;
> +		q_data->cur_fmt.quantization = f->fmt.pix.quantization;
> +		/* Propagate colorimetry to the capture queue */
> +		q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
> +		q_data->cur_fmt.colorspace = f->fmt.pix.colorspace;
> +		q_data->cur_fmt.ycbcr_enc = f->fmt.pix.ycbcr_enc;
> +		q_data->cur_fmt.xfer_func = f->fmt.pix.xfer_func;
> +		q_data->cur_fmt.quantization = f->fmt.pix.quantization;
> +	}
> +
> +	/*
> +	 * TODO: Setting colorimetry on the capture queue is currently not
> +	 * supported by the V4L2 API
> +	 */
> +
> +	return 0;
> +}
> +
> +static int ipu_csc_scaler_g_selection(struct file *file, void *priv,
> +				      struct v4l2_selection *s)
> +{
> +	struct ipu_csc_scaler_ctx *ctx = fh_to_ctx(priv);
> +	struct ipu_csc_scaler_q_data *q_data;
> +
> +	switch (s->target) {
> +	case V4L2_SEL_TGT_CROP:
> +	case V4L2_SEL_TGT_CROP_DEFAULT:
> +	case V4L2_SEL_TGT_CROP_BOUNDS:
> +		if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
> +			return -EINVAL;
> +		q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
> +		break;
> +	case V4L2_SEL_TGT_COMPOSE:
> +	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
> +	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
> +		if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
> +			return -EINVAL;
> +		q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
> +		break;
> +	default:
> +		return -EINVAL;
> +	}
> +
> +	if (s->target == V4L2_SEL_TGT_CROP ||
> +	    s->target == V4L2_SEL_TGT_COMPOSE) {
> +		s->r = q_data->rect;
> +	} else {
> +		s->r.left = 0;
> +		s->r.top = 0;
> +		s->r.width = q_data->cur_fmt.width;
> +		s->r.height = q_data->cur_fmt.height;
> +	}
> +
> +	return 0;
> +}
> +
> +static int ipu_csc_scaler_s_selection(struct file *file, void *priv,
> +				      struct v4l2_selection *s)
> +{
> +	struct ipu_csc_scaler_ctx *ctx = fh_to_ctx(priv);
> +	struct ipu_csc_scaler_q_data *q_data;
> +
> +	switch (s->target) {
> +	case V4L2_SEL_TGT_CROP:
> +		if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
> +			return -EINVAL;
> +		break;
> +	case V4L2_SEL_TGT_COMPOSE:
> +		if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
> +			return -EINVAL;
> +		break;
> +	default:
> +		return -EINVAL;
> +	}
> +
> +	if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
> +	    s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
> +		return -EINVAL;
> +
> +	q_data = get_q_data(ctx, s->type);
> +
> +	/* The input's frame width to the IC must be a multiple of 8 pixels
> +	 * When performing resizing the frame width must be multiple of burst
> +	 * size - 8 or 16 pixels as defined by CB#_BURST_16 parameter.
> +	 */
> +	if (s->flags & V4L2_SEL_FLAG_GE)
> +		s->r.width = round_up(s->r.width, 8);
> +	if (s->flags & V4L2_SEL_FLAG_LE)
> +		s->r.width = round_down(s->r.width, 8);
> +	s->r.width = clamp_t(unsigned int, s->r.width, 8,
> +			     round_down(q_data->cur_fmt.width, 8));
> +	s->r.height = clamp_t(unsigned int, s->r.height, 1,
> +			      q_data->cur_fmt.height);
> +	s->r.left = clamp_t(unsigned int, s->r.left, 0,
> +			    q_data->cur_fmt.width - s->r.width);
> +	s->r.top = clamp_t(unsigned int, s->r.top, 0,
> +			   q_data->cur_fmt.height - s->r.height);
> +
> +	/* V4L2_SEL_FLAG_KEEP_CONFIG is only valid for subdevices */
> +	q_data->rect = s->r;
> +
> +	return 0;
> +}
> +
> +static const struct v4l2_ioctl_ops ipu_csc_scaler_ioctl_ops = {
> +	.vidioc_querycap		= ipu_csc_scaler_querycap,
> +
> +	.vidioc_enum_fmt_vid_cap	= ipu_csc_scaler_enum_fmt,
> +	.vidioc_g_fmt_vid_cap		= ipu_csc_scaler_g_fmt,
> +	.vidioc_try_fmt_vid_cap		= ipu_csc_scaler_try_fmt,
> +	.vidioc_s_fmt_vid_cap		= ipu_csc_scaler_s_fmt,
> +
> +	.vidioc_enum_fmt_vid_out	= ipu_csc_scaler_enum_fmt,
> +	.vidioc_g_fmt_vid_out		= ipu_csc_scaler_g_fmt,
> +	.vidioc_try_fmt_vid_out		= ipu_csc_scaler_try_fmt,
> +	.vidioc_s_fmt_vid_out		= ipu_csc_scaler_s_fmt,
> +
> +	.vidioc_g_selection		= ipu_csc_scaler_g_selection,
> +	.vidioc_s_selection		= ipu_csc_scaler_s_selection,
> +
> +	.vidioc_reqbufs			= v4l2_m2m_ioctl_reqbufs,
> +	.vidioc_querybuf		= v4l2_m2m_ioctl_querybuf,
> +
> +	.vidioc_qbuf			= v4l2_m2m_ioctl_qbuf,
> +	.vidioc_expbuf			= v4l2_m2m_ioctl_expbuf,
> +	.vidioc_dqbuf			= v4l2_m2m_ioctl_dqbuf,
> +	.vidioc_create_bufs		= v4l2_m2m_ioctl_create_bufs,
> +	.vidioc_prepare_buf		= v4l2_m2m_ioctl_prepare_buf,
> +
> +	.vidioc_streamon		= v4l2_m2m_ioctl_streamon,
> +	.vidioc_streamoff		= v4l2_m2m_ioctl_streamoff,
> +
> +	.vidioc_subscribe_event		= v4l2_ctrl_subscribe_event,
> +	.vidioc_unsubscribe_event	= v4l2_event_unsubscribe,
> +};
> +
> +/*
> + * Queue operations
> + */
> +
> +static int ipu_csc_scaler_queue_setup(struct vb2_queue *vq,
> +				      unsigned int *nbuffers,
> +				      unsigned int *nplanes,
> +				      unsigned int sizes[],
> +				      struct device *alloc_devs[])
> +{
> +	struct ipu_csc_scaler_ctx *ctx = vb2_get_drv_priv(vq);
> +	struct ipu_csc_scaler_q_data *q_data;
> +	unsigned int size, count = *nbuffers;
> +
> +	q_data = get_q_data(ctx, vq->type);
> +
> +	size = q_data->cur_fmt.sizeimage;
> +
> +	*nbuffers = count;
> +
> +	if (*nplanes)
> +		return sizes[0] < size ? -EINVAL : 0;
> +
> +	*nplanes = 1;
> +	sizes[0] = size;
> +
> +	dev_dbg(ctx->priv->dev, "get %d buffer(s) of size %d each.\n",
> +		count, size);
> +
> +	return 0;
> +}
> +
> +static int ipu_csc_scaler_buf_prepare(struct vb2_buffer *vb)
> +{
> +	struct ipu_csc_scaler_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
> +	struct ipu_csc_scaler_q_data *q_data;
> +	unsigned long size;
> +
> +	dev_dbg(ctx->priv->dev, "type: %d\n", vb->vb2_queue->type);
> +
> +	q_data = get_q_data(ctx, vb->vb2_queue->type);
> +	size = q_data->cur_fmt.sizeimage;
> +
> +	if (vb2_plane_size(vb, 0) < size) {
> +		dev_dbg(ctx->priv->dev,
> +			"%s data will not fit into plane (%lu < %lu)\n",
> +			__func__, vb2_plane_size(vb, 0), size);
> +		return -EINVAL;
> +	}
> +
> +	vb2_set_plane_payload(vb, 0, size);
> +
> +	return 0;
> +}
> +
> +static void ipu_csc_scaler_buf_queue(struct vb2_buffer *vb)
> +{
> +	struct ipu_csc_scaler_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
> +
> +	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, to_vb2_v4l2_buffer(vb));
> +}
> +
> +static void ipu_image_from_q_data(struct ipu_image *im,
> +				  struct ipu_csc_scaler_q_data *q_data)
> +{
> +	im->pix.width = q_data->cur_fmt.width;
> +	im->pix.height = q_data->cur_fmt.height;
> +	im->pix.bytesperline = q_data->cur_fmt.bytesperline;
> +	im->pix.pixelformat = q_data->cur_fmt.pixelformat;
> +	im->rect = q_data->rect;
> +}
> +
> +static int ipu_csc_scaler_start_streaming(struct vb2_queue *q,
> +					  unsigned int count)
> +{
> +	const enum ipu_ic_task ic_task = IC_TASK_POST_PROCESSOR;
> +	struct ipu_csc_scaler_ctx *ctx = vb2_get_drv_priv(q);
> +	struct ipu_csc_scaler_priv *priv = ctx->priv;
> +	struct ipu_soc *ipu = priv->md->ipu[0];
> +	struct ipu_csc_scaler_q_data *q_data;
> +	struct vb2_queue *other_q;
> +	struct ipu_image in, out;
> +
> +	other_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
> +				  (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) ?
> +				  V4L2_BUF_TYPE_VIDEO_OUTPUT :
> +				  V4L2_BUF_TYPE_VIDEO_CAPTURE);
> +	if (!vb2_is_streaming(other_q))
> +		return 0;
> +
> +	if (ctx->icc) {
> +		v4l2_warn(ctx->priv->vdev.vfd->v4l2_dev, "removing old ICC\n");
> +		ipu_image_convert_unprepare(ctx->icc);
> +	}
> +
> +	q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
> +	ipu_image_from_q_data(&in, q_data);
> +
> +	q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
> +	ipu_image_from_q_data(&out, q_data);
> +
> +	ctx->icc = ipu_image_convert_prepare(ipu, ic_task, &in, &out,
> +					     ctx->rot_mode,
> +					     ipu_ic_pp_complete, ctx);
> +	if (IS_ERR(ctx->icc)) {
> +		struct vb2_v4l2_buffer *buf;
> +		int ret = PTR_ERR(ctx->icc);
> +
> +		ctx->icc = NULL;
> +		v4l2_err(ctx->priv->vdev.vfd->v4l2_dev, "%s: error %d\n",
> +			 __func__, ret);
> +		while ((buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
> +			v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED);
> +		while ((buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
> +			v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED);
> +		return ret;
> +	}
> +
> +	return 0;
> +}
> +
> +static void ipu_csc_scaler_stop_streaming(struct vb2_queue *q)
> +{
> +	struct ipu_csc_scaler_ctx *ctx = vb2_get_drv_priv(q);
> +	struct vb2_v4l2_buffer *buf;
> +
> +	if (ctx->icc) {
> +		ipu_image_convert_unprepare(ctx->icc);
> +		ctx->icc = NULL;
> +	}
> +
> +	if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
> +		while ((buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
> +			v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
> +	} else {
> +		while ((buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
> +			v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
> +	}
> +}
> +
> +static const struct vb2_ops ipu_csc_scaler_qops = {
> +	.queue_setup		= ipu_csc_scaler_queue_setup,
> +	.buf_prepare		= ipu_csc_scaler_buf_prepare,
> +	.buf_queue		= ipu_csc_scaler_buf_queue,
> +	.wait_prepare		= vb2_ops_wait_prepare,
> +	.wait_finish		= vb2_ops_wait_finish,
> +	.start_streaming	= ipu_csc_scaler_start_streaming,
> +	.stop_streaming		= ipu_csc_scaler_stop_streaming,
> +};
> +
> +static int ipu_csc_scaler_queue_init(void *priv, struct vb2_queue *src_vq,
> +				     struct vb2_queue *dst_vq)
> +{
> +	struct ipu_csc_scaler_ctx *ctx = priv;
> +	int ret;
> +
> +	memset(src_vq, 0, sizeof(*src_vq));
> +	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
> +	src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
> +	src_vq->drv_priv = ctx;
> +	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
> +	src_vq->ops = &ipu_csc_scaler_qops;
> +	src_vq->mem_ops = &vb2_dma_contig_memops;
> +	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
> +	src_vq->lock = &ctx->priv->mutex;
> +	src_vq->dev = ctx->priv->dev;
> +
> +	ret = vb2_queue_init(src_vq);
> +	if (ret)
> +		return ret;
> +
> +	memset(dst_vq, 0, sizeof(*dst_vq));
> +	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
> +	dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
> +	dst_vq->drv_priv = ctx;
> +	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
> +	dst_vq->ops = &ipu_csc_scaler_qops;
> +	dst_vq->mem_ops = &vb2_dma_contig_memops;
> +	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
> +	dst_vq->lock = &ctx->priv->mutex;
> +	dst_vq->dev = ctx->priv->dev;
> +
> +	return vb2_queue_init(dst_vq);
> +}
> +
> +static int ipu_csc_scaler_s_ctrl(struct v4l2_ctrl *ctrl)
> +{
> +	struct ipu_csc_scaler_ctx *ctx = container_of(ctrl->handler,
> +						      struct ipu_csc_scaler_ctx,
> +						      ctrl_hdlr);
> +	enum ipu_rotate_mode rot_mode;
> +	int rotate;
> +	bool hflip, vflip;
> +	int ret = 0;
> +
> +	rotate = ctx->rotate;
> +	hflip = ctx->hflip;
> +	vflip = ctx->vflip;
> +
> +	switch (ctrl->id) {
> +	case V4L2_CID_HFLIP:
> +		hflip = ctrl->val;
> +		break;
> +	case V4L2_CID_VFLIP:
> +		vflip = ctrl->val;
> +		break;
> +	case V4L2_CID_ROTATE:
> +		rotate = ctrl->val;
> +		break;
> +	default:
> +		return -EINVAL;
> +	}
> +
> +	ret = ipu_degrees_to_rot_mode(&rot_mode, rotate, hflip, vflip);
> +	if (ret)
> +		return ret;
> +
> +	if (rot_mode != ctx->rot_mode) {
> +		struct vb2_queue *cap_q;
> +
> +		cap_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
> +					V4L2_BUF_TYPE_VIDEO_CAPTURE);
> +		if (vb2_is_busy(cap_q))
> +			return -EBUSY;
> +
> +		ctx->rot_mode = rot_mode;
> +		ctx->rotate = rotate;
> +		ctx->hflip = hflip;
> +		ctx->vflip = vflip;
> +	}
> +
> +	return 0;
> +}
> +
> +static const struct v4l2_ctrl_ops ipu_csc_scaler_ctrl_ops = {
> +	.s_ctrl = ipu_csc_scaler_s_ctrl,
> +};
> +
> +static int ipu_csc_scaler_init_controls(struct ipu_csc_scaler_ctx *ctx)
> +{
> +	struct v4l2_ctrl_handler *hdlr = &ctx->ctrl_hdlr;
> +	int ret;
> +
> +	v4l2_ctrl_handler_init(hdlr, 3);
> +
> +	v4l2_ctrl_new_std(hdlr, &ipu_csc_scaler_ctrl_ops, V4L2_CID_HFLIP,
> +			  0, 1, 1, 0);
> +	v4l2_ctrl_new_std(hdlr, &ipu_csc_scaler_ctrl_ops, V4L2_CID_VFLIP,
> +			  0, 1, 1, 0);
> +	v4l2_ctrl_new_std(hdlr, &ipu_csc_scaler_ctrl_ops, V4L2_CID_ROTATE,
> +			  0, 270, 90, 0);
> +
> +	if (hdlr->error) {
> +		v4l2_ctrl_handler_free(hdlr);
> +		return hdlr->error;
> +	}
> +
> +	v4l2_ctrl_handler_setup(hdlr);
> +	return 0;
> +}
> +
> +#define DEFAULT_WIDTH	720
> +#define DEFAULT_HEIGHT	576
> +static const struct ipu_csc_scaler_q_data ipu_csc_scaler_q_data_default = {
> +	.cur_fmt = {
> +		.width = DEFAULT_WIDTH,
> +		.height = DEFAULT_HEIGHT,
> +		.pixelformat = V4L2_PIX_FMT_YUV420,
> +		.field = V4L2_FIELD_NONE,
> +		.bytesperline = DEFAULT_WIDTH,
> +		.sizeimage = DEFAULT_WIDTH * DEFAULT_HEIGHT * 3 / 2,
> +		.colorspace = V4L2_COLORSPACE_SRGB,
> +	},
> +	.rect = {
> +		.width = DEFAULT_WIDTH,
> +		.height = DEFAULT_HEIGHT,
> +	},
> +};
> +
> +/*
> + * File operations
> + */
> +static int ipu_csc_scaler_open(struct file *file)
> +{
> +	struct ipu_csc_scaler_priv *priv = video_drvdata(file);
> +	struct ipu_csc_scaler_ctx *ctx = NULL;
> +	int ret;
> +
> +	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
> +	if (!ctx)
> +		return -ENOMEM;
> +
> +	ctx->rot_mode = IPU_ROTATE_NONE;
> +
> +	v4l2_fh_init(&ctx->fh, video_devdata(file));
> +	file->private_data = &ctx->fh;
> +	v4l2_fh_add(&ctx->fh);
> +	ctx->priv = priv;
> +
> +	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(priv->m2m_dev, ctx,
> +					    &ipu_csc_scaler_queue_init);
> +	if (IS_ERR(ctx->fh.m2m_ctx)) {
> +		ret = PTR_ERR(ctx->fh.m2m_ctx);
> +		goto err_ctx;
> +	}
> +
> +	ret = ipu_csc_scaler_init_controls(ctx);
> +	if (ret)
> +		goto err_ctrls;
> +
> +	ctx->fh.ctrl_handler = &ctx->ctrl_hdlr;
> +
> +	ctx->q_data[V4L2_M2M_SRC] = ipu_csc_scaler_q_data_default;
> +	ctx->q_data[V4L2_M2M_DST] = ipu_csc_scaler_q_data_default;
> +
> +	dev_dbg(priv->dev, "Created instance %p, m2m_ctx: %p\n", ctx,
> +		ctx->fh.m2m_ctx);
> +
> +	return 0;
> +
> +err_ctrls:
> +	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
> +err_ctx:
> +	v4l2_fh_del(&ctx->fh);
> +	v4l2_fh_exit(&ctx->fh);
> +	kfree(ctx);
> +	return ret;
> +}
> +
> +static int ipu_csc_scaler_release(struct file *file)
> +{
> +	struct ipu_csc_scaler_priv *priv = video_drvdata(file);
> +	struct ipu_csc_scaler_ctx *ctx = fh_to_ctx(file->private_data);
> +
> +	dev_dbg(priv->dev, "Releasing instance %p\n", ctx);
> +
> +	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
> +	v4l2_fh_del(&ctx->fh);
> +	v4l2_fh_exit(&ctx->fh);
> +	kfree(ctx);
> +
> +	return 0;
> +}
> +
> +static const struct v4l2_file_operations ipu_csc_scaler_fops = {
> +	.owner		= THIS_MODULE,
> +	.open		= ipu_csc_scaler_open,
> +	.release	= ipu_csc_scaler_release,
> +	.poll		= v4l2_m2m_fop_poll,
> +	.unlocked_ioctl	= video_ioctl2,
> +	.mmap		= v4l2_m2m_fop_mmap,
> +};
> +
> +static struct v4l2_m2m_ops m2m_ops = {
> +	.device_run	= device_run,
> +	.job_abort	= job_abort,
> +};
> +
> +static const struct video_device ipu_csc_scaler_videodev_template = {
> +	.name		= "ipu0_ic_pp mem2mem",

I would expect to see something like 'imx-media-csc-scaler' as the name.
Wouldn't that be more descriptive?

> +	.fops		= &ipu_csc_scaler_fops,
> +	.ioctl_ops	= &ipu_csc_scaler_ioctl_ops,
> +	.minor		= -1,
> +	.release	= video_device_release,
> +	.vfl_dir	= VFL_DIR_M2M,
> +	.device_caps	= V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING,
> +};
> +
> +int imx_media_csc_scaler_device_register(struct imx_media_video_dev *vdev)
> +{
> +	struct ipu_csc_scaler_priv *priv = vdev_to_priv(vdev);
> +	struct video_device *vfd = vdev->vfd;
> +	int ret;
> +
> +	vfd->v4l2_dev = &priv->md->v4l2_dev;
> +
> +	ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
> +	if (ret) {
> +		v4l2_err(vfd->v4l2_dev, "Failed to register video device\n");
> +		return ret;
> +	}
> +
> +	v4l2_info(vfd->v4l2_dev, "Registered %s as /dev/%s\n", vfd->name,
> +		  video_device_node_name(vfd));
> +
> +	return 0;
> +}
> +EXPORT_SYMBOL_GPL(imx_media_csc_scaler_device_register);
> +
> +void imx_media_csc_scaler_device_unregister(struct imx_media_video_dev *vdev)
> +{
> +	struct ipu_csc_scaler_priv *priv = vdev_to_priv(vdev);
> +	struct video_device *vfd = priv->vdev.vfd;
> +
> +	mutex_lock(&priv->mutex);
> +
> +	if (video_is_registered(vfd))
> +		video_unregister_device(vfd);
> +
> +	mutex_unlock(&priv->mutex);
> +}
> +EXPORT_SYMBOL_GPL(imx_media_csc_scaler_device_unregister);
> +
> +struct imx_media_video_dev *
> +imx_media_csc_scaler_device_init(struct imx_media_dev *md)
> +{
> +	struct ipu_csc_scaler_priv *priv;
> +	struct video_device *vfd;
> +	int ret;
> +
> +	priv = devm_kzalloc(md->md.dev, sizeof(*priv), GFP_KERNEL);
> +	if (!priv)
> +		return ERR_PTR(-ENOMEM);
> +
> +	priv->md = md;
> +	priv->dev = md->md.dev;
> +
> +	mutex_init(&priv->mutex);
> +
> +	vfd = video_device_alloc();
> +	if (!vfd)
> +		return ERR_PTR(-ENOMEM);
> +
> +	*vfd = ipu_csc_scaler_videodev_template;
> +	snprintf(vfd->name, sizeof(vfd->name), "ipu_ic_pp mem2mem");
> +	vfd->lock = &priv->mutex;
> +	priv->vdev.vfd = vfd;
> +
> +	INIT_LIST_HEAD(&priv->vdev.list);
> +
> +	video_set_drvdata(vfd, priv);
> +
> +	priv->m2m_dev = v4l2_m2m_init(&m2m_ops);
> +	if (IS_ERR(priv->m2m_dev)) {
> +		ret = PTR_ERR(priv->m2m_dev);
> +		v4l2_err(&md->v4l2_dev, "Failed to init mem2mem device: %d\n",
> +			 ret);
> +		return ERR_PTR(ret);
> +	}
> +
> +	return &priv->vdev;
> +}
> +EXPORT_SYMBOL_GPL(imx_media_csc_scaler_device_init);
> +
> +void imx_media_csc_scaler_device_remove(struct imx_media_video_dev *vdev)
> +{
> +	struct ipu_csc_scaler_priv *priv = vdev_to_priv(vdev);
> +
> +	v4l2_m2m_release(priv->m2m_dev);
> +}
> +EXPORT_SYMBOL_GPL(imx_media_csc_scaler_device_remove);
> +
> +MODULE_DESCRIPTION("i.MX IPUv3 mem2mem scaler/CSC driver");
> +MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
> +MODULE_LICENSE("GPL");
> diff --git a/drivers/staging/media/imx/imx-media-dev.c b/drivers/staging/media/imx/imx-media-dev.c
> index 4b344a4a3706..fee2ece0a6f8 100644
> --- a/drivers/staging/media/imx/imx-media-dev.c
> +++ b/drivers/staging/media/imx/imx-media-dev.c
> @@ -318,12 +318,36 @@ static int imx_media_probe_complete(struct v4l2_async_notifier *notifier)
>  		goto unlock;
>  
>  	ret = v4l2_device_register_subdev_nodes(&imxmd->v4l2_dev);
> -unlock:
> -	mutex_unlock(&imxmd->mutex);
>  	if (ret)
> -		return ret;
> +		goto unlock;
> +
> +	imxmd->m2m_vdev = imx_media_csc_scaler_device_init(imxmd);
> +	if (IS_ERR(imxmd->m2m_vdev)) {
> +		ret = PTR_ERR(imxmd->m2m_vdev);
> +		goto unlock;
> +	}
>  
> -	return media_device_register(&imxmd->md);
> +	ret = imx_media_csc_scaler_device_register(imxmd->m2m_vdev);
> +	if (ret)
> +		goto m2m_remove;
> +
> +	mutex_unlock(&imxmd->mutex);
> +
> +	ret = media_device_register(&imxmd->md);
> +	if (ret) {
> +		mutex_lock(&imxmd->mutex);
> +		goto m2m_unreg;
> +	}

I am missing a call to v4l2_m2m_register_media_controller() to ensure that this
device shows up in the media controller.

> +
> +	return 0;
> +
> +m2m_unreg:
> +	imx_media_csc_scaler_device_unregister(imxmd->m2m_vdev);
> +m2m_remove:
> +	imx_media_csc_scaler_device_remove(imxmd->m2m_vdev);
> +unlock:
> +	mutex_unlock(&imxmd->mutex);
> +	return ret;
>  }
>  
>  static const struct v4l2_async_notifier_operations imx_media_subdev_ops = {
> @@ -532,6 +556,8 @@ static int imx_media_remove(struct platform_device *pdev)
>  	v4l2_async_notifier_unregister(&imxmd->notifier);
>  	imx_media_remove_internal_subdevs(imxmd);
>  	v4l2_async_notifier_cleanup(&imxmd->notifier);
> +	imx_media_csc_scaler_device_unregister(imxmd->m2m_vdev);
> +	imx_media_csc_scaler_device_remove(imxmd->m2m_vdev);
>  	v4l2_device_unregister(&imxmd->v4l2_dev);
>  	media_device_unregister(&imxmd->md);
>  	media_device_cleanup(&imxmd->md);
> diff --git a/drivers/staging/media/imx/imx-media.h b/drivers/staging/media/imx/imx-media.h
> index bc7feb81937c..d1c4df4445cf 100644
> --- a/drivers/staging/media/imx/imx-media.h
> +++ b/drivers/staging/media/imx/imx-media.h
> @@ -149,6 +149,9 @@ struct imx_media_dev {
>  
>  	/* for async subdev registration */
>  	struct v4l2_async_notifier notifier;
> +
> +	/* IC scaler/CSC mem2mem video device */
> +	struct imx_media_video_dev *m2m_vdev;
>  };
>  
>  enum codespace_sel {
> @@ -262,6 +265,13 @@ void imx_media_capture_device_set_format(struct imx_media_video_dev *vdev,
>  					 struct v4l2_pix_format *pix);
>  void imx_media_capture_device_error(struct imx_media_video_dev *vdev);
>  
> +/* imx-media-mem2mem.c */
> +struct imx_media_video_dev *
> +imx_media_csc_scaler_device_init(struct imx_media_dev *dev);
> +void imx_media_csc_scaler_device_remove(struct imx_media_video_dev *vdev);
> +int imx_media_csc_scaler_device_register(struct imx_media_video_dev *vdev);
> +void imx_media_csc_scaler_device_unregister(struct imx_media_video_dev *vdev);
> +
>  /* subdev group ids */
>  #define IMX_MEDIA_GRP_ID_CSI2      BIT(8)
>  #define IMX_MEDIA_GRP_ID_CSI_BIT   9
> 

How did you test the rotate control? I'm asking because I would expect to see code
that checks this control in the *_fmt ioctls: rotating 90 or 270 degrees would mean
that the reported width and height are swapped for the capture queue. And I see no
sign that that is done. For the same reason this should also impact the g/s_selection
code.

Regards,

	Hans

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH v7] media: imx: add mem2mem device
  2019-01-18  9:30 ` Hans Verkuil
@ 2019-01-18 11:18   ` Philipp Zabel
  2019-01-18 12:41     ` Hans Verkuil
  2019-01-18 16:51     ` Philipp Zabel
  0 siblings, 2 replies; 15+ messages in thread
From: Philipp Zabel @ 2019-01-18 11:18 UTC (permalink / raw)
  To: Hans Verkuil, linux-media; +Cc: Steve Longerbeam, Hans Verkuil, kernel

Hi Hans,

On Fri, 2019-01-18 at 10:30 +0100, Hans Verkuil wrote:
> On 1/17/19 4:50 PM, Philipp Zabel wrote:
[...]
> > +
> > +static const struct video_device ipu_csc_scaler_videodev_template = {
> > +	.name		= "ipu0_ic_pp mem2mem",
> 
> I would expect to see something like 'imx-media-csc-scaler' as the name.
> Wouldn't that be more descriptive?

Yes, thank you. I'll change this as well.

[...]
> > diff --git a/drivers/staging/media/imx/imx-media-dev.c b/drivers/staging/media/imx/imx-media-dev.c
> > index 4b344a4a3706..fee2ece0a6f8 100644
> > --- a/drivers/staging/media/imx/imx-media-dev.c
> > +++ b/drivers/staging/media/imx/imx-media-dev.c
> > @@ -318,12 +318,36 @@ static int imx_media_probe_complete(struct v4l2_async_notifier *notifier)
> >  		goto unlock;
> >  
> >  	ret = v4l2_device_register_subdev_nodes(&imxmd->v4l2_dev);
> > -unlock:
> > -	mutex_unlock(&imxmd->mutex);
> >  	if (ret)
> > -		return ret;
> > +		goto unlock;
> > +
> > +	imxmd->m2m_vdev = imx_media_csc_scaler_device_init(imxmd);
> > +	if (IS_ERR(imxmd->m2m_vdev)) {
> > +		ret = PTR_ERR(imxmd->m2m_vdev);
> > +		goto unlock;
> > +	}
> >  
> > -	return media_device_register(&imxmd->md);
> > +	ret = imx_media_csc_scaler_device_register(imxmd->m2m_vdev);
> > +	if (ret)
> > +		goto m2m_remove;
> > +
> > +	mutex_unlock(&imxmd->mutex);
> > +
> > +	ret = media_device_register(&imxmd->md);
> > +	if (ret) {
> > +		mutex_lock(&imxmd->mutex);
> > +		goto m2m_unreg;
> > +	}
> 
> I am missing a call to v4l2_m2m_register_media_controller() to ensure that this
> device shows up in the media controller.

I can do that, but what would be the purpose of it showing up in the
media controller?
There is nothing to be configured, no interaction with the rest of the
graph, and the processing subdevice wouldn't even correspond to an
actual hardware unit. I assumed this would clutter the media controller
for no good reason.

[...]
> > @@ -262,6 +265,13 @@ void imx_media_capture_device_set_format(struct imx_media_video_dev *vdev,
> >  					 struct v4l2_pix_format *pix);
> >  void imx_media_capture_device_error(struct imx_media_video_dev *vdev);
> >  
> > +/* imx-media-mem2mem.c */
> > +struct imx_media_video_dev *
> > +imx_media_csc_scaler_device_init(struct imx_media_dev *dev);
> > +void imx_media_csc_scaler_device_remove(struct imx_media_video_dev *vdev);
> > +int imx_media_csc_scaler_device_register(struct imx_media_video_dev *vdev);
> > +void imx_media_csc_scaler_device_unregister(struct imx_media_video_dev *vdev);
> > +
> >  /* subdev group ids */
> >  #define IMX_MEDIA_GRP_ID_CSI2      BIT(8)
> >  #define IMX_MEDIA_GRP_ID_CSI_BIT   9
> 
> How did you test the rotate control? I'm asking because I would expect to see code
> that checks this control in the *_fmt ioctls: rotating 90 or 270 degrees would mean
> that the reported width and height are swapped for the capture queue. And I see no
> sign that that is done. For the same reason this should also impact the g/s_selection
> code.

I'm probably misunderstanding something.

Which "reported width and height" have to be swapped compared to what?
Since this is a scaler, the capture queue has its own width and height,
independent of the output queue width and height.
What currently happens is that the chosen capture width and height stay
the same upon rotation, so the image is stretched into the configured
dimensions.

The V4L2_CID_ROTATE documentation [1] states:

    Rotates the image by specified angle. Common angles are 90, 270 and
    180. Rotating the image to 90 and 270 will reverse the height and
    width of the display window. It is necessary to set the new height
    and width of the picture using the VIDIOC_S_FMT ioctl according to
    the rotation angle selected.

[1] https://linuxtv.org/downloads/v4l-dvb-apis-new/uapi/v4l/control.html#control-ids

I didn't understand what the "display window" is in the context of a
mem2mem scaler/rotator/CSC converter. Is this intended to mean that
aspect ratio should be kept as intact as possible, and that every time
V4L2_CID_ROTATE changes between 0/180 and 90/270, an automatic S_FMT
should be issued on the capture queue with width and height switched
compared to the currently set value? This might still slightly modify
width and height due to alignment restrictions.

regards
Philipp

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH v7] media: imx: add mem2mem device
  2019-01-18 11:18   ` Philipp Zabel
@ 2019-01-18 12:41     ` Hans Verkuil
  2019-01-18 13:42       ` Philipp Zabel
  2019-01-18 16:51     ` Philipp Zabel
  1 sibling, 1 reply; 15+ messages in thread
From: Hans Verkuil @ 2019-01-18 12:41 UTC (permalink / raw)
  To: Philipp Zabel, linux-media; +Cc: Steve Longerbeam, Hans Verkuil, kernel

On 1/18/19 12:18 PM, Philipp Zabel wrote:
> Hi Hans,
> 
> On Fri, 2019-01-18 at 10:30 +0100, Hans Verkuil wrote:
>> On 1/17/19 4:50 PM, Philipp Zabel wrote:
> [...]
>>> +
>>> +static const struct video_device ipu_csc_scaler_videodev_template = {
>>> +	.name		= "ipu0_ic_pp mem2mem",
>>
>> I would expect to see something like 'imx-media-csc-scaler' as the name.
>> Wouldn't that be more descriptive?
> 
> Yes, thank you. I'll change this as well.
> 
> [...]
>>> diff --git a/drivers/staging/media/imx/imx-media-dev.c b/drivers/staging/media/imx/imx-media-dev.c
>>> index 4b344a4a3706..fee2ece0a6f8 100644
>>> --- a/drivers/staging/media/imx/imx-media-dev.c
>>> +++ b/drivers/staging/media/imx/imx-media-dev.c
>>> @@ -318,12 +318,36 @@ static int imx_media_probe_complete(struct v4l2_async_notifier *notifier)
>>>  		goto unlock;
>>>  
>>>  	ret = v4l2_device_register_subdev_nodes(&imxmd->v4l2_dev);
>>> -unlock:
>>> -	mutex_unlock(&imxmd->mutex);
>>>  	if (ret)
>>> -		return ret;
>>> +		goto unlock;
>>> +
>>> +	imxmd->m2m_vdev = imx_media_csc_scaler_device_init(imxmd);
>>> +	if (IS_ERR(imxmd->m2m_vdev)) {
>>> +		ret = PTR_ERR(imxmd->m2m_vdev);
>>> +		goto unlock;
>>> +	}
>>>  
>>> -	return media_device_register(&imxmd->md);
>>> +	ret = imx_media_csc_scaler_device_register(imxmd->m2m_vdev);
>>> +	if (ret)
>>> +		goto m2m_remove;
>>> +
>>> +	mutex_unlock(&imxmd->mutex);
>>> +
>>> +	ret = media_device_register(&imxmd->md);
>>> +	if (ret) {
>>> +		mutex_lock(&imxmd->mutex);
>>> +		goto m2m_unreg;
>>> +	}
>>
>> I am missing a call to v4l2_m2m_register_media_controller() to ensure that this
>> device shows up in the media controller.
> 
> I can do that, but what would be the purpose of it showing up in the
> media controller?
> There is nothing to be configured, no interaction with the rest of the
> graph, and the processing subdevice wouldn't even correspond to an
> actual hardware unit. I assumed this would clutter the media controller
> for no good reason.

Just because you can't change routing doesn't mean you can't expose it in the
media controller topology. It makes sense to show in the topology that you
have this block.

That said, I can't decide whether or not to add this. For a standalone m2m
device I would not require it, but in this case you already have a media
device.

I guess it is easy enough to add later, so leave this for now.

> 
> [...]
>>> @@ -262,6 +265,13 @@ void imx_media_capture_device_set_format(struct imx_media_video_dev *vdev,
>>>  					 struct v4l2_pix_format *pix);
>>>  void imx_media_capture_device_error(struct imx_media_video_dev *vdev);
>>>  
>>> +/* imx-media-mem2mem.c */
>>> +struct imx_media_video_dev *
>>> +imx_media_csc_scaler_device_init(struct imx_media_dev *dev);
>>> +void imx_media_csc_scaler_device_remove(struct imx_media_video_dev *vdev);
>>> +int imx_media_csc_scaler_device_register(struct imx_media_video_dev *vdev);
>>> +void imx_media_csc_scaler_device_unregister(struct imx_media_video_dev *vdev);
>>> +
>>>  /* subdev group ids */
>>>  #define IMX_MEDIA_GRP_ID_CSI2      BIT(8)
>>>  #define IMX_MEDIA_GRP_ID_CSI_BIT   9
>>
>> How did you test the rotate control? I'm asking because I would expect to see code
>> that checks this control in the *_fmt ioctls: rotating 90 or 270 degrees would mean
>> that the reported width and height are swapped for the capture queue. And I see no
>> sign that that is done. For the same reason this should also impact the g/s_selection
>> code.
> 
> I'm probably misunderstanding something.
> 
> Which "reported width and height" have to be swapped compared to what?
> Since this is a scaler, the capture queue has its own width and height,
> independent of the output queue width and height.
> What currently happens is that the chosen capture width and height stay
> the same upon rotation, so the image is stretched into the configured
> dimensions.
> 
> The V4L2_CID_ROTATE documentation [1] states:
> 
>     Rotates the image by specified angle. Common angles are 90, 270 and
>     180. Rotating the image to 90 and 270 will reverse the height and
>     width of the display window. It is necessary to set the new height
>     and width of the picture using the VIDIOC_S_FMT ioctl according to
>     the rotation angle selected.
> 
> [1] https://linuxtv.org/downloads/v4l-dvb-apis-new/uapi/v4l/control.html#control-ids
> 
> I didn't understand what the "display window" is in the context of a
> mem2mem scaler/rotator/CSC converter. Is this intended to mean that
> aspect ratio should be kept as intact as possible, and that every time
> V4L2_CID_ROTATE changes between 0/180 and 90/270, an automatic S_FMT
> should be issued on the capture queue with width and height switched
> compared to the currently set value? This might still slightly modify
> width and height due to alignment restrictions.

Most drivers that implement rotate do not have a scaler, so rotating a
640x480 image would result in a 480x640 result. Hence for an m2m device
the output queue would have format 640x480 and the capture queue 480x640.

So the question becomes: what if you can both rotate and scale, what
do you do when you change the rotate control?

I would expect as a user of this API that if I first scale 640x480 to
320x240, then rotate 90 degrees, that the capture format is now 240x320.

In other words, rotating comes after scaling.

But even if you keep the current behavior I suspect you still need to
update the format due to alignment restriction. Either due to 4:2:2
formats or due to the 'resizer cannot downsize more than 4:1' limitation.

E.g. in the latter case it is fine to downscale 640x480 to 640x120,
but if you now rotate 90 degrees, then you can no longer downscale
480x640 to 640x120 (640 / 120 > 4).

At least, if I understand the code correctly.

There may also be a corner case with rotate and MIN_W/MIN_H resolutions
(MAX_W == MAX_H, so that's probably fine).

It might be a good idea to set MIN_H equal to MIN_W to simplify the rotate
code.

Regards,

	Hans

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH v7] media: imx: add mem2mem device
  2019-01-18 12:41     ` Hans Verkuil
@ 2019-01-18 13:42       ` Philipp Zabel
  2019-01-18 13:45         ` Hans Verkuil
  0 siblings, 1 reply; 15+ messages in thread
From: Philipp Zabel @ 2019-01-18 13:42 UTC (permalink / raw)
  To: Hans Verkuil, linux-media; +Cc: Steve Longerbeam, Hans Verkuil, kernel

On Fri, 2019-01-18 at 13:41 +0100, Hans Verkuil wrote:
[...]
> > I can do that, but what would be the purpose of it showing up in the
> > media controller?
> > There is nothing to be configured, no interaction with the rest of the
> > graph, and the processing subdevice wouldn't even correspond to an
> > actual hardware unit. I assumed this would clutter the media controller
> > for no good reason.
> 
> Just because you can't change routing doesn't mean you can't expose it in the
> media controller topology. It makes sense to show in the topology that you
> have this block.
> 
> That said, I can't decide whether or not to add this. For a standalone m2m
> device I would not require it, but in this case you already have a media
> device.
> 
> I guess it is easy enough to add later, so leave this for now.

Ok.

[...]
> > > How did you test the rotate control?

Just FTR, I used GStreamer for most of my testing, something like
(simplified):

gst-launch-1.0 videotestsrc ! v4l2video14convert extra-controls=cid,rotate=90 ! autovideosink

> > > I'm asking because I would expect to see code
> > > that checks this control in the *_fmt ioctls:

In a way this does happen, since _try_fmt calls ipu_image_convert_adjust
with a ctx->rot_mode parameter. It only has an influence on the
alignment though.

[...]
> > The V4L2_CID_ROTATE documentation [1] states:
> > 
> >     Rotates the image by specified angle. Common angles are 90, 270 and
> >     180. Rotating the image to 90 and 270 will reverse the height and
> >     width of the display window. It is necessary to set the new height
> >     and width of the picture using the VIDIOC_S_FMT ioctl according to
> >     the rotation angle selected.
> > 
> > [1] https://linuxtv.org/downloads/v4l-dvb-apis-new/uapi/v4l/control.html#control-ids
> > 
> > I didn't understand what the "display window" is in the context of a
> > mem2mem scaler/rotator/CSC converter. Is this intended to mean that
> > aspect ratio should be kept as intact as possible, and that every time
> > V4L2_CID_ROTATE changes between 0/180 and 90/270, an automatic S_FMT
> > should be issued on the capture queue with width and height switched
> > compared to the currently set value? This might still slightly modify
> > width and height due to alignment restrictions.
> 
> Most drivers that implement rotate do not have a scaler, so rotating a
> 640x480 image would result in a 480x640 result. Hence for an m2m device
> the output queue would have format 640x480 and the capture queue 480x640.
> 
> So the question becomes: what if you can both rotate and scale, what
> do you do when you change the rotate control?
>
> I would expect as a user of this API that if I first scale 640x480 to
> 320x240, then rotate 90 degrees, that the capture format is now 240x320.
> 
> In other words, rotating comes after scaling.

Ok, that makes sense. I had always thought of the rotation property
being set first.

> But even if you keep the current behavior I suspect you still need to
> update the format due to alignment restriction. Either due to 4:2:2
> formats or due to the 'resizer cannot downsize more than 4:1' limitation.
> 
> E.g. in the latter case it is fine to downscale 640x480 to 640x120,
> but if you now rotate 90 degrees, then you can no longer downscale
> 480x640 to 640x120 (640 / 120 > 4).
> 
> At least, if I understand the code correctly.

Oh. Worse, the output queue's width alignment restrictions also depend
on the rotation mode.

Are we allowed to change the output queue format to meet alignment
restrictions when changing the ROTATE property? The same is true
for HFLIP.

regards
Philipp

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH v7] media: imx: add mem2mem device
  2019-01-18 13:42       ` Philipp Zabel
@ 2019-01-18 13:45         ` Hans Verkuil
  2019-01-18 14:17           ` Philipp Zabel
  0 siblings, 1 reply; 15+ messages in thread
From: Hans Verkuil @ 2019-01-18 13:45 UTC (permalink / raw)
  To: Philipp Zabel, linux-media; +Cc: Steve Longerbeam, Hans Verkuil, kernel

On 1/18/19 2:42 PM, Philipp Zabel wrote:
> On Fri, 2019-01-18 at 13:41 +0100, Hans Verkuil wrote:
> [...]
>>> I can do that, but what would be the purpose of it showing up in the
>>> media controller?
>>> There is nothing to be configured, no interaction with the rest of the
>>> graph, and the processing subdevice wouldn't even correspond to an
>>> actual hardware unit. I assumed this would clutter the media controller
>>> for no good reason.
>>
>> Just because you can't change routing doesn't mean you can't expose it in the
>> media controller topology. It makes sense to show in the topology that you
>> have this block.
>>
>> That said, I can't decide whether or not to add this. For a standalone m2m
>> device I would not require it, but in this case you already have a media
>> device.
>>
>> I guess it is easy enough to add later, so leave this for now.
> 
> Ok.
> 
> [...]
>>>> How did you test the rotate control?
> 
> Just FTR, I used GStreamer for most of my testing, something like
> (simplified):
> 
> gst-launch-1.0 videotestsrc ! v4l2video14convert extra-controls=cid,rotate=90 ! autovideosink
> 
>>>> I'm asking because I would expect to see code
>>>> that checks this control in the *_fmt ioctls:
> 
> In a way this does happen, since _try_fmt calls ipu_image_convert_adjust
> with a ctx->rot_mode parameter. It only has an influence on the
> alignment though.
> 
> [...]
>>> The V4L2_CID_ROTATE documentation [1] states:
>>>
>>>     Rotates the image by specified angle. Common angles are 90, 270 and
>>>     180. Rotating the image to 90 and 270 will reverse the height and
>>>     width of the display window. It is necessary to set the new height
>>>     and width of the picture using the VIDIOC_S_FMT ioctl according to
>>>     the rotation angle selected.
>>>
>>> [1] https://linuxtv.org/downloads/v4l-dvb-apis-new/uapi/v4l/control.html#control-ids
>>>
>>> I didn't understand what the "display window" is in the context of a
>>> mem2mem scaler/rotator/CSC converter. Is this intended to mean that
>>> aspect ratio should be kept as intact as possible, and that every time
>>> V4L2_CID_ROTATE changes between 0/180 and 90/270, an automatic S_FMT
>>> should be issued on the capture queue with width and height switched
>>> compared to the currently set value? This might still slightly modify
>>> width and height due to alignment restrictions.
>>
>> Most drivers that implement rotate do not have a scaler, so rotating a
>> 640x480 image would result in a 480x640 result. Hence for an m2m device
>> the output queue would have format 640x480 and the capture queue 480x640.
>>
>> So the question becomes: what if you can both rotate and scale, what
>> do you do when you change the rotate control?
>>
>> I would expect as a user of this API that if I first scale 640x480 to
>> 320x240, then rotate 90 degrees, that the capture format is now 240x320.
>>
>> In other words, rotating comes after scaling.
> 
> Ok, that makes sense. I had always thought of the rotation property
> being set first.
> 
>> But even if you keep the current behavior I suspect you still need to
>> update the format due to alignment restriction. Either due to 4:2:2
>> formats or due to the 'resizer cannot downsize more than 4:1' limitation.
>>
>> E.g. in the latter case it is fine to downscale 640x480 to 640x120,
>> but if you now rotate 90 degrees, then you can no longer downscale
>> 480x640 to 640x120 (640 / 120 > 4).
>>
>> At least, if I understand the code correctly.
> 
> Oh. Worse, the output queue's width alignment restrictions also depend
> on the rotation mode.
> 
> Are we allowed to change the output queue format to meet alignment
> restrictions when changing the ROTATE property? The same is true
> for HFLIP.

Certainly. Unless vb2_is_busy() is true, of course, since no format changes
are allowed once buffers are allocated. So s_ctrl would return -EBUSY in
that case.

Does HFLIP change the format size? Or just the order? E.g. YUYV becomes VYUY?

In the latter case I would expect that you can compensate for that in the
driver.

Regards,

	Hans

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH v7] media: imx: add mem2mem device
  2019-01-18 13:45         ` Hans Verkuil
@ 2019-01-18 14:17           ` Philipp Zabel
  0 siblings, 0 replies; 15+ messages in thread
From: Philipp Zabel @ 2019-01-18 14:17 UTC (permalink / raw)
  To: Hans Verkuil, linux-media; +Cc: Steve Longerbeam, Hans Verkuil, kernel

On Fri, 2019-01-18 at 14:45 +0100, Hans Verkuil wrote:
> On 1/18/19 2:42 PM, Philipp Zabel wrote:
> > On Fri, 2019-01-18 at 13:41 +0100, Hans Verkuil wrote:
> > [...]
> > > > I can do that, but what would be the purpose of it showing up in the
> > > > media controller?
> > > > There is nothing to be configured, no interaction with the rest of the
> > > > graph, and the processing subdevice wouldn't even correspond to an
> > > > actual hardware unit. I assumed this would clutter the media controller
> > > > for no good reason.
> > > 
> > > Just because you can't change routing doesn't mean you can't expose it in the
> > > media controller topology. It makes sense to show in the topology that you
> > > have this block.
> > > 
> > > That said, I can't decide whether or not to add this. For a standalone m2m
> > > device I would not require it, but in this case you already have a media
> > > device.
> > > 
> > > I guess it is easy enough to add later, so leave this for now.
> > 
> > Ok.
> > 
> > [...]
> > > > > How did you test the rotate control?
> > 
> > Just FTR, I used GStreamer for most of my testing, something like
> > (simplified):
> > 
> > gst-launch-1.0 videotestsrc ! v4l2video14convert extra-controls=cid,rotate=90 ! autovideosink
> > 
> > > > > I'm asking because I would expect to see code
> > > > > that checks this control in the *_fmt ioctls:
> > 
> > In a way this does happen, since _try_fmt calls ipu_image_convert_adjust
> > with a ctx->rot_mode parameter. It only has an influence on the
> > alignment though.
> > 
> > [...]
> > > > The V4L2_CID_ROTATE documentation [1] states:
> > > > 
> > > >     Rotates the image by specified angle. Common angles are 90, 270 and
> > > >     180. Rotating the image to 90 and 270 will reverse the height and
> > > >     width of the display window. It is necessary to set the new height
> > > >     and width of the picture using the VIDIOC_S_FMT ioctl according to
> > > >     the rotation angle selected.
> > > > 
> > > > [1] https://linuxtv.org/downloads/v4l-dvb-apis-new/uapi/v4l/control.html#control-ids
> > > > 
> > > > I didn't understand what the "display window" is in the context of a
> > > > mem2mem scaler/rotator/CSC converter. Is this intended to mean that
> > > > aspect ratio should be kept as intact as possible, and that every time
> > > > V4L2_CID_ROTATE changes between 0/180 and 90/270, an automatic S_FMT
> > > > should be issued on the capture queue with width and height switched
> > > > compared to the currently set value? This might still slightly modify
> > > > width and height due to alignment restrictions.
> > > 
> > > Most drivers that implement rotate do not have a scaler, so rotating a
> > > 640x480 image would result in a 480x640 result. Hence for an m2m device
> > > the output queue would have format 640x480 and the capture queue 480x640.
> > > 
> > > So the question becomes: what if you can both rotate and scale, what
> > > do you do when you change the rotate control?
> > > 
> > > I would expect as a user of this API that if I first scale 640x480 to
> > > 320x240, then rotate 90 degrees, that the capture format is now 240x320.
> > > 
> > > In other words, rotating comes after scaling.
> > 
> > Ok, that makes sense. I had always thought of the rotation property
> > being set first.
> > 
> > > But even if you keep the current behavior I suspect you still need to
> > > update the format due to alignment restriction. Either due to 4:2:2
> > > formats or due to the 'resizer cannot downsize more than 4:1' limitation.
> > > 
> > > E.g. in the latter case it is fine to downscale 640x480 to 640x120,
> > > but if you now rotate 90 degrees, then you can no longer downscale
> > > 480x640 to 640x120 (640 / 120 > 4).
> > > 
> > > At least, if I understand the code correctly.
> > 
> > Oh. Worse, the output queue's width alignment restrictions also depend
> > on the rotation mode.
> > 
> > Are we allowed to change the output queue format to meet alignment
> > restrictions when changing the ROTATE property? The same is true
> > for HFLIP.
> 
> Certainly. Unless vb2_is_busy() is true, of course, since no format changes
> are allowed once buffers are allocated. So s_ctrl would return -EBUSY in
> that case.

Ok. I'll do that then.

> Does HFLIP change the format size? Or just the order? E.g. YUYV becomes VYUY?

No, the DMA controller always reads bursts of (at least) 8 pixels,
converts them to 32-bit AYUV, and writes them into an internal FIFO.
That doesn't change, HFLIP just makes the Image Converter process pixels
in each line from right to left. But the first processed pixel then is
always the last pixel of a burst, so the only widths we can support
hflipped are aligned to a multiple of 8 pixels.

Non-flipped reads are still burst aligned, but we can just read over the
end of lines with unaligned width into the horizontal padding / next
line and ignore the last few pixels.

thanks
Philipp

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH v7] media: imx: add mem2mem device
  2019-01-18 11:18   ` Philipp Zabel
  2019-01-18 12:41     ` Hans Verkuil
@ 2019-01-18 16:51     ` Philipp Zabel
  2019-01-19  9:28       ` Hans Verkuil
  1 sibling, 1 reply; 15+ messages in thread
From: Philipp Zabel @ 2019-01-18 16:51 UTC (permalink / raw)
  To: Hans Verkuil, Steve Longerbeam, linux-media; +Cc: Hans Verkuil, kernel

On Fri, 2019-01-18 at 12:18 +0100, Philipp Zabel wrote:
> Hi Hans,
> 
> On Fri, 2019-01-18 at 10:30 +0100, Hans Verkuil wrote:
> > On 1/17/19 4:50 PM, Philipp Zabel wrote:
> 
> [...]
> > > +
> > > +static const struct video_device ipu_csc_scaler_videodev_template = {
> > > +	.name		= "ipu0_ic_pp mem2mem",
> > 
> > I would expect to see something like 'imx-media-csc-scaler' as the name.
> > Wouldn't that be more descriptive?
> 
> Yes, thank you. I'll change this as well.

Actually, this is overwritten a few lines later anyway:

       snprintf(vfd->name, sizeof(vfd->name), "ipu_ic_pp mem2mem");

Not that it makes a difference. But I noticed that I chose this name for
something close to consistency with the other IPU devices:

$ cat /sys/class/video4linux/video*/name
ipu_ic_pp mem2mem
coda-encoder
coda-decoder
ipu1_ic_prpenc capture
ipu1_ic_prpvf capture
ipu2_ic_prpenc capture
ipu2_ic_prpvf capture
ipu1_csi0 capture
ipu1_csi1 capture
ipu2_csi0 capture
ipu2_csi1 capture

They all start with the IPU / subdevice (/ IC task) prefix.
Maybe "ipu_ic_pp csc/scaler" would be more appropriate?

regards
Philipp

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH v7] media: imx: add mem2mem device
  2019-01-18 16:51     ` Philipp Zabel
@ 2019-01-19  9:28       ` Hans Verkuil
  0 siblings, 0 replies; 15+ messages in thread
From: Hans Verkuil @ 2019-01-19  9:28 UTC (permalink / raw)
  To: Philipp Zabel, Steve Longerbeam, linux-media; +Cc: Hans Verkuil, kernel

On 01/18/2019 05:51 PM, Philipp Zabel wrote:
> On Fri, 2019-01-18 at 12:18 +0100, Philipp Zabel wrote:
>> Hi Hans,
>>
>> On Fri, 2019-01-18 at 10:30 +0100, Hans Verkuil wrote:
>>> On 1/17/19 4:50 PM, Philipp Zabel wrote:
>>
>> [...]
>>>> +
>>>> +static const struct video_device ipu_csc_scaler_videodev_template = {
>>>> +	.name		= "ipu0_ic_pp mem2mem",
>>>
>>> I would expect to see something like 'imx-media-csc-scaler' as the name.
>>> Wouldn't that be more descriptive?
>>
>> Yes, thank you. I'll change this as well.
> 
> Actually, this is overwritten a few lines later anyway:
> 
>        snprintf(vfd->name, sizeof(vfd->name), "ipu_ic_pp mem2mem");
> 
> Not that it makes a difference. But I noticed that I chose this name for
> something close to consistency with the other IPU devices:
> 
> $ cat /sys/class/video4linux/video*/name
> ipu_ic_pp mem2mem
> coda-encoder
> coda-decoder
> ipu1_ic_prpenc capture
> ipu1_ic_prpvf capture
> ipu2_ic_prpenc capture
> ipu2_ic_prpvf capture
> ipu1_csi0 capture
> ipu1_csi1 capture
> ipu2_csi0 capture
> ipu2_csi1 capture
> 
> They all start with the IPU / subdevice (/ IC task) prefix.
> Maybe "ipu_ic_pp csc/scaler" would be more appropriate?

That will work for me.

Regards,

	Hans

> 
> regards
> Philipp
> 


^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH v7] media: imx: add mem2mem device
  2019-01-17 15:50 [PATCH v7] media: imx: add mem2mem device Philipp Zabel
  2019-01-18  9:30 ` Hans Verkuil
@ 2019-02-12 19:01 ` Tim Harvey
  2019-02-13 17:22   ` Nicolas Dufresne
  2019-02-15 11:10   ` Philipp Zabel
  1 sibling, 2 replies; 15+ messages in thread
From: Tim Harvey @ 2019-02-12 19:01 UTC (permalink / raw)
  To: Philipp Zabel; +Cc: linux-media, Steve Longerbeam, Hans Verkuil, Sascha Hauer

On Thu, Jan 17, 2019 at 7:50 AM Philipp Zabel <p.zabel@pengutronix.de> wrote:
>
> Add a single imx-media mem2mem video device that uses the IPU IC PP
> (image converter post processing) task for scaling and colorspace
> conversion.
> On i.MX6Q/DL SoCs with two IPUs currently only the first IPU is used.
>
> The hardware only supports writing to destination buffers up to
> 1024x1024 pixels in a single pass, arbitrary sizes can be achieved
> by rendering multiple tiles per frame.
>
> Signed-off-by: Philipp Zabel <p.zabel@pengutronix.de>
> [slongerbeam@gmail.com: use ipu_image_convert_adjust(), fix
>  device_run() error handling, add missing media-device header,
>  unregister and remove the mem2mem device in error paths in
>  imx_media_probe_complete() and in imx_media_remove()]
> Signed-off-by: Steve Longerbeam <slongerbeam@gmail.com>
> ---
> Changes since v6 [1]:
>  - Change driver name in querycap to imx-media-csc-scaler
>  - Drop V4L2_SEL_TGT_COMPOSE_PADDED from vidioc_g_selection
>  - Simplify error handling in ipu_csc_scaler_init_controls
>
> [1] https://patchwork.linuxtv.org/patch/53757/
> ---

Hi Philipp,

Thanks for this driver - this is providing support that I need to
overcome direct CSI->IC limitations.

Can you give me some examples of how you're using this? I'm testing this
on top of linux-media and trying the following gstreamer pipelines
(gstreamer recent master) and running into trouble but it could very
likely be me doing something wrong in my pipelines:

# upscale
gst-launch-1.0 videotestsrc ! video/x-raw,width=320,height=240 !
v4l2convert output-io-mode=dmabuf-import !
video/x-raw,width=640,height=480 ! kmssink
^^^ fails with
ERROR: from element
/GstPipeline:pipeline0/GstAutoVideoSink:autovideosink0/GstKMSSink:autovideosink0-actual-sink-kms:
GStreamer encountered a general resource error.
Additional debug info:
gstkmssink.c(1529): gst_kms_sink_show_frame ():
/GstPipeline:pipeline0/GstAutoVideoSink:autovideosink0/GstKMSSink:autovideosink0-actual-sink-kms:
drmModeSetPlane failed: No space left on device (-28)
perhaps this is something strange with kmssink or is a buffer size not
being set properly in the mem2mem scaler?

# downscale
gst-launch-1.0 videotestsrc ! video/x-raw,width=640,height=480 !
v4l2convert output-io-mode=dmabuf-import !
video/x-raw,width=320,height=280 ! kmssink
(gst-launch-1.0:15493): GStreamer-CRITICAL **: 18:06:49.029:
gst_buffer_resize_range: assertion 'bufmax >= bufoffs + offset + size'
failed
ERROR: from element
/GstPipeline:pipeline0/GstVideoTestSrc:videotestsrc0: Internal data
stream error.
Additional debug info:
gstbasesrc.c(3064): gst_base_src_loop ():
/GstPipeline:pipeline0/GstVideoTestSrc:videotestsrc0:
streaming stopped, reason error (-5)
ERROR: pipeline doesn't want to preroll.
Setting pipeline to NULL ...
Freeing pipeline ...

# downscale using videotestsrc defaults
gst-launch-1.0 videotestsrc ! v4l2convert output-io-mode=dmabuf-import
! video/x-raw,width=100,height=200 ! kmssink
^^^ works

# rotation
gst-launch-1.0 videotestsrc ! v4l2convert output-io-mode=dmabuf-import
extra-controls=cid,rotate=90 ! kmssink
^^^ shows no rotation in displayed video but kernel debugging shows
ipu_csc_scaler_s_ctrl getting called with V4L2_CID_ROTATE,
ctrl->val=90 and ipu_degrees_to_rot_mode sets rot_mode=IPU_ROT_BIT_90
and returns no error. I also see that
ipu_image_convert_adjust gets called with rot_mode=IPU_ROT_BIT_90

I'm also not sure how to specify hflip/vflip... I don't think
extra-controls parses 'hflip', 'vflip' as ipu_csc_scaler_s_ctrl gets
called with V4L2_CID_HFLIP/V4L2_CID_VFLIP but ctrl->val is always 0.

Thanks,

Tim

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH v7] media: imx: add mem2mem device
  2019-02-12 19:01 ` Tim Harvey
@ 2019-02-13 17:22   ` Nicolas Dufresne
  2019-02-15 11:10   ` Philipp Zabel
  1 sibling, 0 replies; 15+ messages in thread
From: Nicolas Dufresne @ 2019-02-13 17:22 UTC (permalink / raw)
  To: Tim Harvey, Philipp Zabel
  Cc: linux-media, Steve Longerbeam, Hans Verkuil, Sascha Hauer

Le mardi 12 février 2019 à 11:01 -0800, Tim Harvey a écrit :
> On Thu, Jan 17, 2019 at 7:50 AM Philipp Zabel <p.zabel@pengutronix.de> wrote:
> > Add a single imx-media mem2mem video device that uses the IPU IC PP
> > (image converter post processing) task for scaling and colorspace
> > conversion.
> > On i.MX6Q/DL SoCs with two IPUs currently only the first IPU is used.
> > 
> > The hardware only supports writing to destination buffers up to
> > 1024x1024 pixels in a single pass, arbitrary sizes can be achieved
> > by rendering multiple tiles per frame.
> > 
> > Signed-off-by: Philipp Zabel <p.zabel@pengutronix.de>
> > [slongerbeam@gmail.com: use ipu_image_convert_adjust(), fix
> >  device_run() error handling, add missing media-device header,
> >  unregister and remove the mem2mem device in error paths in
> >  imx_media_probe_complete() and in imx_media_remove()]
> > Signed-off-by: Steve Longerbeam <slongerbeam@gmail.com>
> > ---
> > Changes since v6 [1]:
> >  - Change driver name in querycap to imx-media-csc-scaler
> >  - Drop V4L2_SEL_TGT_COMPOSE_PADDED from vidioc_g_selection
> >  - Simplify error handling in ipu_csc_scaler_init_controls
> > 
> > [1] https://patchwork.linuxtv.org/patch/53757/
> > ---
> 
> Hi Philipp,
> 
> Thanks for this driver - this is providing support that I need to
> overcome direct CSI->IC limitations.
> 
Can you give me some examples of how you're using this? I'm testing this
> on top of linux-media and trying the following gstreamer pipelines
> (gstreamer recent master) and running into trouble but it could very
> likely be me doing something wrong in my pipelines:
> 
> # upscale
> gst-launch-1.0 videotestsrc ! video/x-raw,width=320,height=240 !
> v4l2convert output-io-mode=dmabuf-import !

Something important to note is that there is no stride adaptation done
in the OUTPUT device dmabuf importation path (yet). This is a difficult
task with the V4L2 API, and the reason this needs to be opt-in by the user.
You need to make sure yourself (for now) that the stride and buffer
alignment matches between the two drivers, and the only way to fix it
if not is by modifying the respective drivers (for now). Some initial
work has been done the other way around, we are trying to make sure we
cover all cases before implementing the other side.

> video/x-raw,width=640,height=480 ! kmssink
> ^^^ fails with
> ERROR: from element
> /GstPipeline:pipeline0/GstAutoVideoSink:autovideosink0/GstKMSSink:autovideosink0-actual-sink-kms:
> GStreamer encountered a general resource error.
> Additional debug info:
> gstkmssink.c(1529): gst_kms_sink_show_frame ():
> /GstPipeline:pipeline0/GstAutoVideoSink:autovideosink0/GstKMSSink:autovideosink0-actual-sink-kms:
> drmModeSetPlane failed: No space left on device (-28)
> perhaps this is something strange with kmssink or is a buffer size not
> being set properly in the mem2mem scaler?

Note that this one can happen randomly as HW may have limitations that
aren't programmatically exposed to userspace. That being said, use
GST_DEBUG="kmssink:7" to enable related debug traces, that should help
to see what is being done. Matching against some kernel trace is always
useful.

> 
> # downscale
> gst-launch-1.0 videotestsrc ! video/x-raw,width=640,height=480 !
> v4l2convert output-io-mode=dmabuf-import !
> video/x-raw,width=320,height=280 ! kmssink
> (gst-launch-1.0:15493): GStreamer-CRITICAL **: 18:06:49.029:
> gst_buffer_resize_range: assertion 'bufmax >= bufoffs + offset + size'

Would be really nice if you could report this, mentioning the GStreamer
version you are running. GStreamer-CRITICAL means that crash avoidance
code (assert) has been reached. This means you have reached a code path
that wasn't expected by the developers. It's a bit like the kernel
BUG_ON.

https://gitlab.freedesktop.org/gstreamer/gst-plugins-good/issues/new

> failed
> ERROR: from element
> /GstPipeline:pipeline0/GstVideoTestSrc:videotestsrc0: Internal data
> stream error.
> Additional debug info:
> gstbasesrc.c(3064): gst_base_src_loop ():
> /GstPipeline:pipeline0/GstVideoTestSrc:videotestsrc0:
> streaming stopped, reason error (-5)
> ERROR: pipeline doesn't want to preroll.
> Setting pipeline to NULL ...
> Freeing pipeline ...
> 
> # downscale using videotstsrc defaults
> gst-launch-1.0 videotestsrc ! v4l2convert output-io-mode=dmabuf-import
> ! video/x-raw,width=100,height=200 ! kmssink
> ^^^ works
> 
> # rotation
> gst-launch-1.0 videotestsrc ! v4l2convert output-io-mode=dmabuf-import
> extra-controls=cid,rotate=90 ! kmssink
> ^^^ shows no rotation in displayed video but kernel debugging shows
> ipu_csc_scaler_s_ctrl getting called with V4L2_CID_ROTATE,
> ctrl->val=90 and ipu_degrees_to_rot_mode sets rot_mode=IPU_ROT_BIT_90
> and returns no error. I also see that
> ipu_image_convert_adjust gets called with rot_mode=IPU_ROT_BIT_90

There is no support for 90 degree rotation in the GstV4l2Transform
class. 90 degree rotation must be implemented in the element to be
usable, as the capability negotiation must do the width/height
flipping.

There is also no support for arbitrary rotation, I believe one would
need to add code to suggest a large capture buffer size that fits the
rotated image. It's also ambiguous what would happen otherwise, and
that should be specified the Linux Media documentation in my opinion.

> 
> I'm also not sure how to specify hflip/vflip... I don't think
> extra-controls parses 'hflip', 'vflip' as ipu_csc_scaler_s_ctrl gets
> called with V4L2_CID_HFLIP/V4L2_CID_VFLIP but ctrl->val is always 0.

hflip/vflip was reported to work on vim2m by Mauro, not sure why it
wouldn't for your driver. Maybe both drivers don't expose this the same
way?

> 
> Thanks,
> 
> Tim


^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH v7] media: imx: add mem2mem device
  2019-02-12 19:01 ` Tim Harvey
  2019-02-13 17:22   ` Nicolas Dufresne
@ 2019-02-15 11:10   ` Philipp Zabel
  2019-02-15 16:24     ` Nicolas Dufresne
  2019-02-15 23:34     ` Tim Harvey
  1 sibling, 2 replies; 15+ messages in thread
From: Philipp Zabel @ 2019-02-15 11:10 UTC (permalink / raw)
  To: Tim Harvey, Nicolas Dufresne
  Cc: linux-media, Steve Longerbeam, Hans Verkuil, Sascha Hauer

Hi Tim,

On Tue, 2019-02-12 at 11:01 -0800, Tim Harvey wrote:
> On Thu, Jan 17, 2019 at 7:50 AM Philipp Zabel <p.zabel@pengutronix.de> wrote:
> > 
> > Add a single imx-media mem2mem video device that uses the IPU IC PP
> > (image converter post processing) task for scaling and colorspace
> > conversion.
> > On i.MX6Q/DL SoCs with two IPUs currently only the first IPU is used.
> > 
> > The hardware only supports writing to destination buffers up to
> > 1024x1024 pixels in a single pass, arbitrary sizes can be achieved
> > by rendering multiple tiles per frame.
> > 
> > Signed-off-by: Philipp Zabel <p.zabel@pengutronix.de>
> > [slongerbeam@gmail.com: use ipu_image_convert_adjust(), fix
> >  device_run() error handling, add missing media-device header,
> >  unregister and remove the mem2mem device in error paths in
> >  imx_media_probe_complete() and in imx_media_remove()]
> > Signed-off-by: Steve Longerbeam <slongerbeam@gmail.com>
> > ---
> > Changes since v6 [1]:
> >  - Change driver name in querycap to imx-media-csc-scaler
> >  - Drop V4L2_SEL_TGT_COMPOSE_PADDED from vidioc_g_selection
> >  - Simplify error handling in ipu_csc_scaler_init_controls
> > 
> > [1] https://patchwork.linuxtv.org/patch/53757/
> > ---
> 
> Hi Philipp,
> 
> Thanks for this driver - this is providing support that I need to
> overcome direct CSI->IC limitations.
> 
> Can you give me some examples of how you're using this? I'm testing this
> on top of linux-media and trying the following gstreamer pipelines
> (gstreamer recent master) and running into trouble but it could very
> likely be me doing something wrong in my pipelines:
> 
> # upscale
> gst-launch-1.0 videotestsrc ! video/x-raw,width=320,height=240 !
> v4l2convert output-io-mode=dmabuf-import !
> video/x-raw,width=640,height=480 ! kmssink

You can't have v4l2convert import dmabufs because videotestsrc doesn't
produce any. I used:

gst-launch-1.0 videotestsrc ! video/x-raw,width=320,height=240 ! v4l2video10convert ! video/x-raw,width=640,height=480 ! kmssink

That should work, passing dmabufs between v4l2 and kms automatically.

I usually use kmssink, but waylandsink, glimagesink, or v4l2h264enc
also work for testing.

> # downscale
> gst-launch-1.0 videotestsrc ! video/x-raw,width=640,height=480 !
> v4l2convert output-io-mode=dmabuf-import !
> video/x-raw,width=320,height=280 ! kmssink

Drop output-io-mode unless your source is capable of producing dmabufs.
I think kmssink is trying to scale in this case, which imx-drm doesn't
support. You may have to either keep aspect ratio, or give kmssink the
can-scale=false parameter.

> # downscale using videotestsrc defaults
> gst-launch-1.0 videotestsrc ! v4l2convert output-io-mode=dmabuf-import
> ! video/x-raw,width=100,height=200 ! kmssink
> ^^^ works

That will probably just negotiate 100x200 on the input side and bypass
conversion altogether.

> # rotation
> gst-launch-1.0 videotestsrc ! v4l2convert output-io-mode=dmabuf-import
> extra-controls=cid,rotate=90 ! kmssink

This will likely negotiate the same format and dimensions on both sides
and bypass conversion as well. GStreamer has no concept of the rotation
as of yet. Try:

gst-launch-1.0 videotestsrc ! video/x-raw,width=320,height=240 ! v4l2video10convert extra-controls=cid,rotate=90 ! video/x-raw,width=240,height=320 ! kmssink can-scale=false

> I'm also not sure how to specify hflip/vflip... I don't think
> extra-controls parses 'hflip', 'vflip' as ipu_csc_scaler_s_ctrl gets
> called with V4L2_CID_HFLIP/V4L2_CID_VFLIP but ctrl->val is always 0.

You can use v4l2-ctl -L to list the CID names, they are horizontal_flip
and vertical_flip, respectively. Again, the input and output formats
must be different because GStreamer doesn't know about the flipping yet:

gst-launch-1.0 videotestsrc ! video/x-raw,width=320,height=240 ! v4l2video10convert extra-controls=cid,horizontal_flip=1 ! video/x-raw,width=640,height=480 ! kmssink can-scale=false

We'd have to add actual properties for rotate/flip to v4l2convert,
instead of using the extra-controls workaround, probably something
similar to the video-direction property of the software videoflip
element.

regards
Philipp

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH v7] media: imx: add mem2mem device
  2019-02-15 11:10   ` Philipp Zabel
@ 2019-02-15 16:24     ` Nicolas Dufresne
  2019-02-15 23:36       ` Tim Harvey
  2019-02-15 23:34     ` Tim Harvey
  1 sibling, 1 reply; 15+ messages in thread
From: Nicolas Dufresne @ 2019-02-15 16:24 UTC (permalink / raw)
  To: Philipp Zabel, Tim Harvey
  Cc: linux-media, Steve Longerbeam, Hans Verkuil, Sascha Hauer

[-- Attachment #1: Type: text/plain, Size: 1143 bytes --]

Le vendredi 15 février 2019 à 12:10 +0100, Philipp Zabel a écrit :
> > I'm also not sure how to specify hflip/vflip... I don't think
> > extra-controls parses 'hflip', 'vflip' as ipu_csc_scaler_s_ctrl gets
> > called with V4L2_CID_HFLIP/V4L2_CID_VFLIP but ctrl->val is always 0.
> 
> You can use v4l2-ctl -L to list the CID names, they are horizontal_flip
> and vertical_flip, respectively. Again, the input and output formats
> must be different because GStreamer doesn't know about the flipping yet:
> 
> gst-launch-1.0 videotestsrc ! video/x-raw,width=320,height=240 ! v4l2video10convert extra-controls=cid,horizontal_flip=1 ! video/x-raw,width=640,height=480 ! kmssink can-scale=false
> 
> We'd have to add actual properties for rotate/flip to v4l2convert,
> instead of using the extra-controls workaround, probably something
> similar to the video-direction property of the software videoflip
> element.

Note that we have a disable-passthrough property in master, a trivial
patch to backport if you need it.

https://gitlab.freedesktop.org/gstreamer/gst-plugins-good/commit/fe5236be8771ea82c850ebebe19cf1064d112bf0

[-- Attachment #2: This is a digitally signed message part --]
[-- Type: application/pgp-signature, Size: 195 bytes --]

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH v7] media: imx: add mem2mem device
  2019-02-15 11:10   ` Philipp Zabel
  2019-02-15 16:24     ` Nicolas Dufresne
@ 2019-02-15 23:34     ` Tim Harvey
  1 sibling, 0 replies; 15+ messages in thread
From: Tim Harvey @ 2019-02-15 23:34 UTC (permalink / raw)
  To: Philipp Zabel
  Cc: Nicolas Dufresne, linux-media, Steve Longerbeam, Hans Verkuil,
	Sascha Hauer

On Fri, Feb 15, 2019 at 3:10 AM Philipp Zabel <p.zabel@pengutronix.de> wrote:
>
> Hi Tim,
>
> On Tue, 2019-02-12 at 11:01 -0800, Tim Harvey wrote:
> > On Thu, Jan 17, 2019 at 7:50 AM Philipp Zabel <p.zabel@pengutronix.de> wrote:
> > >
> > > Add a single imx-media mem2mem video device that uses the IPU IC PP
> > > (image converter post processing) task for scaling and colorspace
> > > conversion.
> > > On i.MX6Q/DL SoCs with two IPUs currently only the first IPU is used.
> > >
> > > The hardware only supports writing to destination buffers up to
> > > 1024x1024 pixels in a single pass, arbitrary sizes can be achieved
> > > by rendering multiple tiles per frame.
> > >
> > > Signed-off-by: Philipp Zabel <p.zabel@pengutronix.de>
> > > [slongerbeam@gmail.com: use ipu_image_convert_adjust(), fix
> > >  device_run() error handling, add missing media-device header,
> > >  unregister and remove the mem2mem device in error paths in
> > >  imx_media_probe_complete() and in imx_media_remove()]
> > > Signed-off-by: Steve Longerbeam <slongerbeam@gmail.com>
> > > ---
> > > Changes since v6 [1]:
> > >  - Change driver name in querycap to imx-media-csc-scaler
> > >  - Drop V4L2_SEL_TGT_COMPOSE_PADDED from vidioc_g_selection
> > >  - Simplify error handling in ipu_csc_scaler_init_controls
> > >
> > > [1] https://patchwork.linuxtv.org/patch/53757/
> > > ---
> >
> > Hi Philipp,
> >
> > Thanks for this driver - this is providing support that I need to
> > overcome direct CSI->IC limitations.
> >
> > Can you give me some examples of how you're using this? I'm testing this
> > on top of linux-media and trying the following gstreamer pipelines
> > (gstreamer recent master) and running into trouble but it could very
> > likely be me doing something wrong in my pipelines:
> >
> > # upscale
> > gst-launch-1.0 videotestsrc ! video/x-raw,width=320,height=240 !
> > v4l2convert output-io-mode=dmabuf-import !
> > video/x-raw,width=640,height=480 ! kmssink
>
> You can't have v4l2convert import dmabufs because videotestsrc doesn't
> produce any. I used:
>
> gst-launch-1.0 videotestsrc ! video/x-raw,width=320,height=240 ! v4l2video10convert ! video/x-raw,width=640,height=480 ! kmssink
>
> That should work, passing dmabufs between v4l2 and kms automatically.
>
> I usually use kmssink but waylandsink, glimagesink, or v4l2h264enc for
> testing though.
>

Philipp,

That makes sense and jives with what Nicolas was saying about alignment.

I'm currently testing linux-media/master with your v7 mem2mem patch
and 'gst-launch-1.0 videotestsrc ! video/x-raw,width=320,height=240 !
v4l2video10convert ! video/x-raw,width=640,height=480 ! kmssink' hangs
the system. Do you have some other patches queued up?

> > # downscale
> > gst-launch-1.0 videotestsrc ! video/x-raw,width=640,height=480 !
> > v4l2convert output-io-mode=dmabuf-import !
> > video/x-raw,width=320,height=280 ! kmssink
>
> Drop output-io-mode unless your source is capable of producing dmabufs.
> I think kmssink is trying to scale in this case, which imx-drm doesn't
> support. You may have to either keep aspect ratio, or give kmssink the
> can-scale=false parameter.
>
> > # downscale using videotestsrc defaults
> > gst-launch-1.0 videotestsrc ! v4l2convert output-io-mode=dmabuf-import
> > ! video/x-raw,width=100,height=200 ! kmssink
> > ^^^ works
>
> That will probably just negotiate 100x200 on the input side and bypass
> conversion altogether.
>
> > # rotation
> > gst-launch-1.0 videotestsrc ! v4l2convert output-io-mode=dmabuf-import
> > extra-controls=cid,rotate=90 ! kmssink
>
> This will likely negotiate the same format and dimensions on both sides
> and bypass conversion as well. GStreamer has no concept of the rotation
> as of yet. Try:
>
> gst-launch-1.0 videotestsrc ! video/x-raw,width=320,height=240 ! v4l2video10convert extra-controls=cid,rotate=90 ! video/x-raw,width=240,height=320 ! kmssink can-scale=false
>
> > I'm also not sure how to specify hflip/vflip... I don't think
> > extra-controls parses 'hflip', 'vflip' as ipu_csc_scaler_s_ctrl gets
> > called with V4L2_CID_HFLIP/V4L2_CID_VFLIP but ctrl->val is always 0.
>
> You can use v4l2-ctl -L to list the CID names, they are horizontal_flip
> and vertical_flip, respectively. Again, the input and output formats
> must be different because GStreamer doesn't know about the flipping yet:
>
> gst-launch-1.0 videotestsrc ! video/x-raw,width=320,height=240 ! v4l2video10convert extra-controls=cid,horizontal_flip=1 ! video/x-raw,width=640,height=480 ! kmssink can-scale=false
>
> We'd have to add actual properties for rotate/flip to v4l2convert,
> instead of using the extra-controls workaround, probably something
> similar to the video-direction property of the software videoflip
> element.
>

Philipp,

Removing the dmabuf options makes sense along with what Nicolas was
saying about the dma buffer alignment.

The fact that Gstreamer doesn't understand flip/rotate also makes
complete sense - it bypasses the conversion completely unless you
change the format.

All of these work perfectly:
# upscale
gst-launch-1.0 videotestsrc ! video/x-raw,width=320,height=240 !
v4l2convert ! video/x-raw,width=640,height=480 ! kmssink
can-scale=false
# downscale
gst-launch-1.0 videotestsrc ! video/x-raw,width=640,height=480 !
v4l2convert ! video/x-raw,width=320,height=240 ! kmssink
can-scale=false
# rotate
gst-launch-1.0 videotestsrc ! video/x-raw,width=320,height=240 !
v4l2convert extra-controls=cid,rotate=90 !
video/x-raw,width=240,height=320 ! kmssink can-scale=false
# flip
gst-launch-1.0 videotestsrc ! video/x-raw,width=320,height=240 !
v4l2convert extra-controls=cid,horizontal_flip=1 !
video/x-raw,width=640,height=480 ! kmssink can-scale=false
gst-launch-1.0 videotestsrc ! video/x-raw,width=320,height=240 !
v4l2convert extra-controls=cid,vertical_flip=1 !
video/x-raw,width=640,height=480 ! kmssink can-scale=false

As well as the following using an imx-media capture pipeline (480p
source on the sensor) where we can now use aligned dmabuf's:
# encode
gst-launch-1.0 v4l2src device=/dev/video4 ! v4l2convert
output-io-mode=dmabuf-import ! v4l2h264enc
output-io-mode=dmabuf-import ! rtph264pay ! udpsink host=172.24.20.19
port=5001
# scale/encode
gst-launch-1.0 v4l2src device=/dev/video4 ! v4l2convert
output-io-mode=dmabuf-import ! video/x-raw,width=1440,height=960 !
v4l2h264enc output-io-mode=dmabuf-import ! rtph264pay ! udpsink
host=172.24.20.19 port=5001
# scale/flip/encode
gst-launch-1.0 v4l2src device=/dev/video4 ! v4l2convert
output-io-mode=dmabuf-import extra-controls=cid,horizontal_flip=1 !
video/x-raw,width=1440,height=960 ! v4l2h264enc
output-io-mode=dmabuf-import ! rtph264pay ! udpsink host=172.24.20.19
port=5001
# scale/rotate/encode
gst-launch-1.0 v4l2src device=/dev/video4 ! v4l2convert
output-io-mode=dmabuf-import extra-controls=cid,rotate=90 !
video/x-raw,width=1440,height=960 ! v4l2h264enc
output-io-mode=dmabuf-import ! rtph264pay ! udpsink host=172.24.20.19
port=5001

Many thanks - hoping to see this merged soon!

Tim

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH v7] media: imx: add mem2mem device
  2019-02-15 16:24     ` Nicolas Dufresne
@ 2019-02-15 23:36       ` Tim Harvey
  0 siblings, 0 replies; 15+ messages in thread
From: Tim Harvey @ 2019-02-15 23:36 UTC (permalink / raw)
  To: Nicolas Dufresne
  Cc: Philipp Zabel, linux-media, Steve Longerbeam, Hans Verkuil, Sascha Hauer

On Fri, Feb 15, 2019 at 8:24 AM Nicolas Dufresne <nicolas@ndufresne.ca> wrote:
>
> Le vendredi 15 février 2019 à 12:10 +0100, Philipp Zabel a écrit :
> > > I'm also not sure how to specify hflip/vflip... I don't think
> > > extra-controls parses 'hflip', 'vflip' as ipu_csc_scaler_s_ctrl gets
> > > called with V4L2_CID_HFLIP/V4L2_CID_VFLIP but ctrl->val is always 0.
> >
> > You can use v4l2-ctl -L to list the CID names, they are horizontal_flip
> > and vertical_flip, respectively. Again, the input and output formats
> > must be different because GStreamer doesn't know about the flipping yet:
> >
> > gst-launch-1.0 videotestsrc ! video/x-raw,width=320,height=240 ! v4l2video10convert extra-controls=cid,horizontal_flip=1 ! video/x-raw,width=640,height=480 ! kmssink can-scale=false
> >
> > We'd have to add actual properties for rotate/flip to v4l2convert,
> > instead of using the extra-controls workaround, probably something
> > similar to the video-direction property of the software videoflip
> > element.
>
> Note that we have a disable-passthrough property in master, a trivial
> patch to backport if you need it.
>
> https://gitlab.freedesktop.org/gstreamer/gst-plugins-good/commit/fe5236be8771ea82c850ebebe19cf1064d112bf0

Nicolas,

Yes, this works great on gstreamer-master:

gst-launch-1.0 videotestsrc ! v4l2convert
extra-controls=cid,vertical_flip=1 ! kmssink
^^^ fails to flip b/c gstreamer bypasses the conversion as input and
output formats are the same

gst-launch-1.0 videotestsrc ! v4l2convert
extra-controls=cid,vertical_flip=1 disable-passthrough=1 ! kmssink
^^^ flips as expected

Tim

^ permalink raw reply	[flat|nested] 15+ messages in thread

end of thread, other threads:[~2019-02-15 23:37 UTC | newest]

Thread overview: 15+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-01-17 15:50 [PATCH v7] media: imx: add mem2mem device Philipp Zabel
2019-01-18  9:30 ` Hans Verkuil
2019-01-18 11:18   ` Philipp Zabel
2019-01-18 12:41     ` Hans Verkuil
2019-01-18 13:42       ` Philipp Zabel
2019-01-18 13:45         ` Hans Verkuil
2019-01-18 14:17           ` Philipp Zabel
2019-01-18 16:51     ` Philipp Zabel
2019-01-19  9:28       ` Hans Verkuil
2019-02-12 19:01 ` Tim Harvey
2019-02-13 17:22   ` Nicolas Dufresne
2019-02-15 11:10   ` Philipp Zabel
2019-02-15 16:24     ` Nicolas Dufresne
2019-02-15 23:36       ` Tim Harvey
2019-02-15 23:34     ` Tim Harvey

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).