Diffstat (limited to 'drivers/staging/media/imx')
-rw-r--r--  drivers/staging/media/imx/Kconfig                 |   34
-rw-r--r--  drivers/staging/media/imx/Makefile                |   18
-rw-r--r--  drivers/staging/media/imx/TODO                    |   54
-rw-r--r--  drivers/staging/media/imx/imx-ic-common.c         |   87
-rw-r--r--  drivers/staging/media/imx/imx-ic-prp.c            |  514
-rw-r--r--  drivers/staging/media/imx/imx-ic-prpencvf.c       | 1372
-rw-r--r--  drivers/staging/media/imx/imx-ic.h                |   32
-rw-r--r--  drivers/staging/media/imx/imx-media-capture.c     | 1051
-rw-r--r--  drivers/staging/media/imx/imx-media-csc-scaler.c  |  924
-rw-r--r--  drivers/staging/media/imx/imx-media-csi.c         | 2089
-rw-r--r--  drivers/staging/media/imx/imx-media-dev-common.c  |  417
-rw-r--r--  drivers/staging/media/imx/imx-media-dev.c         |  145
-rw-r--r--  drivers/staging/media/imx/imx-media-fim.c         |  485
-rw-r--r--  drivers/staging/media/imx/imx-media-internal-sd.c |  306
-rw-r--r--  drivers/staging/media/imx/imx-media-of.c          |   76
-rw-r--r--  drivers/staging/media/imx/imx-media-utils.c       |  886
-rw-r--r--  drivers/staging/media/imx/imx-media-vdic.c        |  971
-rw-r--r--  drivers/staging/media/imx/imx-media.h             |  313
-rw-r--r--  drivers/staging/media/imx/imx6-mipi-csi2.c        |  803
-rw-r--r--  drivers/staging/media/imx/imx7-media-csi.c        | 2310
-rw-r--r--  drivers/staging/media/imx/imx8mq-mipi-csi2.c      | 1004
21 files changed, 13891 insertions, 0 deletions
diff --git a/drivers/staging/media/imx/Kconfig b/drivers/staging/media/imx/Kconfig
new file mode 100644
index 000000000..0bacac302
--- /dev/null
+++ b/drivers/staging/media/imx/Kconfig
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0
+config VIDEO_IMX_MEDIA
+ tristate "i.MX5/6 V4L2 media core driver"
+ depends on ARCH_MXC || COMPILE_TEST
+ depends on HAS_DMA
+ depends on VIDEO_DEV
+ depends on VIDEO_DEV
+ select MEDIA_CONTROLLER
+ select V4L2_FWNODE
+ select V4L2_MEM2MEM_DEV
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEO_V4L2_SUBDEV_API
+ help
+ Say yes here to enable support for the video4linux media controller
+ driver for the i.MX5/6 SOC.
+
+if VIDEO_IMX_MEDIA
+menu "i.MX5/6/7/8 Media Sub devices"
+
+config VIDEO_IMX_CSI
+ tristate "i.MX5/6 Camera Sensor Interface driver"
+ depends on IMX_IPUV3_CORE
+ default y
+ help
+ A video4linux camera sensor interface driver for i.MX5/6.
+
+config VIDEO_IMX7_CSI
+ tristate "i.MX6UL/L / i.MX7 / i.MX8M Camera Sensor Interface driver"
+ default y
+ help
+ Enable support for the video4linux camera sensor interface driver for
+ i.MX6UL/L, i.MX7 or i.MX8M.
+endmenu
+endif
diff --git a/drivers/staging/media/imx/Makefile b/drivers/staging/media/imx/Makefile
new file mode 100644
index 000000000..d82be8981
--- /dev/null
+++ b/drivers/staging/media/imx/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+imx-media-common-objs := imx-media-capture.o imx-media-dev-common.o \
+ imx-media-of.o imx-media-utils.o
+
+imx6-media-objs := imx-media-dev.o imx-media-internal-sd.o \
+ imx-ic-common.o imx-ic-prp.o imx-ic-prpencvf.o imx-media-vdic.o \
+ imx-media-csc-scaler.o
+
+imx6-media-csi-objs := imx-media-csi.o imx-media-fim.o
+
+obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx-media-common.o
+
+obj-$(CONFIG_VIDEO_IMX_CSI) += imx6-media.o
+obj-$(CONFIG_VIDEO_IMX_CSI) += imx6-media-csi.o
+obj-$(CONFIG_VIDEO_IMX_CSI) += imx6-mipi-csi2.o
+
+obj-$(CONFIG_VIDEO_IMX7_CSI) += imx7-media-csi.o
+obj-$(CONFIG_VIDEO_IMX7_CSI) += imx8mq-mipi-csi2.o
diff --git a/drivers/staging/media/imx/TODO b/drivers/staging/media/imx/TODO
new file mode 100644
index 000000000..5d3a337c8
--- /dev/null
+++ b/drivers/staging/media/imx/TODO
@@ -0,0 +1,54 @@
+
+- The Frame Interval Monitor could be exported to v4l2-core for
+ general use.
+
+- The CSI subdevice parses its nearest upstream neighbor's device-tree
+ bus config in order to set up the CSI. Laurent Pinchart argues that
+ instead the CSI subdev should call its neighbor's g_mbus_config op
+ (which should be propagated if necessary) to get this info. However,
+ Hans Verkuil is planning to remove the g_mbus_config op. For now this
+ driver uses the parsed DT bus config method until this issue is
+ resolved.
+
+ 2020-06: g_mbus_config has been removed in favour of the get_mbus_config
+ pad operation, which should be used to avoid parsing the remote endpoint
+ configuration (see the sketch after this file).
+
+- This media driver supports inheriting V4L2 controls from the subdevices
+ in a capture device's pipeline to the video capture devices. The controls
+ for each capture device are updated in the link_notify callback when the
+ pipeline is modified. This feature should be removed; userspace should use
+ the subdev-based userspace API instead.
+
+- Similarly to the legacy control handling, the legacy format handling,
+ where formats on the video nodes are influenced by the active format of
+ the connected subdev, should be removed.
+
+- i.MX7: all of the above, since it uses the imx media core
+
+- i.MX7: use Frame Interval Monitor
+
+- imx7-media-csi: Restrict the supported formats list to the SoC version.
+
+ The imx7 CSI bridge can be configured to sample pixel components from the Rx
+ queue in single (8bpp) or double (16bpp) component modes. Image format
+ variants with different sample sizes (i.e. YUYV_2X8 vs YUYV_1X16) determine
+ the pixel component sampling size per clock cycle and their packing mode
+ (see imx7_csi_configure() for details).
+
+ As the imx7 CSI bridge can be interfaced with different IP blocks depending on
+ the SoC model it is integrated on, the Rx queue sampling size should match
+ the size of the samples transferred by the transmitting IP block.
+
+ To avoid misconfiguration of the capture pipeline, the enumeration of the
+ supported formats should be restricted to match the transmitting mode of the
+ pixel source.
+
+ Example: the i.MX8MM SoC integrates the CSI bridge with the Samsung CSIS
+ CSI-2 receiver, which operates in dual pixel sampling mode. The CSI bridge
+ should then only expose the 1X16 format variants, which instruct it to
+ operate in dual pixel sampling mode. When the CSI bridge is instead
+ integrated on an i.MX7, which supports both serial and parallel input, it
+ should expose both variants.
+
+ This currently only applies to YUYV formats, but other formats might need
+ to be handled in the same way.
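
The 2020-06 note above points at the get_mbus_config pad operation as the
replacement for parsing the remote endpoint. A minimal sketch of such a
query follows; it is illustrative only and not part of this patch, and the
helper name query_upstream_bus_config() and the fallback-to-DT behaviour are
assumptions.

/*
 * Sketch only: query the upstream subdev's bus configuration via the
 * get_mbus_config pad operation instead of parsing its DT endpoint.
 */
static int query_upstream_bus_config(struct v4l2_subdev *csi_sd,
                                     struct v4l2_subdev *src_sd,
                                     unsigned int src_pad)
{
        struct v4l2_mbus_config cfg = { };
        int ret;

        ret = v4l2_subdev_call(src_sd, pad, get_mbus_config, src_pad, &cfg);
        if (ret == -ENOIOCTLCMD)
                return 0;       /* fall back to the parsed DT bus config */
        if (ret)
                return ret;

        /* cfg.type is e.g. V4L2_MBUS_PARALLEL or V4L2_MBUS_CSI2_DPHY */
        v4l2_info(csi_sd, "upstream bus type: %d\n", cfg.type);

        return 0;
}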
diff --git a/drivers/staging/media/imx/imx-ic-common.c b/drivers/staging/media/imx/imx-ic-common.c
new file mode 100644
index 000000000..6df1ffb53
--- /dev/null
+++ b/drivers/staging/media/imx/imx-ic-common.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * V4L2 Image Converter Subdev for Freescale i.MX5/6 SOC
+ *
+ * Copyright (c) 2014-2016 Mentor Graphics Inc.
+ */
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include "imx-media.h"
+#include "imx-ic.h"
+
+#define IC_TASK_PRP IC_NUM_TASKS
+#define IC_NUM_OPS (IC_NUM_TASKS + 1)
+
+static struct imx_ic_ops *ic_ops[IC_NUM_OPS] = {
+ [IC_TASK_PRP] = &imx_ic_prp_ops,
+ [IC_TASK_ENCODER] = &imx_ic_prpencvf_ops,
+ [IC_TASK_VIEWFINDER] = &imx_ic_prpencvf_ops,
+};
+
+struct v4l2_subdev *imx_media_ic_register(struct v4l2_device *v4l2_dev,
+ struct device *ipu_dev,
+ struct ipu_soc *ipu,
+ u32 grp_id)
+{
+ struct imx_ic_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(ipu_dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ priv->ipu_dev = ipu_dev;
+ priv->ipu = ipu;
+
+ /* get our IC task id */
+ switch (grp_id) {
+ case IMX_MEDIA_GRP_ID_IPU_IC_PRP:
+ priv->task_id = IC_TASK_PRP;
+ break;
+ case IMX_MEDIA_GRP_ID_IPU_IC_PRPENC:
+ priv->task_id = IC_TASK_ENCODER;
+ break;
+ case IMX_MEDIA_GRP_ID_IPU_IC_PRPVF:
+ priv->task_id = IC_TASK_VIEWFINDER;
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ v4l2_subdev_init(&priv->sd, ic_ops[priv->task_id]->subdev_ops);
+ v4l2_set_subdevdata(&priv->sd, priv);
+ priv->sd.internal_ops = ic_ops[priv->task_id]->internal_ops;
+ priv->sd.entity.ops = ic_ops[priv->task_id]->entity_ops;
+ priv->sd.entity.function = MEDIA_ENT_F_PROC_VIDEO_SCALER;
+ priv->sd.owner = ipu_dev->driver->owner;
+ priv->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+ priv->sd.grp_id = grp_id;
+ imx_media_grp_id_to_sd_name(priv->sd.name, sizeof(priv->sd.name),
+ priv->sd.grp_id, ipu_get_num(ipu));
+
+ ret = ic_ops[priv->task_id]->init(priv);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = v4l2_device_register_subdev(v4l2_dev, &priv->sd);
+ if (ret) {
+ ic_ops[priv->task_id]->remove(priv);
+ return ERR_PTR(ret);
+ }
+
+ return &priv->sd;
+}
+
+int imx_media_ic_unregister(struct v4l2_subdev *sd)
+{
+ struct imx_ic_priv *priv = container_of(sd, struct imx_ic_priv, sd);
+
+ v4l2_info(sd, "Removing\n");
+
+ ic_ops[priv->task_id]->remove(priv);
+
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+
+ return 0;
+}
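
For context, a rough sketch of how the three IC task subdevs registered by
imx_media_ic_register() above could be created and unwound on error. The
wrapper name register_ic_subdevs() and the group-ID table are assumptions
for illustration; the driver's internal subdev setup code (not shown in this
section) handles the real registration.

/* Sketch only: register the three IC task subdevs, unwind on failure. */
static const u32 ic_grp_ids[] = {
        IMX_MEDIA_GRP_ID_IPU_IC_PRP,
        IMX_MEDIA_GRP_ID_IPU_IC_PRPENC,
        IMX_MEDIA_GRP_ID_IPU_IC_PRPVF,
};

static int register_ic_subdevs(struct v4l2_device *v4l2_dev,
                               struct device *ipu_dev, struct ipu_soc *ipu,
                               struct v4l2_subdev **sds)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(ic_grp_ids); i++) {
                sds[i] = imx_media_ic_register(v4l2_dev, ipu_dev, ipu,
                                               ic_grp_ids[i]);
                if (IS_ERR(sds[i])) {
                        int ret = PTR_ERR(sds[i]);

                        while (i--)
                                imx_media_ic_unregister(sds[i]);
                        return ret;
                }
        }

        return 0;
}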
diff --git a/drivers/staging/media/imx/imx-ic-prp.c b/drivers/staging/media/imx/imx-ic-prp.c
new file mode 100644
index 000000000..ac5fb3320
--- /dev/null
+++ b/drivers/staging/media/imx/imx-ic-prp.c
@@ -0,0 +1,514 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * V4L2 Capture IC Preprocess Subdev for Freescale i.MX5/6 SOC
+ *
+ * This subdevice handles capture of video frames from the CSI or VDIC,
+ * which are routed directly to the Image Converter preprocess tasks,
+ * for resizing, colorspace conversion, and rotation.
+ *
+ * Copyright (c) 2012-2017 Mentor Graphics Inc.
+ */
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <media/imx.h>
+#include "imx-media.h"
+#include "imx-ic.h"
+
+/*
+ * Min/Max supported width and heights.
+ */
+#define MIN_W 32
+#define MIN_H 32
+#define MAX_W 4096
+#define MAX_H 4096
+#define W_ALIGN 4 /* multiple of 16 pixels */
+#define H_ALIGN 1 /* multiple of 2 lines */
+#define S_ALIGN 1 /* multiple of 2 */
+
+struct prp_priv {
+ struct imx_ic_priv *ic_priv;
+ struct media_pad pad[PRP_NUM_PADS];
+
+ /* lock to protect all members below */
+ struct mutex lock;
+
+ struct v4l2_subdev *src_sd;
+ struct v4l2_subdev *sink_sd_prpenc;
+ struct v4l2_subdev *sink_sd_prpvf;
+
+ /* the CSI id at link validate */
+ int csi_id;
+
+ struct v4l2_mbus_framefmt format_mbus;
+ struct v4l2_fract frame_interval;
+
+ int stream_count;
+};
+
+static inline struct prp_priv *sd_to_priv(struct v4l2_subdev *sd)
+{
+ struct imx_ic_priv *ic_priv = v4l2_get_subdevdata(sd);
+
+ return ic_priv->task_priv;
+}
+
+static int prp_start(struct prp_priv *priv)
+{
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+ bool src_is_vdic;
+
+ /* set IC to receive from CSI or VDI depending on source */
+ src_is_vdic = !!(priv->src_sd->grp_id & IMX_MEDIA_GRP_ID_IPU_VDIC);
+
+ ipu_set_ic_src_mux(ic_priv->ipu, priv->csi_id, src_is_vdic);
+
+ return 0;
+}
+
+static void prp_stop(struct prp_priv *priv)
+{
+}
+
+static struct v4l2_mbus_framefmt *
+__prp_get_fmt(struct prp_priv *priv, struct v4l2_subdev_state *sd_state,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(&ic_priv->sd, sd_state, pad);
+ else
+ return &priv->format_mbus;
+}
+
+/*
+ * V4L2 subdev operations.
+ */
+
+static int prp_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+ struct v4l2_mbus_framefmt *infmt;
+ int ret = 0;
+
+ mutex_lock(&priv->lock);
+
+ switch (code->pad) {
+ case PRP_SINK_PAD:
+ ret = imx_media_enum_ipu_formats(&code->code, code->index,
+ PIXFMT_SEL_YUV_RGB);
+ break;
+ case PRP_SRC_PAD_PRPENC:
+ case PRP_SRC_PAD_PRPVF:
+ if (code->index != 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+ infmt = __prp_get_fmt(priv, sd_state, PRP_SINK_PAD,
+ code->which);
+ code->code = infmt->code;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int prp_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+ struct v4l2_mbus_framefmt *fmt;
+ int ret = 0;
+
+ if (sdformat->pad >= PRP_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ fmt = __prp_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
+ if (!fmt) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ sdformat->format = *fmt;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int prp_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+ struct v4l2_mbus_framefmt *fmt, *infmt;
+ const struct imx_media_pixfmt *cc;
+ int ret = 0;
+ u32 code;
+
+ if (sdformat->pad >= PRP_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ if (priv->stream_count > 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ infmt = __prp_get_fmt(priv, sd_state, PRP_SINK_PAD, sdformat->which);
+
+ switch (sdformat->pad) {
+ case PRP_SINK_PAD:
+ v4l_bound_align_image(&sdformat->format.width, MIN_W, MAX_W,
+ W_ALIGN, &sdformat->format.height,
+ MIN_H, MAX_H, H_ALIGN, S_ALIGN);
+
+ cc = imx_media_find_ipu_format(sdformat->format.code,
+ PIXFMT_SEL_YUV_RGB);
+ if (!cc) {
+ imx_media_enum_ipu_formats(&code, 0,
+ PIXFMT_SEL_YUV_RGB);
+ cc = imx_media_find_ipu_format(code,
+ PIXFMT_SEL_YUV_RGB);
+ sdformat->format.code = cc->codes[0];
+ }
+
+ if (sdformat->format.field == V4L2_FIELD_ANY)
+ sdformat->format.field = V4L2_FIELD_NONE;
+ break;
+ case PRP_SRC_PAD_PRPENC:
+ case PRP_SRC_PAD_PRPVF:
+ /* Output pads mirror input pad */
+ sdformat->format = *infmt;
+ break;
+ }
+
+ imx_media_try_colorimetry(&sdformat->format, true);
+
+ fmt = __prp_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
+ *fmt = sdformat->format;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int prp_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct imx_ic_priv *ic_priv = v4l2_get_subdevdata(sd);
+ struct prp_priv *priv = ic_priv->task_priv;
+ struct v4l2_subdev *remote_sd;
+ int ret = 0;
+
+ dev_dbg(ic_priv->ipu_dev, "%s: link setup %s -> %s",
+ ic_priv->sd.name, remote->entity->name, local->entity->name);
+
+ remote_sd = media_entity_to_v4l2_subdev(remote->entity);
+
+ mutex_lock(&priv->lock);
+
+ if (local->flags & MEDIA_PAD_FL_SINK) {
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (priv->src_sd) {
+ ret = -EBUSY;
+ goto out;
+ }
+ if (priv->sink_sd_prpenc &&
+ (remote_sd->grp_id & IMX_MEDIA_GRP_ID_IPU_VDIC)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ priv->src_sd = remote_sd;
+ } else {
+ priv->src_sd = NULL;
+ }
+
+ goto out;
+ }
+
+ /* this is a source pad */
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ switch (local->index) {
+ case PRP_SRC_PAD_PRPENC:
+ if (priv->sink_sd_prpenc) {
+ ret = -EBUSY;
+ goto out;
+ }
+ if (priv->src_sd && (priv->src_sd->grp_id &
+ IMX_MEDIA_GRP_ID_IPU_VDIC)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ priv->sink_sd_prpenc = remote_sd;
+ break;
+ case PRP_SRC_PAD_PRPVF:
+ if (priv->sink_sd_prpvf) {
+ ret = -EBUSY;
+ goto out;
+ }
+ priv->sink_sd_prpvf = remote_sd;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ } else {
+ switch (local->index) {
+ case PRP_SRC_PAD_PRPENC:
+ priv->sink_sd_prpenc = NULL;
+ break;
+ case PRP_SRC_PAD_PRPVF:
+ priv->sink_sd_prpvf = NULL;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ }
+
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int prp_link_validate(struct v4l2_subdev *sd,
+ struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt)
+{
+ struct imx_ic_priv *ic_priv = v4l2_get_subdevdata(sd);
+ struct prp_priv *priv = ic_priv->task_priv;
+ struct v4l2_subdev *csi;
+ int ret;
+
+ ret = v4l2_subdev_link_validate_default(sd, link,
+ source_fmt, sink_fmt);
+ if (ret)
+ return ret;
+
+ csi = imx_media_pipeline_subdev(&ic_priv->sd.entity,
+ IMX_MEDIA_GRP_ID_IPU_CSI, true);
+ if (IS_ERR(csi))
+ csi = NULL;
+
+ mutex_lock(&priv->lock);
+
+ if (priv->src_sd->grp_id & IMX_MEDIA_GRP_ID_IPU_VDIC) {
+ /*
+ * the ->PRPENC link cannot be enabled if the source
+ * is the VDIC
+ */
+ if (priv->sink_sd_prpenc) {
+ ret = -EINVAL;
+ goto out;
+ }
+ } else {
+ /* the source is a CSI */
+ if (!csi) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (csi) {
+ switch (csi->grp_id) {
+ case IMX_MEDIA_GRP_ID_IPU_CSI0:
+ priv->csi_id = 0;
+ break;
+ case IMX_MEDIA_GRP_ID_IPU_CSI1:
+ priv->csi_id = 1;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ } else {
+ priv->csi_id = 0;
+ }
+
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int prp_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct imx_ic_priv *ic_priv = v4l2_get_subdevdata(sd);
+ struct prp_priv *priv = ic_priv->task_priv;
+ int ret = 0;
+
+ mutex_lock(&priv->lock);
+
+ if (!priv->src_sd || (!priv->sink_sd_prpenc && !priv->sink_sd_prpvf)) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ /*
+ * enable/disable streaming only if stream_count is
+ * going from 0 to 1 / 1 to 0.
+ */
+ if (priv->stream_count != !enable)
+ goto update_count;
+
+ dev_dbg(ic_priv->ipu_dev, "%s: stream %s\n", sd->name,
+ enable ? "ON" : "OFF");
+
+ if (enable)
+ ret = prp_start(priv);
+ else
+ prp_stop(priv);
+ if (ret)
+ goto out;
+
+ /* start/stop upstream */
+ ret = v4l2_subdev_call(priv->src_sd, video, s_stream, enable);
+ ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
+ if (ret) {
+ if (enable)
+ prp_stop(priv);
+ goto out;
+ }
+
+update_count:
+ priv->stream_count += enable ? 1 : -1;
+ if (priv->stream_count < 0)
+ priv->stream_count = 0;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int prp_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+
+ if (fi->pad >= PRP_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+ fi->interval = priv->frame_interval;
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int prp_s_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+
+ if (fi->pad >= PRP_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ /* No limits on valid frame intervals */
+ if (fi->interval.numerator == 0 || fi->interval.denominator == 0)
+ fi->interval = priv->frame_interval;
+ else
+ priv->frame_interval = fi->interval;
+
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int prp_registered(struct v4l2_subdev *sd)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+ u32 code;
+
+ /* init default frame interval */
+ priv->frame_interval.numerator = 1;
+ priv->frame_interval.denominator = 30;
+
+ /* set a default mbus format */
+ imx_media_enum_ipu_formats(&code, 0, PIXFMT_SEL_YUV);
+
+ return imx_media_init_mbus_fmt(&priv->format_mbus,
+ IMX_MEDIA_DEF_PIX_WIDTH,
+ IMX_MEDIA_DEF_PIX_HEIGHT, code,
+ V4L2_FIELD_NONE, NULL);
+}
+
+static const struct v4l2_subdev_pad_ops prp_pad_ops = {
+ .init_cfg = imx_media_init_cfg,
+ .enum_mbus_code = prp_enum_mbus_code,
+ .get_fmt = prp_get_fmt,
+ .set_fmt = prp_set_fmt,
+ .link_validate = prp_link_validate,
+};
+
+static const struct v4l2_subdev_video_ops prp_video_ops = {
+ .g_frame_interval = prp_g_frame_interval,
+ .s_frame_interval = prp_s_frame_interval,
+ .s_stream = prp_s_stream,
+};
+
+static const struct media_entity_operations prp_entity_ops = {
+ .link_setup = prp_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_ops prp_subdev_ops = {
+ .video = &prp_video_ops,
+ .pad = &prp_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops prp_internal_ops = {
+ .registered = prp_registered,
+};
+
+static int prp_init(struct imx_ic_priv *ic_priv)
+{
+ struct prp_priv *priv;
+ int i;
+
+ priv = devm_kzalloc(ic_priv->ipu_dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_init(&priv->lock);
+ ic_priv->task_priv = priv;
+ priv->ic_priv = ic_priv;
+
+ for (i = 0; i < PRP_NUM_PADS; i++)
+ priv->pad[i].flags = (i == PRP_SINK_PAD) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+
+ return media_entity_pads_init(&ic_priv->sd.entity, PRP_NUM_PADS,
+ priv->pad);
+}
+
+static void prp_remove(struct imx_ic_priv *ic_priv)
+{
+ struct prp_priv *priv = ic_priv->task_priv;
+
+ mutex_destroy(&priv->lock);
+}
+
+struct imx_ic_ops imx_ic_prp_ops = {
+ .subdev_ops = &prp_subdev_ops,
+ .internal_ops = &prp_internal_ops,
+ .entity_ops = &prp_entity_ops,
+ .init = prp_init,
+ .remove = prp_remove,
+};
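
The stream on/off handling in prp_s_stream() above relies on a terse test,
"priv->stream_count != !enable". A small restatement, with a hypothetical
helper name, of when the hardware is actually started or stopped; this is
purely illustrative and not driver code.

/*
 * Sketch only: prp_s_stream() skips the hardware start/stop unless
 * stream_count == !enable, i.e. the hardware is touched only on the
 * 0 -> 1 and 1 -> 0 transitions.
 */
static bool needs_hw_start_stop(int stream_count, int enable)
{
        if (enable)
                /* start only when the first user enables streaming */
                return stream_count == 0;

        /* stop only when the last user disables streaming */
        return stream_count == 1;
}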
diff --git a/drivers/staging/media/imx/imx-ic-prpencvf.c b/drivers/staging/media/imx/imx-ic-prpencvf.c
new file mode 100644
index 000000000..9b81cfbcd
--- /dev/null
+++ b/drivers/staging/media/imx/imx-ic-prpencvf.c
@@ -0,0 +1,1372 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * V4L2 Capture IC Preprocess Subdev for Freescale i.MX5/6 SOC
+ *
+ * This subdevice handles capture of video frames from the CSI or VDIC,
+ * which are routed directly to the Image Converter preprocess tasks,
+ * for resizing, colorspace conversion, and rotation.
+ *
+ * Copyright (c) 2012-2017 Mentor Graphics Inc.
+ */
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-subdev.h>
+#include <media/imx.h>
+#include "imx-media.h"
+#include "imx-ic.h"
+
+/*
+ * Min/Max supported width and heights.
+ *
+ * We allow planar output, so we have to align width at the source pad
+ * by 16 pixels to meet IDMAC alignment requirements for possible planar
+ * output.
+ *
+ * TODO: move this into pad format negotiation, if capture device
+ * has not requested a planar format, we should allow 8 pixel
+ * alignment at the source pad.
+ */
+#define MIN_W_SINK 32
+#define MIN_H_SINK 32
+#define MAX_W_SINK 4096
+#define MAX_H_SINK 4096
+#define W_ALIGN_SINK 3 /* multiple of 8 pixels */
+#define H_ALIGN_SINK 1 /* multiple of 2 lines */
+
+#define MAX_W_SRC 1024
+#define MAX_H_SRC 1024
+#define W_ALIGN_SRC 1 /* multiple of 2 pixels */
+#define H_ALIGN_SRC 1 /* multiple of 2 lines */
+
+#define S_ALIGN 1 /* multiple of 2 */
+
+struct prp_priv {
+ struct imx_ic_priv *ic_priv;
+ struct media_pad pad[PRPENCVF_NUM_PADS];
+ /* the video device at output pad */
+ struct imx_media_video_dev *vdev;
+
+ /* lock to protect all members below */
+ struct mutex lock;
+
+ /* IPU units we require */
+ struct ipu_ic *ic;
+ struct ipuv3_channel *out_ch;
+ struct ipuv3_channel *rot_in_ch;
+ struct ipuv3_channel *rot_out_ch;
+
+ /* active vb2 buffers to send to video dev sink */
+ struct imx_media_buffer *active_vb2_buf[2];
+ struct imx_media_dma_buf underrun_buf;
+
+ int ipu_buf_num; /* ipu double buffer index: 0-1 */
+
+ /* the sink for the captured frames */
+ struct media_entity *sink;
+ /* the source subdev */
+ struct v4l2_subdev *src_sd;
+
+ struct v4l2_mbus_framefmt format_mbus[PRPENCVF_NUM_PADS];
+ const struct imx_media_pixfmt *cc[PRPENCVF_NUM_PADS];
+ struct v4l2_fract frame_interval;
+
+ struct imx_media_dma_buf rot_buf[2];
+
+ /* controls */
+ struct v4l2_ctrl_handler ctrl_hdlr;
+ int rotation; /* degrees */
+ bool hflip;
+ bool vflip;
+
+ /* derived from rotation, hflip, vflip controls */
+ enum ipu_rotate_mode rot_mode;
+
+ spinlock_t irqlock; /* protect eof_irq handler */
+
+ struct timer_list eof_timeout_timer;
+ int eof_irq;
+ int nfb4eof_irq;
+
+ int stream_count;
+ u32 frame_sequence; /* frame sequence counter */
+ bool last_eof; /* waiting for last EOF at stream off */
+ bool nfb4eof; /* NFB4EOF encountered during streaming */
+ bool interweave_swap; /* swap top/bottom lines when interweaving */
+ struct completion last_eof_comp;
+};
+
+static const struct prp_channels {
+ u32 out_ch;
+ u32 rot_in_ch;
+ u32 rot_out_ch;
+} prp_channel[] = {
+ [IC_TASK_ENCODER] = {
+ .out_ch = IPUV3_CHANNEL_IC_PRP_ENC_MEM,
+ .rot_in_ch = IPUV3_CHANNEL_MEM_ROT_ENC,
+ .rot_out_ch = IPUV3_CHANNEL_ROT_ENC_MEM,
+ },
+ [IC_TASK_VIEWFINDER] = {
+ .out_ch = IPUV3_CHANNEL_IC_PRP_VF_MEM,
+ .rot_in_ch = IPUV3_CHANNEL_MEM_ROT_VF,
+ .rot_out_ch = IPUV3_CHANNEL_ROT_VF_MEM,
+ },
+};
+
+static inline struct prp_priv *sd_to_priv(struct v4l2_subdev *sd)
+{
+ struct imx_ic_priv *ic_priv = v4l2_get_subdevdata(sd);
+
+ return ic_priv->task_priv;
+}
+
+static void prp_put_ipu_resources(struct prp_priv *priv)
+{
+ if (priv->ic)
+ ipu_ic_put(priv->ic);
+ priv->ic = NULL;
+
+ if (priv->out_ch)
+ ipu_idmac_put(priv->out_ch);
+ priv->out_ch = NULL;
+
+ if (priv->rot_in_ch)
+ ipu_idmac_put(priv->rot_in_ch);
+ priv->rot_in_ch = NULL;
+
+ if (priv->rot_out_ch)
+ ipu_idmac_put(priv->rot_out_ch);
+ priv->rot_out_ch = NULL;
+}
+
+static int prp_get_ipu_resources(struct prp_priv *priv)
+{
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+ struct ipu_ic *ic;
+ struct ipuv3_channel *out_ch, *rot_in_ch, *rot_out_ch;
+ int ret, task = ic_priv->task_id;
+
+ ic = ipu_ic_get(ic_priv->ipu, task);
+ if (IS_ERR(ic)) {
+ v4l2_err(&ic_priv->sd, "failed to get IC\n");
+ ret = PTR_ERR(ic);
+ goto out;
+ }
+ priv->ic = ic;
+
+ out_ch = ipu_idmac_get(ic_priv->ipu, prp_channel[task].out_ch);
+ if (IS_ERR(out_ch)) {
+ v4l2_err(&ic_priv->sd, "could not get IDMAC channel %u\n",
+ prp_channel[task].out_ch);
+ ret = PTR_ERR(out_ch);
+ goto out;
+ }
+ priv->out_ch = out_ch;
+
+ rot_in_ch = ipu_idmac_get(ic_priv->ipu, prp_channel[task].rot_in_ch);
+ if (IS_ERR(rot_in_ch)) {
+ v4l2_err(&ic_priv->sd, "could not get IDMAC channel %u\n",
+ prp_channel[task].rot_in_ch);
+ ret = PTR_ERR(rot_in_ch);
+ goto out;
+ }
+ priv->rot_in_ch = rot_in_ch;
+
+ rot_out_ch = ipu_idmac_get(ic_priv->ipu, prp_channel[task].rot_out_ch);
+ if (IS_ERR(rot_out_ch)) {
+ v4l2_err(&ic_priv->sd, "could not get IDMAC channel %u\n",
+ prp_channel[task].rot_out_ch);
+ ret = PTR_ERR(rot_out_ch);
+ goto out;
+ }
+ priv->rot_out_ch = rot_out_ch;
+
+ return 0;
+out:
+ prp_put_ipu_resources(priv);
+ return ret;
+}
+
+static void prp_vb2_buf_done(struct prp_priv *priv, struct ipuv3_channel *ch)
+{
+ struct imx_media_video_dev *vdev = priv->vdev;
+ struct imx_media_buffer *done, *next;
+ struct vb2_buffer *vb;
+ dma_addr_t phys;
+
+ done = priv->active_vb2_buf[priv->ipu_buf_num];
+ if (done) {
+ done->vbuf.field = vdev->fmt.field;
+ done->vbuf.sequence = priv->frame_sequence;
+ vb = &done->vbuf.vb2_buf;
+ vb->timestamp = ktime_get_ns();
+ vb2_buffer_done(vb, priv->nfb4eof ?
+ VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+ }
+
+ priv->frame_sequence++;
+ priv->nfb4eof = false;
+
+ /* get next queued buffer */
+ next = imx_media_capture_device_next_buf(vdev);
+ if (next) {
+ phys = vb2_dma_contig_plane_dma_addr(&next->vbuf.vb2_buf, 0);
+ priv->active_vb2_buf[priv->ipu_buf_num] = next;
+ } else {
+ phys = priv->underrun_buf.phys;
+ priv->active_vb2_buf[priv->ipu_buf_num] = NULL;
+ }
+
+ if (ipu_idmac_buffer_is_ready(ch, priv->ipu_buf_num))
+ ipu_idmac_clear_buffer(ch, priv->ipu_buf_num);
+
+ if (priv->interweave_swap && ch == priv->out_ch)
+ phys += vdev->fmt.bytesperline;
+
+ ipu_cpmem_set_buffer(ch, priv->ipu_buf_num, phys);
+}
+
+static irqreturn_t prp_eof_interrupt(int irq, void *dev_id)
+{
+ struct prp_priv *priv = dev_id;
+ struct ipuv3_channel *channel;
+
+ spin_lock(&priv->irqlock);
+
+ if (priv->last_eof) {
+ complete(&priv->last_eof_comp);
+ priv->last_eof = false;
+ goto unlock;
+ }
+
+ channel = (ipu_rot_mode_is_irt(priv->rot_mode)) ?
+ priv->rot_out_ch : priv->out_ch;
+
+ prp_vb2_buf_done(priv, channel);
+
+ /* select new IPU buf */
+ ipu_idmac_select_buffer(channel, priv->ipu_buf_num);
+ /* toggle IPU double-buffer index */
+ priv->ipu_buf_num ^= 1;
+
+ /* bump the EOF timeout timer */
+ mod_timer(&priv->eof_timeout_timer,
+ jiffies + msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
+
+unlock:
+ spin_unlock(&priv->irqlock);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t prp_nfb4eof_interrupt(int irq, void *dev_id)
+{
+ struct prp_priv *priv = dev_id;
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+
+ spin_lock(&priv->irqlock);
+
+ /*
+ * this is not an unrecoverable error, just mark
+ * the next captured frame with vb2 error flag.
+ */
+ priv->nfb4eof = true;
+
+ v4l2_err(&ic_priv->sd, "NFB4EOF\n");
+
+ spin_unlock(&priv->irqlock);
+
+ return IRQ_HANDLED;
+}
+

+/*
+ * EOF timeout timer function. This is an unrecoverable condition
+ * without a stream restart.
+ */
+static void prp_eof_timeout(struct timer_list *t)
+{
+ struct prp_priv *priv = from_timer(priv, t, eof_timeout_timer);
+ struct imx_media_video_dev *vdev = priv->vdev;
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+
+ v4l2_err(&ic_priv->sd, "EOF timeout\n");
+
+ /* signal a fatal error to capture device */
+ imx_media_capture_device_error(vdev);
+}
+
+static void prp_setup_vb2_buf(struct prp_priv *priv, dma_addr_t *phys)
+{
+ struct imx_media_video_dev *vdev = priv->vdev;
+ struct imx_media_buffer *buf;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ buf = imx_media_capture_device_next_buf(vdev);
+ if (buf) {
+ priv->active_vb2_buf[i] = buf;
+ phys[i] = vb2_dma_contig_plane_dma_addr(
+ &buf->vbuf.vb2_buf, 0);
+ } else {
+ priv->active_vb2_buf[i] = NULL;
+ phys[i] = priv->underrun_buf.phys;
+ }
+ }
+}
+
+static void prp_unsetup_vb2_buf(struct prp_priv *priv,
+ enum vb2_buffer_state return_status)
+{
+ struct imx_media_buffer *buf;
+ int i;
+
+ /* return any remaining active frames with return_status */
+ for (i = 0; i < 2; i++) {
+ buf = priv->active_vb2_buf[i];
+ if (buf) {
+ struct vb2_buffer *vb = &buf->vbuf.vb2_buf;
+
+ vb->timestamp = ktime_get_ns();
+ vb2_buffer_done(vb, return_status);
+ }
+ }
+}
+
+static int prp_setup_channel(struct prp_priv *priv,
+ struct ipuv3_channel *channel,
+ enum ipu_rotate_mode rot_mode,
+ dma_addr_t addr0, dma_addr_t addr1,
+ bool rot_swap_width_height)
+{
+ struct imx_media_video_dev *vdev = priv->vdev;
+ const struct imx_media_pixfmt *outcc;
+ struct v4l2_mbus_framefmt *outfmt;
+ unsigned int burst_size;
+ struct ipu_image image;
+ bool interweave;
+ int ret;
+
+ outfmt = &priv->format_mbus[PRPENCVF_SRC_PAD];
+ outcc = vdev->cc;
+
+ ipu_cpmem_zero(channel);
+
+ memset(&image, 0, sizeof(image));
+ image.pix = vdev->fmt;
+ image.rect = vdev->compose;
+
+ /*
+ * If the field type at capture interface is interlaced, and
+ * the output IDMAC pad is sequential, enable interweave at
+ * the IDMAC output channel.
+ */
+ interweave = V4L2_FIELD_IS_INTERLACED(image.pix.field) &&
+ V4L2_FIELD_IS_SEQUENTIAL(outfmt->field);
+ priv->interweave_swap = interweave &&
+ image.pix.field == V4L2_FIELD_INTERLACED_BT;
+
+ if (rot_swap_width_height) {
+ swap(image.pix.width, image.pix.height);
+ swap(image.rect.width, image.rect.height);
+ /* recalc stride using swapped width */
+ image.pix.bytesperline = outcc->planar ?
+ image.pix.width :
+ (image.pix.width * outcc->bpp) >> 3;
+ }
+
+ if (priv->interweave_swap && channel == priv->out_ch) {
+ /* start interweave scan at 1st top line (2nd line) */
+ image.rect.top = 1;
+ }
+
+ image.phys0 = addr0;
+ image.phys1 = addr1;
+
+ /*
+ * Skip writing U and V components to odd rows in the output
+ * channels for planar 4:2:0 (but not when enabling IDMAC
+ * interweaving, they are incompatible).
+ */
+ if ((channel == priv->out_ch && !interweave) ||
+ channel == priv->rot_out_ch) {
+ switch (image.pix.pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ case V4L2_PIX_FMT_NV12:
+ ipu_cpmem_skip_odd_chroma_rows(channel);
+ break;
+ }
+ }
+
+ ret = ipu_cpmem_set_image(channel, &image);
+ if (ret)
+ return ret;
+
+ if (channel == priv->rot_in_ch ||
+ channel == priv->rot_out_ch) {
+ burst_size = 8;
+ ipu_cpmem_set_block_mode(channel);
+ } else {
+ burst_size = (image.pix.width & 0xf) ? 8 : 16;
+ }
+
+ ipu_cpmem_set_burstsize(channel, burst_size);
+
+ if (rot_mode)
+ ipu_cpmem_set_rotation(channel, rot_mode);
+
+ if (interweave && channel == priv->out_ch)
+ ipu_cpmem_interlaced_scan(channel,
+ priv->interweave_swap ?
+ -image.pix.bytesperline :
+ image.pix.bytesperline,
+ image.pix.pixelformat);
+
+ ret = ipu_ic_task_idma_init(priv->ic, channel,
+ image.pix.width, image.pix.height,
+ burst_size, rot_mode);
+ if (ret)
+ return ret;
+
+ ipu_cpmem_set_axi_id(channel, 1);
+
+ ipu_idmac_set_double_buffer(channel, true);
+
+ return 0;
+}
+
+static int prp_setup_rotation(struct prp_priv *priv)
+{
+ struct imx_media_video_dev *vdev = priv->vdev;
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+ const struct imx_media_pixfmt *outcc, *incc;
+ struct v4l2_mbus_framefmt *infmt;
+ struct v4l2_pix_format *outfmt;
+ struct ipu_ic_csc csc;
+ dma_addr_t phys[2];
+ int ret;
+
+ infmt = &priv->format_mbus[PRPENCVF_SINK_PAD];
+ outfmt = &vdev->fmt;
+ incc = priv->cc[PRPENCVF_SINK_PAD];
+ outcc = vdev->cc;
+
+ ret = ipu_ic_calc_csc(&csc,
+ infmt->ycbcr_enc, infmt->quantization,
+ incc->cs,
+ outfmt->ycbcr_enc, outfmt->quantization,
+ outcc->cs);
+ if (ret) {
+ v4l2_err(&ic_priv->sd, "ipu_ic_calc_csc failed, %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = imx_media_alloc_dma_buf(ic_priv->ipu_dev, &priv->rot_buf[0],
+ outfmt->sizeimage);
+ if (ret) {
+ v4l2_err(&ic_priv->sd, "failed to alloc rot_buf[0], %d\n", ret);
+ return ret;
+ }
+ ret = imx_media_alloc_dma_buf(ic_priv->ipu_dev, &priv->rot_buf[1],
+ outfmt->sizeimage);
+ if (ret) {
+ v4l2_err(&ic_priv->sd, "failed to alloc rot_buf[1], %d\n", ret);
+ goto free_rot0;
+ }
+
+ ret = ipu_ic_task_init(priv->ic, &csc,
+ infmt->width, infmt->height,
+ outfmt->height, outfmt->width);
+ if (ret) {
+ v4l2_err(&ic_priv->sd, "ipu_ic_task_init failed, %d\n", ret);
+ goto free_rot1;
+ }
+
+ /* init the IC-PRP-->MEM IDMAC channel */
+ ret = prp_setup_channel(priv, priv->out_ch, IPU_ROTATE_NONE,
+ priv->rot_buf[0].phys, priv->rot_buf[1].phys,
+ true);
+ if (ret) {
+ v4l2_err(&ic_priv->sd,
+ "prp_setup_channel(out_ch) failed, %d\n", ret);
+ goto free_rot1;
+ }
+
+ /* init the MEM-->IC-PRP ROT IDMAC channel */
+ ret = prp_setup_channel(priv, priv->rot_in_ch, priv->rot_mode,
+ priv->rot_buf[0].phys, priv->rot_buf[1].phys,
+ true);
+ if (ret) {
+ v4l2_err(&ic_priv->sd,
+ "prp_setup_channel(rot_in_ch) failed, %d\n", ret);
+ goto free_rot1;
+ }
+
+ prp_setup_vb2_buf(priv, phys);
+
+ /* init the destination IC-PRP ROT-->MEM IDMAC channel */
+ ret = prp_setup_channel(priv, priv->rot_out_ch, IPU_ROTATE_NONE,
+ phys[0], phys[1],
+ false);
+ if (ret) {
+ v4l2_err(&ic_priv->sd,
+ "prp_setup_channel(rot_out_ch) failed, %d\n", ret);
+ goto unsetup_vb2;
+ }
+
+ /* now link IC-PRP-->MEM to MEM-->IC-PRP ROT */
+ ipu_idmac_link(priv->out_ch, priv->rot_in_ch);
+
+ /* enable the IC */
+ ipu_ic_enable(priv->ic);
+
+ /* set buffers ready */
+ ipu_idmac_select_buffer(priv->out_ch, 0);
+ ipu_idmac_select_buffer(priv->out_ch, 1);
+ ipu_idmac_select_buffer(priv->rot_out_ch, 0);
+ ipu_idmac_select_buffer(priv->rot_out_ch, 1);
+
+ /* enable the channels */
+ ipu_idmac_enable_channel(priv->out_ch);
+ ipu_idmac_enable_channel(priv->rot_in_ch);
+ ipu_idmac_enable_channel(priv->rot_out_ch);
+
+ /* and finally enable the IC PRP task */
+ ipu_ic_task_enable(priv->ic);
+
+ return 0;
+
+unsetup_vb2:
+ prp_unsetup_vb2_buf(priv, VB2_BUF_STATE_QUEUED);
+free_rot1:
+ imx_media_free_dma_buf(ic_priv->ipu_dev, &priv->rot_buf[1]);
+free_rot0:
+ imx_media_free_dma_buf(ic_priv->ipu_dev, &priv->rot_buf[0]);
+ return ret;
+}
+
+static void prp_unsetup_rotation(struct prp_priv *priv)
+{
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+
+ ipu_ic_task_disable(priv->ic);
+
+ ipu_idmac_disable_channel(priv->out_ch);
+ ipu_idmac_disable_channel(priv->rot_in_ch);
+ ipu_idmac_disable_channel(priv->rot_out_ch);
+
+ ipu_idmac_unlink(priv->out_ch, priv->rot_in_ch);
+
+ ipu_ic_disable(priv->ic);
+
+ imx_media_free_dma_buf(ic_priv->ipu_dev, &priv->rot_buf[0]);
+ imx_media_free_dma_buf(ic_priv->ipu_dev, &priv->rot_buf[1]);
+}
+
+static int prp_setup_norotation(struct prp_priv *priv)
+{
+ struct imx_media_video_dev *vdev = priv->vdev;
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+ const struct imx_media_pixfmt *outcc, *incc;
+ struct v4l2_mbus_framefmt *infmt;
+ struct v4l2_pix_format *outfmt;
+ struct ipu_ic_csc csc;
+ dma_addr_t phys[2];
+ int ret;
+
+ infmt = &priv->format_mbus[PRPENCVF_SINK_PAD];
+ outfmt = &vdev->fmt;
+ incc = priv->cc[PRPENCVF_SINK_PAD];
+ outcc = vdev->cc;
+
+ ret = ipu_ic_calc_csc(&csc,
+ infmt->ycbcr_enc, infmt->quantization,
+ incc->cs,
+ outfmt->ycbcr_enc, outfmt->quantization,
+ outcc->cs);
+ if (ret) {
+ v4l2_err(&ic_priv->sd, "ipu_ic_calc_csc failed, %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = ipu_ic_task_init(priv->ic, &csc,
+ infmt->width, infmt->height,
+ outfmt->width, outfmt->height);
+ if (ret) {
+ v4l2_err(&ic_priv->sd, "ipu_ic_task_init failed, %d\n", ret);
+ return ret;
+ }
+
+ prp_setup_vb2_buf(priv, phys);
+
+ /* init the IC PRP-->MEM IDMAC channel */
+ ret = prp_setup_channel(priv, priv->out_ch, priv->rot_mode,
+ phys[0], phys[1], false);
+ if (ret) {
+ v4l2_err(&ic_priv->sd,
+ "prp_setup_channel(out_ch) failed, %d\n", ret);
+ goto unsetup_vb2;
+ }
+
+ ipu_cpmem_dump(priv->out_ch);
+ ipu_ic_dump(priv->ic);
+ ipu_dump(ic_priv->ipu);
+
+ ipu_ic_enable(priv->ic);
+
+ /* set buffers ready */
+ ipu_idmac_select_buffer(priv->out_ch, 0);
+ ipu_idmac_select_buffer(priv->out_ch, 1);
+
+ /* enable the channels */
+ ipu_idmac_enable_channel(priv->out_ch);
+
+ /* enable the IC task */
+ ipu_ic_task_enable(priv->ic);
+
+ return 0;
+
+unsetup_vb2:
+ prp_unsetup_vb2_buf(priv, VB2_BUF_STATE_QUEUED);
+ return ret;
+}
+
+static void prp_unsetup_norotation(struct prp_priv *priv)
+{
+ ipu_ic_task_disable(priv->ic);
+ ipu_idmac_disable_channel(priv->out_ch);
+ ipu_ic_disable(priv->ic);
+}
+
+static void prp_unsetup(struct prp_priv *priv,
+ enum vb2_buffer_state state)
+{
+ if (ipu_rot_mode_is_irt(priv->rot_mode))
+ prp_unsetup_rotation(priv);
+ else
+ prp_unsetup_norotation(priv);
+
+ prp_unsetup_vb2_buf(priv, state);
+}
+
+static int prp_start(struct prp_priv *priv)
+{
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+ struct imx_media_video_dev *vdev = priv->vdev;
+ int ret;
+
+ ret = prp_get_ipu_resources(priv);
+ if (ret)
+ return ret;
+
+ ret = imx_media_alloc_dma_buf(ic_priv->ipu_dev, &priv->underrun_buf,
+ vdev->fmt.sizeimage);
+ if (ret)
+ goto out_put_ipu;
+
+ priv->ipu_buf_num = 0;
+
+ /* init EOF completion waitq */
+ init_completion(&priv->last_eof_comp);
+ priv->frame_sequence = 0;
+ priv->last_eof = false;
+ priv->nfb4eof = false;
+
+ if (ipu_rot_mode_is_irt(priv->rot_mode))
+ ret = prp_setup_rotation(priv);
+ else
+ ret = prp_setup_norotation(priv);
+ if (ret)
+ goto out_free_underrun;
+
+ priv->nfb4eof_irq = ipu_idmac_channel_irq(ic_priv->ipu,
+ priv->out_ch,
+ IPU_IRQ_NFB4EOF);
+ ret = devm_request_irq(ic_priv->ipu_dev, priv->nfb4eof_irq,
+ prp_nfb4eof_interrupt, 0,
+ "imx-ic-prp-nfb4eof", priv);
+ if (ret) {
+ v4l2_err(&ic_priv->sd,
+ "Error registering NFB4EOF irq: %d\n", ret);
+ goto out_unsetup;
+ }
+
+ if (ipu_rot_mode_is_irt(priv->rot_mode))
+ priv->eof_irq = ipu_idmac_channel_irq(
+ ic_priv->ipu, priv->rot_out_ch, IPU_IRQ_EOF);
+ else
+ priv->eof_irq = ipu_idmac_channel_irq(
+ ic_priv->ipu, priv->out_ch, IPU_IRQ_EOF);
+
+ ret = devm_request_irq(ic_priv->ipu_dev, priv->eof_irq,
+ prp_eof_interrupt, 0,
+ "imx-ic-prp-eof", priv);
+ if (ret) {
+ v4l2_err(&ic_priv->sd,
+ "Error registering eof irq: %d\n", ret);
+ goto out_free_nfb4eof_irq;
+ }
+
+ /* start upstream */
+ ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 1);
+ ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
+ if (ret) {
+ v4l2_err(&ic_priv->sd,
+ "upstream stream on failed: %d\n", ret);
+ goto out_free_eof_irq;
+ }
+
+ /* start the EOF timeout timer */
+ mod_timer(&priv->eof_timeout_timer,
+ jiffies + msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
+
+ return 0;
+
+out_free_eof_irq:
+ devm_free_irq(ic_priv->ipu_dev, priv->eof_irq, priv);
+out_free_nfb4eof_irq:
+ devm_free_irq(ic_priv->ipu_dev, priv->nfb4eof_irq, priv);
+out_unsetup:
+ prp_unsetup(priv, VB2_BUF_STATE_QUEUED);
+out_free_underrun:
+ imx_media_free_dma_buf(ic_priv->ipu_dev, &priv->underrun_buf);
+out_put_ipu:
+ prp_put_ipu_resources(priv);
+ return ret;
+}
+
+static void prp_stop(struct prp_priv *priv)
+{
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+ unsigned long flags;
+ int ret;
+
+ /* mark next EOF interrupt as the last before stream off */
+ spin_lock_irqsave(&priv->irqlock, flags);
+ priv->last_eof = true;
+ spin_unlock_irqrestore(&priv->irqlock, flags);
+
+ /*
+ * and then wait for interrupt handler to mark completion.
+ */
+ ret = wait_for_completion_timeout(
+ &priv->last_eof_comp,
+ msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
+ if (ret == 0)
+ v4l2_warn(&ic_priv->sd, "wait last EOF timeout\n");
+
+ /* stop upstream */
+ ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
+ if (ret && ret != -ENOIOCTLCMD)
+ v4l2_warn(&ic_priv->sd,
+ "upstream stream off failed: %d\n", ret);
+
+ devm_free_irq(ic_priv->ipu_dev, priv->eof_irq, priv);
+ devm_free_irq(ic_priv->ipu_dev, priv->nfb4eof_irq, priv);
+
+ prp_unsetup(priv, VB2_BUF_STATE_ERROR);
+
+ imx_media_free_dma_buf(ic_priv->ipu_dev, &priv->underrun_buf);
+
+ /* cancel the EOF timeout timer */
+ del_timer_sync(&priv->eof_timeout_timer);
+
+ prp_put_ipu_resources(priv);
+}
+
+static struct v4l2_mbus_framefmt *
+__prp_get_fmt(struct prp_priv *priv, struct v4l2_subdev_state *sd_state,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(&ic_priv->sd, sd_state, pad);
+ else
+ return &priv->format_mbus[pad];
+}
+
+/*
+ * Applies IC resizer and IDMAC alignment restrictions to output
+ * rectangle given the input rectangle, and depending on given
+ * rotation mode.
+ *
+ * The IC resizer cannot downsize more than 4:1. Note also that
+ * for 90 or 270 rotation, _both_ output width and height must
+ * be aligned by W_ALIGN_SRC, because the intermediate rotation
+ * buffer swaps output width/height, and the final output buffer
+ * does not.
+ *
+ * Returns true if the output rectangle was modified.
+ */
+static bool prp_bound_align_output(struct v4l2_mbus_framefmt *outfmt,
+ struct v4l2_mbus_framefmt *infmt,
+ enum ipu_rotate_mode rot_mode)
+{
+ u32 orig_width = outfmt->width;
+ u32 orig_height = outfmt->height;
+
+ if (ipu_rot_mode_is_irt(rot_mode))
+ v4l_bound_align_image(&outfmt->width,
+ infmt->height / 4, MAX_H_SRC,
+ W_ALIGN_SRC,
+ &outfmt->height,
+ infmt->width / 4, MAX_W_SRC,
+ W_ALIGN_SRC, S_ALIGN);
+ else
+ v4l_bound_align_image(&outfmt->width,
+ infmt->width / 4, MAX_W_SRC,
+ W_ALIGN_SRC,
+ &outfmt->height,
+ infmt->height / 4, MAX_H_SRC,
+ H_ALIGN_SRC, S_ALIGN);
+
+ return outfmt->width != orig_width || outfmt->height != orig_height;
+}
+
+/*
+ * V4L2 subdev operations.
+ */
+
+static int prp_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->pad >= PRPENCVF_NUM_PADS)
+ return -EINVAL;
+
+ return imx_media_enum_ipu_formats(&code->code, code->index,
+ PIXFMT_SEL_YUV_RGB);
+}
+
+static int prp_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+ struct v4l2_mbus_framefmt *fmt;
+ int ret = 0;
+
+ if (sdformat->pad >= PRPENCVF_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ fmt = __prp_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
+ if (!fmt) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ sdformat->format = *fmt;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static void prp_try_fmt(struct prp_priv *priv,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *sdformat,
+ const struct imx_media_pixfmt **cc)
+{
+ struct v4l2_mbus_framefmt *infmt;
+
+ *cc = imx_media_find_ipu_format(sdformat->format.code,
+ PIXFMT_SEL_YUV_RGB);
+ if (!*cc) {
+ u32 code;
+
+ imx_media_enum_ipu_formats(&code, 0, PIXFMT_SEL_YUV_RGB);
+ *cc = imx_media_find_ipu_format(code, PIXFMT_SEL_YUV_RGB);
+
+ sdformat->format.code = (*cc)->codes[0];
+ }
+
+ infmt = __prp_get_fmt(priv, sd_state, PRPENCVF_SINK_PAD,
+ sdformat->which);
+
+ if (sdformat->pad == PRPENCVF_SRC_PAD) {
+ sdformat->format.field = infmt->field;
+
+ prp_bound_align_output(&sdformat->format, infmt,
+ priv->rot_mode);
+
+ /* propagate colorimetry from sink */
+ sdformat->format.colorspace = infmt->colorspace;
+ sdformat->format.xfer_func = infmt->xfer_func;
+ } else {
+ v4l_bound_align_image(&sdformat->format.width,
+ MIN_W_SINK, MAX_W_SINK, W_ALIGN_SINK,
+ &sdformat->format.height,
+ MIN_H_SINK, MAX_H_SINK, H_ALIGN_SINK,
+ S_ALIGN);
+
+ if (sdformat->format.field == V4L2_FIELD_ANY)
+ sdformat->format.field = V4L2_FIELD_NONE;
+ }
+
+ imx_media_try_colorimetry(&sdformat->format, true);
+}
+
+static int prp_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+ const struct imx_media_pixfmt *cc;
+ struct v4l2_mbus_framefmt *fmt;
+ int ret = 0;
+
+ if (sdformat->pad >= PRPENCVF_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ if (priv->stream_count > 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ prp_try_fmt(priv, sd_state, sdformat, &cc);
+
+ fmt = __prp_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
+ *fmt = sdformat->format;
+
+ /* propagate a default format to source pad */
+ if (sdformat->pad == PRPENCVF_SINK_PAD) {
+ const struct imx_media_pixfmt *outcc;
+ struct v4l2_mbus_framefmt *outfmt;
+ struct v4l2_subdev_format format;
+
+ format.pad = PRPENCVF_SRC_PAD;
+ format.which = sdformat->which;
+ format.format = sdformat->format;
+ prp_try_fmt(priv, sd_state, &format, &outcc);
+
+ outfmt = __prp_get_fmt(priv, sd_state, PRPENCVF_SRC_PAD,
+ sdformat->which);
+ *outfmt = format.format;
+ if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ priv->cc[PRPENCVF_SRC_PAD] = outcc;
+ }
+
+ if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ priv->cc[sdformat->pad] = cc;
+
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int prp_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+ struct v4l2_subdev_format format = {};
+ const struct imx_media_pixfmt *cc;
+ int ret = 0;
+
+ if (fse->pad >= PRPENCVF_NUM_PADS || fse->index != 0)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ format.pad = fse->pad;
+ format.which = fse->which;
+ format.format.code = fse->code;
+ format.format.width = 1;
+ format.format.height = 1;
+ prp_try_fmt(priv, sd_state, &format, &cc);
+ fse->min_width = format.format.width;
+ fse->min_height = format.format.height;
+
+ if (format.format.code != fse->code) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ format.format.code = fse->code;
+ format.format.width = -1;
+ format.format.height = -1;
+ prp_try_fmt(priv, sd_state, &format, &cc);
+ fse->max_width = format.format.width;
+ fse->max_height = format.format.height;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int prp_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct imx_ic_priv *ic_priv = v4l2_get_subdevdata(sd);
+ struct prp_priv *priv = ic_priv->task_priv;
+ struct v4l2_subdev *remote_sd;
+ int ret = 0;
+
+ dev_dbg(ic_priv->ipu_dev, "%s: link setup %s -> %s",
+ ic_priv->sd.name, remote->entity->name, local->entity->name);
+
+ mutex_lock(&priv->lock);
+
+ if (local->flags & MEDIA_PAD_FL_SINK) {
+ if (!is_media_entity_v4l2_subdev(remote->entity)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ remote_sd = media_entity_to_v4l2_subdev(remote->entity);
+
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (priv->src_sd) {
+ ret = -EBUSY;
+ goto out;
+ }
+ priv->src_sd = remote_sd;
+ } else {
+ priv->src_sd = NULL;
+ }
+
+ goto out;
+ }
+
+ /* this is the source pad */
+
+ /* the remote must be the device node */
+ if (!is_media_entity_v4l2_video_device(remote->entity)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (priv->sink) {
+ ret = -EBUSY;
+ goto out;
+ }
+ } else {
+ priv->sink = NULL;
+ goto out;
+ }
+
+ priv->sink = remote->entity;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int prp_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct prp_priv *priv = container_of(ctrl->handler,
+ struct prp_priv, ctrl_hdlr);
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+ enum ipu_rotate_mode rot_mode;
+ int rotation, ret = 0;
+ bool hflip, vflip;
+
+ mutex_lock(&priv->lock);
+
+ rotation = priv->rotation;
+ hflip = priv->hflip;
+ vflip = priv->vflip;
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ hflip = (ctrl->val == 1);
+ break;
+ case V4L2_CID_VFLIP:
+ vflip = (ctrl->val == 1);
+ break;
+ case V4L2_CID_ROTATE:
+ rotation = ctrl->val;
+ break;
+ default:
+ v4l2_err(&ic_priv->sd, "Invalid control\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = ipu_degrees_to_rot_mode(&rot_mode, rotation, hflip, vflip);
+ if (ret)
+ goto out;
+
+ if (rot_mode != priv->rot_mode) {
+ struct v4l2_mbus_framefmt outfmt, infmt;
+
+ /* can't change rotation mid-streaming */
+ if (priv->stream_count > 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ outfmt = priv->format_mbus[PRPENCVF_SRC_PAD];
+ infmt = priv->format_mbus[PRPENCVF_SINK_PAD];
+
+ if (prp_bound_align_output(&outfmt, &infmt, rot_mode)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ priv->rot_mode = rot_mode;
+ priv->rotation = rotation;
+ priv->hflip = hflip;
+ priv->vflip = vflip;
+ }
+
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops prp_ctrl_ops = {
+ .s_ctrl = prp_s_ctrl,
+};
+
+static int prp_init_controls(struct prp_priv *priv)
+{
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+ struct v4l2_ctrl_handler *hdlr = &priv->ctrl_hdlr;
+ int ret;
+
+ v4l2_ctrl_handler_init(hdlr, 3);
+
+ v4l2_ctrl_new_std(hdlr, &prp_ctrl_ops, V4L2_CID_HFLIP,
+ 0, 1, 1, 0);
+ v4l2_ctrl_new_std(hdlr, &prp_ctrl_ops, V4L2_CID_VFLIP,
+ 0, 1, 1, 0);
+ v4l2_ctrl_new_std(hdlr, &prp_ctrl_ops, V4L2_CID_ROTATE,
+ 0, 270, 90, 0);
+
+ ic_priv->sd.ctrl_handler = hdlr;
+
+ if (hdlr->error) {
+ ret = hdlr->error;
+ goto out_free;
+ }
+
+ v4l2_ctrl_handler_setup(hdlr);
+ return 0;
+
+out_free:
+ v4l2_ctrl_handler_free(hdlr);
+ return ret;
+}
+
+static int prp_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct imx_ic_priv *ic_priv = v4l2_get_subdevdata(sd);
+ struct prp_priv *priv = ic_priv->task_priv;
+ int ret = 0;
+
+ mutex_lock(&priv->lock);
+
+ if (!priv->src_sd || !priv->sink) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ /*
+ * enable/disable streaming only if stream_count is
+ * going from 0 to 1 / 1 to 0.
+ */
+ if (priv->stream_count != !enable)
+ goto update_count;
+
+ dev_dbg(ic_priv->ipu_dev, "%s: stream %s\n", sd->name,
+ enable ? "ON" : "OFF");
+
+ if (enable)
+ ret = prp_start(priv);
+ else
+ prp_stop(priv);
+ if (ret)
+ goto out;
+
+update_count:
+ priv->stream_count += enable ? 1 : -1;
+ if (priv->stream_count < 0)
+ priv->stream_count = 0;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int prp_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+
+ if (fi->pad >= PRPENCVF_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+ fi->interval = priv->frame_interval;
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int prp_s_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+
+ if (fi->pad >= PRPENCVF_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ /* No limits on valid frame intervals */
+ if (fi->interval.numerator == 0 || fi->interval.denominator == 0)
+ fi->interval = priv->frame_interval;
+ else
+ priv->frame_interval = fi->interval;
+
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int prp_registered(struct v4l2_subdev *sd)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+ int i, ret;
+ u32 code;
+
+ /* set a default mbus format */
+ imx_media_enum_ipu_formats(&code, 0, PIXFMT_SEL_YUV);
+
+ for (i = 0; i < PRPENCVF_NUM_PADS; i++) {
+ ret = imx_media_init_mbus_fmt(&priv->format_mbus[i],
+ IMX_MEDIA_DEF_PIX_WIDTH,
+ IMX_MEDIA_DEF_PIX_HEIGHT, code,
+ V4L2_FIELD_NONE, &priv->cc[i]);
+ if (ret)
+ return ret;
+ }
+
+ /* init default frame interval */
+ priv->frame_interval.numerator = 1;
+ priv->frame_interval.denominator = 30;
+
+ priv->vdev = imx_media_capture_device_init(ic_priv->ipu_dev,
+ &ic_priv->sd,
+ PRPENCVF_SRC_PAD, true);
+ if (IS_ERR(priv->vdev))
+ return PTR_ERR(priv->vdev);
+
+ ret = imx_media_capture_device_register(priv->vdev, 0);
+ if (ret)
+ goto remove_vdev;
+
+ ret = prp_init_controls(priv);
+ if (ret)
+ goto unreg_vdev;
+
+ return 0;
+
+unreg_vdev:
+ imx_media_capture_device_unregister(priv->vdev);
+remove_vdev:
+ imx_media_capture_device_remove(priv->vdev);
+ return ret;
+}
+
+static void prp_unregistered(struct v4l2_subdev *sd)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+
+ imx_media_capture_device_unregister(priv->vdev);
+ imx_media_capture_device_remove(priv->vdev);
+
+ v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
+}
+
+static const struct v4l2_subdev_pad_ops prp_pad_ops = {
+ .init_cfg = imx_media_init_cfg,
+ .enum_mbus_code = prp_enum_mbus_code,
+ .enum_frame_size = prp_enum_frame_size,
+ .get_fmt = prp_get_fmt,
+ .set_fmt = prp_set_fmt,
+};
+
+static const struct v4l2_subdev_video_ops prp_video_ops = {
+ .g_frame_interval = prp_g_frame_interval,
+ .s_frame_interval = prp_s_frame_interval,
+ .s_stream = prp_s_stream,
+};
+
+static const struct media_entity_operations prp_entity_ops = {
+ .link_setup = prp_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_ops prp_subdev_ops = {
+ .video = &prp_video_ops,
+ .pad = &prp_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops prp_internal_ops = {
+ .registered = prp_registered,
+ .unregistered = prp_unregistered,
+};
+
+static int prp_init(struct imx_ic_priv *ic_priv)
+{
+ struct prp_priv *priv;
+ int i, ret;
+
+ priv = devm_kzalloc(ic_priv->ipu_dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ic_priv->task_priv = priv;
+ priv->ic_priv = ic_priv;
+
+ spin_lock_init(&priv->irqlock);
+ timer_setup(&priv->eof_timeout_timer, prp_eof_timeout, 0);
+
+ mutex_init(&priv->lock);
+
+ for (i = 0; i < PRPENCVF_NUM_PADS; i++) {
+ priv->pad[i].flags = (i == PRPENCVF_SINK_PAD) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+ }
+
+ ret = media_entity_pads_init(&ic_priv->sd.entity, PRPENCVF_NUM_PADS,
+ priv->pad);
+ if (ret)
+ mutex_destroy(&priv->lock);
+
+ return ret;
+}
+
+static void prp_remove(struct imx_ic_priv *ic_priv)
+{
+ struct prp_priv *priv = ic_priv->task_priv;
+
+ mutex_destroy(&priv->lock);
+}
+
+struct imx_ic_ops imx_ic_prpencvf_ops = {
+ .subdev_ops = &prp_subdev_ops,
+ .internal_ops = &prp_internal_ops,
+ .entity_ops = &prp_entity_ops,
+ .init = prp_init,
+ .remove = prp_remove,
+};
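
The rotation, hflip and vflip controls registered by prp_init_controls() are
exposed on the prpencvf subdev device node. A hedged userspace sketch of
setting 90-degree rotation before streaming starts; the /dev/v4l-subdev node
path is an assumption and would normally be discovered through the media
controller.

/* Sketch only: set V4L2_CID_ROTATE on the prpencvf subdev from userspace. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
        struct v4l2_control ctrl = {
                .id = V4L2_CID_ROTATE,
                .value = 90,    /* prp_s_ctrl() returns -EBUSY if streaming */
        };
        int fd = open("/dev/v4l-subdev4", O_RDWR);      /* path is an assumption */

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (ioctl(fd, VIDIOC_S_CTRL, &ctrl) < 0) {
                perror("VIDIOC_S_CTRL");
                return 1;
        }
        return 0;
}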
diff --git a/drivers/staging/media/imx/imx-ic.h b/drivers/staging/media/imx/imx-ic.h
new file mode 100644
index 000000000..587c191c3
--- /dev/null
+++ b/drivers/staging/media/imx/imx-ic.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * V4L2 Image Converter Subdev for Freescale i.MX5/6 SOC
+ *
+ * Copyright (c) 2016 Mentor Graphics Inc.
+ */
+#ifndef _IMX_IC_H
+#define _IMX_IC_H
+
+#include <media/v4l2-subdev.h>
+
+struct imx_ic_priv {
+ struct device *ipu_dev;
+ struct ipu_soc *ipu;
+ struct v4l2_subdev sd;
+ int task_id;
+ void *task_priv;
+};
+
+struct imx_ic_ops {
+ const struct v4l2_subdev_ops *subdev_ops;
+ const struct v4l2_subdev_internal_ops *internal_ops;
+ const struct media_entity_operations *entity_ops;
+
+ int (*init)(struct imx_ic_priv *ic_priv);
+ void (*remove)(struct imx_ic_priv *ic_priv);
+};
+
+extern struct imx_ic_ops imx_ic_prp_ops;
+extern struct imx_ic_ops imx_ic_prpencvf_ops;
+
+#endif
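
The imx_ic_ops table above is the entire plug-in interface between
imx-ic-common.c and the task implementations. A skeleton of what a
hypothetical additional task would provide; all foo_* names are invented
for illustration only.

/* Sketch only: the shape of a hypothetical extra IC task implementation. */
static int foo_init(struct imx_ic_priv *ic_priv)
{
        /* allocate task_priv, init media pads, etc. */
        return 0;
}

static void foo_remove(struct imx_ic_priv *ic_priv)
{
        /* undo whatever foo_init() allocated */
}

static const struct v4l2_subdev_ops foo_subdev_ops = { };
static const struct v4l2_subdev_internal_ops foo_internal_ops = { };
static const struct media_entity_operations foo_entity_ops = { };

struct imx_ic_ops imx_ic_foo_ops = {
        .subdev_ops   = &foo_subdev_ops,
        .internal_ops = &foo_internal_ops,
        .entity_ops   = &foo_entity_ops,
        .init         = foo_init,
        .remove       = foo_remove,
};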
diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c
new file mode 100644
index 000000000..5cc67786b
--- /dev/null
+++ b/drivers/staging/media/imx/imx-media-capture.c
@@ -0,0 +1,1051 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Video Capture Subdev for Freescale i.MX5/6 SOC
+ *
+ * Copyright (c) 2012-2016 Mentor Graphics Inc.
+ */
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-dma-contig.h>
+#include <video/imx-ipu-v3.h>
+#include <media/imx.h>
+#include "imx-media.h"
+
+#define IMX_CAPTURE_NAME "imx-capture"
+
+struct capture_priv {
+ struct imx_media_dev *md; /* Media device */
+ struct device *dev; /* Physical device */
+
+ struct imx_media_video_dev vdev; /* Video device */
+ struct media_pad vdev_pad; /* Video device pad */
+
+ struct v4l2_subdev *src_sd; /* Source subdev */
+ int src_sd_pad; /* Source subdev pad */
+
+ struct mutex mutex; /* Protect vdev operations */
+
+ struct vb2_queue q; /* The videobuf2 queue */
+ struct list_head ready_q; /* List of queued buffers */
+ spinlock_t q_lock; /* Protect ready_q */
+
+ struct v4l2_ctrl_handler ctrl_hdlr; /* Controls inherited from subdevs */
+
+ bool legacy_api; /* Use the legacy (pre-MC) API */
+};
+
+#define to_capture_priv(v) container_of(v, struct capture_priv, vdev)
+
+/* In bytes, per queue */
+#define VID_MEM_LIMIT SZ_64M
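+
+/*
+ * For example, a 1920x1080 YUYV capture needs about 4 MiB per frame, so
+ * capture_queue_setup() caps the queue at roughly 16 buffers.
+ */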
+
+/* -----------------------------------------------------------------------------
+ * MC-Centric Video IOCTLs
+ */
+
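+/*
+ * Find a pixel format for the given media bus code and fourcc: when the
+ * IPU can convert the code (YUV or RGB), restrict the fourcc to the same
+ * colorspace family and fall back to the first format of that family;
+ * otherwise return the raw format matching the bus code itself.
+ */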
+static const struct imx_media_pixfmt *capture_find_format(u32 code, u32 fourcc)
+{
+ const struct imx_media_pixfmt *cc;
+
+ cc = imx_media_find_ipu_format(code, PIXFMT_SEL_YUV_RGB);
+ if (cc) {
+ enum imx_pixfmt_sel fmt_sel = cc->cs == IPUV3_COLORSPACE_YUV
+ ? PIXFMT_SEL_YUV : PIXFMT_SEL_RGB;
+
+ cc = imx_media_find_pixel_format(fourcc, fmt_sel);
+ if (!cc) {
+ imx_media_enum_pixel_formats(&fourcc, 0, fmt_sel, 0);
+ cc = imx_media_find_pixel_format(fourcc, fmt_sel);
+ }
+
+ return cc;
+ }
+
+ return imx_media_find_mbus_format(code, PIXFMT_SEL_ANY);
+}
+
+static int capture_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct capture_priv *priv = video_drvdata(file);
+
+ strscpy(cap->driver, IMX_CAPTURE_NAME, sizeof(cap->driver));
+ strscpy(cap->card, IMX_CAPTURE_NAME, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", dev_name(priv->dev));
+
+ return 0;
+}
+
+static int capture_enum_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ return imx_media_enum_pixel_formats(&f->pixelformat, f->index,
+ PIXFMT_SEL_ANY, f->mbus_code);
+}
+
+static int capture_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ const struct imx_media_pixfmt *cc;
+
+ if (fsize->index > 0)
+ return -EINVAL;
+
+ cc = imx_media_find_pixel_format(fsize->pixel_format, PIXFMT_SEL_ANY);
+ if (!cc)
+ return -EINVAL;
+
+ /*
+ * TODO: The constraints are hardware-specific and may depend on the
+ * pixel format. This should come from the driver using
+ * imx_media_capture.
+ */
+ fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
+ fsize->stepwise.min_width = 1;
+ fsize->stepwise.max_width = 65535;
+ fsize->stepwise.min_height = 1;
+ fsize->stepwise.max_height = 65535;
+ fsize->stepwise.step_width = 1;
+ fsize->stepwise.step_height = 1;
+
+ return 0;
+}
+
+static int capture_g_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct capture_priv *priv = video_drvdata(file);
+
+ f->fmt.pix = priv->vdev.fmt;
+
+ return 0;
+}
+
+static const struct imx_media_pixfmt *
+__capture_try_fmt(struct v4l2_pix_format *pixfmt, struct v4l2_rect *compose)
+{
+ struct v4l2_mbus_framefmt fmt_src;
+ const struct imx_media_pixfmt *cc;
+
+ /*
+ * Find the pixel format, default to the first supported format if not
+ * found.
+ */
+ cc = imx_media_find_pixel_format(pixfmt->pixelformat, PIXFMT_SEL_ANY);
+ if (!cc) {
+ imx_media_enum_pixel_formats(&pixfmt->pixelformat, 0,
+ PIXFMT_SEL_ANY, 0);
+ cc = imx_media_find_pixel_format(pixfmt->pixelformat,
+ PIXFMT_SEL_ANY);
+ }
+
+ /* Allow IDMAC interweave but enforce field order from source. */
+ if (V4L2_FIELD_IS_INTERLACED(pixfmt->field)) {
+ switch (pixfmt->field) {
+ case V4L2_FIELD_SEQ_TB:
+ pixfmt->field = V4L2_FIELD_INTERLACED_TB;
+ break;
+ case V4L2_FIELD_SEQ_BT:
+ pixfmt->field = V4L2_FIELD_INTERLACED_BT;
+ break;
+ default:
+ break;
+ }
+ }
+
+ v4l2_fill_mbus_format(&fmt_src, pixfmt, 0);
+ imx_media_mbus_fmt_to_pix_fmt(pixfmt, &fmt_src, cc);
+
+ if (compose) {
+ compose->width = fmt_src.width;
+ compose->height = fmt_src.height;
+ }
+
+ return cc;
+}
+
+static int capture_try_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ __capture_try_fmt(&f->fmt.pix, NULL);
+ return 0;
+}
+
+static int capture_s_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct capture_priv *priv = video_drvdata(file);
+ const struct imx_media_pixfmt *cc;
+
+ if (vb2_is_busy(&priv->q)) {
+ dev_err(priv->dev, "%s queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ cc = __capture_try_fmt(&f->fmt.pix, &priv->vdev.compose);
+
+ priv->vdev.cc = cc;
+ priv->vdev.fmt = f->fmt.pix;
+
+ return 0;
+}
+
+static int capture_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct capture_priv *priv = video_drvdata(file);
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ /* The compose rectangle is fixed to the source format. */
+ s->r = priv->vdev.compose;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_PADDED:
+ /*
+ * The hardware writes with a configurable but fixed DMA burst
+ * size. If the source format width is not burst size aligned,
+ * the written frame contains padding to the right.
+ */
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = priv->vdev.fmt.width;
+ s->r.height = priv->vdev.fmt.height;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int capture_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_IMX_FRAME_INTERVAL_ERROR:
+ return v4l2_event_subscribe(fh, sub, 0, NULL);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct v4l2_ioctl_ops capture_ioctl_ops = {
+ .vidioc_querycap = capture_querycap,
+
+ .vidioc_enum_fmt_vid_cap = capture_enum_fmt_vid_cap,
+ .vidioc_enum_framesizes = capture_enum_framesizes,
+
+ .vidioc_g_fmt_vid_cap = capture_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = capture_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = capture_s_fmt_vid_cap,
+
+ .vidioc_g_selection = capture_g_selection,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_subscribe_event = capture_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/* -----------------------------------------------------------------------------
+ * Legacy Video IOCTLs
+ */
+
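+/*
+ * In the legacy (pre media-controller) API the video node queries and
+ * propagates standards, frame sizes/intervals and formats directly from
+ * the connected source subdev rather than relying on userspace to
+ * configure the pipeline.
+ */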
+static int capture_legacy_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct capture_priv *priv = video_drvdata(file);
+ const struct imx_media_pixfmt *cc;
+ struct v4l2_subdev_frame_size_enum fse = {
+ .index = fsize->index,
+ .pad = priv->src_sd_pad,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ cc = imx_media_find_pixel_format(fsize->pixel_format, PIXFMT_SEL_ANY);
+ if (!cc)
+ return -EINVAL;
+
+ fse.code = cc->codes ? cc->codes[0] : 0;
+
+ ret = v4l2_subdev_call(priv->src_sd, pad, enum_frame_size, NULL, &fse);
+ if (ret)
+ return ret;
+
+ if (fse.min_width == fse.max_width &&
+ fse.min_height == fse.max_height) {
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete.width = fse.min_width;
+ fsize->discrete.height = fse.min_height;
+ } else {
+ fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
+ fsize->stepwise.min_width = fse.min_width;
+ fsize->stepwise.max_width = fse.max_width;
+ fsize->stepwise.min_height = fse.min_height;
+ fsize->stepwise.max_height = fse.max_height;
+ fsize->stepwise.step_width = 1;
+ fsize->stepwise.step_height = 1;
+ }
+
+ return 0;
+}
+
+static int capture_legacy_enum_frameintervals(struct file *file, void *fh,
+ struct v4l2_frmivalenum *fival)
+{
+ struct capture_priv *priv = video_drvdata(file);
+ const struct imx_media_pixfmt *cc;
+ struct v4l2_subdev_frame_interval_enum fie = {
+ .index = fival->index,
+ .pad = priv->src_sd_pad,
+ .width = fival->width,
+ .height = fival->height,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ cc = imx_media_find_pixel_format(fival->pixel_format, PIXFMT_SEL_ANY);
+ if (!cc)
+ return -EINVAL;
+
+ fie.code = cc->codes ? cc->codes[0] : 0;
+
+ ret = v4l2_subdev_call(priv->src_sd, pad, enum_frame_interval,
+ NULL, &fie);
+ if (ret)
+ return ret;
+
+ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ fival->discrete = fie.interval;
+
+ return 0;
+}
+
+static int capture_legacy_enum_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ struct capture_priv *priv = video_drvdata(file);
+ const struct imx_media_pixfmt *cc_src;
+ struct v4l2_subdev_format fmt_src;
+ u32 fourcc;
+ int ret;
+
+ fmt_src.pad = priv->src_sd_pad;
+ fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(priv->src_sd, pad, get_fmt, NULL, &fmt_src);
+ if (ret) {
+ dev_err(priv->dev, "failed to get src_sd format\n");
+ return ret;
+ }
+
+ cc_src = imx_media_find_ipu_format(fmt_src.format.code,
+ PIXFMT_SEL_YUV_RGB);
+ if (cc_src) {
+ enum imx_pixfmt_sel fmt_sel =
+ (cc_src->cs == IPUV3_COLORSPACE_YUV) ?
+ PIXFMT_SEL_YUV : PIXFMT_SEL_RGB;
+
+ ret = imx_media_enum_pixel_formats(&fourcc, f->index, fmt_sel,
+ 0);
+ if (ret)
+ return ret;
+ } else {
+ cc_src = imx_media_find_mbus_format(fmt_src.format.code,
+ PIXFMT_SEL_ANY);
+ if (WARN_ON(!cc_src))
+ return -EINVAL;
+
+ if (f->index != 0)
+ return -EINVAL;
+ fourcc = cc_src->fourcc;
+ }
+
+ f->pixelformat = fourcc;
+
+ return 0;
+}
+
+static const struct imx_media_pixfmt *
+__capture_legacy_try_fmt(struct capture_priv *priv,
+ struct v4l2_subdev_format *fmt_src,
+ struct v4l2_pix_format *pixfmt)
+{
+ const struct imx_media_pixfmt *cc;
+
+ cc = capture_find_format(fmt_src->format.code, pixfmt->pixelformat);
+ if (WARN_ON(!cc))
+ return NULL;
+
+ /* allow IDMAC interweave but enforce field order from source */
+ if (V4L2_FIELD_IS_INTERLACED(pixfmt->field)) {
+ switch (fmt_src->format.field) {
+ case V4L2_FIELD_SEQ_TB:
+ fmt_src->format.field = V4L2_FIELD_INTERLACED_TB;
+ break;
+ case V4L2_FIELD_SEQ_BT:
+ fmt_src->format.field = V4L2_FIELD_INTERLACED_BT;
+ break;
+ default:
+ break;
+ }
+ }
+
+ imx_media_mbus_fmt_to_pix_fmt(pixfmt, &fmt_src->format, cc);
+
+ return cc;
+}
+
+static int capture_legacy_try_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct capture_priv *priv = video_drvdata(file);
+ struct v4l2_subdev_format fmt_src;
+ int ret;
+
+ fmt_src.pad = priv->src_sd_pad;
+ fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(priv->src_sd, pad, get_fmt, NULL, &fmt_src);
+ if (ret)
+ return ret;
+
+ if (!__capture_legacy_try_fmt(priv, &fmt_src, &f->fmt.pix))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int capture_legacy_s_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct capture_priv *priv = video_drvdata(file);
+ struct v4l2_subdev_format fmt_src;
+ const struct imx_media_pixfmt *cc;
+ int ret;
+
+ if (vb2_is_busy(&priv->q)) {
+ dev_err(priv->dev, "%s queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ fmt_src.pad = priv->src_sd_pad;
+ fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(priv->src_sd, pad, get_fmt, NULL, &fmt_src);
+ if (ret)
+ return ret;
+
+ cc = __capture_legacy_try_fmt(priv, &fmt_src, &f->fmt.pix);
+ if (!cc)
+ return -EINVAL;
+
+ priv->vdev.cc = cc;
+ priv->vdev.fmt = f->fmt.pix;
+ priv->vdev.compose.width = fmt_src.format.width;
+ priv->vdev.compose.height = fmt_src.format.height;
+
+ return 0;
+}
+
+static int capture_legacy_querystd(struct file *file, void *fh,
+ v4l2_std_id *std)
+{
+ struct capture_priv *priv = video_drvdata(file);
+
+ return v4l2_subdev_call(priv->src_sd, video, querystd, std);
+}
+
+static int capture_legacy_g_std(struct file *file, void *fh, v4l2_std_id *std)
+{
+ struct capture_priv *priv = video_drvdata(file);
+
+ return v4l2_subdev_call(priv->src_sd, video, g_std, std);
+}
+
+static int capture_legacy_s_std(struct file *file, void *fh, v4l2_std_id std)
+{
+ struct capture_priv *priv = video_drvdata(file);
+
+ if (vb2_is_busy(&priv->q))
+ return -EBUSY;
+
+ return v4l2_subdev_call(priv->src_sd, video, s_std, std);
+}
+
+static int capture_legacy_g_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *a)
+{
+ struct capture_priv *priv = video_drvdata(file);
+ struct v4l2_subdev_frame_interval fi = {
+ .pad = priv->src_sd_pad,
+ };
+ int ret;
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ ret = v4l2_subdev_call(priv->src_sd, video, g_frame_interval, &fi);
+ if (ret < 0)
+ return ret;
+
+ a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ a->parm.capture.timeperframe = fi.interval;
+
+ return 0;
+}
+
+static int capture_legacy_s_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *a)
+{
+ struct capture_priv *priv = video_drvdata(file);
+ struct v4l2_subdev_frame_interval fi = {
+ .pad = priv->src_sd_pad,
+ };
+ int ret;
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ fi.interval = a->parm.capture.timeperframe;
+ ret = v4l2_subdev_call(priv->src_sd, video, s_frame_interval, &fi);
+ if (ret < 0)
+ return ret;
+
+ a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ a->parm.capture.timeperframe = fi.interval;
+
+ return 0;
+}
+
+static int capture_legacy_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_IMX_FRAME_INTERVAL_ERROR:
+ return v4l2_event_subscribe(fh, sub, 0, NULL);
+ case V4L2_EVENT_SOURCE_CHANGE:
+ return v4l2_src_change_event_subscribe(fh, sub);
+ case V4L2_EVENT_CTRL:
+ return v4l2_ctrl_subscribe_event(fh, sub);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct v4l2_ioctl_ops capture_legacy_ioctl_ops = {
+ .vidioc_querycap = capture_querycap,
+
+ .vidioc_enum_framesizes = capture_legacy_enum_framesizes,
+ .vidioc_enum_frameintervals = capture_legacy_enum_frameintervals,
+
+ .vidioc_enum_fmt_vid_cap = capture_legacy_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = capture_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = capture_legacy_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = capture_legacy_s_fmt_vid_cap,
+
+ .vidioc_querystd = capture_legacy_querystd,
+ .vidioc_g_std = capture_legacy_g_std,
+ .vidioc_s_std = capture_legacy_s_std,
+
+ .vidioc_g_selection = capture_g_selection,
+
+ .vidioc_g_parm = capture_legacy_g_parm,
+ .vidioc_s_parm = capture_legacy_s_parm,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_subscribe_event = capture_legacy_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/* -----------------------------------------------------------------------------
+ * Queue Operations
+ */
+
+static int capture_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers,
+ unsigned int *nplanes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct capture_priv *priv = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format *pix = &priv->vdev.fmt;
+ unsigned int count = *nbuffers;
+
+ if (vq->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
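+ /*
+ * A non-zero *nplanes means this is a VIDIOC_CREATE_BUFS request:
+ * the count is in addition to the buffers already allocated, and
+ * the supplied plane size must be large enough for the format.
+ */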
+ if (*nplanes) {
+ if (*nplanes != 1 || sizes[0] < pix->sizeimage)
+ return -EINVAL;
+ count += vq->num_buffers;
+ }
+
+ count = min_t(__u32, VID_MEM_LIMIT / pix->sizeimage, count);
+
+ if (*nplanes)
+ *nbuffers = (count < vq->num_buffers) ? 0 :
+ count - vq->num_buffers;
+ else
+ *nbuffers = count;
+
+ *nplanes = 1;
+ sizes[0] = pix->sizeimage;
+
+ return 0;
+}
+
+static int capture_buf_init(struct vb2_buffer *vb)
+{
+ struct imx_media_buffer *buf = to_imx_media_vb(vb);
+
+ INIT_LIST_HEAD(&buf->list);
+
+ return 0;
+}
+
+static int capture_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct capture_priv *priv = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format *pix = &priv->vdev.fmt;
+
+ if (vb2_plane_size(vb, 0) < pix->sizeimage) {
+ dev_err(priv->dev,
+ "data will not fit into plane (%lu < %lu)\n",
+ vb2_plane_size(vb, 0), (unsigned long)pix->sizeimage);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, pix->sizeimage);
+
+ return 0;
+}
+
+static void capture_buf_queue(struct vb2_buffer *vb)
+{
+ struct capture_priv *priv = vb2_get_drv_priv(vb->vb2_queue);
+ struct imx_media_buffer *buf = to_imx_media_vb(vb);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->q_lock, flags);
+
+ list_add_tail(&buf->list, &priv->ready_q);
+
+ spin_unlock_irqrestore(&priv->q_lock, flags);
+}
+
+static int capture_validate_fmt(struct capture_priv *priv)
+{
+ struct v4l2_subdev_format fmt_src;
+ const struct imx_media_pixfmt *cc;
+ int ret;
+
+ /* Retrieve the media bus format on the source subdev. */
+ fmt_src.pad = priv->src_sd_pad;
+ fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(priv->src_sd, pad, get_fmt, NULL, &fmt_src);
+ if (ret)
+ return ret;
+
+ /*
+ * Verify that the media bus size matches the size set on the video
+ * node. It is sufficient to check the compose rectangle size without
+ * checking the rounded size from vdev.fmt, as the rounded size is
+ * derived directly from the compose rectangle size, and will thus
+ * always match if the compose rectangle matches.
+ */
+ if (priv->vdev.compose.width != fmt_src.format.width ||
+ priv->vdev.compose.height != fmt_src.format.height)
+ return -EPIPE;
+
+ /*
+ * Verify that the media bus code is compatible with the pixel format
+ * set on the video node.
+ */
+ cc = capture_find_format(fmt_src.format.code, 0);
+ if (!cc || priv->vdev.cc->cs != cc->cs)
+ return -EPIPE;
+
+ return 0;
+}
+
+static int capture_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct capture_priv *priv = vb2_get_drv_priv(vq);
+ struct imx_media_buffer *buf, *tmp;
+ unsigned long flags;
+ int ret;
+
+ ret = capture_validate_fmt(priv);
+ if (ret) {
+ dev_err(priv->dev, "capture format not valid\n");
+ goto return_bufs;
+ }
+
+ ret = imx_media_pipeline_set_stream(priv->md, &priv->src_sd->entity,
+ true);
+ if (ret) {
+ dev_err(priv->dev, "pipeline start failed with %d\n", ret);
+ goto return_bufs;
+ }
+
+ return 0;
+
+return_bufs:
+ spin_lock_irqsave(&priv->q_lock, flags);
+ list_for_each_entry_safe(buf, tmp, &priv->ready_q, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vbuf.vb2_buf, VB2_BUF_STATE_QUEUED);
+ }
+ spin_unlock_irqrestore(&priv->q_lock, flags);
+ return ret;
+}
+
+static void capture_stop_streaming(struct vb2_queue *vq)
+{
+ struct capture_priv *priv = vb2_get_drv_priv(vq);
+ struct imx_media_buffer *frame;
+ struct imx_media_buffer *tmp;
+ unsigned long flags;
+ int ret;
+
+ ret = imx_media_pipeline_set_stream(priv->md, &priv->src_sd->entity,
+ false);
+ if (ret)
+ dev_warn(priv->dev, "pipeline stop failed with %d\n", ret);
+
+ /* release all active buffers */
+ spin_lock_irqsave(&priv->q_lock, flags);
+ list_for_each_entry_safe(frame, tmp, &priv->ready_q, list) {
+ list_del(&frame->list);
+ vb2_buffer_done(&frame->vbuf.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+ spin_unlock_irqrestore(&priv->q_lock, flags);
+}
+
+static const struct vb2_ops capture_qops = {
+ .queue_setup = capture_queue_setup,
+ .buf_init = capture_buf_init,
+ .buf_prepare = capture_buf_prepare,
+ .buf_queue = capture_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = capture_start_streaming,
+ .stop_streaming = capture_stop_streaming,
+};
+
+/* -----------------------------------------------------------------------------
+ * File Operations
+ */
+
+static int capture_open(struct file *file)
+{
+ struct capture_priv *priv = video_drvdata(file);
+ struct video_device *vfd = priv->vdev.vfd;
+ int ret;
+
+ if (mutex_lock_interruptible(&priv->mutex))
+ return -ERESTARTSYS;
+
+ ret = v4l2_fh_open(file);
+ if (ret) {
+ dev_err(priv->dev, "v4l2_fh_open failed\n");
+ goto out;
+ }
+
+ ret = v4l2_pipeline_pm_get(&vfd->entity);
+ if (ret)
+ v4l2_fh_release(file);
+
+out:
+ mutex_unlock(&priv->mutex);
+ return ret;
+}
+
+static int capture_release(struct file *file)
+{
+ struct capture_priv *priv = video_drvdata(file);
+ struct video_device *vfd = priv->vdev.vfd;
+ struct vb2_queue *vq = &priv->q;
+
+ mutex_lock(&priv->mutex);
+
+ if (file->private_data == vq->owner) {
+ vb2_queue_release(vq);
+ vq->owner = NULL;
+ }
+
+ v4l2_pipeline_pm_put(&vfd->entity);
+
+ v4l2_fh_release(file);
+ mutex_unlock(&priv->mutex);
+ return 0;
+}
+
+static const struct v4l2_file_operations capture_fops = {
+ .owner = THIS_MODULE,
+ .open = capture_open,
+ .release = capture_release,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+};
+
+/* -----------------------------------------------------------------------------
+ * Public API
+ */
+
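+/*
+ * Pull the next queued buffer off the ready list for DMA, as done by the
+ * CSI driver in csi_vb2_buf_done(); returns NULL when the list is empty,
+ * in which case the caller falls back to an internal underrun buffer.
+ */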
+struct imx_media_buffer *
+imx_media_capture_device_next_buf(struct imx_media_video_dev *vdev)
+{
+ struct capture_priv *priv = to_capture_priv(vdev);
+ struct imx_media_buffer *buf = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->q_lock, flags);
+
+ /* get next queued buffer */
+ if (!list_empty(&priv->ready_q)) {
+ buf = list_entry(priv->ready_q.next, struct imx_media_buffer,
+ list);
+ list_del(&buf->list);
+ }
+
+ spin_unlock_irqrestore(&priv->q_lock, flags);
+
+ return buf;
+}
+EXPORT_SYMBOL_GPL(imx_media_capture_device_next_buf);
+
+void imx_media_capture_device_error(struct imx_media_video_dev *vdev)
+{
+ struct capture_priv *priv = to_capture_priv(vdev);
+ struct vb2_queue *vq = &priv->q;
+ unsigned long flags;
+
+ if (!vb2_is_streaming(vq))
+ return;
+
+ spin_lock_irqsave(&priv->q_lock, flags);
+ vb2_queue_error(vq);
+ spin_unlock_irqrestore(&priv->q_lock, flags);
+}
+EXPORT_SYMBOL_GPL(imx_media_capture_device_error);
+
+static int capture_init_format(struct capture_priv *priv)
+{
+ struct v4l2_subdev_format fmt_src = {
+ .pad = priv->src_sd_pad,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ struct imx_media_video_dev *vdev = &priv->vdev;
+ int ret;
+
+ if (priv->legacy_api) {
+ ret = v4l2_subdev_call(priv->src_sd, pad, get_fmt, NULL,
+ &fmt_src);
+ if (ret) {
+ dev_err(priv->dev, "failed to get source format\n");
+ return ret;
+ }
+ } else {
+ fmt_src.format.code = MEDIA_BUS_FMT_UYVY8_2X8;
+ fmt_src.format.width = IMX_MEDIA_DEF_PIX_WIDTH;
+ fmt_src.format.height = IMX_MEDIA_DEF_PIX_HEIGHT;
+ }
+
+ imx_media_mbus_fmt_to_pix_fmt(&vdev->fmt, &fmt_src.format, NULL);
+ vdev->compose.width = fmt_src.format.width;
+ vdev->compose.height = fmt_src.format.height;
+
+ vdev->cc = imx_media_find_pixel_format(vdev->fmt.pixelformat,
+ PIXFMT_SEL_ANY);
+
+ return 0;
+}
+
+int imx_media_capture_device_register(struct imx_media_video_dev *vdev,
+ u32 link_flags)
+{
+ struct capture_priv *priv = to_capture_priv(vdev);
+ struct v4l2_subdev *sd = priv->src_sd;
+ struct v4l2_device *v4l2_dev = sd->v4l2_dev;
+ struct video_device *vfd = vdev->vfd;
+ int ret;
+
+ /* get media device */
+ priv->md = container_of(v4l2_dev->mdev, struct imx_media_dev, md);
+
+ vfd->v4l2_dev = v4l2_dev;
+
+ /* Initialize the default format and compose rectangle. */
+ ret = capture_init_format(priv);
+ if (ret < 0)
+ return ret;
+
+ /* Register the video device. */
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
+ if (ret) {
+ dev_err(priv->dev, "Failed to register video device\n");
+ return ret;
+ }
+
+ dev_info(priv->dev, "Registered %s as /dev/%s\n", vfd->name,
+ video_device_node_name(vfd));
+
+ /* Create the link from the src_sd devnode pad to device node. */
+ if (link_flags & MEDIA_LNK_FL_IMMUTABLE)
+ link_flags |= MEDIA_LNK_FL_ENABLED;
+ ret = media_create_pad_link(&sd->entity, priv->src_sd_pad,
+ &vfd->entity, 0, link_flags);
+ if (ret) {
+ dev_err(priv->dev, "failed to create link to device node\n");
+ video_unregister_device(vfd);
+ return ret;
+ }
+
+ /* Add vdev to the video devices list. */
+ imx_media_add_video_device(priv->md, vdev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(imx_media_capture_device_register);
+
+void imx_media_capture_device_unregister(struct imx_media_video_dev *vdev)
+{
+ struct capture_priv *priv = to_capture_priv(vdev);
+ struct video_device *vfd = priv->vdev.vfd;
+
+ media_entity_cleanup(&vfd->entity);
+ video_unregister_device(vfd);
+}
+EXPORT_SYMBOL_GPL(imx_media_capture_device_unregister);
+
+struct imx_media_video_dev *
+imx_media_capture_device_init(struct device *dev, struct v4l2_subdev *src_sd,
+ int pad, bool legacy_api)
+{
+ struct capture_priv *priv;
+ struct video_device *vfd;
+ struct vb2_queue *vq;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ priv->src_sd = src_sd;
+ priv->src_sd_pad = pad;
+ priv->dev = dev;
+ priv->legacy_api = legacy_api;
+
+ mutex_init(&priv->mutex);
+ INIT_LIST_HEAD(&priv->ready_q);
+ spin_lock_init(&priv->q_lock);
+
+ /* Allocate and initialize the video device. */
+ vfd = video_device_alloc();
+ if (!vfd)
+ return ERR_PTR(-ENOMEM);
+
+ vfd->fops = &capture_fops;
+ vfd->ioctl_ops = legacy_api ? &capture_legacy_ioctl_ops
+ : &capture_ioctl_ops;
+ vfd->minor = -1;
+ vfd->release = video_device_release;
+ vfd->vfl_dir = VFL_DIR_RX;
+ vfd->tvnorms = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM;
+ vfd->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING
+ | (!legacy_api ? V4L2_CAP_IO_MC : 0);
+ vfd->lock = &priv->mutex;
+ vfd->queue = &priv->q;
+
+ snprintf(vfd->name, sizeof(vfd->name), "%s capture", src_sd->name);
+
+ video_set_drvdata(vfd, priv);
+ priv->vdev.vfd = vfd;
+ INIT_LIST_HEAD(&priv->vdev.list);
+
+ /* Initialize the video device pad. */
+ priv->vdev_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vfd->entity, 1, &priv->vdev_pad);
+ if (ret) {
+ video_device_release(vfd);
+ return ERR_PTR(ret);
+ }
+
+ /* Initialize the vb2 queue. */
+ vq = &priv->q;
+ vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ vq->drv_priv = priv;
+ vq->buf_struct_size = sizeof(struct imx_media_buffer);
+ vq->ops = &capture_qops;
+ vq->mem_ops = &vb2_dma_contig_memops;
+ vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ vq->lock = &priv->mutex;
+ vq->min_buffers_needed = 2;
+ vq->dev = priv->dev;
+
+ ret = vb2_queue_init(vq);
+ if (ret) {
+ dev_err(priv->dev, "vb2_queue_init failed\n");
+ video_device_release(vfd);
+ return ERR_PTR(ret);
+ }
+
+ if (legacy_api) {
+ /* Initialize the control handler. */
+ v4l2_ctrl_handler_init(&priv->ctrl_hdlr, 0);
+ vfd->ctrl_handler = &priv->ctrl_hdlr;
+ }
+
+ return &priv->vdev;
+}
+EXPORT_SYMBOL_GPL(imx_media_capture_device_init);
+
+void imx_media_capture_device_remove(struct imx_media_video_dev *vdev)
+{
+ struct capture_priv *priv = to_capture_priv(vdev);
+
+ v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
+}
+EXPORT_SYMBOL_GPL(imx_media_capture_device_remove);
+
+MODULE_DESCRIPTION("i.MX5/6 v4l2 video capture interface driver");
+MODULE_AUTHOR("Steve Longerbeam <steve_longerbeam@mentor.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/imx/imx-media-csc-scaler.c b/drivers/staging/media/imx/imx-media-csc-scaler.c
new file mode 100644
index 000000000..1fd39a2fc
--- /dev/null
+++ b/drivers/staging/media/imx/imx-media-csc-scaler.c
@@ -0,0 +1,924 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * i.MX IPUv3 IC PP mem2mem CSC/Scaler driver
+ *
+ * Copyright (C) 2011 Pengutronix, Sascha Hauer
+ * Copyright (C) 2018 Pengutronix, Philipp Zabel
+ */
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <video/imx-ipu-v3.h>
+#include <video/imx-ipu-image-convert.h>
+
+#include <media/media-device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "imx-media.h"
+
+#define fh_to_ctx(__fh) container_of(__fh, struct ipu_csc_scaler_ctx, fh)
+
+#define IMX_CSC_SCALER_NAME "imx-csc-scaler"
+
+enum {
+ V4L2_M2M_SRC = 0,
+ V4L2_M2M_DST = 1,
+};
+
+struct ipu_csc_scaler_priv {
+ struct imx_media_video_dev vdev;
+
+ struct v4l2_m2m_dev *m2m_dev;
+ struct device *dev;
+
+ struct imx_media_dev *md;
+
+ struct mutex mutex; /* mem2mem device mutex */
+};
+
+#define vdev_to_priv(v) container_of(v, struct ipu_csc_scaler_priv, vdev)
+
+/* Per-queue, driver-specific private data */
+struct ipu_csc_scaler_q_data {
+ struct v4l2_pix_format cur_fmt;
+ struct v4l2_rect rect;
+};
+
+struct ipu_csc_scaler_ctx {
+ struct ipu_csc_scaler_priv *priv;
+
+ struct v4l2_fh fh;
+ struct ipu_csc_scaler_q_data q_data[2];
+ struct ipu_image_convert_ctx *icc;
+
+ struct v4l2_ctrl_handler ctrl_hdlr;
+ int rotate;
+ bool hflip;
+ bool vflip;
+ enum ipu_rotate_mode rot_mode;
+ unsigned int sequence;
+};
+
+static struct ipu_csc_scaler_q_data *get_q_data(struct ipu_csc_scaler_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return &ctx->q_data[V4L2_M2M_SRC];
+ else
+ return &ctx->q_data[V4L2_M2M_DST];
+}
+
+/*
+ * mem2mem callbacks
+ */
+
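+/*
+ * Job flow: device_run() takes the next source and destination buffers,
+ * allocates an ipu_image_convert_run and queues it to the IPU; the
+ * ipu_ic_pp_complete() callback then returns both buffers and finishes
+ * the mem2mem job.
+ */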
+static void job_abort(void *_ctx)
+{
+ struct ipu_csc_scaler_ctx *ctx = _ctx;
+
+ if (ctx->icc)
+ ipu_image_convert_abort(ctx->icc);
+}
+
+static void ipu_ic_pp_complete(struct ipu_image_convert_run *run, void *_ctx)
+{
+ struct ipu_csc_scaler_ctx *ctx = _ctx;
+ struct ipu_csc_scaler_priv *priv = ctx->priv;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+
+ src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, true);
+
+ src_buf->sequence = ctx->sequence++;
+ dst_buf->sequence = src_buf->sequence;
+
+ v4l2_m2m_buf_done(src_buf, run->status ? VB2_BUF_STATE_ERROR :
+ VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst_buf, run->status ? VB2_BUF_STATE_ERROR :
+ VB2_BUF_STATE_DONE);
+
+ v4l2_m2m_job_finish(priv->m2m_dev, ctx->fh.m2m_ctx);
+ kfree(run);
+}
+
+static void device_run(void *_ctx)
+{
+ struct ipu_csc_scaler_ctx *ctx = _ctx;
+ struct ipu_csc_scaler_priv *priv = ctx->priv;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ struct ipu_image_convert_run *run;
+ int ret;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ run = kzalloc(sizeof(*run), GFP_KERNEL);
+ if (!run)
+ goto err;
+
+ run->ctx = ctx->icc;
+ run->in_phys = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+ run->out_phys = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+
+ ret = ipu_image_convert_queue(run);
+ if (ret < 0) {
+ v4l2_err(ctx->priv->vdev.vfd->v4l2_dev,
+ "%s: failed to queue: %d\n", __func__, ret);
+ goto err;
+ }
+
+ return;
+
+err:
+ v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
+ v4l2_m2m_job_finish(priv->m2m_dev, ctx->fh.m2m_ctx);
+}
+
+/*
+ * Video ioctls
+ */
+static int ipu_csc_scaler_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, IMX_CSC_SCALER_NAME, sizeof(cap->driver));
+ strscpy(cap->card, IMX_CSC_SCALER_NAME, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", IMX_CSC_SCALER_NAME);
+
+ return 0;
+}
+
+static int ipu_csc_scaler_enum_fmt(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ u32 fourcc;
+ int ret;
+
+ ret = imx_media_enum_pixel_formats(&fourcc, f->index,
+ PIXFMT_SEL_YUV_RGB, 0);
+ if (ret)
+ return ret;
+
+ f->pixelformat = fourcc;
+
+ return 0;
+}
+
+static int ipu_csc_scaler_g_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct ipu_csc_scaler_ctx *ctx = fh_to_ctx(priv);
+ struct ipu_csc_scaler_q_data *q_data;
+
+ q_data = get_q_data(ctx, f->type);
+
+ f->fmt.pix = q_data->cur_fmt;
+
+ return 0;
+}
+
+static int ipu_csc_scaler_try_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct ipu_csc_scaler_ctx *ctx = fh_to_ctx(priv);
+ struct ipu_csc_scaler_q_data *q_data = get_q_data(ctx, f->type);
+ struct ipu_image test_in, test_out;
+ enum v4l2_field field;
+
+ field = f->fmt.pix.field;
+ if (field == V4L2_FIELD_ANY)
+ field = V4L2_FIELD_NONE;
+ else if (field != V4L2_FIELD_NONE)
+ return -EINVAL;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ struct ipu_csc_scaler_q_data *q_data_in =
+ get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+
+ test_out.pix = f->fmt.pix;
+ test_in.pix = q_data_in->cur_fmt;
+ } else {
+ struct ipu_csc_scaler_q_data *q_data_out =
+ get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ test_in.pix = f->fmt.pix;
+ test_out.pix = q_data_out->cur_fmt;
+ }
+
+ ipu_image_convert_adjust(&test_in, &test_out, ctx->rot_mode);
+
+ f->fmt.pix = (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) ?
+ test_out.pix : test_in.pix;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ f->fmt.pix.colorspace = q_data->cur_fmt.colorspace;
+ f->fmt.pix.ycbcr_enc = q_data->cur_fmt.ycbcr_enc;
+ f->fmt.pix.xfer_func = q_data->cur_fmt.xfer_func;
+ f->fmt.pix.quantization = q_data->cur_fmt.quantization;
+ } else if (f->fmt.pix.colorspace == V4L2_COLORSPACE_DEFAULT) {
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
+ f->fmt.pix.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ f->fmt.pix.xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ f->fmt.pix.quantization = V4L2_QUANTIZATION_DEFAULT;
+ }
+
+ return 0;
+}
+
+static int ipu_csc_scaler_s_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct ipu_csc_scaler_q_data *q_data;
+ struct ipu_csc_scaler_ctx *ctx = fh_to_ctx(priv);
+ struct vb2_queue *vq;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_busy(vq)) {
+ v4l2_err(ctx->priv->vdev.vfd->v4l2_dev, "%s: queue busy\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ q_data = get_q_data(ctx, f->type);
+
+ ret = ipu_csc_scaler_try_fmt(file, priv, f);
+ if (ret < 0)
+ return ret;
+
+ q_data->cur_fmt.width = f->fmt.pix.width;
+ q_data->cur_fmt.height = f->fmt.pix.height;
+ q_data->cur_fmt.pixelformat = f->fmt.pix.pixelformat;
+ q_data->cur_fmt.field = f->fmt.pix.field;
+ q_data->cur_fmt.bytesperline = f->fmt.pix.bytesperline;
+ q_data->cur_fmt.sizeimage = f->fmt.pix.sizeimage;
+
+ /* Reset cropping/composing rectangle */
+ q_data->rect.left = 0;
+ q_data->rect.top = 0;
+ q_data->rect.width = q_data->cur_fmt.width;
+ q_data->rect.height = q_data->cur_fmt.height;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ /* Set colorimetry on the output queue */
+ q_data->cur_fmt.colorspace = f->fmt.pix.colorspace;
+ q_data->cur_fmt.ycbcr_enc = f->fmt.pix.ycbcr_enc;
+ q_data->cur_fmt.xfer_func = f->fmt.pix.xfer_func;
+ q_data->cur_fmt.quantization = f->fmt.pix.quantization;
+ /* Propagate colorimetry to the capture queue */
+ q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ q_data->cur_fmt.colorspace = f->fmt.pix.colorspace;
+ q_data->cur_fmt.ycbcr_enc = f->fmt.pix.ycbcr_enc;
+ q_data->cur_fmt.xfer_func = f->fmt.pix.xfer_func;
+ q_data->cur_fmt.quantization = f->fmt.pix.quantization;
+ }
+
+ /*
+ * TODO: Setting colorimetry on the capture queue is currently not
+ * supported by the V4L2 API
+ */
+
+ return 0;
+}
+
+static int ipu_csc_scaler_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *s)
+{
+ struct ipu_csc_scaler_ctx *ctx = fh_to_ctx(priv);
+ struct ipu_csc_scaler_q_data *q_data;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (s->target == V4L2_SEL_TGT_CROP ||
+ s->target == V4L2_SEL_TGT_COMPOSE) {
+ s->r = q_data->rect;
+ } else {
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = q_data->cur_fmt.width;
+ s->r.height = q_data->cur_fmt.height;
+ }
+
+ return 0;
+}
+
+static int ipu_csc_scaler_s_selection(struct file *file, void *priv,
+ struct v4l2_selection *s)
+{
+ struct ipu_csc_scaler_ctx *ctx = fh_to_ctx(priv);
+ struct ipu_csc_scaler_q_data *q_data;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, s->type);
+
+ /*
+ * The input frame width to the IC must be a multiple of 8 pixels.
+ * When resizing, the frame width must also be a multiple of the burst
+ * size - 8 or 16 pixels, as defined by the CB#_BURST_16 parameter.
+ */
+ if (s->flags & V4L2_SEL_FLAG_GE)
+ s->r.width = round_up(s->r.width, 8);
+ if (s->flags & V4L2_SEL_FLAG_LE)
+ s->r.width = round_down(s->r.width, 8);
+ s->r.width = clamp_t(unsigned int, s->r.width, 8,
+ round_down(q_data->cur_fmt.width, 8));
+ s->r.height = clamp_t(unsigned int, s->r.height, 1,
+ q_data->cur_fmt.height);
+ s->r.left = clamp_t(unsigned int, s->r.left, 0,
+ q_data->cur_fmt.width - s->r.width);
+ s->r.top = clamp_t(unsigned int, s->r.top, 0,
+ q_data->cur_fmt.height - s->r.height);
+
+ /* V4L2_SEL_FLAG_KEEP_CONFIG is only valid for subdevices */
+ q_data->rect = s->r;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops ipu_csc_scaler_ioctl_ops = {
+ .vidioc_querycap = ipu_csc_scaler_querycap,
+
+ .vidioc_enum_fmt_vid_cap = ipu_csc_scaler_enum_fmt,
+ .vidioc_g_fmt_vid_cap = ipu_csc_scaler_g_fmt,
+ .vidioc_try_fmt_vid_cap = ipu_csc_scaler_try_fmt,
+ .vidioc_s_fmt_vid_cap = ipu_csc_scaler_s_fmt,
+
+ .vidioc_enum_fmt_vid_out = ipu_csc_scaler_enum_fmt,
+ .vidioc_g_fmt_vid_out = ipu_csc_scaler_g_fmt,
+ .vidioc_try_fmt_vid_out = ipu_csc_scaler_try_fmt,
+ .vidioc_s_fmt_vid_out = ipu_csc_scaler_s_fmt,
+
+ .vidioc_g_selection = ipu_csc_scaler_g_selection,
+ .vidioc_s_selection = ipu_csc_scaler_s_selection,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/*
+ * Queue operations
+ */
+
+static int ipu_csc_scaler_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers,
+ unsigned int *nplanes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct ipu_csc_scaler_ctx *ctx = vb2_get_drv_priv(vq);
+ struct ipu_csc_scaler_q_data *q_data;
+ unsigned int size, count = *nbuffers;
+
+ q_data = get_q_data(ctx, vq->type);
+
+ size = q_data->cur_fmt.sizeimage;
+
+ *nbuffers = count;
+
+ if (*nplanes)
+ return sizes[0] < size ? -EINVAL : 0;
+
+ *nplanes = 1;
+ sizes[0] = size;
+
+ dev_dbg(ctx->priv->dev, "get %d buffer(s) of size %d each.\n",
+ count, size);
+
+ return 0;
+}
+
+static int ipu_csc_scaler_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct ipu_csc_scaler_ctx *ctx = vb2_get_drv_priv(vq);
+ struct ipu_csc_scaler_q_data *q_data;
+ unsigned long size;
+
+ dev_dbg(ctx->priv->dev, "type: %d\n", vq->type);
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
+ if (vbuf->field == V4L2_FIELD_ANY)
+ vbuf->field = V4L2_FIELD_NONE;
+ if (vbuf->field != V4L2_FIELD_NONE) {
+ dev_dbg(ctx->priv->dev, "%s: field isn't supported\n",
+ __func__);
+ return -EINVAL;
+ }
+ }
+
+ q_data = get_q_data(ctx, vq->type);
+ size = q_data->cur_fmt.sizeimage;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ dev_dbg(ctx->priv->dev,
+ "%s: data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, size);
+
+ return 0;
+}
+
+static void ipu_csc_scaler_buf_queue(struct vb2_buffer *vb)
+{
+ struct ipu_csc_scaler_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, to_vb2_v4l2_buffer(vb));
+}
+
+static void ipu_image_from_q_data(struct ipu_image *im,
+ struct ipu_csc_scaler_q_data *q_data)
+{
+ struct v4l2_pix_format *fmt = &q_data->cur_fmt;
+
+ im->pix = *fmt;
+ if (fmt->ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT)
+ im->pix.ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace);
+ /* assume a non-RGB format when resolving the default quantization */
+ if (fmt->quantization == V4L2_QUANTIZATION_DEFAULT)
+ im->pix.quantization =
+ V4L2_MAP_QUANTIZATION_DEFAULT(false, fmt->colorspace,
+ im->pix.ycbcr_enc);
+ im->rect = q_data->rect;
+}
+
+static int ipu_csc_scaler_start_streaming(struct vb2_queue *q,
+ unsigned int count)
+{
+ const enum ipu_ic_task ic_task = IC_TASK_POST_PROCESSOR;
+ struct ipu_csc_scaler_ctx *ctx = vb2_get_drv_priv(q);
+ struct ipu_csc_scaler_priv *priv = ctx->priv;
+ struct ipu_soc *ipu = priv->md->ipu[0];
+ struct ipu_csc_scaler_q_data *q_data;
+ struct vb2_queue *other_q;
+ struct ipu_image in, out;
+
+ other_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) ?
+ V4L2_BUF_TYPE_VIDEO_OUTPUT :
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ if (!vb2_is_streaming(other_q))
+ return 0;
+
+ if (ctx->icc) {
+ v4l2_warn(ctx->priv->vdev.vfd->v4l2_dev, "removing old ICC\n");
+ ipu_image_convert_unprepare(ctx->icc);
+ }
+
+ q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ ipu_image_from_q_data(&in, q_data);
+
+ q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ ipu_image_from_q_data(&out, q_data);
+
+ ctx->icc = ipu_image_convert_prepare(ipu, ic_task, &in, &out,
+ ctx->rot_mode,
+ ipu_ic_pp_complete, ctx);
+ if (IS_ERR(ctx->icc)) {
+ struct vb2_v4l2_buffer *buf;
+ int ret = PTR_ERR(ctx->icc);
+
+ ctx->icc = NULL;
+ v4l2_err(ctx->priv->vdev.vfd->v4l2_dev, "%s: error %d\n",
+ __func__, ret);
+ while ((buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED);
+ while ((buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ipu_csc_scaler_stop_streaming(struct vb2_queue *q)
+{
+ struct ipu_csc_scaler_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_v4l2_buffer *buf;
+
+ if (ctx->icc) {
+ ipu_image_convert_unprepare(ctx->icc);
+ ctx->icc = NULL;
+ }
+
+ ctx->sequence = 0;
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ while ((buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
+ } else {
+ while ((buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
+ }
+}
+
+static const struct vb2_ops ipu_csc_scaler_qops = {
+ .queue_setup = ipu_csc_scaler_queue_setup,
+ .buf_prepare = ipu_csc_scaler_buf_prepare,
+ .buf_queue = ipu_csc_scaler_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = ipu_csc_scaler_start_streaming,
+ .stop_streaming = ipu_csc_scaler_stop_streaming,
+};
+
+static int ipu_csc_scaler_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct ipu_csc_scaler_ctx *ctx = priv;
+ int ret;
+
+ memset(src_vq, 0, sizeof(*src_vq));
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->ops = &ipu_csc_scaler_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->priv->mutex;
+ src_vq->dev = ctx->priv->dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ memset(dst_vq, 0, sizeof(*dst_vq));
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->ops = &ipu_csc_scaler_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->priv->mutex;
+ dst_vq->dev = ctx->priv->dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
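+/*
+ * Changing hflip/vflip/rotate can change the required buffer geometry
+ * (90/270 degree rotation through the IRT swaps width and height), so a
+ * new rotation mode is only accepted while the affected queues are idle.
+ */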
+static int ipu_csc_scaler_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ipu_csc_scaler_ctx *ctx = container_of(ctrl->handler,
+ struct ipu_csc_scaler_ctx,
+ ctrl_hdlr);
+ enum ipu_rotate_mode rot_mode;
+ int rotate;
+ bool hflip, vflip;
+ int ret = 0;
+
+ rotate = ctx->rotate;
+ hflip = ctx->hflip;
+ vflip = ctx->vflip;
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ hflip = ctrl->val;
+ break;
+ case V4L2_CID_VFLIP:
+ vflip = ctrl->val;
+ break;
+ case V4L2_CID_ROTATE:
+ rotate = ctrl->val;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = ipu_degrees_to_rot_mode(&rot_mode, rotate, hflip, vflip);
+ if (ret)
+ return ret;
+
+ if (rot_mode != ctx->rot_mode) {
+ struct v4l2_pix_format *in_fmt, *out_fmt;
+ struct ipu_image test_in, test_out;
+
+ in_fmt = &ctx->q_data[V4L2_M2M_SRC].cur_fmt;
+ out_fmt = &ctx->q_data[V4L2_M2M_DST].cur_fmt;
+
+ test_in.pix = *in_fmt;
+ test_out.pix = *out_fmt;
+
+ if (ipu_rot_mode_is_irt(rot_mode) !=
+ ipu_rot_mode_is_irt(ctx->rot_mode)) {
+ /* Switch width & height to keep aspect ratio intact */
+ test_out.pix.width = out_fmt->height;
+ test_out.pix.height = out_fmt->width;
+ }
+
+ ipu_image_convert_adjust(&test_in, &test_out, ctx->rot_mode);
+
+ /* Check if output format needs to be changed */
+ if (test_in.pix.width != in_fmt->width ||
+ test_in.pix.height != in_fmt->height ||
+ test_in.pix.bytesperline != in_fmt->bytesperline ||
+ test_in.pix.sizeimage != in_fmt->sizeimage) {
+ struct vb2_queue *out_q;
+
+ out_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ if (vb2_is_busy(out_q))
+ return -EBUSY;
+ }
+
+ /* Check if capture format needs to be changed */
+ if (test_out.pix.width != out_fmt->width ||
+ test_out.pix.height != out_fmt->height ||
+ test_out.pix.bytesperline != out_fmt->bytesperline ||
+ test_out.pix.sizeimage != out_fmt->sizeimage) {
+ struct vb2_queue *cap_q;
+
+ cap_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ if (vb2_is_busy(cap_q))
+ return -EBUSY;
+ }
+
+ *in_fmt = test_in.pix;
+ *out_fmt = test_out.pix;
+
+ ctx->rot_mode = rot_mode;
+ ctx->rotate = rotate;
+ ctx->hflip = hflip;
+ ctx->vflip = vflip;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops ipu_csc_scaler_ctrl_ops = {
+ .s_ctrl = ipu_csc_scaler_s_ctrl,
+};
+
+static int ipu_csc_scaler_init_controls(struct ipu_csc_scaler_ctx *ctx)
+{
+ struct v4l2_ctrl_handler *hdlr = &ctx->ctrl_hdlr;
+
+ v4l2_ctrl_handler_init(hdlr, 3);
+
+ v4l2_ctrl_new_std(hdlr, &ipu_csc_scaler_ctrl_ops, V4L2_CID_HFLIP,
+ 0, 1, 1, 0);
+ v4l2_ctrl_new_std(hdlr, &ipu_csc_scaler_ctrl_ops, V4L2_CID_VFLIP,
+ 0, 1, 1, 0);
+ v4l2_ctrl_new_std(hdlr, &ipu_csc_scaler_ctrl_ops, V4L2_CID_ROTATE,
+ 0, 270, 90, 0);
+
+ if (hdlr->error) {
+ v4l2_ctrl_handler_free(hdlr);
+ return hdlr->error;
+ }
+
+ v4l2_ctrl_handler_setup(hdlr);
+ return 0;
+}
+
+#define DEFAULT_WIDTH 720
+#define DEFAULT_HEIGHT 576
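+/*
+ * Default to a 720x576 YUV 4:2:0 format: bytesperline covers the Y plane
+ * and sizeimage is 720 * 576 * 3 / 2 = 622080 bytes.
+ */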
+static const struct ipu_csc_scaler_q_data ipu_csc_scaler_q_data_default = {
+ .cur_fmt = {
+ .width = DEFAULT_WIDTH,
+ .height = DEFAULT_HEIGHT,
+ .pixelformat = V4L2_PIX_FMT_YUV420,
+ .field = V4L2_FIELD_NONE,
+ .bytesperline = DEFAULT_WIDTH,
+ .sizeimage = DEFAULT_WIDTH * DEFAULT_HEIGHT * 3 / 2,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ },
+ .rect = {
+ .width = DEFAULT_WIDTH,
+ .height = DEFAULT_HEIGHT,
+ },
+};
+
+/*
+ * File operations
+ */
+static int ipu_csc_scaler_open(struct file *file)
+{
+ struct ipu_csc_scaler_priv *priv = video_drvdata(file);
+ struct ipu_csc_scaler_ctx *ctx = NULL;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->rot_mode = IPU_ROTATE_NONE;
+
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+ ctx->priv = priv;
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(priv->m2m_dev, ctx,
+ &ipu_csc_scaler_queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ goto err_ctx;
+ }
+
+ ret = ipu_csc_scaler_init_controls(ctx);
+ if (ret)
+ goto err_ctrls;
+
+ ctx->fh.ctrl_handler = &ctx->ctrl_hdlr;
+
+ ctx->q_data[V4L2_M2M_SRC] = ipu_csc_scaler_q_data_default;
+ ctx->q_data[V4L2_M2M_DST] = ipu_csc_scaler_q_data_default;
+
+ dev_dbg(priv->dev, "Created instance %p, m2m_ctx: %p\n", ctx,
+ ctx->fh.m2m_ctx);
+
+ return 0;
+
+err_ctrls:
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+err_ctx:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+ return ret;
+}
+
+static int ipu_csc_scaler_release(struct file *file)
+{
+ struct ipu_csc_scaler_priv *priv = video_drvdata(file);
+ struct ipu_csc_scaler_ctx *ctx = fh_to_ctx(file->private_data);
+
+ dev_dbg(priv->dev, "Releasing instance %p\n", ctx);
+
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations ipu_csc_scaler_fops = {
+ .owner = THIS_MODULE,
+ .open = ipu_csc_scaler_open,
+ .release = ipu_csc_scaler_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static const struct v4l2_m2m_ops m2m_ops = {
+ .device_run = device_run,
+ .job_abort = job_abort,
+};
+
+static void ipu_csc_scaler_video_device_release(struct video_device *vdev)
+{
+ struct ipu_csc_scaler_priv *priv = video_get_drvdata(vdev);
+
+ v4l2_m2m_release(priv->m2m_dev);
+ video_device_release(vdev);
+ kfree(priv);
+}
+
+static const struct video_device ipu_csc_scaler_videodev_template = {
+ .name = "ipu_ic_pp csc/scaler",
+ .fops = &ipu_csc_scaler_fops,
+ .ioctl_ops = &ipu_csc_scaler_ioctl_ops,
+ .minor = -1,
+ .release = ipu_csc_scaler_video_device_release,
+ .vfl_dir = VFL_DIR_M2M,
+ .device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING,
+};
+
+int imx_media_csc_scaler_device_register(struct imx_media_video_dev *vdev)
+{
+ struct ipu_csc_scaler_priv *priv = vdev_to_priv(vdev);
+ struct video_device *vfd = vdev->vfd;
+ int ret;
+
+ vfd->v4l2_dev = &priv->md->v4l2_dev;
+
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
+ if (ret) {
+ v4l2_err(vfd->v4l2_dev, "Failed to register video device\n");
+ return ret;
+ }
+
+ v4l2_info(vfd->v4l2_dev, "Registered %s as /dev/%s\n", vfd->name,
+ video_device_node_name(vfd));
+
+ return 0;
+}
+
+void imx_media_csc_scaler_device_unregister(struct imx_media_video_dev *vdev)
+{
+ struct ipu_csc_scaler_priv *priv = vdev_to_priv(vdev);
+ struct video_device *vfd = priv->vdev.vfd;
+
+ video_unregister_device(vfd);
+}
+
+struct imx_media_video_dev *
+imx_media_csc_scaler_device_init(struct imx_media_dev *md)
+{
+ struct ipu_csc_scaler_priv *priv;
+ struct video_device *vfd;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ priv->md = md;
+ priv->dev = md->md.dev;
+
+ mutex_init(&priv->mutex);
+
+ vfd = video_device_alloc();
+ if (!vfd) {
+ ret = -ENOMEM;
+ goto err_vfd;
+ }
+
+ *vfd = ipu_csc_scaler_videodev_template;
+ vfd->lock = &priv->mutex;
+ priv->vdev.vfd = vfd;
+
+ INIT_LIST_HEAD(&priv->vdev.list);
+
+ video_set_drvdata(vfd, priv);
+
+ priv->m2m_dev = v4l2_m2m_init(&m2m_ops);
+ if (IS_ERR(priv->m2m_dev)) {
+ ret = PTR_ERR(priv->m2m_dev);
+ v4l2_err(&md->v4l2_dev, "Failed to init mem2mem device: %d\n",
+ ret);
+ goto err_m2m;
+ }
+
+ return &priv->vdev;
+
+err_m2m:
+ video_set_drvdata(vfd, NULL);
+err_vfd:
+ kfree(priv);
+ return ERR_PTR(ret);
+}
+
+MODULE_DESCRIPTION("i.MX IPUv3 mem2mem scaler/CSC driver");
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
new file mode 100644
index 000000000..b2b1f4dd4
--- /dev/null
+++ b/drivers/staging/media/imx/imx-media-csi.c
@@ -0,0 +1,2089 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * V4L2 Capture CSI Subdev for Freescale i.MX5/6 SOC
+ *
+ * Copyright (c) 2014-2017 Mentor Graphics Inc.
+ * Copyright (C) 2017 Pengutronix, Philipp Zabel <kernel@pengutronix.de>
+ */
+#include <linux/delay.h>
+#include <linux/gcd.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-dma-contig.h>
+#include <video/imx-ipu-v3.h>
+#include <media/imx.h>
+#include "imx-media.h"
+
+/*
+ * Min/Max supported width and heights.
+ *
+ * We allow planar output, so we have to align width by 16 pixels
+ * to meet IDMAC alignment requirements.
+ *
+ * TODO: move this into pad format negotiation, if capture device
+ * has not requested planar formats, we should allow 8 pixel
+ * alignment.
+ */
+#define MIN_W 32
+#define MIN_H 32
+#define MAX_W 4096
+#define MAX_H 4096
+#define W_ALIGN 1 /* multiple of 2 pixels */
+#define H_ALIGN 1 /* multiple of 2 lines */
+#define S_ALIGN 1 /* multiple of 2 */
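+
+/*
+ * The *_ALIGN values are log2 alignments (as used with
+ * v4l_bound_align_image()), so a value of 1 rounds to a multiple of 2.
+ */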
+
+/*
+ * struct csi_skip_desc - CSI frame skipping descriptor
+ * @keep - number of frames kept per max_ratio frames
+ * @max_ratio - width of skip_smfc, written to MAX_RATIO bitfield
+ * @skip_smfc - skip pattern written to the SKIP_SMFC bitfield
+ */
+struct csi_skip_desc {
+ u8 keep;
+ u8 max_ratio;
+ u8 skip_smfc;
+};
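+/*
+ * A skip descriptor keeps @keep frames out of every @max_ratio captured
+ * frames, so skipping scales the effective frame interval by roughly
+ * max_ratio / keep.
+ */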
+
+struct csi_priv {
+ struct device *dev;
+ struct ipu_soc *ipu;
+ struct v4l2_subdev sd;
+ struct media_pad pad[CSI_NUM_PADS];
+ struct v4l2_async_notifier notifier;
+
+ /* the video device at IDMAC output pad */
+ struct imx_media_video_dev *vdev;
+ struct imx_media_fim *fim;
+ int csi_id;
+ int smfc_id;
+
+ /* lock to protect all members below */
+ struct mutex lock;
+
+ int active_output_pad;
+
+ struct ipuv3_channel *idmac_ch;
+ struct ipu_smfc *smfc;
+ struct ipu_csi *csi;
+
+ struct v4l2_mbus_framefmt format_mbus[CSI_NUM_PADS];
+ const struct imx_media_pixfmt *cc[CSI_NUM_PADS];
+ struct v4l2_fract frame_interval[CSI_NUM_PADS];
+ struct v4l2_rect crop;
+ struct v4l2_rect compose;
+ const struct csi_skip_desc *skip;
+
+ /* active vb2 buffers to send to video dev sink */
+ struct imx_media_buffer *active_vb2_buf[2];
+ struct imx_media_dma_buf underrun_buf;
+
+ int ipu_buf_num; /* ipu double buffer index: 0-1 */
+
+ /* the sink for the captured frames */
+ struct media_entity *sink;
+ enum ipu_csi_dest dest;
+ /* the source subdev */
+ struct v4l2_subdev *src_sd;
+
+ /* the mipi virtual channel number at link validate */
+ int vc_num;
+
+ /* the upstream endpoint CSI is receiving from */
+ struct v4l2_fwnode_endpoint upstream_ep;
+
+ spinlock_t irqlock; /* protect eof_irq handler */
+ struct timer_list eof_timeout_timer;
+ int eof_irq;
+ int nfb4eof_irq;
+
+ struct v4l2_ctrl_handler ctrl_hdlr;
+
+ int stream_count; /* streaming counter */
+ u32 frame_sequence; /* frame sequence counter */
+ bool last_eof; /* waiting for last EOF at stream off */
+ bool nfb4eof; /* NFB4EOF encountered during streaming */
+ bool interweave_swap; /* swap top/bottom lines when interweaving */
+ struct completion last_eof_comp;
+};
+
+static inline struct csi_priv *sd_to_dev(struct v4l2_subdev *sdev)
+{
+ return container_of(sdev, struct csi_priv, sd);
+}
+
+static inline struct csi_priv *notifier_to_dev(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct csi_priv, notifier);
+}
+
+static inline bool is_parallel_bus(struct v4l2_fwnode_endpoint *ep)
+{
+ return ep->bus_type != V4L2_MBUS_CSI2_DPHY;
+}
+
+static inline bool is_parallel_16bit_bus(struct v4l2_fwnode_endpoint *ep)
+{
+ return is_parallel_bus(ep) && ep->bus.parallel.bus_width >= 16;
+}
+
+/*
+ * Check for conditions that require the IPU to handle the
+ * data internally as generic data, aka passthrough mode:
+ * - raw bayer media bus formats, or
+ * - the CSI is receiving from a 16-bit parallel bus, or
+ * - the CSI is receiving from an 8-bit parallel bus and the incoming
+ * media bus format is other than UYVY8_2X8/YUYV8_2X8.
+ *
+ * BT.656 and BT.1120 (8/10-bit YUV422) data can always be processed
+ * on-the-fly, so it never requires passthrough.
+ */
+static inline bool requires_passthrough(struct v4l2_fwnode_endpoint *ep,
+ struct v4l2_mbus_framefmt *infmt,
+ const struct imx_media_pixfmt *incc)
+{
+ if (ep->bus_type == V4L2_MBUS_BT656) // including BT.1120
+ return false;
+
+ return incc->bayer || is_parallel_16bit_bus(ep) ||
+ (is_parallel_bus(ep) &&
+ infmt->code != MEDIA_BUS_FMT_UYVY8_2X8 &&
+ infmt->code != MEDIA_BUS_FMT_YUYV8_2X8);
+}
+
+/*
+ * Parses the fwnode endpoint from the source pad of the entity
+ * connected to this CSI. This will either be the entity directly
+ * upstream from the CSI-2 receiver, directly upstream from the
+ * video mux, or directly upstream from the CSI itself. The endpoint
+ * is needed to determine the bus type and bus config coming into
+ * the CSI.
+ */
+static int csi_get_upstream_endpoint(struct csi_priv *priv,
+ struct v4l2_fwnode_endpoint *ep)
+{
+ struct fwnode_handle *endpoint;
+ struct v4l2_subdev *sd;
+ struct media_pad *pad;
+
+ if (!IS_ENABLED(CONFIG_OF))
+ return -ENXIO;
+
+ if (!priv->src_sd)
+ return -EPIPE;
+
+ sd = priv->src_sd;
+
+ switch (sd->grp_id) {
+ case IMX_MEDIA_GRP_ID_CSI_MUX:
+ /*
+ * CSI is connected directly to CSI mux, skip up to
+ * CSI-2 receiver if it is in the path, otherwise stay
+ * with the CSI mux.
+ */
+ sd = imx_media_pipeline_subdev(&sd->entity,
+ IMX_MEDIA_GRP_ID_CSI2,
+ true);
+ if (IS_ERR(sd))
+ sd = priv->src_sd;
+ break;
+ case IMX_MEDIA_GRP_ID_CSI2:
+ break;
+ default:
+ /*
+ * the source is neither the CSI mux nor the CSI-2 receiver,
+ * get the source pad directly upstream from CSI itself.
+ */
+ sd = &priv->sd;
+ break;
+ }
+
+ /* get source pad of entity directly upstream from sd */
+ pad = imx_media_pipeline_pad(&sd->entity, 0, 0, true);
+ if (!pad)
+ return -ENODEV;
+
+ endpoint = imx_media_get_pad_fwnode(pad);
+ if (IS_ERR(endpoint))
+ return PTR_ERR(endpoint);
+
+ v4l2_fwnode_endpoint_parse(endpoint, ep);
+
+ fwnode_handle_put(endpoint);
+
+ return 0;
+}
+
+static void csi_idmac_put_ipu_resources(struct csi_priv *priv)
+{
+ if (priv->idmac_ch)
+ ipu_idmac_put(priv->idmac_ch);
+ priv->idmac_ch = NULL;
+
+ if (priv->smfc)
+ ipu_smfc_put(priv->smfc);
+ priv->smfc = NULL;
+}
+
+static int csi_idmac_get_ipu_resources(struct csi_priv *priv)
+{
+ int ch_num, ret;
+ struct ipu_smfc *smfc;
+ struct ipuv3_channel *idmac_ch;
+
+ ch_num = IPUV3_CHANNEL_CSI0 + priv->smfc_id;
+
+ smfc = ipu_smfc_get(priv->ipu, ch_num);
+ if (IS_ERR(smfc)) {
+ v4l2_err(&priv->sd, "failed to get SMFC\n");
+ ret = PTR_ERR(smfc);
+ goto out;
+ }
+ priv->smfc = smfc;
+
+ idmac_ch = ipu_idmac_get(priv->ipu, ch_num);
+ if (IS_ERR(idmac_ch)) {
+ v4l2_err(&priv->sd, "could not get IDMAC channel %u\n",
+ ch_num);
+ ret = PTR_ERR(idmac_ch);
+ goto out;
+ }
+ priv->idmac_ch = idmac_ch;
+
+ return 0;
+out:
+ csi_idmac_put_ipu_resources(priv);
+ return ret;
+}
+
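+/*
+ * Return the vb2 buffer that completed in the current IPU double-buffer
+ * slot (priv->ipu_buf_num) to userspace, then re-arm that slot with the
+ * next queued buffer, falling back to the internal underrun buffer when
+ * the queue is empty.
+ */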
+static void csi_vb2_buf_done(struct csi_priv *priv)
+{
+ struct imx_media_video_dev *vdev = priv->vdev;
+ struct imx_media_buffer *done, *next;
+ struct vb2_buffer *vb;
+ dma_addr_t phys;
+
+ done = priv->active_vb2_buf[priv->ipu_buf_num];
+ if (done) {
+ done->vbuf.field = vdev->fmt.field;
+ done->vbuf.sequence = priv->frame_sequence;
+ vb = &done->vbuf.vb2_buf;
+ vb->timestamp = ktime_get_ns();
+ vb2_buffer_done(vb, priv->nfb4eof ?
+ VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+ }
+
+ priv->frame_sequence++;
+ priv->nfb4eof = false;
+
+ /* get next queued buffer */
+ next = imx_media_capture_device_next_buf(vdev);
+ if (next) {
+ phys = vb2_dma_contig_plane_dma_addr(&next->vbuf.vb2_buf, 0);
+ priv->active_vb2_buf[priv->ipu_buf_num] = next;
+ } else {
+ phys = priv->underrun_buf.phys;
+ priv->active_vb2_buf[priv->ipu_buf_num] = NULL;
+ }
+
+ if (ipu_idmac_buffer_is_ready(priv->idmac_ch, priv->ipu_buf_num))
+ ipu_idmac_clear_buffer(priv->idmac_ch, priv->ipu_buf_num);
+
+ if (priv->interweave_swap)
+ phys += vdev->fmt.bytesperline;
+
+ ipu_cpmem_set_buffer(priv->idmac_ch, priv->ipu_buf_num, phys);
+}
+
+static irqreturn_t csi_idmac_eof_interrupt(int irq, void *dev_id)
+{
+ struct csi_priv *priv = dev_id;
+
+ spin_lock(&priv->irqlock);
+
+ if (priv->last_eof) {
+ complete(&priv->last_eof_comp);
+ priv->last_eof = false;
+ goto unlock;
+ }
+
+ if (priv->fim)
+ /* call frame interval monitor */
+ imx_media_fim_eof_monitor(priv->fim, ktime_get());
+
+ csi_vb2_buf_done(priv);
+
+ /* select new IPU buf */
+ ipu_idmac_select_buffer(priv->idmac_ch, priv->ipu_buf_num);
+ /* toggle IPU double-buffer index */
+ priv->ipu_buf_num ^= 1;
+
+ /* bump the EOF timeout timer */
+ mod_timer(&priv->eof_timeout_timer,
+ jiffies + msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
+
+unlock:
+ spin_unlock(&priv->irqlock);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t csi_idmac_nfb4eof_interrupt(int irq, void *dev_id)
+{
+ struct csi_priv *priv = dev_id;
+
+ spin_lock(&priv->irqlock);
+
+ /*
+ * this is not an unrecoverable error, just mark
+ * the next captured frame with vb2 error flag.
+ */
+ priv->nfb4eof = true;
+
+ v4l2_err(&priv->sd, "NFB4EOF\n");
+
+ spin_unlock(&priv->irqlock);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * EOF timeout timer function. This is an unrecoverable condition
+ * without a stream restart.
+ */
+static void csi_idmac_eof_timeout(struct timer_list *t)
+{
+ struct csi_priv *priv = from_timer(priv, t, eof_timeout_timer);
+ struct imx_media_video_dev *vdev = priv->vdev;
+
+ v4l2_err(&priv->sd, "EOF timeout\n");
+
+ /* signal a fatal error to capture device */
+ imx_media_capture_device_error(vdev);
+}
+
+static void csi_idmac_setup_vb2_buf(struct csi_priv *priv, dma_addr_t *phys)
+{
+ struct imx_media_video_dev *vdev = priv->vdev;
+ struct imx_media_buffer *buf;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ buf = imx_media_capture_device_next_buf(vdev);
+ if (buf) {
+ priv->active_vb2_buf[i] = buf;
+ phys[i] = vb2_dma_contig_plane_dma_addr(
+ &buf->vbuf.vb2_buf, 0);
+ } else {
+ priv->active_vb2_buf[i] = NULL;
+ phys[i] = priv->underrun_buf.phys;
+ }
+ }
+}
+
+static void csi_idmac_unsetup_vb2_buf(struct csi_priv *priv,
+ enum vb2_buffer_state return_status)
+{
+ struct imx_media_buffer *buf;
+ int i;
+
+ /* return any remaining active frames with return_status */
+ for (i = 0; i < 2; i++) {
+ buf = priv->active_vb2_buf[i];
+ if (buf) {
+ struct vb2_buffer *vb = &buf->vbuf.vb2_buf;
+
+ vb->timestamp = ktime_get_ns();
+ vb2_buffer_done(vb, return_status);
+ }
+ }
+}
+
+/* init the SMFC IDMAC channel */
+static int csi_idmac_setup_channel(struct csi_priv *priv)
+{
+ struct imx_media_video_dev *vdev = priv->vdev;
+ const struct imx_media_pixfmt *incc;
+ struct v4l2_mbus_framefmt *infmt;
+ struct v4l2_mbus_framefmt *outfmt;
+ bool passthrough, interweave;
+ struct ipu_image image;
+ u32 passthrough_bits;
+ u32 passthrough_cycles;
+ dma_addr_t phys[2];
+ u32 burst_size;
+ int ret;
+
+ infmt = &priv->format_mbus[CSI_SINK_PAD];
+ incc = priv->cc[CSI_SINK_PAD];
+ outfmt = &priv->format_mbus[CSI_SRC_PAD_IDMAC];
+
+ ipu_cpmem_zero(priv->idmac_ch);
+
+ memset(&image, 0, sizeof(image));
+ image.pix = vdev->fmt;
+ image.rect = vdev->compose;
+
+ csi_idmac_setup_vb2_buf(priv, phys);
+
+ image.phys0 = phys[0];
+ image.phys1 = phys[1];
+
+ passthrough = requires_passthrough(&priv->upstream_ep, infmt, incc);
+ passthrough_cycles = 1;
+
+ /*
+ * If the field type at capture interface is interlaced, and
+ * the output IDMAC pad is sequential, enable interweave at
+ * the IDMAC output channel.
+ */
+ interweave = V4L2_FIELD_IS_INTERLACED(image.pix.field) &&
+ V4L2_FIELD_IS_SEQUENTIAL(outfmt->field);
+ priv->interweave_swap = interweave &&
+ image.pix.field == V4L2_FIELD_INTERLACED_BT;
+
+ switch (image.pix.pixelformat) {
+ case V4L2_PIX_FMT_SBGGR8:
+ case V4L2_PIX_FMT_SGBRG8:
+ case V4L2_PIX_FMT_SGRBG8:
+ case V4L2_PIX_FMT_SRGGB8:
+ case V4L2_PIX_FMT_GREY:
+ burst_size = 16;
+ passthrough_bits = 8;
+ break;
+ case V4L2_PIX_FMT_SBGGR16:
+ case V4L2_PIX_FMT_SGBRG16:
+ case V4L2_PIX_FMT_SGRBG16:
+ case V4L2_PIX_FMT_SRGGB16:
+ case V4L2_PIX_FMT_Y10:
+ case V4L2_PIX_FMT_Y12:
+ burst_size = 8;
+ passthrough_bits = 16;
+ break;
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ case V4L2_PIX_FMT_NV12:
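+ /*
+ * Pick the largest burst size (in pixels) that evenly divides
+ * the line width: 64, 32 or 16, falling back to 8.
+ */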
+ burst_size = (image.pix.width & 0x3f) ?
+ ((image.pix.width & 0x1f) ?
+ ((image.pix.width & 0xf) ? 8 : 16) : 32) : 64;
+ passthrough_bits = 16;
+ /*
+ * Skip writing U and V components to odd rows (but not
+ * when enabling IDMAC interweaving, they are incompatible).
+ */
+ if (!interweave)
+ ipu_cpmem_skip_odd_chroma_rows(priv->idmac_ch);
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ burst_size = (image.pix.width & 0x1f) ?
+ ((image.pix.width & 0xf) ? 8 : 16) : 32;
+ passthrough_bits = 16;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ if (passthrough) {
+ burst_size = 16;
+ passthrough_bits = 8;
+ passthrough_cycles = incc->cycles;
+ break;
+ }
+ fallthrough; /* non-passthrough RGB565 (CSI-2 bus) */
+ default:
+ burst_size = (image.pix.width & 0xf) ? 8 : 16;
+ passthrough_bits = 16;
+ break;
+ }
+
+ if (passthrough) {
+ if (priv->interweave_swap) {
+ /* start interweave scan at 1st top line (2nd line) */
+ image.phys0 += image.pix.bytesperline;
+ image.phys1 += image.pix.bytesperline;
+ }
+
+ ipu_cpmem_set_resolution(priv->idmac_ch,
+ image.rect.width * passthrough_cycles,
+ image.rect.height);
+ ipu_cpmem_set_stride(priv->idmac_ch, image.pix.bytesperline);
+ ipu_cpmem_set_buffer(priv->idmac_ch, 0, image.phys0);
+ ipu_cpmem_set_buffer(priv->idmac_ch, 1, image.phys1);
+ ipu_cpmem_set_format_passthrough(priv->idmac_ch,
+ passthrough_bits);
+ } else {
+ if (priv->interweave_swap) {
+ /* start interweave scan at 1st top line (2nd line) */
+ image.rect.top = 1;
+ }
+
+ ret = ipu_cpmem_set_image(priv->idmac_ch, &image);
+ if (ret)
+ goto unsetup_vb2;
+ }
+
+ ipu_cpmem_set_burstsize(priv->idmac_ch, burst_size);
+
+ /*
+ * Set the channel for the direct CSI-->memory via SMFC
+ * use-case to very high priority, by enabling the watermark
+ * signal in the SMFC, enabling WM in the channel, and setting
+ * the channel priority to high.
+ *
+ * Refer to the i.MX6 rev. D TRM, Table 36-8: Calculated priority
+ * value.
+ *
+ * The watermarks are intentionally set very low here to ensure that
+ * the SMFC FIFOs do not overflow.
+ */
+ ipu_smfc_set_watermark(priv->smfc, 0x02, 0x01);
+ ipu_cpmem_set_high_priority(priv->idmac_ch);
+ ipu_idmac_enable_watermark(priv->idmac_ch, true);
+ ipu_cpmem_set_axi_id(priv->idmac_ch, 0);
+
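+ /*
+ * Translate the IDMAC burst size in pixels into the value
+ * programmed into the SMFC burst size field: (pixels / 8) - 1
+ * in passthrough mode, (pixels / 4) - 1 otherwise.
+ */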
+ burst_size = passthrough ?
+ (burst_size >> 3) - 1 : (burst_size >> 2) - 1;
+
+ ipu_smfc_set_burstsize(priv->smfc, burst_size);
+
+ if (interweave)
+ ipu_cpmem_interlaced_scan(priv->idmac_ch,
+ priv->interweave_swap ?
+ -image.pix.bytesperline :
+ image.pix.bytesperline,
+ image.pix.pixelformat);
+
+ ipu_idmac_set_double_buffer(priv->idmac_ch, true);
+
+ return 0;
+
+unsetup_vb2:
+ csi_idmac_unsetup_vb2_buf(priv, VB2_BUF_STATE_QUEUED);
+ return ret;
+}
+
+static void csi_idmac_unsetup(struct csi_priv *priv,
+ enum vb2_buffer_state state)
+{
+ ipu_idmac_disable_channel(priv->idmac_ch);
+ ipu_smfc_disable(priv->smfc);
+
+ csi_idmac_unsetup_vb2_buf(priv, state);
+}
+
+static int csi_idmac_setup(struct csi_priv *priv)
+{
+ int ret;
+
+ ret = csi_idmac_setup_channel(priv);
+ if (ret)
+ return ret;
+
+ ipu_cpmem_dump(priv->idmac_ch);
+ ipu_dump(priv->ipu);
+
+ ipu_smfc_enable(priv->smfc);
+
+ /* set buffers ready */
+ ipu_idmac_select_buffer(priv->idmac_ch, 0);
+ ipu_idmac_select_buffer(priv->idmac_ch, 1);
+
+ /* enable the channels */
+ ipu_idmac_enable_channel(priv->idmac_ch);
+
+ return 0;
+}
+
+static int csi_idmac_start(struct csi_priv *priv)
+{
+ struct imx_media_video_dev *vdev = priv->vdev;
+ int ret;
+
+ ret = csi_idmac_get_ipu_resources(priv);
+ if (ret)
+ return ret;
+
+ ipu_smfc_map_channel(priv->smfc, priv->csi_id, priv->vc_num);
+
+ ret = imx_media_alloc_dma_buf(priv->dev, &priv->underrun_buf,
+ vdev->fmt.sizeimage);
+ if (ret)
+ goto out_put_ipu;
+
+ priv->ipu_buf_num = 0;
+
+ /* init EOF completion waitq */
+ init_completion(&priv->last_eof_comp);
+ priv->frame_sequence = 0;
+ priv->last_eof = false;
+ priv->nfb4eof = false;
+
+ ret = csi_idmac_setup(priv);
+ if (ret) {
+ v4l2_err(&priv->sd, "csi_idmac_setup failed: %d\n", ret);
+ goto out_free_dma_buf;
+ }
+
+ priv->nfb4eof_irq = ipu_idmac_channel_irq(priv->ipu,
+ priv->idmac_ch,
+ IPU_IRQ_NFB4EOF);
+ ret = devm_request_irq(priv->dev, priv->nfb4eof_irq,
+ csi_idmac_nfb4eof_interrupt, 0,
+ "imx-smfc-nfb4eof", priv);
+ if (ret) {
+ v4l2_err(&priv->sd,
+ "Error registering NFB4EOF irq: %d\n", ret);
+ goto out_unsetup;
+ }
+
+ priv->eof_irq = ipu_idmac_channel_irq(priv->ipu, priv->idmac_ch,
+ IPU_IRQ_EOF);
+
+ ret = devm_request_irq(priv->dev, priv->eof_irq,
+ csi_idmac_eof_interrupt, 0,
+ "imx-smfc-eof", priv);
+ if (ret) {
+ v4l2_err(&priv->sd,
+ "Error registering eof irq: %d\n", ret);
+ goto out_free_nfb4eof_irq;
+ }
+
+ /* start the EOF timeout timer */
+ mod_timer(&priv->eof_timeout_timer,
+ jiffies + msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
+
+ return 0;
+
+out_free_nfb4eof_irq:
+ devm_free_irq(priv->dev, priv->nfb4eof_irq, priv);
+out_unsetup:
+ csi_idmac_unsetup(priv, VB2_BUF_STATE_QUEUED);
+out_free_dma_buf:
+ imx_media_free_dma_buf(priv->dev, &priv->underrun_buf);
+out_put_ipu:
+ csi_idmac_put_ipu_resources(priv);
+ return ret;
+}
+
+static void csi_idmac_wait_last_eof(struct csi_priv *priv)
+{
+ unsigned long flags;
+ int ret;
+
+ /* mark next EOF interrupt as the last before stream off */
+ spin_lock_irqsave(&priv->irqlock, flags);
+ priv->last_eof = true;
+ spin_unlock_irqrestore(&priv->irqlock, flags);
+
+ /*
+ * and then wait for interrupt handler to mark completion.
+ */
+ ret = wait_for_completion_timeout(
+ &priv->last_eof_comp, msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
+ if (ret == 0)
+ v4l2_warn(&priv->sd, "wait last EOF timeout\n");
+}
+
+static void csi_idmac_stop(struct csi_priv *priv)
+{
+ devm_free_irq(priv->dev, priv->eof_irq, priv);
+ devm_free_irq(priv->dev, priv->nfb4eof_irq, priv);
+
+ csi_idmac_unsetup(priv, VB2_BUF_STATE_ERROR);
+
+ imx_media_free_dma_buf(priv->dev, &priv->underrun_buf);
+
+ /* cancel the EOF timeout timer */
+ del_timer_sync(&priv->eof_timeout_timer);
+
+ csi_idmac_put_ipu_resources(priv);
+}
+
+/* Update the CSI whole sensor and active windows */
+static int csi_setup(struct csi_priv *priv)
+{
+ struct v4l2_mbus_framefmt *infmt, *outfmt;
+ const struct imx_media_pixfmt *incc;
+ struct v4l2_mbus_config mbus_cfg;
+ struct v4l2_mbus_framefmt if_fmt;
+ struct v4l2_rect crop;
+
+ infmt = &priv->format_mbus[CSI_SINK_PAD];
+ incc = priv->cc[CSI_SINK_PAD];
+ outfmt = &priv->format_mbus[priv->active_output_pad];
+
+ /* compose mbus_config from the upstream endpoint */
+ mbus_cfg.type = priv->upstream_ep.bus_type;
+ if (is_parallel_bus(&priv->upstream_ep))
+ mbus_cfg.bus.parallel = priv->upstream_ep.bus.parallel;
+ else
+ mbus_cfg.bus.mipi_csi2 = priv->upstream_ep.bus.mipi_csi2;
+
+ if_fmt = *infmt;
+ crop = priv->crop;
+
+ /*
+ * if cycles is set, we need to handle this over multiple cycles as
+ * generic/bayer data
+ */
+ if (is_parallel_bus(&priv->upstream_ep) && incc->cycles) {
+ if_fmt.width *= incc->cycles;
+ crop.width *= incc->cycles;
+ }
+
+ ipu_csi_set_window(priv->csi, &crop);
+
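+ /*
+ * This driver supports only 2:1 downsizing in the CSI (see
+ * csi_set_scale()), so enable downsizing per direction when the
+ * compose rectangle is exactly half the crop rectangle.
+ */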
+ ipu_csi_set_downsize(priv->csi,
+ priv->crop.width == 2 * priv->compose.width,
+ priv->crop.height == 2 * priv->compose.height);
+
+ ipu_csi_init_interface(priv->csi, &mbus_cfg, &if_fmt, outfmt);
+
+ ipu_csi_set_dest(priv->csi, priv->dest);
+
+ if (priv->dest == IPU_CSI_DEST_IDMAC)
+ ipu_csi_set_skip_smfc(priv->csi, priv->skip->skip_smfc,
+ priv->skip->max_ratio - 1, 0);
+
+ ipu_csi_dump(priv->csi);
+
+ return 0;
+}
+
+static int csi_start(struct csi_priv *priv)
+{
+ struct v4l2_fract *input_fi, *output_fi;
+ int ret;
+
+ input_fi = &priv->frame_interval[CSI_SINK_PAD];
+ output_fi = &priv->frame_interval[priv->active_output_pad];
+
+ /* start upstream */
+ ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 1);
+ ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
+ if (ret)
+ return ret;
+
+ /* Skip first few frames from a BT.656 source */
+ if (priv->upstream_ep.bus_type == V4L2_MBUS_BT656) {
+ u32 delay_usec, bad_frames = 20;
+
+ delay_usec = DIV_ROUND_UP_ULL((u64)USEC_PER_SEC *
+ input_fi->numerator * bad_frames,
+ input_fi->denominator);
+
+ usleep_range(delay_usec, delay_usec + 1000);
+ }
+
+ if (priv->dest == IPU_CSI_DEST_IDMAC) {
+ ret = csi_idmac_start(priv);
+ if (ret)
+ goto stop_upstream;
+ }
+
+ ret = csi_setup(priv);
+ if (ret)
+ goto idmac_stop;
+
+ /* start the frame interval monitor */
+ if (priv->fim && priv->dest == IPU_CSI_DEST_IDMAC) {
+ ret = imx_media_fim_set_stream(priv->fim, output_fi, true);
+ if (ret)
+ goto idmac_stop;
+ }
+
+ ret = ipu_csi_enable(priv->csi);
+ if (ret) {
+ v4l2_err(&priv->sd, "CSI enable error: %d\n", ret);
+ goto fim_off;
+ }
+
+ return 0;
+
+fim_off:
+ if (priv->fim && priv->dest == IPU_CSI_DEST_IDMAC)
+ imx_media_fim_set_stream(priv->fim, NULL, false);
+idmac_stop:
+ if (priv->dest == IPU_CSI_DEST_IDMAC)
+ csi_idmac_stop(priv);
+stop_upstream:
+ v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
+ return ret;
+}
+
+static void csi_stop(struct csi_priv *priv)
+{
+ if (priv->dest == IPU_CSI_DEST_IDMAC)
+ csi_idmac_wait_last_eof(priv);
+
+ /*
+ * Disable the CSI asap, after syncing with the last EOF.
+ * Doing so after the IDMA channel is disabled has shown to
+ * create hard system-wide hangs.
+ */
+ ipu_csi_disable(priv->csi);
+
+ /* stop upstream */
+ v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
+
+ if (priv->dest == IPU_CSI_DEST_IDMAC) {
+ csi_idmac_stop(priv);
+
+ /* stop the frame interval monitor */
+ if (priv->fim)
+ imx_media_fim_set_stream(priv->fim, NULL, false);
+ }
+}
+
+static const struct csi_skip_desc csi_skip[12] = {
+ { 1, 1, 0x00 }, /* Keep all frames */
+ { 5, 6, 0x10 }, /* Skip every sixth frame */
+ { 4, 5, 0x08 }, /* Skip every fifth frame */
+ { 3, 4, 0x04 }, /* Skip every fourth frame */
+ { 2, 3, 0x02 }, /* Skip every third frame */
+ { 3, 5, 0x0a }, /* Skip frames 1 and 3 of every 5 */
+ { 1, 2, 0x01 }, /* Skip every second frame */
+ { 2, 5, 0x0b }, /* Keep frames 1 and 4 of every 5 */
+ { 1, 3, 0x03 }, /* Keep one in three frames */
+ { 1, 4, 0x07 }, /* Keep one in four frames */
+ { 1, 5, 0x0f }, /* Keep one in five frames */
+ { 1, 6, 0x1f }, /* Keep one in six frames */
+};
+
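+/*
+ * Scale a frame interval by the given skip pattern: the output interval
+ * is interval * max_ratio / keep, reduced to lowest terms. For example,
+ * a 1/30 input interval with { .keep = 2, .max_ratio = 5 } becomes
+ * 5/60 = 1/12, i.e. 12 frames per second from a 30 fps source.
+ */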
+static void csi_apply_skip_interval(const struct csi_skip_desc *skip,
+ struct v4l2_fract *interval)
+{
+ unsigned int div;
+
+ interval->numerator *= skip->max_ratio;
+ interval->denominator *= skip->keep;
+
+ /* Reduce fraction to lowest terms */
+ div = gcd(interval->numerator, interval->denominator);
+ if (div > 1) {
+ interval->numerator /= div;
+ interval->denominator /= div;
+ }
+}
+
+/*
+ * Find the skip pattern to produce the output frame interval closest to the
+ * requested one, for the given input frame interval. Updates the output frame
+ * interval to the exact value.
+ */
+static const struct csi_skip_desc *csi_find_best_skip(struct v4l2_fract *in,
+ struct v4l2_fract *out)
+{
+ const struct csi_skip_desc *skip = &csi_skip[0], *best_skip = skip;
+ u32 min_err = UINT_MAX;
+ u64 want_us;
+ int i;
+
+ /* Default to 1:1 ratio */
+ if (out->numerator == 0 || out->denominator == 0 ||
+ in->numerator == 0 || in->denominator == 0) {
+ *out = *in;
+ return best_skip;
+ }
+
+ want_us = div_u64((u64)USEC_PER_SEC * out->numerator, out->denominator);
+
+ /* Find the reduction closest to the requested time per frame */
+ for (i = 0; i < ARRAY_SIZE(csi_skip); i++, skip++) {
+ u64 tmp, err;
+
+ tmp = div_u64((u64)USEC_PER_SEC * in->numerator *
+ skip->max_ratio, in->denominator * skip->keep);
+
+ err = abs((s64)tmp - want_us);
+ if (err < min_err) {
+ min_err = err;
+ best_skip = skip;
+ }
+ }
+
+ *out = *in;
+ csi_apply_skip_interval(best_skip, out);
+
+ return best_skip;
+}
+
+/*
+ * V4L2 subdev operations.
+ */
+
+static int csi_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+
+ if (fi->pad >= CSI_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ fi->interval = priv->frame_interval[fi->pad];
+
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int csi_s_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_fract *input_fi;
+ int ret = 0;
+
+ mutex_lock(&priv->lock);
+
+ input_fi = &priv->frame_interval[CSI_SINK_PAD];
+
+ switch (fi->pad) {
+ case CSI_SINK_PAD:
+ /* No limits on valid input frame intervals */
+ if (fi->interval.numerator == 0 ||
+ fi->interval.denominator == 0)
+ fi->interval = *input_fi;
+ /* Reset output intervals and frame skipping ratio to 1:1 */
+ priv->frame_interval[CSI_SRC_PAD_IDMAC] = fi->interval;
+ priv->frame_interval[CSI_SRC_PAD_DIRECT] = fi->interval;
+ priv->skip = &csi_skip[0];
+ break;
+ case CSI_SRC_PAD_IDMAC:
+ /*
+ * frame interval at IDMAC output pad depends on input
+ * interval, modified by frame skipping.
+ */
+ priv->skip = csi_find_best_skip(input_fi, &fi->interval);
+ break;
+ case CSI_SRC_PAD_DIRECT:
+ /*
+ * frame interval at DIRECT output pad is same as input
+ * interval.
+ */
+ fi->interval = *input_fi;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ priv->frame_interval[fi->pad] = fi->interval;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int csi_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ mutex_lock(&priv->lock);
+
+ if (!priv->src_sd || !priv->sink) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ /*
+ * enable/disable streaming only if stream_count is
+ * going from 0 to 1 / 1 to 0.
+ */
+ if (priv->stream_count != !enable)
+ goto update_count;
+
+ if (enable) {
+ dev_dbg(priv->dev, "stream ON\n");
+ ret = csi_start(priv);
+ if (ret)
+ goto out;
+ } else {
+ dev_dbg(priv->dev, "stream OFF\n");
+ csi_stop(priv);
+ }
+
+update_count:
+ priv->stream_count += enable ? 1 : -1;
+ if (priv->stream_count < 0)
+ priv->stream_count = 0;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int csi_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_subdev *remote_sd;
+ int ret = 0;
+
+ dev_dbg(priv->dev, "link setup %s -> %s\n", remote->entity->name,
+ local->entity->name);
+
+ mutex_lock(&priv->lock);
+
+ if (local->flags & MEDIA_PAD_FL_SINK) {
+ if (!is_media_entity_v4l2_subdev(remote->entity)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ remote_sd = media_entity_to_v4l2_subdev(remote->entity);
+
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (priv->src_sd) {
+ ret = -EBUSY;
+ goto out;
+ }
+ priv->src_sd = remote_sd;
+ } else {
+ priv->src_sd = NULL;
+ }
+
+ goto out;
+ }
+
+ /* this is a source pad */
+
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (priv->sink) {
+ ret = -EBUSY;
+ goto out;
+ }
+ } else {
+ v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
+ v4l2_ctrl_handler_init(&priv->ctrl_hdlr, 0);
+ priv->sink = NULL;
+ /* do not apply IC burst alignment in csi_try_crop */
+ priv->active_output_pad = CSI_SRC_PAD_IDMAC;
+ goto out;
+ }
+
+ /* record which output pad is now active */
+ priv->active_output_pad = local->index;
+
+ /* set CSI destination */
+ if (local->index == CSI_SRC_PAD_IDMAC) {
+ if (!is_media_entity_v4l2_video_device(remote->entity)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (priv->fim) {
+ ret = imx_media_fim_add_controls(priv->fim);
+ if (ret)
+ goto out;
+ }
+
+ priv->dest = IPU_CSI_DEST_IDMAC;
+ } else {
+ if (!is_media_entity_v4l2_subdev(remote->entity)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ remote_sd = media_entity_to_v4l2_subdev(remote->entity);
+ switch (remote_sd->grp_id) {
+ case IMX_MEDIA_GRP_ID_IPU_VDIC:
+ priv->dest = IPU_CSI_DEST_VDIC;
+ break;
+ case IMX_MEDIA_GRP_ID_IPU_IC_PRP:
+ priv->dest = IPU_CSI_DEST_IC;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ priv->sink = remote->entity;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int csi_link_validate(struct v4l2_subdev *sd,
+ struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_fwnode_endpoint upstream_ep = { .bus_type = 0 };
+ bool is_csi2;
+ int ret;
+
+ ret = v4l2_subdev_link_validate_default(sd, link,
+ source_fmt, sink_fmt);
+ if (ret)
+ return ret;
+
+ ret = csi_get_upstream_endpoint(priv, &upstream_ep);
+ if (ret) {
+ v4l2_err(&priv->sd, "failed to find upstream endpoint\n");
+ return ret;
+ }
+
+ mutex_lock(&priv->lock);
+
+ priv->upstream_ep = upstream_ep;
+ is_csi2 = !is_parallel_bus(&upstream_ep);
+ if (is_csi2) {
+ /*
+ * NOTE! It seems the virtual channels from the MIPI CSI-2
+ * receiver are used only for routing by the video muxes,
+ * or for hard-wired routing to the CSIs. Once the stream
+ * enters the CSIs, however, it is treated internally in
+ * the IPU as virtual channel 0.
+ */
+ ipu_csi_set_mipi_datatype(priv->csi, 0,
+ &priv->format_mbus[CSI_SINK_PAD]);
+ }
+
+ /* select either parallel or MIPI-CSI2 as input to CSI */
+ ipu_set_csi_src_mux(priv->ipu, priv->csi_id, is_csi2);
+
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static struct v4l2_mbus_framefmt *
+__csi_get_fmt(struct csi_priv *priv, struct v4l2_subdev_state *sd_state,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(&priv->sd, sd_state, pad);
+ else
+ return &priv->format_mbus[pad];
+}
+
+static struct v4l2_rect *
+__csi_get_crop(struct csi_priv *priv, struct v4l2_subdev_state *sd_state,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_crop(&priv->sd, sd_state,
+ CSI_SINK_PAD);
+ else
+ return &priv->crop;
+}
+
+static struct v4l2_rect *
+__csi_get_compose(struct csi_priv *priv, struct v4l2_subdev_state *sd_state,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_compose(&priv->sd, sd_state,
+ CSI_SINK_PAD);
+ else
+ return &priv->compose;
+}
+
+static void csi_try_crop(struct csi_priv *priv,
+ struct v4l2_rect *crop,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_mbus_framefmt *infmt,
+ struct v4l2_fwnode_endpoint *upstream_ep)
+{
+ u32 in_height;
+
+ crop->width = min_t(__u32, infmt->width, crop->width);
+ if (crop->left + crop->width > infmt->width)
+ crop->left = infmt->width - crop->width;
+ /* adjust crop left/width to h/w alignment restrictions */
+ crop->left &= ~0x3;
+ if (priv->active_output_pad == CSI_SRC_PAD_DIRECT)
+ crop->width &= ~0x7; /* multiple of 8 pixels (IC burst) */
+ else
+ crop->width &= ~0x1; /* multiple of 2 pixels */
+
+ in_height = infmt->height;
+ if (infmt->field == V4L2_FIELD_ALTERNATE)
+ in_height *= 2;
+
+ /*
+ * FIXME: not sure why yet, but on interlaced bt.656,
+ * changing the vertical cropping causes loss of vertical
+ * sync, so fix it to NTSC/PAL active lines. NTSC contains
+ * 2 extra lines of active video that need to be cropped.
+ */
+ if (upstream_ep->bus_type == V4L2_MBUS_BT656 &&
+ (V4L2_FIELD_HAS_BOTH(infmt->field) ||
+ infmt->field == V4L2_FIELD_ALTERNATE)) {
+ crop->height = in_height;
+ crop->top = (in_height == 480) ? 2 : 0;
+ } else {
+ crop->height = min_t(__u32, in_height, crop->height);
+ if (crop->top + crop->height > in_height)
+ crop->top = in_height - crop->height;
+ }
+}
+
+static int csi_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_fwnode_endpoint upstream_ep = { .bus_type = 0 };
+ const struct imx_media_pixfmt *incc;
+ struct v4l2_mbus_framefmt *infmt;
+ int ret = 0;
+
+ mutex_lock(&priv->lock);
+
+ infmt = __csi_get_fmt(priv, sd_state, CSI_SINK_PAD, code->which);
+ incc = imx_media_find_mbus_format(infmt->code, PIXFMT_SEL_ANY);
+
+ switch (code->pad) {
+ case CSI_SINK_PAD:
+ ret = imx_media_enum_mbus_formats(&code->code, code->index,
+ PIXFMT_SEL_ANY);
+ break;
+ case CSI_SRC_PAD_DIRECT:
+ case CSI_SRC_PAD_IDMAC:
+ ret = csi_get_upstream_endpoint(priv, &upstream_ep);
+ if (ret) {
+ v4l2_err(&priv->sd, "failed to find upstream endpoint\n");
+ goto out;
+ }
+
+ if (requires_passthrough(&upstream_ep, infmt, incc)) {
+ if (code->index != 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+ code->code = infmt->code;
+ } else {
+ enum imx_pixfmt_sel fmt_sel =
+ (incc->cs == IPUV3_COLORSPACE_YUV) ?
+ PIXFMT_SEL_YUV : PIXFMT_SEL_RGB;
+
+ ret = imx_media_enum_ipu_formats(&code->code,
+ code->index,
+ fmt_sel);
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int csi_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_rect *crop;
+ int ret = 0;
+
+ if (fse->pad >= CSI_NUM_PADS ||
+ fse->index > (fse->pad == CSI_SINK_PAD ? 0 : 3))
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ if (fse->pad == CSI_SINK_PAD) {
+ fse->min_width = MIN_W;
+ fse->max_width = MAX_W;
+ fse->min_height = MIN_H;
+ fse->max_height = MAX_H;
+ } else {
+ crop = __csi_get_crop(priv, sd_state, fse->which);
+
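+ /*
+ * A source pad produces the crop rectangle, optionally
+ * downscaled by two in either direction: index bit 0
+ * selects half width, bit 1 selects half height.
+ */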
+ fse->min_width = fse->index & 1 ?
+ crop->width / 2 : crop->width;
+ fse->max_width = fse->min_width;
+ fse->min_height = fse->index & 2 ?
+ crop->height / 2 : crop->height;
+ fse->max_height = fse->min_height;
+ }
+
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int csi_enum_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval_enum *fie)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_fract *input_fi;
+ struct v4l2_rect *crop;
+ int ret = 0;
+
+ if (fie->pad >= CSI_NUM_PADS ||
+ fie->index >= (fie->pad != CSI_SRC_PAD_IDMAC ?
+ 1 : ARRAY_SIZE(csi_skip)))
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ input_fi = &priv->frame_interval[CSI_SINK_PAD];
+ crop = __csi_get_crop(priv, sd_state, fie->which);
+
+ if ((fie->width != crop->width && fie->width != crop->width / 2) ||
+ (fie->height != crop->height && fie->height != crop->height / 2)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ fie->interval = *input_fi;
+
+ if (fie->pad == CSI_SRC_PAD_IDMAC)
+ csi_apply_skip_interval(&csi_skip[fie->index],
+ &fie->interval);
+
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int csi_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *fmt;
+ int ret = 0;
+
+ if (sdformat->pad >= CSI_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ fmt = __csi_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
+ if (!fmt) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ sdformat->format = *fmt;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static void csi_try_field(struct csi_priv *priv,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct v4l2_mbus_framefmt *infmt =
+ __csi_get_fmt(priv, sd_state, CSI_SINK_PAD, sdformat->which);
+
+ /*
+ * No restrictions on the sink pad field type, except that it
+ * must be initialized.
+ */
+ if (sdformat->pad == CSI_SINK_PAD) {
+ if (sdformat->format.field == V4L2_FIELD_ANY)
+ sdformat->format.field = V4L2_FIELD_NONE;
+ return;
+ }
+
+ switch (infmt->field) {
+ case V4L2_FIELD_SEQ_TB:
+ case V4L2_FIELD_SEQ_BT:
+ /*
+ * If the user requests sequential at the source pad,
+ * allow it (along with possibly inverting field order).
+ * Otherwise passthrough the field type.
+ */
+ if (!V4L2_FIELD_IS_SEQUENTIAL(sdformat->format.field))
+ sdformat->format.field = infmt->field;
+ break;
+ case V4L2_FIELD_ALTERNATE:
+ /*
+ * This driver does not support alternate field mode, and
+ * the CSI captures a whole frame, so the CSI never presents
+ * alternate mode at its source pads. If user has not
+ * already requested sequential, translate ALTERNATE at
+ * sink pad to SEQ_TB or SEQ_BT at the source pad depending
+ * on input height (assume NTSC BT order if 480 total active
+ * frame lines, otherwise PAL TB order).
+ */
+ if (!V4L2_FIELD_IS_SEQUENTIAL(sdformat->format.field))
+ sdformat->format.field = (infmt->height == 480 / 2) ?
+ V4L2_FIELD_SEQ_BT : V4L2_FIELD_SEQ_TB;
+ break;
+ default:
+ /* Passthrough for all other input field types */
+ sdformat->format.field = infmt->field;
+ break;
+ }
+}
+
+static void csi_try_fmt(struct csi_priv *priv,
+ struct v4l2_fwnode_endpoint *upstream_ep,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *sdformat,
+ struct v4l2_rect *crop,
+ struct v4l2_rect *compose,
+ const struct imx_media_pixfmt **cc)
+{
+ const struct imx_media_pixfmt *incc;
+ struct v4l2_mbus_framefmt *infmt;
+ u32 code;
+
+ infmt = __csi_get_fmt(priv, sd_state, CSI_SINK_PAD, sdformat->which);
+
+ switch (sdformat->pad) {
+ case CSI_SRC_PAD_DIRECT:
+ case CSI_SRC_PAD_IDMAC:
+ incc = imx_media_find_mbus_format(infmt->code, PIXFMT_SEL_ANY);
+
+ sdformat->format.width = compose->width;
+ sdformat->format.height = compose->height;
+
+ if (requires_passthrough(upstream_ep, infmt, incc)) {
+ sdformat->format.code = infmt->code;
+ *cc = incc;
+ } else {
+ enum imx_pixfmt_sel fmt_sel =
+ (incc->cs == IPUV3_COLORSPACE_YUV) ?
+ PIXFMT_SEL_YUV : PIXFMT_SEL_RGB;
+
+ *cc = imx_media_find_ipu_format(sdformat->format.code,
+ fmt_sel);
+ if (!*cc) {
+ imx_media_enum_ipu_formats(&code, 0, fmt_sel);
+ *cc = imx_media_find_ipu_format(code, fmt_sel);
+ sdformat->format.code = (*cc)->codes[0];
+ }
+ }
+
+ csi_try_field(priv, sd_state, sdformat);
+
+ /* propagate colorimetry from sink */
+ sdformat->format.colorspace = infmt->colorspace;
+ sdformat->format.xfer_func = infmt->xfer_func;
+ sdformat->format.quantization = infmt->quantization;
+ sdformat->format.ycbcr_enc = infmt->ycbcr_enc;
+
+ break;
+ case CSI_SINK_PAD:
+ v4l_bound_align_image(&sdformat->format.width, MIN_W, MAX_W,
+ W_ALIGN, &sdformat->format.height,
+ MIN_H, MAX_H, H_ALIGN, S_ALIGN);
+
+ *cc = imx_media_find_mbus_format(sdformat->format.code,
+ PIXFMT_SEL_ANY);
+ if (!*cc) {
+ imx_media_enum_mbus_formats(&code, 0,
+ PIXFMT_SEL_YUV_RGB);
+ *cc = imx_media_find_mbus_format(code,
+ PIXFMT_SEL_YUV_RGB);
+ sdformat->format.code = (*cc)->codes[0];
+ }
+
+ csi_try_field(priv, sd_state, sdformat);
+
+ /* Reset crop and compose rectangles */
+ crop->left = 0;
+ crop->top = 0;
+ crop->width = sdformat->format.width;
+ crop->height = sdformat->format.height;
+ if (sdformat->format.field == V4L2_FIELD_ALTERNATE)
+ crop->height *= 2;
+ csi_try_crop(priv, crop, sd_state, &sdformat->format,
+ upstream_ep);
+ compose->left = 0;
+ compose->top = 0;
+ compose->width = crop->width;
+ compose->height = crop->height;
+
+ break;
+ }
+
+ imx_media_try_colorimetry(&sdformat->format,
+ priv->active_output_pad == CSI_SRC_PAD_DIRECT);
+}
+
+static int csi_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_fwnode_endpoint upstream_ep = { .bus_type = 0 };
+ const struct imx_media_pixfmt *cc;
+ struct v4l2_mbus_framefmt *fmt;
+ struct v4l2_rect *crop, *compose;
+ int ret;
+
+ if (sdformat->pad >= CSI_NUM_PADS)
+ return -EINVAL;
+
+ ret = csi_get_upstream_endpoint(priv, &upstream_ep);
+ if (ret) {
+ v4l2_err(&priv->sd, "failed to find upstream endpoint\n");
+ return ret;
+ }
+
+ mutex_lock(&priv->lock);
+
+ if (priv->stream_count > 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ crop = __csi_get_crop(priv, sd_state, sdformat->which);
+ compose = __csi_get_compose(priv, sd_state, sdformat->which);
+
+ csi_try_fmt(priv, &upstream_ep, sd_state, sdformat, crop, compose,
+ &cc);
+
+ fmt = __csi_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
+ *fmt = sdformat->format;
+
+ if (sdformat->pad == CSI_SINK_PAD) {
+ int pad;
+
+ /* propagate format to source pads */
+ for (pad = CSI_SINK_PAD + 1; pad < CSI_NUM_PADS; pad++) {
+ const struct imx_media_pixfmt *outcc;
+ struct v4l2_mbus_framefmt *outfmt;
+ struct v4l2_subdev_format format;
+
+ format.pad = pad;
+ format.which = sdformat->which;
+ format.format = sdformat->format;
+ csi_try_fmt(priv, &upstream_ep, sd_state, &format,
+ NULL, compose, &outcc);
+
+ outfmt = __csi_get_fmt(priv, sd_state, pad,
+ sdformat->which);
+ *outfmt = format.format;
+
+ if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ priv->cc[pad] = outcc;
+ }
+ }
+
+ if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ priv->cc[sdformat->pad] = cc;
+
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int csi_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *infmt;
+ struct v4l2_rect *crop, *compose;
+ int ret = 0;
+
+ if (sel->pad != CSI_SINK_PAD)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ infmt = __csi_get_fmt(priv, sd_state, CSI_SINK_PAD, sel->which);
+ crop = __csi_get_crop(priv, sd_state, sel->which);
+ compose = __csi_get_compose(priv, sd_state, sel->which);
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = infmt->width;
+ sel->r.height = infmt->height;
+ if (infmt->field == V4L2_FIELD_ALTERNATE)
+ sel->r.height *= 2;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *crop;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = crop->width;
+ sel->r.height = crop->height;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ sel->r = *compose;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
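+/*
+ * Round a requested compose dimension to either the crop dimension (no
+ * scaling) or half of it (2:1 downsizing, the only scaling the CSI
+ * supports here), honoring the LE/GE selection flags. With both flags
+ * set, the request must already equal one of the two.
+ */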
+static int csi_set_scale(u32 *compose, u32 crop, u32 flags)
+{
+ if ((flags & (V4L2_SEL_FLAG_LE | V4L2_SEL_FLAG_GE)) ==
+ (V4L2_SEL_FLAG_LE | V4L2_SEL_FLAG_GE) &&
+ *compose != crop && *compose != crop / 2)
+ return -ERANGE;
+
+ if (*compose <= crop / 2 ||
+ (*compose < crop * 3 / 4 && !(flags & V4L2_SEL_FLAG_GE)) ||
+ (*compose < crop && (flags & V4L2_SEL_FLAG_LE)))
+ *compose = crop / 2;
+ else
+ *compose = crop;
+
+ return 0;
+}
+
+static int csi_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_fwnode_endpoint upstream_ep = { .bus_type = 0 };
+ struct v4l2_mbus_framefmt *infmt;
+ struct v4l2_rect *crop, *compose;
+ int pad, ret;
+
+ if (sel->pad != CSI_SINK_PAD)
+ return -EINVAL;
+
+ ret = csi_get_upstream_endpoint(priv, &upstream_ep);
+ if (ret) {
+ v4l2_err(&priv->sd, "failed to find upstream endpoint\n");
+ return ret;
+ }
+
+ mutex_lock(&priv->lock);
+
+ if (priv->stream_count > 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ infmt = __csi_get_fmt(priv, sd_state, CSI_SINK_PAD, sel->which);
+ crop = __csi_get_crop(priv, sd_state, sel->which);
+ compose = __csi_get_compose(priv, sd_state, sel->which);
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ /*
+ * Modifying the crop rectangle always changes the format on
+ * the source pads. If the KEEP_CONFIG flag is set, just return
+ * the current crop rectangle.
+ */
+ if (sel->flags & V4L2_SEL_FLAG_KEEP_CONFIG) {
+ sel->r = priv->crop;
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY)
+ *crop = sel->r;
+ goto out;
+ }
+
+ csi_try_crop(priv, &sel->r, sd_state, infmt, &upstream_ep);
+
+ *crop = sel->r;
+
+ /* Reset scaling to 1:1 */
+ compose->width = crop->width;
+ compose->height = crop->height;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ /*
+ * Modifying the compose rectangle always changes the format on
+ * the source pads. If the KEEP_CONFIG flag is set, just return
+ * the current compose rectangle.
+ */
+ if (sel->flags & V4L2_SEL_FLAG_KEEP_CONFIG) {
+ sel->r = priv->compose;
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY)
+ *compose = sel->r;
+ goto out;
+ }
+
+ sel->r.left = 0;
+ sel->r.top = 0;
+ ret = csi_set_scale(&sel->r.width, crop->width, sel->flags);
+ if (ret)
+ goto out;
+ ret = csi_set_scale(&sel->r.height, crop->height, sel->flags);
+ if (ret)
+ goto out;
+
+ *compose = sel->r;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Reset source pads to sink compose rectangle */
+ for (pad = CSI_SINK_PAD + 1; pad < CSI_NUM_PADS; pad++) {
+ struct v4l2_mbus_framefmt *outfmt;
+
+ outfmt = __csi_get_fmt(priv, sd_state, pad, sel->which);
+ outfmt->width = compose->width;
+ outfmt->height = compose->height;
+ }
+
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int csi_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ if (sub->type != V4L2_EVENT_IMX_FRAME_INTERVAL_ERROR)
+ return -EINVAL;
+ if (sub->id != 0)
+ return -EINVAL;
+
+ return v4l2_event_subscribe(fh, sub, 0, NULL);
+}
+
+static int csi_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ return v4l2_event_unsubscribe(fh, sub);
+}
+
+static int csi_registered(struct v4l2_subdev *sd)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ struct ipu_csi *csi;
+ int i, ret;
+ u32 code;
+
+ /* get handle to IPU CSI */
+ csi = ipu_csi_get(priv->ipu, priv->csi_id);
+ if (IS_ERR(csi)) {
+ v4l2_err(&priv->sd, "failed to get CSI%d\n", priv->csi_id);
+ return PTR_ERR(csi);
+ }
+ priv->csi = csi;
+
+ for (i = 0; i < CSI_NUM_PADS; i++) {
+ code = 0;
+ if (i != CSI_SINK_PAD)
+ imx_media_enum_ipu_formats(&code, 0, PIXFMT_SEL_YUV);
+
+ /* set a default mbus format */
+ ret = imx_media_init_mbus_fmt(&priv->format_mbus[i],
+ IMX_MEDIA_DEF_PIX_WIDTH,
+ IMX_MEDIA_DEF_PIX_HEIGHT, code,
+ V4L2_FIELD_NONE, &priv->cc[i]);
+ if (ret)
+ goto put_csi;
+
+ /* init default frame interval */
+ priv->frame_interval[i].numerator = 1;
+ priv->frame_interval[i].denominator = 30;
+ }
+
+ /* disable frame skipping */
+ priv->skip = &csi_skip[0];
+
+ /* init default crop and compose rectangle sizes */
+ priv->crop.width = IMX_MEDIA_DEF_PIX_WIDTH;
+ priv->crop.height = IMX_MEDIA_DEF_PIX_HEIGHT;
+ priv->compose.width = IMX_MEDIA_DEF_PIX_WIDTH;
+ priv->compose.height = IMX_MEDIA_DEF_PIX_HEIGHT;
+
+ priv->fim = imx_media_fim_init(&priv->sd);
+ if (IS_ERR(priv->fim)) {
+ ret = PTR_ERR(priv->fim);
+ goto put_csi;
+ }
+
+ priv->vdev = imx_media_capture_device_init(priv->sd.dev, &priv->sd,
+ CSI_SRC_PAD_IDMAC, true);
+ if (IS_ERR(priv->vdev)) {
+ ret = PTR_ERR(priv->vdev);
+ goto free_fim;
+ }
+
+ ret = imx_media_capture_device_register(priv->vdev, 0);
+ if (ret)
+ goto remove_vdev;
+
+ return 0;
+
+remove_vdev:
+ imx_media_capture_device_remove(priv->vdev);
+free_fim:
+ if (priv->fim)
+ imx_media_fim_free(priv->fim);
+put_csi:
+ ipu_csi_put(priv->csi);
+ return ret;
+}
+
+static void csi_unregistered(struct v4l2_subdev *sd)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+
+ imx_media_capture_device_unregister(priv->vdev);
+ imx_media_capture_device_remove(priv->vdev);
+
+ if (priv->fim)
+ imx_media_fim_free(priv->fim);
+
+ if (priv->csi)
+ ipu_csi_put(priv->csi);
+}
+
+/*
+ * The CSI has only one fwnode endpoint, at the sink pad. Verify the
+ * endpoint belongs to us, and return CSI_SINK_PAD.
+ */
+static int csi_get_fwnode_pad(struct media_entity *entity,
+ struct fwnode_endpoint *endpoint)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ struct fwnode_handle *csi_port = dev_fwnode(priv->dev);
+ struct fwnode_handle *csi_ep;
+ int ret;
+
+ csi_ep = fwnode_get_next_child_node(csi_port, NULL);
+
+ ret = endpoint->local_fwnode == csi_ep ? CSI_SINK_PAD : -ENXIO;
+
+ fwnode_handle_put(csi_ep);
+
+ return ret;
+}
+
+static const struct media_entity_operations csi_entity_ops = {
+ .link_setup = csi_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+ .get_fwnode_pad = csi_get_fwnode_pad,
+};
+
+static const struct v4l2_subdev_core_ops csi_core_ops = {
+ .subscribe_event = csi_subscribe_event,
+ .unsubscribe_event = csi_unsubscribe_event,
+};
+
+static const struct v4l2_subdev_video_ops csi_video_ops = {
+ .g_frame_interval = csi_g_frame_interval,
+ .s_frame_interval = csi_s_frame_interval,
+ .s_stream = csi_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops csi_pad_ops = {
+ .init_cfg = imx_media_init_cfg,
+ .enum_mbus_code = csi_enum_mbus_code,
+ .enum_frame_size = csi_enum_frame_size,
+ .enum_frame_interval = csi_enum_frame_interval,
+ .get_fmt = csi_get_fmt,
+ .set_fmt = csi_set_fmt,
+ .get_selection = csi_get_selection,
+ .set_selection = csi_set_selection,
+ .link_validate = csi_link_validate,
+};
+
+static const struct v4l2_subdev_ops csi_subdev_ops = {
+ .core = &csi_core_ops,
+ .video = &csi_video_ops,
+ .pad = &csi_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops csi_internal_ops = {
+ .registered = csi_registered,
+ .unregistered = csi_unregistered,
+};
+
+static int imx_csi_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
+{
+ struct csi_priv *priv = notifier_to_dev(notifier);
+ struct media_pad *sink = &priv->sd.entity.pads[CSI_SINK_PAD];
+
+ /*
+ * If the subdev is a video mux, it must be one of the CSI
+ * muxes. Mark it as such via its group id.
+ */
+ if (sd->entity.function == MEDIA_ENT_F_VID_MUX)
+ sd->grp_id = IMX_MEDIA_GRP_ID_CSI_MUX;
+
+ return v4l2_create_fwnode_links_to_pad(sd, sink, 0);
+}
+
+static const struct v4l2_async_notifier_operations csi_notify_ops = {
+ .bound = imx_csi_notify_bound,
+};
+
+static int imx_csi_async_register(struct csi_priv *priv)
+{
+ struct v4l2_async_subdev *asd = NULL;
+ struct fwnode_handle *ep;
+ unsigned int port;
+ int ret;
+
+ v4l2_async_nf_init(&priv->notifier);
+
+ /* get this CSI's port id */
+ ret = fwnode_property_read_u32(dev_fwnode(priv->dev), "reg", &port);
+ if (ret < 0)
+ return ret;
+
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(priv->dev->parent),
+ port, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
+ if (ep) {
+ asd = v4l2_async_nf_add_fwnode_remote(&priv->notifier, ep,
+ struct v4l2_async_subdev);
+
+ fwnode_handle_put(ep);
+
+ if (IS_ERR(asd)) {
+ ret = PTR_ERR(asd);
+ /* OK if asd already exists */
+ if (ret != -EEXIST)
+ return ret;
+ }
+ }
+
+ priv->notifier.ops = &csi_notify_ops;
+
+ ret = v4l2_async_subdev_nf_register(&priv->sd, &priv->notifier);
+ if (ret)
+ return ret;
+
+ return v4l2_async_register_subdev(&priv->sd);
+}
+
+static int imx_csi_probe(struct platform_device *pdev)
+{
+ struct ipu_client_platformdata *pdata;
+ struct pinctrl *pinctrl;
+ struct csi_priv *priv;
+ int i, ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, &priv->sd);
+ priv->dev = &pdev->dev;
+
+ ret = dma_set_coherent_mask(priv->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ /* get parent IPU */
+ priv->ipu = dev_get_drvdata(priv->dev->parent);
+
+ /* get our CSI id */
+ pdata = priv->dev->platform_data;
+ priv->csi_id = pdata->csi;
+ priv->smfc_id = (priv->csi_id == 0) ? 0 : 2;
+
+ priv->active_output_pad = CSI_SRC_PAD_IDMAC;
+
+ timer_setup(&priv->eof_timeout_timer, csi_idmac_eof_timeout, 0);
+ spin_lock_init(&priv->irqlock);
+
+ v4l2_subdev_init(&priv->sd, &csi_subdev_ops);
+ v4l2_set_subdevdata(&priv->sd, priv);
+ priv->sd.internal_ops = &csi_internal_ops;
+ priv->sd.entity.ops = &csi_entity_ops;
+ priv->sd.entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
+ priv->sd.dev = &pdev->dev;
+ priv->sd.fwnode = of_fwnode_handle(pdata->of_node);
+ priv->sd.owner = THIS_MODULE;
+ priv->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+ priv->sd.grp_id = priv->csi_id ?
+ IMX_MEDIA_GRP_ID_IPU_CSI1 : IMX_MEDIA_GRP_ID_IPU_CSI0;
+ imx_media_grp_id_to_sd_name(priv->sd.name, sizeof(priv->sd.name),
+ priv->sd.grp_id, ipu_get_num(priv->ipu));
+
+ for (i = 0; i < CSI_NUM_PADS; i++)
+ priv->pad[i].flags = (i == CSI_SINK_PAD) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&priv->sd.entity, CSI_NUM_PADS,
+ priv->pad);
+ if (ret)
+ return ret;
+
+ mutex_init(&priv->lock);
+
+ v4l2_ctrl_handler_init(&priv->ctrl_hdlr, 0);
+ priv->sd.ctrl_handler = &priv->ctrl_hdlr;
+
+ /*
+ * The IPUv3 driver did not assign an of_node to this
+ * device. As a result, pinctrl does not automatically
+ * configure our pin groups, so we need to do that manually
+ * here, after setting this device's of_node.
+ */
+ priv->dev->of_node = pdata->of_node;
+ pinctrl = devm_pinctrl_get_select_default(priv->dev);
+ if (IS_ERR(pinctrl)) {
+ ret = PTR_ERR(pinctrl);
+ dev_dbg(priv->dev,
+ "devm_pinctrl_get_select_default() failed: %d\n", ret);
+ if (ret != -ENODEV)
+ goto free;
+ }
+
+ ret = imx_csi_async_register(priv);
+ if (ret)
+ goto cleanup;
+
+ return 0;
+
+cleanup:
+ v4l2_async_nf_unregister(&priv->notifier);
+ v4l2_async_nf_cleanup(&priv->notifier);
+free:
+ v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
+ mutex_destroy(&priv->lock);
+ return ret;
+}
+
+static int imx_csi_remove(struct platform_device *pdev)
+{
+ struct v4l2_subdev *sd = platform_get_drvdata(pdev);
+ struct csi_priv *priv = sd_to_dev(sd);
+
+ v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
+ mutex_destroy(&priv->lock);
+ v4l2_async_nf_unregister(&priv->notifier);
+ v4l2_async_nf_cleanup(&priv->notifier);
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+
+ return 0;
+}
+
+static const struct platform_device_id imx_csi_ids[] = {
+ { .name = "imx-ipuv3-csi" },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, imx_csi_ids);
+
+static struct platform_driver imx_csi_driver = {
+ .probe = imx_csi_probe,
+ .remove = imx_csi_remove,
+ .id_table = imx_csi_ids,
+ .driver = {
+ .name = "imx-ipuv3-csi",
+ },
+};
+module_platform_driver(imx_csi_driver);
+
+MODULE_DESCRIPTION("i.MX CSI subdev driver");
+MODULE_AUTHOR("Steve Longerbeam <steve_longerbeam@mentor.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/imx/imx-media-dev-common.c b/drivers/staging/media/imx/imx-media-dev-common.c
new file mode 100644
index 000000000..e6d6ed3b1
--- /dev/null
+++ b/drivers/staging/media/imx/imx-media-dev-common.c
@@ -0,0 +1,417 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * V4L2 Media Controller Driver for Freescale common i.MX5/6/7 SOC
+ *
+ * Copyright (c) 2019 Linaro Ltd
+ * Copyright (c) 2016 Mentor Graphics Inc.
+ */
+
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
+#include "imx-media.h"
+
+static inline struct imx_media_dev *notifier2dev(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct imx_media_dev, notifier);
+}
+
+/* async subdev bound notifier */
+static int imx_media_subdev_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
+{
+ struct imx_media_dev *imxmd = notifier2dev(notifier);
+
+ dev_dbg(imxmd->md.dev, "subdev %s bound\n", sd->name);
+
+ return 0;
+}
+
+/*
+ * Create the missing media links from the CSI-2 receiver.
+ * Called after all async subdevs have bound.
+ */
+static void imx_media_create_csi2_links(struct imx_media_dev *imxmd)
+{
+ struct v4l2_subdev *sd, *csi2 = NULL;
+
+ list_for_each_entry(sd, &imxmd->v4l2_dev.subdevs, list) {
+ if (sd->grp_id == IMX_MEDIA_GRP_ID_CSI2) {
+ csi2 = sd;
+ break;
+ }
+ }
+ if (!csi2)
+ return;
+
+ list_for_each_entry(sd, &imxmd->v4l2_dev.subdevs, list) {
+ /* skip if not a CSI or a CSI mux */
+ if (!(sd->grp_id & IMX_MEDIA_GRP_ID_IPU_CSI) &&
+ !(sd->grp_id & IMX_MEDIA_GRP_ID_CSI) &&
+ !(sd->grp_id & IMX_MEDIA_GRP_ID_CSI_MUX))
+ continue;
+
+ v4l2_create_fwnode_links(csi2, sd);
+ }
+}
+
+/*
+ * Adds the given video device to the given imx-media source pad's vdev
+ * list, then continues upstream through the pad entity's sink pads.
+ */
+static int imx_media_add_vdev_to_pad(struct imx_media_dev *imxmd,
+ struct imx_media_video_dev *vdev,
+ struct media_pad *srcpad)
+{
+ struct media_entity *entity = srcpad->entity;
+ struct imx_media_pad_vdev *pad_vdev;
+ struct list_head *pad_vdev_list;
+ struct media_link *link;
+ struct v4l2_subdev *sd;
+ int i, ret;
+
+ /* skip this entity if not a v4l2_subdev */
+ if (!is_media_entity_v4l2_subdev(entity))
+ return 0;
+
+ sd = media_entity_to_v4l2_subdev(entity);
+
+ pad_vdev_list = to_pad_vdev_list(sd, srcpad->index);
+ if (!pad_vdev_list) {
+ v4l2_warn(&imxmd->v4l2_dev, "%s:%u has no vdev list!\n",
+ entity->name, srcpad->index);
+ /*
+ * shouldn't happen, but no reason to fail driver load,
+ * just skip this entity.
+ */
+ return 0;
+ }
+
+ /* just return if we've been here before */
+ list_for_each_entry(pad_vdev, pad_vdev_list, list) {
+ if (pad_vdev->vdev == vdev)
+ return 0;
+ }
+
+ dev_dbg(imxmd->md.dev, "adding %s to pad %s:%u\n",
+ vdev->vfd->entity.name, entity->name, srcpad->index);
+
+ pad_vdev = devm_kzalloc(imxmd->md.dev, sizeof(*pad_vdev), GFP_KERNEL);
+ if (!pad_vdev)
+ return -ENOMEM;
+
+ /* attach this vdev to this pad */
+ pad_vdev->vdev = vdev;
+ list_add_tail(&pad_vdev->list, pad_vdev_list);
+
+ /* move upstream from this entity's sink pads */
+ for (i = 0; i < entity->num_pads; i++) {
+ struct media_pad *pad = &entity->pads[i];
+
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ continue;
+
+ list_for_each_entry(link, &entity->links, list) {
+ if (link->sink != pad)
+ continue;
+ ret = imx_media_add_vdev_to_pad(imxmd, vdev,
+ link->source);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * For every subdevice, allocate an array of list_head's, one list_head
+ * for each pad, to hold the list of video devices reachable from that
+ * pad.
+ */
+static int imx_media_alloc_pad_vdev_lists(struct imx_media_dev *imxmd)
+{
+ struct list_head *vdev_lists;
+ struct media_entity *entity;
+ struct v4l2_subdev *sd;
+ int i;
+
+ list_for_each_entry(sd, &imxmd->v4l2_dev.subdevs, list) {
+ entity = &sd->entity;
+ vdev_lists = devm_kcalloc(imxmd->md.dev,
+ entity->num_pads, sizeof(*vdev_lists),
+ GFP_KERNEL);
+ if (!vdev_lists)
+ return -ENOMEM;
+
+ /* attach to the subdev's host private pointer */
+ sd->host_priv = vdev_lists;
+
+ for (i = 0; i < entity->num_pads; i++)
+ INIT_LIST_HEAD(to_pad_vdev_list(sd, i));
+ }
+
+ return 0;
+}
+
+/* form the vdev lists in all imx-media source pads */
+static int imx_media_create_pad_vdev_lists(struct imx_media_dev *imxmd)
+{
+ struct imx_media_video_dev *vdev;
+ struct media_link *link;
+ int ret;
+
+ ret = imx_media_alloc_pad_vdev_lists(imxmd);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(vdev, &imxmd->vdev_list, list) {
+ link = list_first_entry(&vdev->vfd->entity.links,
+ struct media_link, list);
+ ret = imx_media_add_vdev_to_pad(imxmd, vdev, link->source);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* async subdev complete notifier */
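+/*
+ * Called once all async subdevs have bound: create the CSI-2 receiver
+ * links, build the per-pad video device lists, register the subdev
+ * device nodes, and finally register the media device.
+ */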
+int imx_media_probe_complete(struct v4l2_async_notifier *notifier)
+{
+ struct imx_media_dev *imxmd = notifier2dev(notifier);
+ int ret;
+
+ mutex_lock(&imxmd->mutex);
+
+ imx_media_create_csi2_links(imxmd);
+
+ ret = imx_media_create_pad_vdev_lists(imxmd);
+ if (ret)
+ goto unlock;
+
+ ret = v4l2_device_register_subdev_nodes(&imxmd->v4l2_dev);
+unlock:
+ mutex_unlock(&imxmd->mutex);
+ if (ret)
+ return ret;
+
+ return media_device_register(&imxmd->md);
+}
+EXPORT_SYMBOL_GPL(imx_media_probe_complete);
+
+/*
+ * adds controls to a video device from an entity subdevice.
+ * Continues upstream from the entity's sink pads.
+ */
+static int imx_media_inherit_controls(struct imx_media_dev *imxmd,
+ struct video_device *vfd,
+ struct media_entity *entity)
+{
+ int i, ret = 0;
+
+ if (is_media_entity_v4l2_subdev(entity)) {
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+
+ dev_dbg(imxmd->md.dev,
+ "adding controls to %s from %s\n",
+ vfd->entity.name, sd->entity.name);
+
+ ret = v4l2_ctrl_add_handler(vfd->ctrl_handler,
+ sd->ctrl_handler,
+ NULL, true);
+ if (ret)
+ return ret;
+ }
+
+ /* move upstream */
+ for (i = 0; i < entity->num_pads; i++) {
+ struct media_pad *pad, *spad = &entity->pads[i];
+
+ if (!(spad->flags & MEDIA_PAD_FL_SINK))
+ continue;
+
+ pad = media_pad_remote_pad_first(spad);
+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
+ continue;
+
+ ret = imx_media_inherit_controls(imxmd, vfd, pad->entity);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int imx_media_link_notify(struct media_link *link, u32 flags,
+ unsigned int notification)
+{
+ struct imx_media_dev *imxmd = container_of(link->graph_obj.mdev,
+ struct imx_media_dev, md);
+ struct media_entity *source = link->source->entity;
+ struct imx_media_pad_vdev *pad_vdev;
+ struct list_head *pad_vdev_list;
+ struct video_device *vfd;
+ struct v4l2_subdev *sd;
+ int pad_idx, ret;
+
+ ret = v4l2_pipeline_link_notify(link, flags, notification);
+ if (ret)
+ return ret;
+
+ /* don't bother if source is not a subdev */
+ if (!is_media_entity_v4l2_subdev(source))
+ return 0;
+
+ sd = media_entity_to_v4l2_subdev(source);
+ pad_idx = link->source->index;
+
+ pad_vdev_list = to_pad_vdev_list(sd, pad_idx);
+ if (!pad_vdev_list) {
+ /* nothing to do if source sd has no pad vdev list */
+ return 0;
+ }
+
+ /*
+ * Before disabling a link, reset controls for all video
+ * devices reachable from this link.
+ *
+ * After enabling a link, refresh controls for all video
+ * devices reachable from this link.
+ */
+ if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH &&
+ !(flags & MEDIA_LNK_FL_ENABLED)) {
+ list_for_each_entry(pad_vdev, pad_vdev_list, list) {
+ vfd = pad_vdev->vdev->vfd;
+ if (!vfd->ctrl_handler)
+ continue;
+ dev_dbg(imxmd->md.dev,
+ "reset controls for %s\n",
+ vfd->entity.name);
+ v4l2_ctrl_handler_free(vfd->ctrl_handler);
+ v4l2_ctrl_handler_init(vfd->ctrl_handler, 0);
+ }
+ } else if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
+ (link->flags & MEDIA_LNK_FL_ENABLED)) {
+ list_for_each_entry(pad_vdev, pad_vdev_list, list) {
+ vfd = pad_vdev->vdev->vfd;
+ if (!vfd->ctrl_handler)
+ continue;
+ dev_dbg(imxmd->md.dev,
+ "refresh controls for %s\n",
+ vfd->entity.name);
+ ret = imx_media_inherit_controls(imxmd, vfd,
+ &vfd->entity);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void imx_media_notify(struct v4l2_subdev *sd, unsigned int notification,
+ void *arg)
+{
+ struct media_entity *entity = &sd->entity;
+ int i;
+
+ if (notification != V4L2_DEVICE_NOTIFY_EVENT)
+ return;
+
+ for (i = 0; i < entity->num_pads; i++) {
+ struct media_pad *pad = &entity->pads[i];
+ struct imx_media_pad_vdev *pad_vdev;
+ struct list_head *pad_vdev_list;
+
+ pad_vdev_list = to_pad_vdev_list(sd, pad->index);
+ if (!pad_vdev_list)
+ continue;
+ list_for_each_entry(pad_vdev, pad_vdev_list, list)
+ v4l2_event_queue(pad_vdev->vdev->vfd, arg);
+ }
+}
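
The events queued here surface on the capture video nodes. As an illustration, a minimal user-space subscriber for the FIM error event might look like the sketch below; the device path and the uAPI header providing V4L2_EVENT_IMX_FRAME_INTERVAL_ERROR are assumptions, not taken from this patch:

#include <fcntl.h>
#include <poll.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <linux/imx-media.h>	/* assumed location of the imx-media uAPI */

int main(void)
{
	struct v4l2_event_subscription sub = {
		.type = V4L2_EVENT_IMX_FRAME_INTERVAL_ERROR,
	};
	struct v4l2_event ev;
	struct pollfd pfd;
	int fd;

	fd = open("/dev/video0", O_RDWR);	/* assumed capture node */
	if (fd < 0 || ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub))
		return 1;

	pfd.fd = fd;
	pfd.events = POLLPRI;			/* V4L2 events signal POLLPRI */
	while (poll(&pfd, 1, -1) > 0) {
		if (ioctl(fd, VIDIOC_DQEVENT, &ev) == 0) {
			/* react to the frame interval error here */
		}
	}
	return 0;
}
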
+
+static const struct v4l2_async_notifier_operations imx_media_notifier_ops = {
+ .bound = imx_media_subdev_bound,
+ .complete = imx_media_probe_complete,
+};
+
+static const struct media_device_ops imx_media_md_ops = {
+ .link_notify = imx_media_link_notify,
+};
+
+struct imx_media_dev *imx_media_dev_init(struct device *dev,
+ const struct media_device_ops *ops)
+{
+ struct imx_media_dev *imxmd;
+ int ret;
+
+ imxmd = devm_kzalloc(dev, sizeof(*imxmd), GFP_KERNEL);
+ if (!imxmd)
+ return ERR_PTR(-ENOMEM);
+
+ dev_set_drvdata(dev, imxmd);
+
+ strscpy(imxmd->md.model, "imx-media", sizeof(imxmd->md.model));
+ imxmd->md.ops = ops ? ops : &imx_media_md_ops;
+ imxmd->md.dev = dev;
+
+ mutex_init(&imxmd->mutex);
+
+ imxmd->v4l2_dev.mdev = &imxmd->md;
+ imxmd->v4l2_dev.notify = imx_media_notify;
+ strscpy(imxmd->v4l2_dev.name, "imx-media",
+ sizeof(imxmd->v4l2_dev.name));
+ snprintf(imxmd->md.bus_info, sizeof(imxmd->md.bus_info),
+ "platform:%s", dev_name(imxmd->md.dev));
+
+ media_device_init(&imxmd->md);
+
+ ret = v4l2_device_register(dev, &imxmd->v4l2_dev);
+ if (ret < 0) {
+ v4l2_err(&imxmd->v4l2_dev,
+ "Failed to register v4l2_device: %d\n", ret);
+ goto cleanup;
+ }
+
+ INIT_LIST_HEAD(&imxmd->vdev_list);
+
+ v4l2_async_nf_init(&imxmd->notifier);
+
+ return imxmd;
+
+cleanup:
+ media_device_cleanup(&imxmd->md);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(imx_media_dev_init);
+
+int imx_media_dev_notifier_register(struct imx_media_dev *imxmd,
+ const struct v4l2_async_notifier_operations *ops)
+{
+ int ret;
+
+ /* no subdevs? just bail */
+ if (list_empty(&imxmd->notifier.asd_list)) {
+ v4l2_err(&imxmd->v4l2_dev, "no subdevs\n");
+ return -ENODEV;
+ }
+
+ /* prepare the async subdev notifier and register it */
+ imxmd->notifier.ops = ops ? ops : &imx_media_notifier_ops;
+ ret = v4l2_async_nf_register(&imxmd->v4l2_dev, &imxmd->notifier);
+ if (ret) {
+ v4l2_err(&imxmd->v4l2_dev,
+ "v4l2_async_nf_register failed with %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(imx_media_dev_notifier_register);
diff --git a/drivers/staging/media/imx/imx-media-dev.c b/drivers/staging/media/imx/imx-media-dev.c
new file mode 100644
index 000000000..f85462214
--- /dev/null
+++ b/drivers/staging/media/imx/imx-media-dev.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * V4L2 Media Controller Driver for Freescale i.MX5/6 SOC
+ *
+ * Copyright (c) 2016-2019 Mentor Graphics Inc.
+ */
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-event.h>
+#include <media/imx.h>
+#include "imx-media.h"
+
+static inline struct imx_media_dev *notifier2dev(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct imx_media_dev, notifier);
+}
+
+/* async subdev bound notifier */
+static int imx_media_subdev_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
+{
+ struct imx_media_dev *imxmd = notifier2dev(notifier);
+ int ret;
+
+ if (sd->grp_id & IMX_MEDIA_GRP_ID_IPU_CSI) {
+ /* register the IPU internal subdevs */
+ ret = imx_media_register_ipu_internal_subdevs(imxmd, sd);
+ if (ret)
+ return ret;
+ }
+
+ dev_dbg(imxmd->md.dev, "subdev %s bound\n", sd->name);
+
+ return 0;
+}
+
+/* async subdev complete notifier */
+static int imx6_media_probe_complete(struct v4l2_async_notifier *notifier)
+{
+ struct imx_media_dev *imxmd = notifier2dev(notifier);
+ int ret;
+
+ /* call the imx5/6/7 common probe completion handler */
+ ret = imx_media_probe_complete(notifier);
+ if (ret)
+ return ret;
+
+ mutex_lock(&imxmd->mutex);
+
+ imxmd->m2m_vdev = imx_media_csc_scaler_device_init(imxmd);
+ if (IS_ERR(imxmd->m2m_vdev)) {
+ ret = PTR_ERR(imxmd->m2m_vdev);
+ imxmd->m2m_vdev = NULL;
+ goto unlock;
+ }
+
+ ret = imx_media_csc_scaler_device_register(imxmd->m2m_vdev);
+unlock:
+ mutex_unlock(&imxmd->mutex);
+ return ret;
+}
+
+/* async subdev notifier operations */
+static const struct v4l2_async_notifier_operations imx_media_notifier_ops = {
+ .bound = imx_media_subdev_bound,
+ .complete = imx6_media_probe_complete,
+};
+
+static int imx_media_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct imx_media_dev *imxmd;
+ int ret;
+
+ imxmd = imx_media_dev_init(dev, NULL);
+ if (IS_ERR(imxmd))
+ return PTR_ERR(imxmd);
+
+ ret = imx_media_add_of_subdevs(imxmd, node);
+ if (ret) {
+ v4l2_err(&imxmd->v4l2_dev,
+ "add_of_subdevs failed with %d\n", ret);
+ goto cleanup;
+ }
+
+ ret = imx_media_dev_notifier_register(imxmd, &imx_media_notifier_ops);
+ if (ret)
+ goto cleanup;
+
+ return 0;
+
+cleanup:
+ v4l2_async_nf_cleanup(&imxmd->notifier);
+ v4l2_device_unregister(&imxmd->v4l2_dev);
+ media_device_cleanup(&imxmd->md);
+
+ return ret;
+}
+
+static int imx_media_remove(struct platform_device *pdev)
+{
+ struct imx_media_dev *imxmd =
+ (struct imx_media_dev *)platform_get_drvdata(pdev);
+
+ v4l2_info(&imxmd->v4l2_dev, "Removing imx-media\n");
+
+ if (imxmd->m2m_vdev) {
+ imx_media_csc_scaler_device_unregister(imxmd->m2m_vdev);
+ imxmd->m2m_vdev = NULL;
+ }
+
+ v4l2_async_nf_unregister(&imxmd->notifier);
+ imx_media_unregister_ipu_internal_subdevs(imxmd);
+ v4l2_async_nf_cleanup(&imxmd->notifier);
+ media_device_unregister(&imxmd->md);
+ v4l2_device_unregister(&imxmd->v4l2_dev);
+ media_device_cleanup(&imxmd->md);
+
+ return 0;
+}
+
+static const struct of_device_id imx_media_dt_ids[] = {
+ { .compatible = "fsl,imx-capture-subsystem" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_media_dt_ids);
+
+static struct platform_driver imx_media_pdrv = {
+ .probe = imx_media_probe,
+ .remove = imx_media_remove,
+ .driver = {
+ .name = "imx-media",
+ .of_match_table = imx_media_dt_ids,
+ },
+};
+
+module_platform_driver(imx_media_pdrv);
+
+MODULE_DESCRIPTION("i.MX5/6 v4l2 media controller driver");
+MODULE_AUTHOR("Steve Longerbeam <steve_longerbeam@mentor.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/imx/imx-media-fim.c b/drivers/staging/media/imx/imx-media-fim.c
new file mode 100644
index 000000000..3a9182933
--- /dev/null
+++ b/drivers/staging/media/imx/imx-media-fim.c
@@ -0,0 +1,485 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Frame Interval Monitor.
+ *
+ * Copyright (c) 2016 Mentor Graphics Inc.
+ */
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+#include <media/imx.h>
+#include "imx-media.h"
+
+enum {
+ FIM_CL_ENABLE = 0,
+ FIM_CL_NUM,
+ FIM_CL_TOLERANCE_MIN,
+ FIM_CL_TOLERANCE_MAX,
+ FIM_CL_NUM_SKIP,
+ FIM_NUM_CONTROLS,
+};
+
+enum {
+ FIM_CL_ICAP_EDGE = 0,
+ FIM_CL_ICAP_CHANNEL,
+ FIM_NUM_ICAP_CONTROLS,
+};
+
+#define FIM_CL_ENABLE_DEF 0 /* FIM disabled by default */
+#define FIM_CL_NUM_DEF 8 /* average 8 frames */
+#define FIM_CL_NUM_SKIP_DEF 2 /* skip 2 frames after restart */
+#define FIM_CL_TOLERANCE_MIN_DEF 50 /* usec */
+#define FIM_CL_TOLERANCE_MAX_DEF 0 /* no max tolerance (unbounded) */
+
+struct imx_media_fim {
+ /* the owning subdev of this fim instance */
+ struct v4l2_subdev *sd;
+
+ /* FIM's control handler */
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ /* control clusters */
+ struct v4l2_ctrl *ctrl[FIM_NUM_CONTROLS];
+ struct v4l2_ctrl *icap_ctrl[FIM_NUM_ICAP_CONTROLS];
+
+ spinlock_t lock; /* protect control values */
+
+ /* current control values */
+ bool enabled;
+ int num_avg;
+ int num_skip;
+ unsigned long tolerance_min; /* usec */
+ unsigned long tolerance_max; /* usec */
+ /* input capture method of measuring FI */
+ int icap_channel;
+ int icap_flags;
+
+ int counter;
+ ktime_t last_ts;
+ unsigned long sum; /* usec */
+ unsigned long nominal; /* usec */
+
+ struct completion icap_first_event;
+ bool stream_on;
+};
+
+#define icap_enabled(fim) ((fim)->icap_flags != IRQ_TYPE_NONE)
+
+static void update_fim_nominal(struct imx_media_fim *fim,
+ const struct v4l2_fract *fi)
+{
+ if (fi->denominator == 0) {
+ dev_dbg(fim->sd->dev, "no frame interval, FIM disabled\n");
+ fim->enabled = false;
+ return;
+ }
+
+ fim->nominal = DIV_ROUND_CLOSEST_ULL(1000000ULL * (u64)fi->numerator,
+ fi->denominator);
+
+ dev_dbg(fim->sd->dev, "FI=%lu usec\n", fim->nominal);
+}
+
+static void reset_fim(struct imx_media_fim *fim, bool curval)
+{
+ struct v4l2_ctrl *icap_chan = fim->icap_ctrl[FIM_CL_ICAP_CHANNEL];
+ struct v4l2_ctrl *icap_edge = fim->icap_ctrl[FIM_CL_ICAP_EDGE];
+ struct v4l2_ctrl *en = fim->ctrl[FIM_CL_ENABLE];
+ struct v4l2_ctrl *num = fim->ctrl[FIM_CL_NUM];
+ struct v4l2_ctrl *skip = fim->ctrl[FIM_CL_NUM_SKIP];
+ struct v4l2_ctrl *tol_min = fim->ctrl[FIM_CL_TOLERANCE_MIN];
+ struct v4l2_ctrl *tol_max = fim->ctrl[FIM_CL_TOLERANCE_MAX];
+
+ if (curval) {
+ fim->enabled = en->cur.val;
+ fim->icap_flags = icap_edge->cur.val;
+ fim->icap_channel = icap_chan->cur.val;
+ fim->num_avg = num->cur.val;
+ fim->num_skip = skip->cur.val;
+ fim->tolerance_min = tol_min->cur.val;
+ fim->tolerance_max = tol_max->cur.val;
+ } else {
+ fim->enabled = en->val;
+ fim->icap_flags = icap_edge->val;
+ fim->icap_channel = icap_chan->val;
+ fim->num_avg = num->val;
+ fim->num_skip = skip->val;
+ fim->tolerance_min = tol_min->val;
+ fim->tolerance_max = tol_max->val;
+ }
+
+ /* disable tolerance range if max <= min */
+ if (fim->tolerance_max <= fim->tolerance_min)
+ fim->tolerance_max = 0;
+
+ /* num_skip must be >= 1 if input capture not used */
+ if (!icap_enabled(fim))
+ fim->num_skip = max_t(int, fim->num_skip, 1);
+
+ fim->counter = -fim->num_skip;
+ fim->sum = 0;
+}
+
+static void send_fim_event(struct imx_media_fim *fim, unsigned long error)
+{
+ static const struct v4l2_event ev = {
+ .type = V4L2_EVENT_IMX_FRAME_INTERVAL_ERROR,
+ };
+
+ v4l2_subdev_notify_event(fim->sd, &ev);
+}
+
+/*
+ * Monitor an averaged frame interval. If the average deviates too much
+ * from the nominal frame rate, send the frame interval error event. The
+ * frame intervals are averaged in order to quiet noise from
+ * (presumably random) interrupt latency.
+ */
+static void frame_interval_monitor(struct imx_media_fim *fim,
+ ktime_t timestamp)
+{
+ long long interval, error;
+ unsigned long error_avg;
+ bool send_event = false;
+
+ if (!fim->enabled || ++fim->counter <= 0)
+ goto out_update_ts;
+
+ /* max error is less than 100µs, so use 32-bit division, saturating otherwise */
+ interval = ktime_to_ns(ktime_sub(timestamp, fim->last_ts));
+ error = abs(interval - NSEC_PER_USEC * (u64)fim->nominal);
+ if (error > U32_MAX)
+ error = U32_MAX;
+ else
+ error = abs((u32)error / NSEC_PER_USEC);
+
+ if (fim->tolerance_max && error >= fim->tolerance_max) {
+ dev_dbg(fim->sd->dev,
+ "FIM: %llu ignored, out of tolerance bounds\n",
+ error);
+ fim->counter--;
+ goto out_update_ts;
+ }
+
+ fim->sum += error;
+
+ if (fim->counter == fim->num_avg) {
+ error_avg = DIV_ROUND_CLOSEST(fim->sum, fim->num_avg);
+
+ if (error_avg > fim->tolerance_min)
+ send_event = true;
+
+ dev_dbg(fim->sd->dev, "FIM: error: %lu usec%s\n",
+ error_avg, send_event ? " (!!!)" : "");
+
+ fim->counter = 0;
+ fim->sum = 0;
+ }
+
+out_update_ts:
+ fim->last_ts = timestamp;
+ if (send_event)
+ send_fim_event(fim, error_avg);
+}
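
As an illustration of the arithmetic above, here is a stand-alone sketch with made-up numbers (a 30 fps nominal rate, the default num_avg of 8 and tolerance_min of 50 µs); none of these names or samples come from the driver:

#include <stdio.h>

int main(void)
{
	/* nominal = DIV_ROUND_CLOSEST(1000000 * 1, 30) for a 1/30 s interval */
	unsigned long nominal = (1000000UL * 1 + 30 / 2) / 30;	/* 33333 usec */
	/* per-frame |interval - nominal| errors in usec (made-up samples) */
	unsigned long err[8] = { 20, 35, 60, 45, 30, 55, 40, 65 };
	unsigned long sum = 0, avg;
	int i;

	for (i = 0; i < 8; i++)
		sum += err[i];
	avg = (sum + 8 / 2) / 8;	/* DIV_ROUND_CLOSEST(sum, num_avg) */

	printf("nominal %lu usec, averaged error %lu usec -> %s\n",
	       nominal, avg, avg > 50 ? "send event" : "within tolerance");
	return 0;
}
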
+
+#ifdef CONFIG_IMX_GPT_ICAP
+/*
+ * Input Capture method of measuring frame intervals. Not subject
+ * to interrupt latency.
+ */
+static void fim_input_capture_handler(int channel, void *dev_id,
+ ktime_t timestamp)
+{
+ struct imx_media_fim *fim = dev_id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fim->lock, flags);
+
+ frame_interval_monitor(fim, timestamp);
+
+ if (!completion_done(&fim->icap_first_event))
+ complete(&fim->icap_first_event);
+
+ spin_unlock_irqrestore(&fim->lock, flags);
+}
+
+static int fim_request_input_capture(struct imx_media_fim *fim)
+{
+ init_completion(&fim->icap_first_event);
+
+ return mxc_request_input_capture(fim->icap_channel,
+ fim_input_capture_handler,
+ fim->icap_flags, fim);
+}
+
+static void fim_free_input_capture(struct imx_media_fim *fim)
+{
+ mxc_free_input_capture(fim->icap_channel, fim);
+}
+
+#else /* CONFIG_IMX_GPT_ICAP */
+
+static int fim_request_input_capture(struct imx_media_fim *fim)
+{
+ return 0;
+}
+
+static void fim_free_input_capture(struct imx_media_fim *fim)
+{
+}
+
+#endif /* CONFIG_IMX_GPT_ICAP */
+
+/*
+ * When monitoring the first frame interval after streamon (i.e. when
+ * fim->num_skip == 0), we need a valid fim->last_ts before we can
+ * begin. This only applies to the input capture method. The first FI
+ * after streamon cannot be measured accurately with the EOF method,
+ * so fim->num_skip is forced to a minimum of 1 in that case, which
+ * makes this function a noop when the EOF method is used.
+ */
+static void fim_acquire_first_ts(struct imx_media_fim *fim)
+{
+ unsigned long ret;
+
+ if (!fim->enabled || fim->num_skip > 0)
+ return;
+
+ ret = wait_for_completion_timeout(
+ &fim->icap_first_event,
+ msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
+ if (ret == 0)
+ v4l2_warn(fim->sd, "wait first icap event timeout\n");
+}
+
+/* FIM Controls */
+static int fim_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct imx_media_fim *fim = container_of(ctrl->handler,
+ struct imx_media_fim,
+ ctrl_handler);
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&fim->lock, flags);
+
+ switch (ctrl->id) {
+ case V4L2_CID_IMX_FIM_ENABLE:
+ break;
+ case V4L2_CID_IMX_FIM_ICAP_EDGE:
+ if (fim->stream_on)
+ ret = -EBUSY;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (!ret)
+ reset_fim(fim, false);
+
+ spin_unlock_irqrestore(&fim->lock, flags);
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops fim_ctrl_ops = {
+ .s_ctrl = fim_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config fim_ctrl[] = {
+ [FIM_CL_ENABLE] = {
+ .ops = &fim_ctrl_ops,
+ .id = V4L2_CID_IMX_FIM_ENABLE,
+ .name = "FIM Enable",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .def = FIM_CL_ENABLE_DEF,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ },
+ [FIM_CL_NUM] = {
+ .ops = &fim_ctrl_ops,
+ .id = V4L2_CID_IMX_FIM_NUM,
+ .name = "FIM Num Average",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .def = FIM_CL_NUM_DEF,
+ .min = 1, /* no averaging */
+ .max = 64, /* average 64 frames */
+ .step = 1,
+ },
+ [FIM_CL_TOLERANCE_MIN] = {
+ .ops = &fim_ctrl_ops,
+ .id = V4L2_CID_IMX_FIM_TOLERANCE_MIN,
+ .name = "FIM Tolerance Min",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .def = FIM_CL_TOLERANCE_MIN_DEF,
+ .min = 2,
+ .max = 200,
+ .step = 1,
+ },
+ [FIM_CL_TOLERANCE_MAX] = {
+ .ops = &fim_ctrl_ops,
+ .id = V4L2_CID_IMX_FIM_TOLERANCE_MAX,
+ .name = "FIM Tolerance Max",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .def = FIM_CL_TOLERANCE_MAX_DEF,
+ .min = 0,
+ .max = 500,
+ .step = 1,
+ },
+ [FIM_CL_NUM_SKIP] = {
+ .ops = &fim_ctrl_ops,
+ .id = V4L2_CID_IMX_FIM_NUM_SKIP,
+ .name = "FIM Num Skip",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .def = FIM_CL_NUM_SKIP_DEF,
+ .min = 0, /* skip no frames */
+ .max = 256, /* skip 256 frames */
+ .step = 1,
+ },
+};
+
+static const struct v4l2_ctrl_config fim_icap_ctrl[] = {
+ [FIM_CL_ICAP_EDGE] = {
+ .ops = &fim_ctrl_ops,
+ .id = V4L2_CID_IMX_FIM_ICAP_EDGE,
+ .name = "FIM Input Capture Edge",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .def = IRQ_TYPE_NONE, /* input capture disabled by default */
+ .min = IRQ_TYPE_NONE,
+ .max = IRQ_TYPE_EDGE_BOTH,
+ .step = 1,
+ },
+ [FIM_CL_ICAP_CHANNEL] = {
+ .ops = &fim_ctrl_ops,
+ .id = V4L2_CID_IMX_FIM_ICAP_CHANNEL,
+ .name = "FIM Input Capture Channel",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .def = 0,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ },
+};
+
+static int init_fim_controls(struct imx_media_fim *fim)
+{
+ struct v4l2_ctrl_handler *hdlr = &fim->ctrl_handler;
+ int i, ret;
+
+ v4l2_ctrl_handler_init(hdlr, FIM_NUM_CONTROLS + FIM_NUM_ICAP_CONTROLS);
+
+ for (i = 0; i < FIM_NUM_CONTROLS; i++)
+ fim->ctrl[i] = v4l2_ctrl_new_custom(hdlr,
+ &fim_ctrl[i],
+ NULL);
+ for (i = 0; i < FIM_NUM_ICAP_CONTROLS; i++)
+ fim->icap_ctrl[i] = v4l2_ctrl_new_custom(hdlr,
+ &fim_icap_ctrl[i],
+ NULL);
+ if (hdlr->error) {
+ ret = hdlr->error;
+ goto err_free;
+ }
+
+ v4l2_ctrl_cluster(FIM_NUM_CONTROLS, fim->ctrl);
+ v4l2_ctrl_cluster(FIM_NUM_ICAP_CONTROLS, fim->icap_ctrl);
+
+ return 0;
+err_free:
+ v4l2_ctrl_handler_free(hdlr);
+ return ret;
+}
+
+/*
+ * Monitor frame intervals via EOF interrupt. This method is
+ * subject to uncertainty errors introduced by interrupt latency.
+ *
+ * This is a noop if the Input Capture method is being used, since
+ * frame_interval_monitor() is called by the input capture event
+ * callback handler in that case.
+ */
+void imx_media_fim_eof_monitor(struct imx_media_fim *fim, ktime_t timestamp)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&fim->lock, flags);
+
+ if (!icap_enabled(fim))
+ frame_interval_monitor(fim, timestamp);
+
+ spin_unlock_irqrestore(&fim->lock, flags);
+}
+
+/* Called by the subdev in its s_stream callback */
+int imx_media_fim_set_stream(struct imx_media_fim *fim,
+ const struct v4l2_fract *fi,
+ bool on)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ v4l2_ctrl_lock(fim->ctrl[FIM_CL_ENABLE]);
+
+ if (fim->stream_on == on)
+ goto out;
+
+ if (on) {
+ spin_lock_irqsave(&fim->lock, flags);
+ reset_fim(fim, true);
+ update_fim_nominal(fim, fi);
+ spin_unlock_irqrestore(&fim->lock, flags);
+
+ if (icap_enabled(fim)) {
+ ret = fim_request_input_capture(fim);
+ if (ret)
+ goto out;
+ fim_acquire_first_ts(fim);
+ }
+ } else {
+ if (icap_enabled(fim))
+ fim_free_input_capture(fim);
+ }
+
+ fim->stream_on = on;
+out:
+ v4l2_ctrl_unlock(fim->ctrl[FIM_CL_ENABLE]);
+ return ret;
+}
+
+int imx_media_fim_add_controls(struct imx_media_fim *fim)
+{
+ /* add the FIM controls to the calling subdev ctrl handler */
+ return v4l2_ctrl_add_handler(fim->sd->ctrl_handler,
+ &fim->ctrl_handler, NULL, false);
+}
+
+/* Called by the subdev in its subdev registered callback */
+struct imx_media_fim *imx_media_fim_init(struct v4l2_subdev *sd)
+{
+ struct imx_media_fim *fim;
+ int ret;
+
+ fim = devm_kzalloc(sd->dev, sizeof(*fim), GFP_KERNEL);
+ if (!fim)
+ return ERR_PTR(-ENOMEM);
+
+ fim->sd = sd;
+
+ spin_lock_init(&fim->lock);
+
+ ret = init_fim_controls(fim);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return fim;
+}
+
+void imx_media_fim_free(struct imx_media_fim *fim)
+{
+ v4l2_ctrl_handler_free(&fim->ctrl_handler);
+}
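
Putting the above together, the intended call sequence from the owning CSI subdev looks roughly like the sketch below; priv is a hypothetical subdev private struct, and the real wiring lives in imx-media-csi.c:

	/* at subdev registered time */
	priv->fim = imx_media_fim_init(&priv->sd);
	if (IS_ERR(priv->fim))
		return PTR_ERR(priv->fim);
	ret = imx_media_fim_add_controls(priv->fim);

	/* in s_stream(1), with fi holding the current frame interval */
	ret = imx_media_fim_set_stream(priv->fim, &fi, true);

	/* from the EOF interrupt handler */
	imx_media_fim_eof_monitor(priv->fim, ktime_get());

	/* in s_stream(0) */
	imx_media_fim_set_stream(priv->fim, &fi, false);

	/* at subdev unregistered time */
	imx_media_fim_free(priv->fim);
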
diff --git a/drivers/staging/media/imx/imx-media-internal-sd.c b/drivers/staging/media/imx/imx-media-internal-sd.c
new file mode 100644
index 000000000..da4109b2f
--- /dev/null
+++ b/drivers/staging/media/imx/imx-media-internal-sd.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Media driver for Freescale i.MX5/6 SOC
+ *
+ * Adds the IPU internal subdevices and the media links between them.
+ *
+ * Copyright (c) 2016 Mentor Graphics Inc.
+ */
+#include <linux/platform_device.h>
+#include "imx-media.h"
+
+/* max pads per internal-sd */
+#define MAX_INTERNAL_PADS 8
+/* max links per internal-sd pad */
+#define MAX_INTERNAL_LINKS 8
+
+struct internal_subdev;
+
+struct internal_link {
+ int remote;
+ int local_pad;
+ int remote_pad;
+};
+
+struct internal_pad {
+ int num_links;
+ struct internal_link link[MAX_INTERNAL_LINKS];
+};
+
+struct internal_subdev {
+ u32 grp_id;
+ struct internal_pad pad[MAX_INTERNAL_PADS];
+
+ struct v4l2_subdev * (*sync_register)(struct v4l2_device *v4l2_dev,
+ struct device *ipu_dev,
+ struct ipu_soc *ipu,
+ u32 grp_id);
+ int (*sync_unregister)(struct v4l2_subdev *sd);
+};
+
+static const struct internal_subdev int_subdev[NUM_IPU_SUBDEVS] = {
+ [IPU_CSI0] = {
+ .grp_id = IMX_MEDIA_GRP_ID_IPU_CSI0,
+ .pad[CSI_SRC_PAD_DIRECT] = {
+ .num_links = 2,
+ .link = {
+ {
+ .local_pad = CSI_SRC_PAD_DIRECT,
+ .remote = IPU_IC_PRP,
+ .remote_pad = PRP_SINK_PAD,
+ }, {
+ .local_pad = CSI_SRC_PAD_DIRECT,
+ .remote = IPU_VDIC,
+ .remote_pad = VDIC_SINK_PAD_DIRECT,
+ },
+ },
+ },
+ },
+
+ [IPU_CSI1] = {
+ .grp_id = IMX_MEDIA_GRP_ID_IPU_CSI1,
+ .pad[CSI_SRC_PAD_DIRECT] = {
+ .num_links = 2,
+ .link = {
+ {
+ .local_pad = CSI_SRC_PAD_DIRECT,
+ .remote = IPU_IC_PRP,
+ .remote_pad = PRP_SINK_PAD,
+ }, {
+ .local_pad = CSI_SRC_PAD_DIRECT,
+ .remote = IPU_VDIC,
+ .remote_pad = VDIC_SINK_PAD_DIRECT,
+ },
+ },
+ },
+ },
+
+ [IPU_VDIC] = {
+ .grp_id = IMX_MEDIA_GRP_ID_IPU_VDIC,
+ .sync_register = imx_media_vdic_register,
+ .sync_unregister = imx_media_vdic_unregister,
+ .pad[VDIC_SRC_PAD_DIRECT] = {
+ .num_links = 1,
+ .link = {
+ {
+ .local_pad = VDIC_SRC_PAD_DIRECT,
+ .remote = IPU_IC_PRP,
+ .remote_pad = PRP_SINK_PAD,
+ },
+ },
+ },
+ },
+
+ [IPU_IC_PRP] = {
+ .grp_id = IMX_MEDIA_GRP_ID_IPU_IC_PRP,
+ .sync_register = imx_media_ic_register,
+ .sync_unregister = imx_media_ic_unregister,
+ .pad[PRP_SRC_PAD_PRPENC] = {
+ .num_links = 1,
+ .link = {
+ {
+ .local_pad = PRP_SRC_PAD_PRPENC,
+ .remote = IPU_IC_PRPENC,
+ .remote_pad = PRPENCVF_SINK_PAD,
+ },
+ },
+ },
+ .pad[PRP_SRC_PAD_PRPVF] = {
+ .num_links = 1,
+ .link = {
+ {
+ .local_pad = PRP_SRC_PAD_PRPVF,
+ .remote = IPU_IC_PRPVF,
+ .remote_pad = PRPENCVF_SINK_PAD,
+ },
+ },
+ },
+ },
+
+ [IPU_IC_PRPENC] = {
+ .grp_id = IMX_MEDIA_GRP_ID_IPU_IC_PRPENC,
+ .sync_register = imx_media_ic_register,
+ .sync_unregister = imx_media_ic_unregister,
+ },
+
+ [IPU_IC_PRPVF] = {
+ .grp_id = IMX_MEDIA_GRP_ID_IPU_IC_PRPVF,
+ .sync_register = imx_media_ic_register,
+ .sync_unregister = imx_media_ic_unregister,
+ },
+};
+
+static int create_internal_link(struct imx_media_dev *imxmd,
+ struct v4l2_subdev *src,
+ struct v4l2_subdev *sink,
+ const struct internal_link *link)
+{
+ int ret;
+
+ /* skip if this link already created */
+ if (media_entity_find_link(&src->entity.pads[link->local_pad],
+ &sink->entity.pads[link->remote_pad]))
+ return 0;
+
+ dev_dbg(imxmd->md.dev, "%s:%d -> %s:%d\n",
+ src->name, link->local_pad,
+ sink->name, link->remote_pad);
+
+ ret = media_create_pad_link(&src->entity, link->local_pad,
+ &sink->entity, link->remote_pad, 0);
+ if (ret)
+ v4l2_err(&imxmd->v4l2_dev, "%s failed: %d\n", __func__, ret);
+
+ return ret;
+}
+
+static int create_ipu_internal_links(struct imx_media_dev *imxmd,
+ const struct internal_subdev *intsd,
+ struct v4l2_subdev *sd,
+ int ipu_id)
+{
+ const struct internal_pad *intpad;
+ const struct internal_link *link;
+ struct media_pad *pad;
+ int i, j, ret;
+
+ /* create the source->sink links */
+ for (i = 0; i < sd->entity.num_pads; i++) {
+ intpad = &intsd->pad[i];
+ pad = &sd->entity.pads[i];
+
+ if (!(pad->flags & MEDIA_PAD_FL_SOURCE))
+ continue;
+
+ for (j = 0; j < intpad->num_links; j++) {
+ struct v4l2_subdev *sink;
+
+ link = &intpad->link[j];
+ sink = imxmd->sync_sd[ipu_id][link->remote];
+
+ ret = create_internal_link(imxmd, sd, sink, link);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int imx_media_register_ipu_internal_subdevs(struct imx_media_dev *imxmd,
+ struct v4l2_subdev *csi)
+{
+ struct device *ipu_dev = csi->dev->parent;
+ const struct internal_subdev *intsd;
+ struct v4l2_subdev *sd;
+ struct ipu_soc *ipu;
+ int i, ipu_id, ret;
+
+ ipu = dev_get_drvdata(ipu_dev);
+ if (!ipu) {
+ v4l2_err(&imxmd->v4l2_dev, "invalid IPU device!\n");
+ return -ENODEV;
+ }
+
+ ipu_id = ipu_get_num(ipu);
+ if (ipu_id > 1) {
+ v4l2_err(&imxmd->v4l2_dev, "invalid IPU id %d!\n", ipu_id);
+ return -ENODEV;
+ }
+
+ mutex_lock(&imxmd->mutex);
+
+ /* record this IPU */
+ if (!imxmd->ipu[ipu_id])
+ imxmd->ipu[ipu_id] = ipu;
+
+ /* register the synchronous subdevs */
+ for (i = 0; i < NUM_IPU_SUBDEVS; i++) {
+ intsd = &int_subdev[i];
+
+ sd = imxmd->sync_sd[ipu_id][i];
+
+ /*
+ * skip if this sync subdev is already registered or it's
+ * not a sync subdev (one of the CSIs)
+ */
+ if (sd || !intsd->sync_register)
+ continue;
+
+ mutex_unlock(&imxmd->mutex);
+ sd = intsd->sync_register(&imxmd->v4l2_dev, ipu_dev, ipu,
+ intsd->grp_id);
+ mutex_lock(&imxmd->mutex);
+ if (IS_ERR(sd)) {
+ ret = PTR_ERR(sd);
+ goto err_unwind;
+ }
+
+ imxmd->sync_sd[ipu_id][i] = sd;
+ }
+
+ /*
+ * all the sync subdevs are registered, create the media links
+ * between them.
+ */
+ for (i = 0; i < NUM_IPU_SUBDEVS; i++) {
+ intsd = &int_subdev[i];
+
+ if (intsd->grp_id == csi->grp_id) {
+ sd = csi;
+ } else {
+ sd = imxmd->sync_sd[ipu_id][i];
+ if (!sd)
+ continue;
+ }
+
+ ret = create_ipu_internal_links(imxmd, intsd, sd, ipu_id);
+ if (ret) {
+ mutex_unlock(&imxmd->mutex);
+ imx_media_unregister_ipu_internal_subdevs(imxmd);
+ return ret;
+ }
+ }
+
+ mutex_unlock(&imxmd->mutex);
+ return 0;
+
+err_unwind:
+ while (--i >= 0) {
+ intsd = &int_subdev[i];
+ sd = imxmd->sync_sd[ipu_id][i];
+ if (!sd || !intsd->sync_unregister)
+ continue;
+ mutex_unlock(&imxmd->mutex);
+ intsd->sync_unregister(sd);
+ mutex_lock(&imxmd->mutex);
+ }
+
+ mutex_unlock(&imxmd->mutex);
+ return ret;
+}
+
+void imx_media_unregister_ipu_internal_subdevs(struct imx_media_dev *imxmd)
+{
+ const struct internal_subdev *intsd;
+ struct v4l2_subdev *sd;
+ int i, j;
+
+ mutex_lock(&imxmd->mutex);
+
+ for (i = 0; i < 2; i++) {
+ for (j = 0; j < NUM_IPU_SUBDEVS; j++) {
+ intsd = &int_subdev[j];
+ sd = imxmd->sync_sd[i][j];
+
+ if (!sd || !intsd->sync_unregister)
+ continue;
+
+ mutex_unlock(&imxmd->mutex);
+ intsd->sync_unregister(sd);
+ mutex_lock(&imxmd->mutex);
+ }
+ }
+
+ mutex_unlock(&imxmd->mutex);
+}
diff --git a/drivers/staging/media/imx/imx-media-of.c b/drivers/staging/media/imx/imx-media-of.c
new file mode 100644
index 000000000..59f1eb7b6
--- /dev/null
+++ b/drivers/staging/media/imx/imx-media-of.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Media driver for Freescale i.MX5/6 SOC
+ *
+ * Open Firmware parsing.
+ *
+ * Copyright (c) 2016 Mentor Graphics Inc.
+ */
+#include <linux/of_platform.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-dma-contig.h>
+#include <linux/of_graph.h>
+#include <video/imx-ipu-v3.h>
+#include "imx-media.h"
+
+int imx_media_of_add_csi(struct imx_media_dev *imxmd,
+ struct device_node *csi_np)
+{
+ struct v4l2_async_subdev *asd;
+ int ret = 0;
+
+ if (!of_device_is_available(csi_np)) {
+ dev_dbg(imxmd->md.dev, "%s: %pOFn not enabled\n", __func__,
+ csi_np);
+ return -ENODEV;
+ }
+
+ /* add CSI fwnode to async notifier */
+ asd = v4l2_async_nf_add_fwnode(&imxmd->notifier,
+ of_fwnode_handle(csi_np),
+ struct v4l2_async_subdev);
+ if (IS_ERR(asd)) {
+ ret = PTR_ERR(asd);
+ if (ret == -EEXIST)
+ dev_dbg(imxmd->md.dev, "%s: already added %pOFn\n",
+ __func__, csi_np);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(imx_media_of_add_csi);
+
+int imx_media_add_of_subdevs(struct imx_media_dev *imxmd,
+ struct device_node *np)
+{
+ struct device_node *csi_np;
+ int i, ret;
+
+ for (i = 0; ; i++) {
+ csi_np = of_parse_phandle(np, "ports", i);
+ if (!csi_np)
+ break;
+
+ ret = imx_media_of_add_csi(imxmd, csi_np);
+ if (ret) {
+ /* unavailable or already added is not an error */
+ if (ret == -ENODEV || ret == -EEXIST) {
+ of_node_put(csi_np);
+ continue;
+ }
+
+ /* other error, can't continue */
+ goto err_out;
+ }
+ }
+
+ return 0;
+
+err_out:
+ of_node_put(csi_np);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(imx_media_add_of_subdevs);
diff --git a/drivers/staging/media/imx/imx-media-utils.c b/drivers/staging/media/imx/imx-media-utils.c
new file mode 100644
index 000000000..4985f21b4
--- /dev/null
+++ b/drivers/staging/media/imx/imx-media-utils.c
@@ -0,0 +1,886 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * V4L2 Media Controller Driver for Freescale i.MX5/6 SOC
+ *
+ * Copyright (c) 2016 Mentor Graphics Inc.
+ */
+#include <linux/module.h>
+#include "imx-media.h"
+
+#define IMX_BUS_FMTS(fmt...) (const u32[]) {fmt, 0}
+
+/*
+ * List of supported pixel formats for the subdevs.
+ */
+static const struct imx_media_pixfmt pixel_formats[] = {
+ /*** YUV formats start here ***/
+ {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .codes = IMX_BUS_FMTS(
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_UYVY8_1X16
+ ),
+ .cs = IPUV3_COLORSPACE_YUV,
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .codes = IMX_BUS_FMTS(
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_YUYV8_1X16
+ ),
+ .cs = IPUV3_COLORSPACE_YUV,
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .cs = IPUV3_COLORSPACE_YUV,
+ .bpp = 12,
+ .planar = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YVU420,
+ .cs = IPUV3_COLORSPACE_YUV,
+ .bpp = 12,
+ .planar = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUV422P,
+ .cs = IPUV3_COLORSPACE_YUV,
+ .bpp = 16,
+ .planar = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .cs = IPUV3_COLORSPACE_YUV,
+ .bpp = 12,
+ .planar = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .cs = IPUV3_COLORSPACE_YUV,
+ .bpp = 16,
+ .planar = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUV32,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_AYUV8_1X32),
+ .cs = IPUV3_COLORSPACE_YUV,
+ .bpp = 32,
+ .ipufmt = true,
+ },
+ /*** RGB formats start here ***/
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_RGB565_2X8_LE),
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 16,
+ .cycles = 2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB24,
+ .codes = IMX_BUS_FMTS(
+ MEDIA_BUS_FMT_RGB888_1X24,
+ MEDIA_BUS_FMT_RGB888_2X12_LE
+ ),
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 24,
+ }, {
+ .fourcc = V4L2_PIX_FMT_BGR24,
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 24,
+ }, {
+ .fourcc = V4L2_PIX_FMT_XRGB32,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_ARGB8888_1X32),
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 32,
+ }, {
+ .fourcc = V4L2_PIX_FMT_XRGB32,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_ARGB8888_1X32),
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 32,
+ .ipufmt = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_XBGR32,
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 32,
+ }, {
+ .fourcc = V4L2_PIX_FMT_BGRX32,
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 32,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGBX32,
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 32,
+ },
+ /*** raw bayer and grayscale formats start here ***/
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SBGGR8_1X8),
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 8,
+ .bayer = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SGBRG8_1X8),
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 8,
+ .bayer = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SGRBG8_1X8),
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 8,
+ .bayer = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SRGGB8_1X8),
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 8,
+ .bayer = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR16,
+ .codes = IMX_BUS_FMTS(
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SBGGR12_1X12,
+ MEDIA_BUS_FMT_SBGGR14_1X14,
+ MEDIA_BUS_FMT_SBGGR16_1X16
+ ),
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 16,
+ .bayer = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG16,
+ .codes = IMX_BUS_FMTS(
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SGBRG12_1X12,
+ MEDIA_BUS_FMT_SGBRG14_1X14,
+ MEDIA_BUS_FMT_SGBRG16_1X16
+ ),
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 16,
+ .bayer = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG16,
+ .codes = IMX_BUS_FMTS(
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SGRBG12_1X12,
+ MEDIA_BUS_FMT_SGRBG14_1X14,
+ MEDIA_BUS_FMT_SGRBG16_1X16
+ ),
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 16,
+ .bayer = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB16,
+ .codes = IMX_BUS_FMTS(
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SRGGB12_1X12,
+ MEDIA_BUS_FMT_SRGGB14_1X14,
+ MEDIA_BUS_FMT_SRGGB16_1X16
+ ),
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 16,
+ .bayer = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .codes = IMX_BUS_FMTS(
+ MEDIA_BUS_FMT_Y8_1X8,
+ MEDIA_BUS_FMT_Y10_1X10,
+ MEDIA_BUS_FMT_Y12_1X12
+ ),
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 8,
+ .bayer = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_Y10,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_Y10_1X10),
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 16,
+ .bayer = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_Y12,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_Y12_1X12),
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 16,
+ .bayer = true,
+ },
+};
+
+/*
+ * Search in the pixel_formats[] array for an entry with the given fourcc
+ * that matches the requested selection criteria and return it.
+ *
+ * @fourcc: Search for an entry with the given fourcc pixel format.
+ * @fmt_sel: Allow entries only with the given selection criteria.
+ */
+const struct imx_media_pixfmt *
+imx_media_find_pixel_format(u32 fourcc, enum imx_pixfmt_sel fmt_sel)
+{
+ bool sel_ipu = fmt_sel & PIXFMT_SEL_IPU;
+ unsigned int i;
+
+ fmt_sel &= ~PIXFMT_SEL_IPU;
+
+ for (i = 0; i < ARRAY_SIZE(pixel_formats); i++) {
+ const struct imx_media_pixfmt *fmt = &pixel_formats[i];
+ enum imx_pixfmt_sel sel;
+
+ if (sel_ipu != fmt->ipufmt)
+ continue;
+
+ sel = fmt->bayer ? PIXFMT_SEL_BAYER :
+ ((fmt->cs == IPUV3_COLORSPACE_YUV) ?
+ PIXFMT_SEL_YUV : PIXFMT_SEL_RGB);
+
+ if ((fmt_sel & sel) && fmt->fourcc == fourcc)
+ return fmt;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(imx_media_find_pixel_format);
+
+/*
+ * Search in the pixel_formats[] array for an entry with the given media
+ * bus code that matches the requested selection criteria and return it.
+ *
+ * @code: Search for an entry with the given media-bus code.
+ * @fmt_sel: Allow entries only with the given selection criteria.
+ */
+const struct imx_media_pixfmt *
+imx_media_find_mbus_format(u32 code, enum imx_pixfmt_sel fmt_sel)
+{
+ bool sel_ipu = fmt_sel & PIXFMT_SEL_IPU;
+ unsigned int i;
+
+ fmt_sel &= ~PIXFMT_SEL_IPU;
+
+ for (i = 0; i < ARRAY_SIZE(pixel_formats); i++) {
+ const struct imx_media_pixfmt *fmt = &pixel_formats[i];
+ enum imx_pixfmt_sel sel;
+ unsigned int j;
+
+ if (sel_ipu != fmt->ipufmt)
+ continue;
+
+ sel = fmt->bayer ? PIXFMT_SEL_BAYER :
+ ((fmt->cs == IPUV3_COLORSPACE_YUV) ?
+ PIXFMT_SEL_YUV : PIXFMT_SEL_RGB);
+
+ if (!(fmt_sel & sel) || !fmt->codes)
+ continue;
+
+ for (j = 0; fmt->codes[j]; j++) {
+ if (code == fmt->codes[j])
+ return fmt;
+ }
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(imx_media_find_mbus_format);
+
+/*
+ * Enumerate entries in the pixel_formats[] array that match the
+ * requested selection criteria. Return the fourcc that matches the
+ * selection criteria at the requested match index.
+ *
+ * @fourcc: The returned fourcc that matches the search criteria at
+ * the requested match index.
+ * @index: The requested match index.
+ * @fmt_sel: Include in the enumeration entries with the given selection
+ * criteria.
+ * @code: If non-zero, only include in the enumeration entries matching this
+ * media bus code.
+ */
+int imx_media_enum_pixel_formats(u32 *fourcc, u32 index,
+ enum imx_pixfmt_sel fmt_sel, u32 code)
+{
+ bool sel_ipu = fmt_sel & PIXFMT_SEL_IPU;
+ unsigned int i;
+
+ fmt_sel &= ~PIXFMT_SEL_IPU;
+
+ for (i = 0; i < ARRAY_SIZE(pixel_formats); i++) {
+ const struct imx_media_pixfmt *fmt = &pixel_formats[i];
+ enum imx_pixfmt_sel sel;
+
+ if (sel_ipu != fmt->ipufmt)
+ continue;
+
+ sel = fmt->bayer ? PIXFMT_SEL_BAYER :
+ ((fmt->cs == IPUV3_COLORSPACE_YUV) ?
+ PIXFMT_SEL_YUV : PIXFMT_SEL_RGB);
+
+ if (!(fmt_sel & sel))
+ continue;
+
+ /*
+ * If a media bus code is specified, only consider formats that
+ * match it.
+ */
+ if (code) {
+ unsigned int j;
+
+ if (!fmt->codes)
+ continue;
+
+ for (j = 0; fmt->codes[j]; j++) {
+ if (code == fmt->codes[j])
+ break;
+ }
+
+ if (!fmt->codes[j])
+ continue;
+ }
+
+ if (index == 0) {
+ *fourcc = fmt->fourcc;
+ return 0;
+ }
+
+ index--;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(imx_media_enum_pixel_formats);
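
For example, a capture driver's VIDIOC_ENUM_FMT handler can be built directly on this helper. The handler below is a hypothetical sketch only; the in-tree capture device has its own, more complete version:

static int example_enum_fmt_vid_cap(struct file *file, void *fh,
				    struct v4l2_fmtdesc *f)
{
	u32 fourcc;
	int ret;

	/* enumerate every supported capture format, any media bus code */
	ret = imx_media_enum_pixel_formats(&fourcc, f->index,
					   PIXFMT_SEL_ANY, 0);
	if (ret)
		return ret;

	f->pixelformat = fourcc;
	return 0;
}
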
+
+/*
+ * Enumerate entries in the pixel_formats[] array that match the
+ * requested search criteria. Return the media-bus code that matches
+ * the search criteria at the requested match index.
+ *
+ * @code: The returned media-bus code that matches the search criteria at
+ * the requested match index.
+ * @index: The requested match index.
+ * @fmt_sel: Include in the enumeration entries with the given selection
+ * criteria.
+ */
+int imx_media_enum_mbus_formats(u32 *code, u32 index,
+ enum imx_pixfmt_sel fmt_sel)
+{
+ bool sel_ipu = fmt_sel & PIXFMT_SEL_IPU;
+ unsigned int i;
+
+ fmt_sel &= ~PIXFMT_SEL_IPU;
+
+ for (i = 0; i < ARRAY_SIZE(pixel_formats); i++) {
+ const struct imx_media_pixfmt *fmt = &pixel_formats[i];
+ enum imx_pixfmt_sel sel;
+ unsigned int j;
+
+ if (sel_ipu != fmt->ipufmt)
+ continue;
+
+ sel = fmt->bayer ? PIXFMT_SEL_BAYER :
+ ((fmt->cs == IPUV3_COLORSPACE_YUV) ?
+ PIXFMT_SEL_YUV : PIXFMT_SEL_RGB);
+
+ if (!(fmt_sel & sel) || !fmt->codes)
+ continue;
+
+ for (j = 0; fmt->codes[j]; j++) {
+ if (index == 0) {
+ *code = fmt->codes[j];
+ return 0;
+ }
+
+ index--;
+ }
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(imx_media_enum_mbus_formats);
+
+int imx_media_init_mbus_fmt(struct v4l2_mbus_framefmt *mbus,
+ u32 width, u32 height, u32 code, u32 field,
+ const struct imx_media_pixfmt **cc)
+{
+ const struct imx_media_pixfmt *lcc;
+
+ mbus->width = width;
+ mbus->height = height;
+ mbus->field = field;
+
+ if (code == 0)
+ imx_media_enum_mbus_formats(&code, 0, PIXFMT_SEL_YUV);
+
+ lcc = imx_media_find_mbus_format(code, PIXFMT_SEL_ANY);
+ if (!lcc) {
+ lcc = imx_media_find_ipu_format(code, PIXFMT_SEL_YUV_RGB);
+ if (!lcc)
+ return -EINVAL;
+ }
+
+ mbus->code = code;
+
+ mbus->colorspace = V4L2_COLORSPACE_SRGB;
+ mbus->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(mbus->colorspace);
+ mbus->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(mbus->colorspace);
+ mbus->quantization =
+ V4L2_MAP_QUANTIZATION_DEFAULT(lcc->cs == IPUV3_COLORSPACE_RGB,
+ mbus->colorspace,
+ mbus->ycbcr_enc);
+
+ if (cc)
+ *cc = lcc;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(imx_media_init_mbus_fmt);
+
+/*
+ * Initializes the TRY format to the ACTIVE format on all pads
+ * of a subdev. Can be used as the .init_cfg pad operation.
+ */
+int imx_media_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
+{
+ struct v4l2_mbus_framefmt *mf_try;
+ unsigned int pad;
+ int ret;
+
+ for (pad = 0; pad < sd->entity.num_pads; pad++) {
+ struct v4l2_subdev_format format = {
+ .pad = pad,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &format);
+ if (ret)
+ continue;
+
+ mf_try = v4l2_subdev_get_try_format(sd, sd_state, pad);
+ *mf_try = format.format;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(imx_media_init_cfg);
+
+/*
+ * Default the colorspace in tryfmt to SRGB if set to an unsupported
+ * colorspace or not initialized. Then set the remaining colorimetry
+ * parameters based on the colorspace if they are uninitialized.
+ *
+ * tryfmt->code must be set on entry.
+ *
+ * If this format is destined to be routed through the Image Converter,
+ * Y`CbCr encoding must be fixed. The IC supports only BT.601 Y`CbCr
+ * or Rec.709 Y`CbCr encoding.
+ */
+void imx_media_try_colorimetry(struct v4l2_mbus_framefmt *tryfmt,
+ bool ic_route)
+{
+ const struct imx_media_pixfmt *cc;
+ bool is_rgb = false;
+
+ cc = imx_media_find_mbus_format(tryfmt->code, PIXFMT_SEL_ANY);
+ if (!cc)
+ cc = imx_media_find_ipu_format(tryfmt->code,
+ PIXFMT_SEL_YUV_RGB);
+
+ if (cc && cc->cs == IPUV3_COLORSPACE_RGB)
+ is_rgb = true;
+
+ switch (tryfmt->colorspace) {
+ case V4L2_COLORSPACE_SMPTE170M:
+ case V4L2_COLORSPACE_REC709:
+ case V4L2_COLORSPACE_JPEG:
+ case V4L2_COLORSPACE_SRGB:
+ case V4L2_COLORSPACE_BT2020:
+ case V4L2_COLORSPACE_OPRGB:
+ case V4L2_COLORSPACE_DCI_P3:
+ case V4L2_COLORSPACE_RAW:
+ break;
+ default:
+ tryfmt->colorspace = V4L2_COLORSPACE_SRGB;
+ break;
+ }
+
+ if (tryfmt->xfer_func == V4L2_XFER_FUNC_DEFAULT)
+ tryfmt->xfer_func =
+ V4L2_MAP_XFER_FUNC_DEFAULT(tryfmt->colorspace);
+
+ if (ic_route) {
+ if (tryfmt->ycbcr_enc != V4L2_YCBCR_ENC_601 &&
+ tryfmt->ycbcr_enc != V4L2_YCBCR_ENC_709)
+ tryfmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+ } else {
+ if (tryfmt->ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT) {
+ tryfmt->ycbcr_enc =
+ V4L2_MAP_YCBCR_ENC_DEFAULT(tryfmt->colorspace);
+ }
+ }
+
+ if (tryfmt->quantization == V4L2_QUANTIZATION_DEFAULT)
+ tryfmt->quantization =
+ V4L2_MAP_QUANTIZATION_DEFAULT(is_rgb,
+ tryfmt->colorspace,
+ tryfmt->ycbcr_enc);
+}
+EXPORT_SYMBOL_GPL(imx_media_try_colorimetry);
+
+int imx_media_mbus_fmt_to_pix_fmt(struct v4l2_pix_format *pix,
+ const struct v4l2_mbus_framefmt *mbus,
+ const struct imx_media_pixfmt *cc)
+{
+ u32 width;
+ u32 stride;
+
+ if (!cc) {
+ cc = imx_media_find_ipu_format(mbus->code,
+ PIXFMT_SEL_YUV_RGB);
+ if (!cc)
+ cc = imx_media_find_mbus_format(mbus->code,
+ PIXFMT_SEL_ANY);
+ if (!cc)
+ return -EINVAL;
+ }
+
+ /*
+ * TODO: the IPU currently does not support the AYUV32 format,
+ * so until it does, convert to a supported YUV format.
+ */
+ if (cc->ipufmt && cc->cs == IPUV3_COLORSPACE_YUV) {
+ u32 code;
+
+ imx_media_enum_mbus_formats(&code, 0, PIXFMT_SEL_YUV);
+ cc = imx_media_find_mbus_format(code, PIXFMT_SEL_YUV);
+ }
+
+ /* Round up width for minimum burst size */
+ width = round_up(mbus->width, 8);
+
+ /* Round up stride for IDMAC line start address alignment */
+ if (cc->planar)
+ stride = round_up(width, 16);
+ else
+ stride = round_up((width * cc->bpp) >> 3, 8);
+
+ pix->width = width;
+ pix->height = mbus->height;
+ pix->pixelformat = cc->fourcc;
+ pix->colorspace = mbus->colorspace;
+ pix->xfer_func = mbus->xfer_func;
+ pix->ycbcr_enc = mbus->ycbcr_enc;
+ pix->quantization = mbus->quantization;
+ pix->field = mbus->field;
+ pix->bytesperline = stride;
+ pix->sizeimage = cc->planar ? ((stride * pix->height * cc->bpp) >> 3) :
+ stride * pix->height;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(imx_media_mbus_fmt_to_pix_fmt);
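
To make the rounding concrete, two worked examples with arbitrary sizes (illustrative values, not taken from the driver):

	UYVY (bpp = 16, packed) at 722x480:
	  width     = round_up(722, 8)          = 728
	  stride    = round_up(728 * 16 / 8, 8) = 1456
	  sizeimage = 1456 * 480                = 698880

	YUV420 (bpp = 12, planar) at 722x480:
	  width     = round_up(722, 8)          = 728
	  stride    = round_up(728, 16)         = 736
	  sizeimage = 736 * 480 * 12 / 8        = 529920
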
+
+void imx_media_free_dma_buf(struct device *dev,
+ struct imx_media_dma_buf *buf)
+{
+ if (buf->virt)
+ dma_free_coherent(dev, buf->len, buf->virt, buf->phys);
+
+ buf->virt = NULL;
+ buf->phys = 0;
+}
+EXPORT_SYMBOL_GPL(imx_media_free_dma_buf);
+
+int imx_media_alloc_dma_buf(struct device *dev,
+ struct imx_media_dma_buf *buf,
+ int size)
+{
+ imx_media_free_dma_buf(dev, buf);
+
+ buf->len = PAGE_ALIGN(size);
+ buf->virt = dma_alloc_coherent(dev, buf->len, &buf->phys,
+ GFP_DMA | GFP_KERNEL);
+ if (!buf->virt) {
+ dev_err(dev, "%s: failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(imx_media_alloc_dma_buf);
+
+/* form a subdev name given a group id and ipu id */
+void imx_media_grp_id_to_sd_name(char *sd_name, int sz, u32 grp_id, int ipu_id)
+{
+ int id;
+
+ switch (grp_id) {
+ case IMX_MEDIA_GRP_ID_IPU_CSI0...IMX_MEDIA_GRP_ID_IPU_CSI1:
+ id = (grp_id >> IMX_MEDIA_GRP_ID_IPU_CSI_BIT) - 1;
+ snprintf(sd_name, sz, "ipu%d_csi%d", ipu_id + 1, id);
+ break;
+ case IMX_MEDIA_GRP_ID_IPU_VDIC:
+ snprintf(sd_name, sz, "ipu%d_vdic", ipu_id + 1);
+ break;
+ case IMX_MEDIA_GRP_ID_IPU_IC_PRP:
+ snprintf(sd_name, sz, "ipu%d_ic_prp", ipu_id + 1);
+ break;
+ case IMX_MEDIA_GRP_ID_IPU_IC_PRPENC:
+ snprintf(sd_name, sz, "ipu%d_ic_prpenc", ipu_id + 1);
+ break;
+ case IMX_MEDIA_GRP_ID_IPU_IC_PRPVF:
+ snprintf(sd_name, sz, "ipu%d_ic_prpvf", ipu_id + 1);
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(imx_media_grp_id_to_sd_name);
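
For instance, assuming the group-id encoding from imx-media.h, the first CSI on the first IPU resolves to the familiar "ipu1_csi0" entity name:

	char sd_name[32];

	/* grp_id = IMX_MEDIA_GRP_ID_IPU_CSI0, ipu_id = 0 -> "ipu1_csi0" */
	imx_media_grp_id_to_sd_name(sd_name, sizeof(sd_name),
				    IMX_MEDIA_GRP_ID_IPU_CSI0, 0);
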
+
+struct v4l2_subdev *
+imx_media_find_subdev_by_fwnode(struct imx_media_dev *imxmd,
+ struct fwnode_handle *fwnode)
+{
+ struct v4l2_subdev *sd;
+
+ list_for_each_entry(sd, &imxmd->v4l2_dev.subdevs, list) {
+ if (sd->fwnode == fwnode)
+ return sd;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(imx_media_find_subdev_by_fwnode);
+
+struct v4l2_subdev *
+imx_media_find_subdev_by_devname(struct imx_media_dev *imxmd,
+ const char *devname)
+{
+ struct v4l2_subdev *sd;
+
+ list_for_each_entry(sd, &imxmd->v4l2_dev.subdevs, list) {
+ if (!strcmp(devname, dev_name(sd->dev)))
+ return sd;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(imx_media_find_subdev_by_devname);
+
+/*
+ * Adds a video device to the master video device list. This is called
+ * when a video device is registered.
+ */
+void imx_media_add_video_device(struct imx_media_dev *imxmd,
+ struct imx_media_video_dev *vdev)
+{
+ mutex_lock(&imxmd->mutex);
+
+ list_add_tail(&vdev->list, &imxmd->vdev_list);
+
+ mutex_unlock(&imxmd->mutex);
+}
+EXPORT_SYMBOL_GPL(imx_media_add_video_device);
+
+/*
+ * Search upstream/downstream for a subdevice or video device pad in the
+ * current pipeline, starting from start_entity. Returns the source/sink
+ * pad through which the device was reached. Must be called with
+ * mdev->graph_mutex held.
+ *
+ * If grp_id != 0, finds a subdevice's pad with the given grp_id.
+ * Else, if buftype != 0, finds a video device's pad of given buffer type.
+ * Else, returns the nearest source/sink pad to start_entity.
+ */
+struct media_pad *
+imx_media_pipeline_pad(struct media_entity *start_entity, u32 grp_id,
+ enum v4l2_buf_type buftype, bool upstream)
+{
+ struct media_entity *me = start_entity;
+ struct media_pad *pad = NULL;
+ struct video_device *vfd;
+ struct v4l2_subdev *sd;
+ int i;
+
+ for (i = 0; i < me->num_pads; i++) {
+ struct media_pad *spad = &me->pads[i];
+
+ if ((upstream && !(spad->flags & MEDIA_PAD_FL_SINK)) ||
+ (!upstream && !(spad->flags & MEDIA_PAD_FL_SOURCE)))
+ continue;
+
+ pad = media_pad_remote_pad_first(spad);
+ if (!pad)
+ continue;
+
+ if (grp_id) {
+ if (is_media_entity_v4l2_subdev(pad->entity)) {
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+ if (sd->grp_id & grp_id)
+ return pad;
+ }
+
+ return imx_media_pipeline_pad(pad->entity, grp_id,
+ buftype, upstream);
+ } else if (buftype) {
+ if (is_media_entity_v4l2_video_device(pad->entity)) {
+ vfd = media_entity_to_video_device(pad->entity);
+ if (buftype == vfd->queue->type)
+ return pad;
+ }
+
+ return imx_media_pipeline_pad(pad->entity, grp_id,
+ buftype, upstream);
+ } else {
+ return pad;
+ }
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(imx_media_pipeline_pad);
+
+/*
+ * Search upstream/downstream for a subdev or video device in the current
+ * pipeline. Must be called with mdev->graph_mutex held.
+ */
+static struct media_entity *
+find_pipeline_entity(struct media_entity *start, u32 grp_id,
+ enum v4l2_buf_type buftype, bool upstream)
+{
+ struct media_pad *pad = NULL;
+ struct video_device *vfd;
+ struct v4l2_subdev *sd;
+
+ if (grp_id && is_media_entity_v4l2_subdev(start)) {
+ sd = media_entity_to_v4l2_subdev(start);
+ if (sd->grp_id & grp_id)
+ return &sd->entity;
+ } else if (buftype && is_media_entity_v4l2_video_device(start)) {
+ vfd = media_entity_to_video_device(start);
+ if (buftype == vfd->queue->type)
+ return &vfd->entity;
+ }
+
+ pad = imx_media_pipeline_pad(start, grp_id, buftype, upstream);
+
+ return pad ? pad->entity : NULL;
+}
+
+/*
+ * Find the upstream mipi-csi2 virtual channel reached from the given
+ * start entity in the current pipeline.
+ * Must be called with mdev->graph_mutex held.
+ */
+int imx_media_pipeline_csi2_channel(struct media_entity *start_entity)
+{
+ struct media_pad *pad;
+ int ret = -EPIPE;
+
+ pad = imx_media_pipeline_pad(start_entity, IMX_MEDIA_GRP_ID_CSI2,
+ 0, true);
+ if (pad)
+ ret = pad->index - 1;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(imx_media_pipeline_csi2_channel);
+
+/*
+ * Find a subdev reached upstream or downstream from the given start
+ * entity in the current pipeline.
+ * Must be called with mdev->graph_mutex held.
+ */
+struct v4l2_subdev *
+imx_media_pipeline_subdev(struct media_entity *start_entity, u32 grp_id,
+ bool upstream)
+{
+ struct media_entity *me;
+
+ me = find_pipeline_entity(start_entity, grp_id, 0, upstream);
+ if (!me)
+ return ERR_PTR(-ENODEV);
+
+ return media_entity_to_v4l2_subdev(me);
+}
+EXPORT_SYMBOL_GPL(imx_media_pipeline_subdev);
+
+/*
+ * Find a video device reached upstream or downstream from the given
+ * start entity in the current pipeline.
+ * Must be called with mdev->graph_mutex held.
+ */
+struct video_device *
+imx_media_pipeline_video_device(struct media_entity *start_entity,
+ enum v4l2_buf_type buftype, bool upstream)
+{
+ struct media_entity *me;
+
+ me = find_pipeline_entity(start_entity, 0, buftype, upstream);
+ if (!me)
+ return ERR_PTR(-ENODEV);
+
+ return media_entity_to_video_device(me);
+}
+EXPORT_SYMBOL_GPL(imx_media_pipeline_video_device);
+
+/*
+ * Find a fwnode endpoint that maps to the given subdevice's pad.
+ * If there are multiple endpoints that map to the pad, only the
+ * first endpoint encountered is returned.
+ *
+ * On success the refcount of the returned fwnode endpoint is
+ * incremented.
+ */
+struct fwnode_handle *imx_media_get_pad_fwnode(struct media_pad *pad)
+{
+ struct fwnode_handle *endpoint;
+ struct v4l2_subdev *sd;
+
+ if (!is_media_entity_v4l2_subdev(pad->entity))
+ return ERR_PTR(-ENODEV);
+
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+
+ fwnode_graph_for_each_endpoint(dev_fwnode(sd->dev), endpoint) {
+ int pad_idx = media_entity_get_fwnode_pad(&sd->entity,
+ endpoint,
+ pad->flags);
+ if (pad_idx < 0)
+ continue;
+
+ if (pad_idx == pad->index)
+ return endpoint;
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL_GPL(imx_media_get_pad_fwnode);
+
+/*
+ * Turn current pipeline streaming on/off starting from entity.
+ */
+int imx_media_pipeline_set_stream(struct imx_media_dev *imxmd,
+ struct media_entity *entity,
+ bool on)
+{
+ struct v4l2_subdev *sd;
+ int ret = 0;
+
+ if (!is_media_entity_v4l2_subdev(entity))
+ return -EINVAL;
+ sd = media_entity_to_v4l2_subdev(entity);
+
+ mutex_lock(&imxmd->md.graph_mutex);
+
+ if (on) {
+ ret = __media_pipeline_start(entity->pads, &imxmd->pipe);
+ if (ret)
+ goto out;
+ ret = v4l2_subdev_call(sd, video, s_stream, 1);
+ if (ret)
+ __media_pipeline_stop(entity->pads);
+ } else {
+ v4l2_subdev_call(sd, video, s_stream, 0);
+ if (media_pad_pipeline(entity->pads))
+ __media_pipeline_stop(entity->pads);
+ }
+
+out:
+ mutex_unlock(&imxmd->md.graph_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(imx_media_pipeline_set_stream);
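
A typical caller is a capture queue's start_streaming/stop_streaming pair. A reduced sketch with hypothetical names and error handling omitted:

	/* start_streaming: src_sd is the subdev feeding the capture device */
	ret = imx_media_pipeline_set_stream(imxmd, &src_sd->entity, true);

	/* stop_streaming */
	imx_media_pipeline_set_stream(imxmd, &src_sd->entity, false);
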
+
+MODULE_DESCRIPTION("i.MX5/6 v4l2 media controller driver");
+MODULE_AUTHOR("Steve Longerbeam <steve_longerbeam@mentor.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/imx/imx-media-vdic.c b/drivers/staging/media/imx/imx-media-vdic.c
new file mode 100644
index 000000000..3c2093c52
--- /dev/null
+++ b/drivers/staging/media/imx/imx-media-vdic.c
@@ -0,0 +1,971 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * V4L2 Deinterlacer Subdev for Freescale i.MX5/6 SOC
+ *
+ * Copyright (c) 2017 Mentor Graphics Inc.
+ */
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-subdev.h>
+#include <media/imx.h>
+#include "imx-media.h"
+
+/*
+ * This subdev implements two different video pipelines:
+ *
+ * CSI -> VDIC
+ *
+ * In this pipeline, the CSI sends a single interlaced field F(n-1)
+ * directly to the VDIC (and optionally the following field F(n)
+ * can be sent to memory via IDMAC channel 13). This pipeline only works
+ * in VDIC's high motion mode, which only requires a single field for
+ * processing. The other motion modes (low and medium) require three
+ * fields, so this pipeline does not work in those modes. Also, it is
+ * not clear how this pipeline can deal with the various field orders
+ * (sequential BT/TB, interlaced BT/TB).
+ *
+ * MEM -> CH8,9,10 -> VDIC
+ *
+ * In this pipeline, previous field F(n-1), current field F(n), and next
+ * field F(n+1) are transferred to the VDIC via IDMAC channels 8,9,10.
+ * These memory buffers can come from a video output or mem2mem device.
+ * All motion modes are supported by this pipeline.
+ *
+ * The "direct" CSI->VDIC pipeline requires no DMA, but it can only be
+ * used in high motion mode.
+ */
+
+struct vdic_priv;
+
+struct vdic_pipeline_ops {
+ int (*setup)(struct vdic_priv *priv);
+ void (*start)(struct vdic_priv *priv);
+ void (*stop)(struct vdic_priv *priv);
+ void (*disable)(struct vdic_priv *priv);
+};
+
+/*
+ * Min/max supported widths and heights.
+ */
+#define MIN_W 32
+#define MIN_H 32
+#define MAX_W_VDIC 968
+#define MAX_H_VDIC 2048
+#define W_ALIGN 4 /* multiple of 16 pixels */
+#define H_ALIGN 1 /* multiple of 2 lines */
+#define S_ALIGN 1 /* multiple of 2 */
+
+struct vdic_priv {
+ struct device *ipu_dev;
+ struct ipu_soc *ipu;
+
+ struct v4l2_subdev sd;
+ struct media_pad pad[VDIC_NUM_PADS];
+
+ /* lock to protect all members below */
+ struct mutex lock;
+
+ /* IPU units we require */
+ struct ipu_vdi *vdi;
+
+ int active_input_pad;
+
+ struct ipuv3_channel *vdi_in_ch_p; /* F(n-1) transfer channel */
+ struct ipuv3_channel *vdi_in_ch; /* F(n) transfer channel */
+ struct ipuv3_channel *vdi_in_ch_n; /* F(n+1) transfer channel */
+
+ /* pipeline operations */
+ struct vdic_pipeline_ops *ops;
+
+ /* current and previous input buffers for the indirect path */
+ struct imx_media_buffer *curr_in_buf;
+ struct imx_media_buffer *prev_in_buf;
+
+ /*
+ * translated field type, input line stride, and field size
+ * for indirect path
+ */
+ u32 fieldtype;
+ u32 in_stride;
+ u32 field_size;
+
+ /* the source (a video device or subdev) */
+ struct media_entity *src;
+ /* the sink that will receive the progressive out buffers */
+ struct v4l2_subdev *sink_sd;
+
+ struct v4l2_mbus_framefmt format_mbus[VDIC_NUM_PADS];
+ const struct imx_media_pixfmt *cc[VDIC_NUM_PADS];
+ struct v4l2_fract frame_interval[VDIC_NUM_PADS];
+
+ /* the video device at IDMAC input pad */
+ struct imx_media_video_dev *vdev;
+
+ bool csi_direct; /* using direct CSI->VDIC->IC pipeline */
+
+ /* motion select control */
+ struct v4l2_ctrl_handler ctrl_hdlr;
+ enum ipu_motion_sel motion;
+
+ int stream_count;
+};
+
+static void vdic_put_ipu_resources(struct vdic_priv *priv)
+{
+ if (priv->vdi_in_ch_p)
+ ipu_idmac_put(priv->vdi_in_ch_p);
+ priv->vdi_in_ch_p = NULL;
+
+ if (priv->vdi_in_ch)
+ ipu_idmac_put(priv->vdi_in_ch);
+ priv->vdi_in_ch = NULL;
+
+ if (priv->vdi_in_ch_n)
+ ipu_idmac_put(priv->vdi_in_ch_n);
+ priv->vdi_in_ch_n = NULL;
+
+ if (!IS_ERR_OR_NULL(priv->vdi))
+ ipu_vdi_put(priv->vdi);
+ priv->vdi = NULL;
+}
+
+static int vdic_get_ipu_resources(struct vdic_priv *priv)
+{
+ int ret, err_chan;
+ struct ipuv3_channel *ch;
+ struct ipu_vdi *vdi;
+
+ vdi = ipu_vdi_get(priv->ipu);
+ if (IS_ERR(vdi)) {
+ v4l2_err(&priv->sd, "failed to get VDIC\n");
+ ret = PTR_ERR(vdi);
+ goto out;
+ }
+ priv->vdi = vdi;
+
+ if (!priv->csi_direct) {
+ ch = ipu_idmac_get(priv->ipu, IPUV3_CHANNEL_MEM_VDI_PREV);
+ if (IS_ERR(ch)) {
+ err_chan = IPUV3_CHANNEL_MEM_VDI_PREV;
+ ret = PTR_ERR(ch);
+ goto out_err_chan;
+ }
+ priv->vdi_in_ch_p = ch;
+
+ ch = ipu_idmac_get(priv->ipu, IPUV3_CHANNEL_MEM_VDI_CUR);
+ if (IS_ERR(ch)) {
+ err_chan = IPUV3_CHANNEL_MEM_VDI_CUR;
+ ret = PTR_ERR(ch);
+ goto out_err_chan;
+ }
+ priv->vdi_in_ch = ch;
+
+ ch = ipu_idmac_get(priv->ipu, IPUV3_CHANNEL_MEM_VDI_NEXT);
+ if (IS_ERR(ch)) {
+ err_chan = IPUV3_CHANNEL_MEM_VDI_NEXT;
+ ret = PTR_ERR(ch);
+ goto out_err_chan;
+ }
+ priv->vdi_in_ch_n = ch;
+ }
+
+ return 0;
+
+out_err_chan:
+ v4l2_err(&priv->sd, "could not get IDMAC channel %u\n", err_chan);
+out:
+ vdic_put_ipu_resources(priv);
+ return ret;
+}
+
+/*
+ * This function is currently unused, but will be called when the
+ * output/mem2mem device at the IDMAC input pad sends us a new
+ * buffer. It kicks off the IDMAC read channels to bring in the
+ * buffer fields from memory and begin the conversions.
+ */
+static void __maybe_unused prepare_vdi_in_buffers(struct vdic_priv *priv,
+ struct imx_media_buffer *curr)
+{
+ dma_addr_t prev_phys, curr_phys, next_phys;
+ struct imx_media_buffer *prev;
+ struct vb2_buffer *curr_vb, *prev_vb;
+ u32 fs = priv->field_size;
+ u32 is = priv->in_stride;
+
+ /* current input buffer is now previous */
+ priv->prev_in_buf = priv->curr_in_buf;
+ priv->curr_in_buf = curr;
+ prev = priv->prev_in_buf ? priv->prev_in_buf : curr;
+
+ prev_vb = &prev->vbuf.vb2_buf;
+ curr_vb = &curr->vbuf.vb2_buf;
+
+ switch (priv->fieldtype) {
+ case V4L2_FIELD_SEQ_TB:
+ case V4L2_FIELD_SEQ_BT:
+ prev_phys = vb2_dma_contig_plane_dma_addr(prev_vb, 0) + fs;
+ curr_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0);
+ next_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0) + fs;
+ break;
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ case V4L2_FIELD_INTERLACED:
+ prev_phys = vb2_dma_contig_plane_dma_addr(prev_vb, 0) + is;
+ curr_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0);
+ next_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0) + is;
+ break;
+ default:
+ /*
+ * can't get here, priv->fieldtype can only be one of
+ * the above. This is to quiet smatch errors.
+ */
+ return;
+ }
+
+ ipu_cpmem_set_buffer(priv->vdi_in_ch_p, 0, prev_phys);
+ ipu_cpmem_set_buffer(priv->vdi_in_ch, 0, curr_phys);
+ ipu_cpmem_set_buffer(priv->vdi_in_ch_n, 0, next_phys);
+
+ ipu_idmac_select_buffer(priv->vdi_in_ch_p, 0);
+ ipu_idmac_select_buffer(priv->vdi_in_ch, 0);
+ ipu_idmac_select_buffer(priv->vdi_in_ch_n, 0);
+}
+
+static int setup_vdi_channel(struct vdic_priv *priv,
+ struct ipuv3_channel *channel,
+ dma_addr_t phys0, dma_addr_t phys1)
+{
+ struct imx_media_video_dev *vdev = priv->vdev;
+ unsigned int burst_size;
+ struct ipu_image image;
+ int ret;
+
+ ipu_cpmem_zero(channel);
+
+ memset(&image, 0, sizeof(image));
+ image.pix = vdev->fmt;
+ image.rect = vdev->compose;
+	/* each VDIC input channel transfers one field, so halve the height */
+ image.pix.height /= 2;
+ image.rect.height /= 2;
+ image.phys0 = phys0;
+ image.phys1 = phys1;
+
+ ret = ipu_cpmem_set_image(channel, &image);
+ if (ret)
+ return ret;
+
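+	/* 16-pixel bursts when the width is a multiple of 16, otherwise 8 */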
+ burst_size = (image.pix.width & 0xf) ? 8 : 16;
+ ipu_cpmem_set_burstsize(channel, burst_size);
+
+ ipu_cpmem_set_axi_id(channel, 1);
+
+ ipu_idmac_set_double_buffer(channel, false);
+
+ return 0;
+}
+
+static int vdic_setup_direct(struct vdic_priv *priv)
+{
+ /* set VDIC to receive from CSI for direct path */
+ ipu_fsu_link(priv->ipu, IPUV3_CHANNEL_CSI_DIRECT,
+ IPUV3_CHANNEL_CSI_VDI_PREV);
+
+ return 0;
+}
+
+static void vdic_start_direct(struct vdic_priv *priv)
+{
+}
+
+static void vdic_stop_direct(struct vdic_priv *priv)
+{
+}
+
+static void vdic_disable_direct(struct vdic_priv *priv)
+{
+ ipu_fsu_unlink(priv->ipu, IPUV3_CHANNEL_CSI_DIRECT,
+ IPUV3_CHANNEL_CSI_VDI_PREV);
+}
+
+static int vdic_setup_indirect(struct vdic_priv *priv)
+{
+ struct v4l2_mbus_framefmt *infmt;
+ const struct imx_media_pixfmt *incc;
+ int in_size, ret;
+
+ infmt = &priv->format_mbus[VDIC_SINK_PAD_IDMAC];
+ incc = priv->cc[VDIC_SINK_PAD_IDMAC];
+
+ in_size = (infmt->width * incc->bpp * infmt->height) >> 3;
+
+ /* 1/2 full image size */
+ priv->field_size = in_size / 2;
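+	/* line stride: width bytes for planar input, width * bpp / 8 packed */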
+ priv->in_stride = incc->planar ?
+ infmt->width : (infmt->width * incc->bpp) >> 3;
+
+ priv->prev_in_buf = NULL;
+ priv->curr_in_buf = NULL;
+
+ priv->fieldtype = infmt->field;
+
+ /* init the vdi-in channels */
+ ret = setup_vdi_channel(priv, priv->vdi_in_ch_p, 0, 0);
+ if (ret)
+ return ret;
+ ret = setup_vdi_channel(priv, priv->vdi_in_ch, 0, 0);
+ if (ret)
+ return ret;
+ return setup_vdi_channel(priv, priv->vdi_in_ch_n, 0, 0);
+}
+
+static void vdic_start_indirect(struct vdic_priv *priv)
+{
+ /* enable the channels */
+ ipu_idmac_enable_channel(priv->vdi_in_ch_p);
+ ipu_idmac_enable_channel(priv->vdi_in_ch);
+ ipu_idmac_enable_channel(priv->vdi_in_ch_n);
+}
+
+static void vdic_stop_indirect(struct vdic_priv *priv)
+{
+ /* disable channels */
+ ipu_idmac_disable_channel(priv->vdi_in_ch_p);
+ ipu_idmac_disable_channel(priv->vdi_in_ch);
+ ipu_idmac_disable_channel(priv->vdi_in_ch_n);
+}
+
+static void vdic_disable_indirect(struct vdic_priv *priv)
+{
+}
+
+static struct vdic_pipeline_ops direct_ops = {
+ .setup = vdic_setup_direct,
+ .start = vdic_start_direct,
+ .stop = vdic_stop_direct,
+ .disable = vdic_disable_direct,
+};
+
+static struct vdic_pipeline_ops indirect_ops = {
+ .setup = vdic_setup_indirect,
+ .start = vdic_start_indirect,
+ .stop = vdic_stop_indirect,
+ .disable = vdic_disable_indirect,
+};
+
+static int vdic_start(struct vdic_priv *priv)
+{
+ struct v4l2_mbus_framefmt *infmt;
+ int ret;
+
+ infmt = &priv->format_mbus[priv->active_input_pad];
+
+ priv->ops = priv->csi_direct ? &direct_ops : &indirect_ops;
+
+ ret = vdic_get_ipu_resources(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * init the VDIC.
+ *
+ * note we don't give infmt->code to ipu_vdi_setup(). The VDIC
+ * only supports 4:2:2 or 4:2:0, and this subdev will only
+ * negotiate 4:2:2 at its sink pads.
+ */
+ ipu_vdi_setup(priv->vdi, MEDIA_BUS_FMT_UYVY8_2X8,
+ infmt->width, infmt->height);
+ ipu_vdi_set_field_order(priv->vdi, V4L2_STD_UNKNOWN, infmt->field);
+ ipu_vdi_set_motion(priv->vdi, priv->motion);
+
+ ret = priv->ops->setup(priv);
+ if (ret)
+ goto out_put_ipu;
+
+ ipu_vdi_enable(priv->vdi);
+
+ priv->ops->start(priv);
+
+ return 0;
+
+out_put_ipu:
+ vdic_put_ipu_resources(priv);
+ return ret;
+}
+
+static void vdic_stop(struct vdic_priv *priv)
+{
+ priv->ops->stop(priv);
+ ipu_vdi_disable(priv->vdi);
+ priv->ops->disable(priv);
+
+ vdic_put_ipu_resources(priv);
+}
+
+/*
+ * V4L2 subdev operations.
+ */
+
+static int vdic_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vdic_priv *priv = container_of(ctrl->handler,
+ struct vdic_priv, ctrl_hdlr);
+ enum ipu_motion_sel motion;
+ int ret = 0;
+
+ mutex_lock(&priv->lock);
+
+ switch (ctrl->id) {
+ case V4L2_CID_DEINTERLACING_MODE:
+ motion = ctrl->val;
+ if (motion != priv->motion) {
+ /* can't change motion control mid-streaming */
+ if (priv->stream_count > 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+ priv->motion = motion;
+ }
+ break;
+ default:
+ v4l2_err(&priv->sd, "Invalid control\n");
+ ret = -EINVAL;
+ }
+
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops vdic_ctrl_ops = {
+ .s_ctrl = vdic_s_ctrl,
+};
+
+static const char * const vdic_ctrl_motion_menu[] = {
+ "No Motion Compensation",
+ "Low Motion",
+ "Medium Motion",
+ "High Motion",
+};
+
+static int vdic_init_controls(struct vdic_priv *priv)
+{
+ struct v4l2_ctrl_handler *hdlr = &priv->ctrl_hdlr;
+ int ret;
+
+ v4l2_ctrl_handler_init(hdlr, 1);
+
+ v4l2_ctrl_new_std_menu_items(hdlr, &vdic_ctrl_ops,
+ V4L2_CID_DEINTERLACING_MODE,
+ HIGH_MOTION, 0, HIGH_MOTION,
+ vdic_ctrl_motion_menu);
+
+ priv->sd.ctrl_handler = hdlr;
+
+ if (hdlr->error) {
+ ret = hdlr->error;
+ goto out_free;
+ }
+
+ v4l2_ctrl_handler_setup(hdlr);
+ return 0;
+
+out_free:
+ v4l2_ctrl_handler_free(hdlr);
+ return ret;
+}
+
+static int vdic_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct vdic_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_subdev *src_sd = NULL;
+ int ret = 0;
+
+ mutex_lock(&priv->lock);
+
+ if (!priv->src || !priv->sink_sd) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ if (priv->csi_direct)
+ src_sd = media_entity_to_v4l2_subdev(priv->src);
+
+ /*
+ * enable/disable streaming only if stream_count is
+ * going from 0 to 1 / 1 to 0.
+ */
+ if (priv->stream_count != !enable)
+ goto update_count;
+
+ dev_dbg(priv->ipu_dev, "%s: stream %s\n", sd->name,
+ enable ? "ON" : "OFF");
+
+ if (enable)
+ ret = vdic_start(priv);
+ else
+ vdic_stop(priv);
+ if (ret)
+ goto out;
+
+ if (src_sd) {
+ /* start/stop upstream */
+ ret = v4l2_subdev_call(src_sd, video, s_stream, enable);
+ ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
+ if (ret) {
+ if (enable)
+ vdic_stop(priv);
+ goto out;
+ }
+ }
+
+update_count:
+ priv->stream_count += enable ? 1 : -1;
+ if (priv->stream_count < 0)
+ priv->stream_count = 0;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static struct v4l2_mbus_framefmt *
+__vdic_get_fmt(struct vdic_priv *priv, struct v4l2_subdev_state *sd_state,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(&priv->sd, sd_state, pad);
+ else
+ return &priv->format_mbus[pad];
+}
+
+static int vdic_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->pad >= VDIC_NUM_PADS)
+ return -EINVAL;
+
+ return imx_media_enum_ipu_formats(&code->code, code->index,
+ PIXFMT_SEL_YUV);
+}
+
+static int vdic_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct vdic_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *fmt;
+ int ret = 0;
+
+ if (sdformat->pad >= VDIC_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ fmt = __vdic_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
+ if (!fmt) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ sdformat->format = *fmt;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static void vdic_try_fmt(struct vdic_priv *priv,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *sdformat,
+ const struct imx_media_pixfmt **cc)
+{
+ struct v4l2_mbus_framefmt *infmt;
+
+ *cc = imx_media_find_ipu_format(sdformat->format.code,
+ PIXFMT_SEL_YUV);
+ if (!*cc) {
+ u32 code;
+
+ imx_media_enum_ipu_formats(&code, 0, PIXFMT_SEL_YUV);
+ *cc = imx_media_find_ipu_format(code, PIXFMT_SEL_YUV);
+ sdformat->format.code = (*cc)->codes[0];
+ }
+
+ infmt = __vdic_get_fmt(priv, sd_state, priv->active_input_pad,
+ sdformat->which);
+
+ switch (sdformat->pad) {
+ case VDIC_SRC_PAD_DIRECT:
+ sdformat->format = *infmt;
+ /* output is always progressive! */
+ sdformat->format.field = V4L2_FIELD_NONE;
+ break;
+ case VDIC_SINK_PAD_DIRECT:
+ case VDIC_SINK_PAD_IDMAC:
+ v4l_bound_align_image(&sdformat->format.width,
+ MIN_W, MAX_W_VDIC, W_ALIGN,
+ &sdformat->format.height,
+ MIN_H, MAX_H_VDIC, H_ALIGN, S_ALIGN);
+
+ /* input must be interlaced! Choose SEQ_TB if not */
+ if (!V4L2_FIELD_HAS_BOTH(sdformat->format.field))
+ sdformat->format.field = V4L2_FIELD_SEQ_TB;
+ break;
+ }
+
+ imx_media_try_colorimetry(&sdformat->format, true);
+}
+
+static int vdic_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct vdic_priv *priv = v4l2_get_subdevdata(sd);
+ const struct imx_media_pixfmt *cc;
+ struct v4l2_mbus_framefmt *fmt;
+ int ret = 0;
+
+ if (sdformat->pad >= VDIC_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ if (priv->stream_count > 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ vdic_try_fmt(priv, sd_state, sdformat, &cc);
+
+ fmt = __vdic_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
+ *fmt = sdformat->format;
+
+ /* propagate format to source pad */
+ if (sdformat->pad == VDIC_SINK_PAD_DIRECT ||
+ sdformat->pad == VDIC_SINK_PAD_IDMAC) {
+ const struct imx_media_pixfmt *outcc;
+ struct v4l2_mbus_framefmt *outfmt;
+ struct v4l2_subdev_format format;
+
+ format.pad = VDIC_SRC_PAD_DIRECT;
+ format.which = sdformat->which;
+ format.format = sdformat->format;
+ vdic_try_fmt(priv, sd_state, &format, &outcc);
+
+ outfmt = __vdic_get_fmt(priv, sd_state, VDIC_SRC_PAD_DIRECT,
+ sdformat->which);
+ *outfmt = format.format;
+ if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ priv->cc[VDIC_SRC_PAD_DIRECT] = outcc;
+ }
+
+ if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ priv->cc[sdformat->pad] = cc;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int vdic_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct vdic_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_subdev *remote_sd;
+ int ret = 0;
+
+ dev_dbg(priv->ipu_dev, "%s: link setup %s -> %s",
+ sd->name, remote->entity->name, local->entity->name);
+
+ mutex_lock(&priv->lock);
+
+ if (local->flags & MEDIA_PAD_FL_SOURCE) {
+ if (!is_media_entity_v4l2_subdev(remote->entity)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ remote_sd = media_entity_to_v4l2_subdev(remote->entity);
+
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (priv->sink_sd) {
+ ret = -EBUSY;
+ goto out;
+ }
+ priv->sink_sd = remote_sd;
+ } else {
+ priv->sink_sd = NULL;
+ }
+
+ goto out;
+ }
+
+ /* this is a sink pad */
+
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (priv->src) {
+ ret = -EBUSY;
+ goto out;
+ }
+ } else {
+ priv->src = NULL;
+ goto out;
+ }
+
+ if (local->index == VDIC_SINK_PAD_IDMAC) {
+ struct imx_media_video_dev *vdev = priv->vdev;
+
+ if (!is_media_entity_v4l2_video_device(remote->entity)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!vdev) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ priv->csi_direct = false;
+ } else {
+ if (!is_media_entity_v4l2_subdev(remote->entity)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ remote_sd = media_entity_to_v4l2_subdev(remote->entity);
+
+ /* direct pad must connect to a CSI */
+ if (!(remote_sd->grp_id & IMX_MEDIA_GRP_ID_IPU_CSI) ||
+ remote->index != CSI_SRC_PAD_DIRECT) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ priv->csi_direct = true;
+ }
+
+ priv->src = remote->entity;
+ /* record which input pad is now active */
+ priv->active_input_pad = local->index;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int vdic_link_validate(struct v4l2_subdev *sd,
+ struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt)
+{
+ struct vdic_priv *priv = v4l2_get_subdevdata(sd);
+ int ret;
+
+ ret = v4l2_subdev_link_validate_default(sd, link,
+ source_fmt, sink_fmt);
+ if (ret)
+ return ret;
+
+ mutex_lock(&priv->lock);
+
+ if (priv->csi_direct && priv->motion != HIGH_MOTION) {
+ v4l2_err(&priv->sd,
+ "direct CSI pipeline requires high motion\n");
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int vdic_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct vdic_priv *priv = v4l2_get_subdevdata(sd);
+
+ if (fi->pad >= VDIC_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ fi->interval = priv->frame_interval[fi->pad];
+
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int vdic_s_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct vdic_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_fract *input_fi, *output_fi;
+ int ret = 0;
+
+ mutex_lock(&priv->lock);
+
+ input_fi = &priv->frame_interval[priv->active_input_pad];
+ output_fi = &priv->frame_interval[VDIC_SRC_PAD_DIRECT];
+
+ switch (fi->pad) {
+ case VDIC_SINK_PAD_DIRECT:
+ case VDIC_SINK_PAD_IDMAC:
+ /* No limits on valid input frame intervals */
+ if (fi->interval.numerator == 0 ||
+ fi->interval.denominator == 0)
+ fi->interval = priv->frame_interval[fi->pad];
+ /* Reset output interval */
+ *output_fi = fi->interval;
+ if (priv->csi_direct)
+ output_fi->denominator *= 2;
+ break;
+ case VDIC_SRC_PAD_DIRECT:
+ /*
+ * frame rate at output pad is double input
+ * rate when using direct CSI->VDIC pipeline.
+ *
+ * TODO: implement VDIC frame skipping
+ */
+ fi->interval = *input_fi;
+ if (priv->csi_direct)
+ fi->interval.denominator *= 2;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ priv->frame_interval[fi->pad] = fi->interval;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int vdic_registered(struct v4l2_subdev *sd)
+{
+ struct vdic_priv *priv = v4l2_get_subdevdata(sd);
+ int i, ret;
+ u32 code;
+
+ for (i = 0; i < VDIC_NUM_PADS; i++) {
+ code = 0;
+ if (i != VDIC_SINK_PAD_IDMAC)
+ imx_media_enum_ipu_formats(&code, 0, PIXFMT_SEL_YUV);
+
+ /* set a default mbus format */
+ ret = imx_media_init_mbus_fmt(&priv->format_mbus[i],
+ IMX_MEDIA_DEF_PIX_WIDTH,
+ IMX_MEDIA_DEF_PIX_HEIGHT, code,
+ V4L2_FIELD_NONE, &priv->cc[i]);
+ if (ret)
+ return ret;
+
+ /* init default frame interval */
+ priv->frame_interval[i].numerator = 1;
+ priv->frame_interval[i].denominator = 30;
+ if (i == VDIC_SRC_PAD_DIRECT)
+ priv->frame_interval[i].denominator *= 2;
+ }
+
+ priv->active_input_pad = VDIC_SINK_PAD_DIRECT;
+
+ return vdic_init_controls(priv);
+}
+
+static void vdic_unregistered(struct v4l2_subdev *sd)
+{
+ struct vdic_priv *priv = v4l2_get_subdevdata(sd);
+
+ v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
+}
+
+static const struct v4l2_subdev_pad_ops vdic_pad_ops = {
+ .init_cfg = imx_media_init_cfg,
+ .enum_mbus_code = vdic_enum_mbus_code,
+ .get_fmt = vdic_get_fmt,
+ .set_fmt = vdic_set_fmt,
+ .link_validate = vdic_link_validate,
+};
+
+static const struct v4l2_subdev_video_ops vdic_video_ops = {
+ .g_frame_interval = vdic_g_frame_interval,
+ .s_frame_interval = vdic_s_frame_interval,
+ .s_stream = vdic_s_stream,
+};
+
+static const struct media_entity_operations vdic_entity_ops = {
+ .link_setup = vdic_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_ops vdic_subdev_ops = {
+ .video = &vdic_video_ops,
+ .pad = &vdic_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops vdic_internal_ops = {
+ .registered = vdic_registered,
+ .unregistered = vdic_unregistered,
+};
+
+struct v4l2_subdev *imx_media_vdic_register(struct v4l2_device *v4l2_dev,
+ struct device *ipu_dev,
+ struct ipu_soc *ipu,
+ u32 grp_id)
+{
+ struct vdic_priv *priv;
+ int i, ret;
+
+ priv = devm_kzalloc(ipu_dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ priv->ipu_dev = ipu_dev;
+ priv->ipu = ipu;
+
+ v4l2_subdev_init(&priv->sd, &vdic_subdev_ops);
+ v4l2_set_subdevdata(&priv->sd, priv);
+ priv->sd.internal_ops = &vdic_internal_ops;
+ priv->sd.entity.ops = &vdic_entity_ops;
+ priv->sd.entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
+ priv->sd.owner = ipu_dev->driver->owner;
+ priv->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+ priv->sd.grp_id = grp_id;
+ imx_media_grp_id_to_sd_name(priv->sd.name, sizeof(priv->sd.name),
+ priv->sd.grp_id, ipu_get_num(ipu));
+
+ mutex_init(&priv->lock);
+
+ for (i = 0; i < VDIC_NUM_PADS; i++)
+ priv->pad[i].flags = (i == VDIC_SRC_PAD_DIRECT) ?
+ MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK;
+
+ ret = media_entity_pads_init(&priv->sd.entity, VDIC_NUM_PADS,
+ priv->pad);
+ if (ret)
+ goto free;
+
+ ret = v4l2_device_register_subdev(v4l2_dev, &priv->sd);
+ if (ret)
+ goto free;
+
+ return &priv->sd;
+free:
+ mutex_destroy(&priv->lock);
+ return ERR_PTR(ret);
+}
+
+int imx_media_vdic_unregister(struct v4l2_subdev *sd)
+{
+ struct vdic_priv *priv = v4l2_get_subdevdata(sd);
+
+ v4l2_info(sd, "Removing\n");
+
+ v4l2_device_unregister_subdev(sd);
+ mutex_destroy(&priv->lock);
+ media_entity_cleanup(&sd->entity);
+
+ return 0;
+}
diff --git a/drivers/staging/media/imx/imx-media.h b/drivers/staging/media/imx/imx-media.h
new file mode 100644
index 000000000..f263fc3ad
--- /dev/null
+++ b/drivers/staging/media/imx/imx-media.h
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * V4L2 Media Controller Driver for Freescale i.MX5/6 SOC
+ *
+ * Copyright (c) 2016 Mentor Graphics Inc.
+ */
+#ifndef _IMX_MEDIA_H
+#define _IMX_MEDIA_H
+
+#include <linux/platform_device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-dma-contig.h>
+#include <video/imx-ipu-v3.h>
+
+#define IMX_MEDIA_DEF_PIX_WIDTH 640
+#define IMX_MEDIA_DEF_PIX_HEIGHT 480
+
+/*
+ * Enumeration of the IPU internal sub-devices
+ */
+enum {
+ IPU_CSI0 = 0,
+ IPU_CSI1,
+ IPU_VDIC,
+ IPU_IC_PRP,
+ IPU_IC_PRPENC,
+ IPU_IC_PRPVF,
+ NUM_IPU_SUBDEVS,
+};
+
+/*
+ * Pad definitions for the subdevs with multiple source or
+ * sink pads
+ */
+
+/* ipu_csi */
+enum {
+ CSI_SINK_PAD = 0,
+ CSI_SRC_PAD_DIRECT,
+ CSI_SRC_PAD_IDMAC,
+ CSI_NUM_PADS,
+};
+
+/* ipu_vdic */
+enum {
+ VDIC_SINK_PAD_DIRECT = 0,
+ VDIC_SINK_PAD_IDMAC,
+ VDIC_SRC_PAD_DIRECT,
+ VDIC_NUM_PADS,
+};
+
+/* ipu_ic_prp */
+enum {
+ PRP_SINK_PAD = 0,
+ PRP_SRC_PAD_PRPENC,
+ PRP_SRC_PAD_PRPVF,
+ PRP_NUM_PADS,
+};
+
+/* ipu_ic_prpencvf */
+enum {
+ PRPENCVF_SINK_PAD = 0,
+ PRPENCVF_SRC_PAD,
+ PRPENCVF_NUM_PADS,
+};
+
+/* How long (in ms) to wait for EOF interrupts in the buffer-capture subdevs */
+#define IMX_MEDIA_EOF_TIMEOUT 2000
+
+struct imx_media_pixfmt {
+ /* the in-memory FourCC pixel format */
+ u32 fourcc;
+ /*
+ * the set of equivalent media bus codes for the fourcc.
+ * NOTE! codes pointer is NULL for in-memory-only formats.
+ */
+ const u32 *codes;
+ int bpp; /* total bpp */
+ /* cycles per pixel for generic (bayer) formats for the parallel bus */
+ int cycles;
+ enum ipu_color_space cs;
+ bool planar; /* is a planar format */
+ bool bayer; /* is a raw bayer format */
+ bool ipufmt; /* is one of the IPU internal formats */
+};
+
+enum imx_pixfmt_sel {
+ PIXFMT_SEL_YUV = BIT(0), /* select YUV formats */
+ PIXFMT_SEL_RGB = BIT(1), /* select RGB formats */
+ PIXFMT_SEL_BAYER = BIT(2), /* select BAYER formats */
+ PIXFMT_SEL_IPU = BIT(3), /* select IPU-internal formats */
+ PIXFMT_SEL_YUV_RGB = PIXFMT_SEL_YUV | PIXFMT_SEL_RGB,
+ PIXFMT_SEL_ANY = PIXFMT_SEL_YUV | PIXFMT_SEL_RGB | PIXFMT_SEL_BAYER,
+};
+
+struct imx_media_buffer {
+ struct vb2_v4l2_buffer vbuf; /* v4l buffer must be first */
+ struct list_head list;
+};
+
+struct imx_media_video_dev {
+ struct video_device *vfd;
+
+ /* the user format */
+ struct v4l2_pix_format fmt;
+ /* the compose rectangle */
+ struct v4l2_rect compose;
+ const struct imx_media_pixfmt *cc;
+
+ /* links this vdev to master list */
+ struct list_head list;
+};
+
+static inline struct imx_media_buffer *to_imx_media_vb(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ return container_of(vbuf, struct imx_media_buffer, vbuf);
+}
+
+/*
+ * to support control inheritance to video devices, this
+ * retrieves a pad's list_head of video devices that can
+ * be reached from the pad. Note that only the lists in
+ * source pads get populated, sink pads have empty lists.
+ */
+static inline struct list_head *
+to_pad_vdev_list(struct v4l2_subdev *sd, int pad_index)
+{
+ struct list_head *vdev_list = sd->host_priv;
+
+ return vdev_list ? &vdev_list[pad_index] : NULL;
+}
+
+/* an entry in a pad's video device list */
+struct imx_media_pad_vdev {
+ struct imx_media_video_dev *vdev;
+ struct list_head list;
+};
+
+struct imx_media_dev {
+ struct media_device md;
+ struct v4l2_device v4l2_dev;
+
+ /* the pipeline object */
+ struct media_pipeline pipe;
+
+ struct mutex mutex; /* protect elements below */
+
+ /* master video device list */
+ struct list_head vdev_list;
+
+	/* IPUs this media driver controls, valid after subdevs are bound */
+ struct ipu_soc *ipu[2];
+
+ /* for async subdev registration */
+ struct v4l2_async_notifier notifier;
+
+ /* IC scaler/CSC mem2mem video device */
+ struct imx_media_video_dev *m2m_vdev;
+
+	/* the IPU internal subdevs, registered synchronously */
+ struct v4l2_subdev *sync_sd[2][NUM_IPU_SUBDEVS];
+};
+
+/* imx-media-utils.c */
+const struct imx_media_pixfmt *
+imx_media_find_pixel_format(u32 fourcc, enum imx_pixfmt_sel sel);
+int imx_media_enum_pixel_formats(u32 *fourcc, u32 index,
+ enum imx_pixfmt_sel sel, u32 code);
+const struct imx_media_pixfmt *
+imx_media_find_mbus_format(u32 code, enum imx_pixfmt_sel sel);
+int imx_media_enum_mbus_formats(u32 *code, u32 index,
+ enum imx_pixfmt_sel sel);
+
+static inline const struct imx_media_pixfmt *
+imx_media_find_ipu_format(u32 code, enum imx_pixfmt_sel fmt_sel)
+{
+ return imx_media_find_mbus_format(code, fmt_sel | PIXFMT_SEL_IPU);
+}
+
+static inline int imx_media_enum_ipu_formats(u32 *code, u32 index,
+ enum imx_pixfmt_sel fmt_sel)
+{
+ return imx_media_enum_mbus_formats(code, index,
+ fmt_sel | PIXFMT_SEL_IPU);
+}
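+
+/*
+ * Illustrative usage only (not called anywhere in the driver): enumerate
+ * the IPU-internal YUV media bus codes:
+ *
+ *	u32 code;
+ *	u32 i = 0;
+ *
+ *	while (!imx_media_enum_ipu_formats(&code, i++, PIXFMT_SEL_YUV))
+ *		pr_debug("IPU YUV code: 0x%04x\n", code);
+ */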
+
+int imx_media_init_mbus_fmt(struct v4l2_mbus_framefmt *mbus,
+ u32 width, u32 height, u32 code, u32 field,
+ const struct imx_media_pixfmt **cc);
+int imx_media_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state);
+void imx_media_try_colorimetry(struct v4l2_mbus_framefmt *tryfmt,
+ bool ic_route);
+int imx_media_mbus_fmt_to_pix_fmt(struct v4l2_pix_format *pix,
+ const struct v4l2_mbus_framefmt *mbus,
+ const struct imx_media_pixfmt *cc);
+void imx_media_grp_id_to_sd_name(char *sd_name, int sz,
+ u32 grp_id, int ipu_id);
+struct v4l2_subdev *
+imx_media_find_subdev_by_fwnode(struct imx_media_dev *imxmd,
+ struct fwnode_handle *fwnode);
+struct v4l2_subdev *
+imx_media_find_subdev_by_devname(struct imx_media_dev *imxmd,
+ const char *devname);
+void imx_media_add_video_device(struct imx_media_dev *imxmd,
+ struct imx_media_video_dev *vdev);
+int imx_media_pipeline_csi2_channel(struct media_entity *start_entity);
+struct media_pad *
+imx_media_pipeline_pad(struct media_entity *start_entity, u32 grp_id,
+ enum v4l2_buf_type buftype, bool upstream);
+struct v4l2_subdev *
+imx_media_pipeline_subdev(struct media_entity *start_entity, u32 grp_id,
+ bool upstream);
+struct video_device *
+imx_media_pipeline_video_device(struct media_entity *start_entity,
+ enum v4l2_buf_type buftype, bool upstream);
+struct fwnode_handle *imx_media_get_pad_fwnode(struct media_pad *pad);
+
+struct imx_media_dma_buf {
+ void *virt;
+ dma_addr_t phys;
+ unsigned long len;
+};
+
+void imx_media_free_dma_buf(struct device *dev,
+ struct imx_media_dma_buf *buf);
+int imx_media_alloc_dma_buf(struct device *dev,
+ struct imx_media_dma_buf *buf,
+ int size);
+
+int imx_media_pipeline_set_stream(struct imx_media_dev *imxmd,
+ struct media_entity *entity,
+ bool on);
+
+/* imx-media-dev-common.c */
+int imx_media_probe_complete(struct v4l2_async_notifier *notifier);
+struct imx_media_dev *imx_media_dev_init(struct device *dev,
+ const struct media_device_ops *ops);
+int imx_media_dev_notifier_register(struct imx_media_dev *imxmd,
+ const struct v4l2_async_notifier_operations *ops);
+
+/* imx-media-fim.c */
+struct imx_media_fim;
+void imx_media_fim_eof_monitor(struct imx_media_fim *fim, ktime_t timestamp);
+int imx_media_fim_set_stream(struct imx_media_fim *fim,
+ const struct v4l2_fract *frame_interval,
+ bool on);
+int imx_media_fim_add_controls(struct imx_media_fim *fim);
+struct imx_media_fim *imx_media_fim_init(struct v4l2_subdev *sd);
+void imx_media_fim_free(struct imx_media_fim *fim);
+
+/* imx-media-internal-sd.c */
+int imx_media_register_ipu_internal_subdevs(struct imx_media_dev *imxmd,
+ struct v4l2_subdev *csi);
+void imx_media_unregister_ipu_internal_subdevs(struct imx_media_dev *imxmd);
+
+/* imx-media-of.c */
+int imx_media_add_of_subdevs(struct imx_media_dev *dev,
+ struct device_node *np);
+int imx_media_of_add_csi(struct imx_media_dev *imxmd,
+ struct device_node *csi_np);
+
+/* imx-media-vdic.c */
+struct v4l2_subdev *imx_media_vdic_register(struct v4l2_device *v4l2_dev,
+ struct device *ipu_dev,
+ struct ipu_soc *ipu,
+ u32 grp_id);
+int imx_media_vdic_unregister(struct v4l2_subdev *sd);
+
+/* imx-ic-common.c */
+struct v4l2_subdev *imx_media_ic_register(struct v4l2_device *v4l2_dev,
+ struct device *ipu_dev,
+ struct ipu_soc *ipu,
+ u32 grp_id);
+int imx_media_ic_unregister(struct v4l2_subdev *sd);
+
+/* imx-media-capture.c */
+struct imx_media_video_dev *
+imx_media_capture_device_init(struct device *dev, struct v4l2_subdev *src_sd,
+ int pad, bool legacy_api);
+void imx_media_capture_device_remove(struct imx_media_video_dev *vdev);
+int imx_media_capture_device_register(struct imx_media_video_dev *vdev,
+ u32 link_flags);
+void imx_media_capture_device_unregister(struct imx_media_video_dev *vdev);
+struct imx_media_buffer *
+imx_media_capture_device_next_buf(struct imx_media_video_dev *vdev);
+void imx_media_capture_device_error(struct imx_media_video_dev *vdev);
+
+/* imx-media-csc-scaler.c */
+struct imx_media_video_dev *
+imx_media_csc_scaler_device_init(struct imx_media_dev *dev);
+int imx_media_csc_scaler_device_register(struct imx_media_video_dev *vdev);
+void imx_media_csc_scaler_device_unregister(struct imx_media_video_dev *vdev);
+
+/* subdev group ids */
+#define IMX_MEDIA_GRP_ID_CSI2 BIT(8)
+#define IMX_MEDIA_GRP_ID_CSI BIT(9)
+#define IMX_MEDIA_GRP_ID_IPU_CSI_BIT 10
+#define IMX_MEDIA_GRP_ID_IPU_CSI (0x3 << IMX_MEDIA_GRP_ID_IPU_CSI_BIT)
+#define IMX_MEDIA_GRP_ID_IPU_CSI0 BIT(IMX_MEDIA_GRP_ID_IPU_CSI_BIT)
+#define IMX_MEDIA_GRP_ID_IPU_CSI1 (2 << IMX_MEDIA_GRP_ID_IPU_CSI_BIT)
+#define IMX_MEDIA_GRP_ID_IPU_VDIC BIT(12)
+#define IMX_MEDIA_GRP_ID_IPU_IC_PRP BIT(13)
+#define IMX_MEDIA_GRP_ID_IPU_IC_PRPENC BIT(14)
+#define IMX_MEDIA_GRP_ID_IPU_IC_PRPVF BIT(15)
+#define IMX_MEDIA_GRP_ID_CSI_MUX BIT(16)
+
+#endif
diff --git a/drivers/staging/media/imx/imx6-mipi-csi2.c b/drivers/staging/media/imx/imx6-mipi-csi2.c
new file mode 100644
index 000000000..c4cb558a8
--- /dev/null
+++ b/drivers/staging/media/imx/imx6-mipi-csi2.c
@@ -0,0 +1,803 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * MIPI CSI-2 Receiver Subdev for Freescale i.MX6 SOC.
+ *
+ * Copyright (c) 2012-2017 Mentor Graphics Inc.
+ */
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-subdev.h>
+#include "imx-media.h"
+
+/*
+ * there must be 5 pads: 1 input pad from sensor, and
+ * the 4 virtual channel output pads
+ */
+#define CSI2_SINK_PAD 0
+#define CSI2_NUM_SINK_PADS 1
+#define CSI2_NUM_SRC_PADS 4
+#define CSI2_NUM_PADS 5
+
+/*
+ * The default maximum bit-rate per lane in Mbps, if the
+ * source subdev does not provide V4L2_CID_LINK_FREQ.
+ */
+#define CSI2_DEFAULT_MAX_MBPS 849
+
+struct csi2_dev {
+ struct device *dev;
+ struct v4l2_subdev sd;
+ struct v4l2_async_notifier notifier;
+ struct media_pad pad[CSI2_NUM_PADS];
+ struct clk *dphy_clk;
+ struct clk *pllref_clk;
+ struct clk *pix_clk; /* what is this? */
+ void __iomem *base;
+
+ struct v4l2_subdev *remote;
+ unsigned int remote_pad;
+ unsigned short data_lanes;
+
+ /* lock to protect all members below */
+ struct mutex lock;
+
+ struct v4l2_mbus_framefmt format_mbus;
+
+ int stream_count;
+ struct v4l2_subdev *src_sd;
+ bool sink_linked[CSI2_NUM_SRC_PADS];
+};
+
+#define DEVICE_NAME "imx6-mipi-csi2"
+
+/* Register offsets */
+#define CSI2_VERSION 0x000
+#define CSI2_N_LANES 0x004
+#define CSI2_PHY_SHUTDOWNZ 0x008
+#define CSI2_DPHY_RSTZ 0x00c
+#define CSI2_RESETN 0x010
+#define CSI2_PHY_STATE 0x014
+#define PHY_STOPSTATEDATA_BIT 4
+#define PHY_STOPSTATEDATA(n) BIT(PHY_STOPSTATEDATA_BIT + (n))
+#define PHY_RXCLKACTIVEHS BIT(8)
+#define PHY_RXULPSCLKNOT BIT(9)
+#define PHY_STOPSTATECLK BIT(10)
+#define CSI2_DATA_IDS_1 0x018
+#define CSI2_DATA_IDS_2 0x01c
+#define CSI2_ERR1 0x020
+#define CSI2_ERR2 0x024
+#define CSI2_MSK1 0x028
+#define CSI2_MSK2 0x02c
+#define CSI2_PHY_TST_CTRL0 0x030
+#define PHY_TESTCLR BIT(0)
+#define PHY_TESTCLK BIT(1)
+#define CSI2_PHY_TST_CTRL1 0x034
+#define PHY_TESTEN BIT(16)
+/*
+ * i.MX CSI2IPU Gasket registers follow. The CSI2IPU gasket is
+ * not part of the MIPI CSI-2 core, but its registers fall in the
+ * same register map range.
+ */
+#define CSI2IPU_GASKET 0xf00
+#define CSI2IPU_YUV422_YUYV BIT(2)
+
+static inline struct csi2_dev *sd_to_dev(struct v4l2_subdev *sdev)
+{
+ return container_of(sdev, struct csi2_dev, sd);
+}
+
+static inline struct csi2_dev *notifier_to_dev(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct csi2_dev, notifier);
+}
+
+/*
+ * The required sequence of MIPI CSI-2 startup as specified in the i.MX6
+ * reference manual is as follows:
+ *
+ * 1. Deassert presetn signal (global reset).
+ * It's not clear what this "global reset" signal is (maybe APB
+ * global reset), but in any case this step would probably be
+ * carried out during driver load in csi2_probe().
+ *
+ * 2. Configure MIPI Camera Sensor to put all Tx lanes in LP-11 state.
+ * This must be carried out by the MIPI sensor's s_power(ON) subdev
+ * op.
+ *
+ * 3. D-PHY initialization.
+ * 4. CSI2 Controller programming (Set N_LANES, deassert PHY_SHUTDOWNZ,
+ * deassert PHY_RSTZ, deassert CSI2_RESETN).
+ * 5. Read the PHY status register (PHY_STATE) to confirm that all data and
+ * clock lanes of the D-PHY are in LP-11 state.
+ * 6. Configure the MIPI Camera Sensor to start transmitting a clock on the
+ * D-PHY clock lane.
+ * 7. CSI2 Controller programming - Read the PHY status register (PHY_STATE)
+ * to confirm that the D-PHY is receiving a clock on the D-PHY clock lane.
+ *
+ * All steps 3 through 7 are carried out by csi2_s_stream(ON) here. Step
+ * 6 is accomplished by calling the source subdev's s_stream(ON) between
+ * steps 5 and 7.
+ */
+
+static void csi2_enable(struct csi2_dev *csi2, bool enable)
+{
+ if (enable) {
+ writel(0x1, csi2->base + CSI2_PHY_SHUTDOWNZ);
+ writel(0x1, csi2->base + CSI2_DPHY_RSTZ);
+ writel(0x1, csi2->base + CSI2_RESETN);
+ } else {
+ writel(0x0, csi2->base + CSI2_PHY_SHUTDOWNZ);
+ writel(0x0, csi2->base + CSI2_DPHY_RSTZ);
+ writel(0x0, csi2->base + CSI2_RESETN);
+ }
+}
+
+static void csi2_set_lanes(struct csi2_dev *csi2, unsigned int lanes)
+{
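+	/* N_LANES is programmed with the number of active data lanes minus one */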
+ writel(lanes - 1, csi2->base + CSI2_N_LANES);
+}
+
+static void dw_mipi_csi2_phy_write(struct csi2_dev *csi2,
+ u32 test_code, u32 test_data)
+{
+ /* Clear PHY test interface */
+ writel(PHY_TESTCLR, csi2->base + CSI2_PHY_TST_CTRL0);
+ writel(0x0, csi2->base + CSI2_PHY_TST_CTRL1);
+ writel(0x0, csi2->base + CSI2_PHY_TST_CTRL0);
+
+ /* Raise test interface strobe signal */
+ writel(PHY_TESTCLK, csi2->base + CSI2_PHY_TST_CTRL0);
+
+ /* Configure address write on falling edge and lower strobe signal */
+ writel(PHY_TESTEN | test_code, csi2->base + CSI2_PHY_TST_CTRL1);
+ writel(0x0, csi2->base + CSI2_PHY_TST_CTRL0);
+
+ /* Configure data write on rising edge and raise strobe signal */
+ writel(test_data, csi2->base + CSI2_PHY_TST_CTRL1);
+ writel(PHY_TESTCLK, csi2->base + CSI2_PHY_TST_CTRL0);
+
+ /* Clear strobe signal */
+ writel(0x0, csi2->base + CSI2_PHY_TST_CTRL0);
+}
+
+/*
+ * This table is based on the table documented at
+ * https://community.nxp.com/docs/DOC-94312. It assumes
+ * a 27MHz D-PHY pll reference clock.
+ */
+static const struct {
+ u32 max_mbps;
+ u32 hsfreqrange_sel;
+} hsfreq_map[] = {
+ { 90, 0x00}, {100, 0x20}, {110, 0x40}, {125, 0x02},
+ {140, 0x22}, {150, 0x42}, {160, 0x04}, {180, 0x24},
+ {200, 0x44}, {210, 0x06}, {240, 0x26}, {250, 0x46},
+ {270, 0x08}, {300, 0x28}, {330, 0x48}, {360, 0x2a},
+ {400, 0x4a}, {450, 0x0c}, {500, 0x2c}, {550, 0x0e},
+ {600, 0x2e}, {650, 0x10}, {700, 0x30}, {750, 0x12},
+ {800, 0x32}, {850, 0x14}, {900, 0x34}, {950, 0x54},
+ {1000, 0x74},
+};
+
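+/*
+ * Return the hsfreqrange selector of the first table entry whose range
+ * exceeds the requested per-lane bit rate, or -EINVAL if the rate is
+ * beyond the table.
+ */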
+static int max_mbps_to_hsfreqrange_sel(u32 max_mbps)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hsfreq_map); i++)
+ if (hsfreq_map[i].max_mbps > max_mbps)
+ return hsfreq_map[i].hsfreqrange_sel;
+
+ return -EINVAL;
+}
+
+static int csi2_dphy_init(struct csi2_dev *csi2)
+{
+ struct v4l2_ctrl *ctrl;
+ u32 mbps_per_lane;
+ int sel;
+
+ ctrl = v4l2_ctrl_find(csi2->src_sd->ctrl_handler,
+ V4L2_CID_LINK_FREQ);
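+	/*
+	 * V4L2_CID_LINK_FREQ reports the D-PHY clock rate in Hz. With DDR
+	 * signalling the per-lane bit rate is twice that, and dividing by
+	 * USEC_PER_SEC converts it to Mbps.
+	 */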
+ if (!ctrl)
+ mbps_per_lane = CSI2_DEFAULT_MAX_MBPS;
+ else
+ mbps_per_lane = DIV_ROUND_UP_ULL(2 * ctrl->qmenu_int[ctrl->val],
+ USEC_PER_SEC);
+
+ sel = max_mbps_to_hsfreqrange_sel(mbps_per_lane);
+ if (sel < 0)
+ return sel;
+
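+	/*
+	 * Test code 0x44 addresses the D-PHY hsfreqrange control register
+	 * ("HS RX Control of Lane 0" in the Synopsys D-PHY test interface).
+	 */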
+ dw_mipi_csi2_phy_write(csi2, 0x44, sel);
+
+ return 0;
+}
+
+/*
+ * Waits for ultra-low-power state on D-PHY clock lane. This is currently
+ * unused and may not be needed at all, but keep around just in case.
+ */
+static int __maybe_unused csi2_dphy_wait_ulp(struct csi2_dev *csi2)
+{
+ u32 reg;
+ int ret;
+
+ /* wait for ULP on clock lane */
+ ret = readl_poll_timeout(csi2->base + CSI2_PHY_STATE, reg,
+ !(reg & PHY_RXULPSCLKNOT), 0, 500000);
+ if (ret) {
+ v4l2_err(&csi2->sd, "ULP timeout, phy_state = 0x%08x\n", reg);
+ return ret;
+ }
+
+ /* wait until no errors on bus */
+ ret = readl_poll_timeout(csi2->base + CSI2_ERR1, reg,
+ reg == 0x0, 0, 500000);
+ if (ret) {
+ v4l2_err(&csi2->sd, "stable bus timeout, err1 = 0x%08x\n", reg);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Waits for low-power LP-11 state on data and clock lanes. */
+static void csi2_dphy_wait_stopstate(struct csi2_dev *csi2, unsigned int lanes)
+{
+ u32 mask, reg;
+ int ret;
+
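+	/* one STOPSTATE bit per active data lane, plus the clock lane */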
+ mask = PHY_STOPSTATECLK | (((1 << lanes) - 1) << PHY_STOPSTATEDATA_BIT);
+
+ ret = readl_poll_timeout(csi2->base + CSI2_PHY_STATE, reg,
+ (reg & mask) == mask, 0, 500000);
+ if (ret) {
+ v4l2_warn(&csi2->sd, "LP-11 wait timeout, likely a sensor driver bug, expect capture failures.\n");
+ v4l2_warn(&csi2->sd, "phy_state = 0x%08x\n", reg);
+ }
+}
+
+/* Wait for active clock on the clock lane. */
+static int csi2_dphy_wait_clock_lane(struct csi2_dev *csi2)
+{
+ u32 reg;
+ int ret;
+
+ ret = readl_poll_timeout(csi2->base + CSI2_PHY_STATE, reg,
+ (reg & PHY_RXCLKACTIVEHS), 0, 500000);
+ if (ret) {
+ v4l2_err(&csi2->sd, "clock lane timeout, phy_state = 0x%08x\n",
+ reg);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Setup the i.MX CSI2IPU Gasket */
+static void csi2ipu_gasket_init(struct csi2_dev *csi2)
+{
+ u32 reg = 0;
+
+ switch (csi2->format_mbus.code) {
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ reg = CSI2IPU_YUV422_YUYV;
+ break;
+ default:
+ break;
+ }
+
+ writel(reg, csi2->base + CSI2IPU_GASKET);
+}
+
+static int csi2_get_active_lanes(struct csi2_dev *csi2, unsigned int *lanes)
+{
+ struct v4l2_mbus_config mbus_config = { 0 };
+ int ret;
+
+ *lanes = csi2->data_lanes;
+
+ ret = v4l2_subdev_call(csi2->remote, pad, get_mbus_config,
+ csi2->remote_pad, &mbus_config);
+ if (ret == -ENOIOCTLCMD) {
+ dev_dbg(csi2->dev, "No remote mbus configuration available\n");
+ return 0;
+ }
+
+ if (ret) {
+ dev_err(csi2->dev, "Failed to get remote mbus configuration\n");
+ return ret;
+ }
+
+ if (mbus_config.type != V4L2_MBUS_CSI2_DPHY) {
+ dev_err(csi2->dev, "Unsupported media bus type %u\n",
+ mbus_config.type);
+ return -EINVAL;
+ }
+
+ if (mbus_config.bus.mipi_csi2.num_data_lanes > csi2->data_lanes) {
+ dev_err(csi2->dev,
+ "Unsupported mbus config: too many data lanes %u\n",
+ mbus_config.bus.mipi_csi2.num_data_lanes);
+ return -EINVAL;
+ }
+
+ *lanes = mbus_config.bus.mipi_csi2.num_data_lanes;
+
+ return 0;
+}
+
+static int csi2_start(struct csi2_dev *csi2)
+{
+ unsigned int lanes;
+ int ret;
+
+ ret = clk_prepare_enable(csi2->pix_clk);
+ if (ret)
+ return ret;
+
+ /* setup the gasket */
+ csi2ipu_gasket_init(csi2);
+
+ /* Step 3 */
+ ret = csi2_dphy_init(csi2);
+ if (ret)
+ goto err_disable_clk;
+
+ ret = csi2_get_active_lanes(csi2, &lanes);
+ if (ret)
+ goto err_disable_clk;
+
+ /* Step 4 */
+ csi2_set_lanes(csi2, lanes);
+ csi2_enable(csi2, true);
+
+ /* Step 5 */
+ ret = v4l2_subdev_call(csi2->src_sd, video, pre_streamon,
+ V4L2_SUBDEV_PRE_STREAMON_FL_MANUAL_LP);
+ if (ret && ret != -ENOIOCTLCMD)
+ goto err_assert_reset;
+ csi2_dphy_wait_stopstate(csi2, lanes);
+
+ /* Step 6 */
+ ret = v4l2_subdev_call(csi2->src_sd, video, s_stream, 1);
+ ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
+ if (ret)
+ goto err_stop_lp11;
+
+ /* Step 7 */
+ ret = csi2_dphy_wait_clock_lane(csi2);
+ if (ret)
+ goto err_stop_upstream;
+
+ return 0;
+
+err_stop_upstream:
+ v4l2_subdev_call(csi2->src_sd, video, s_stream, 0);
+err_stop_lp11:
+ v4l2_subdev_call(csi2->src_sd, video, post_streamoff);
+err_assert_reset:
+ csi2_enable(csi2, false);
+err_disable_clk:
+ clk_disable_unprepare(csi2->pix_clk);
+ return ret;
+}
+
+static void csi2_stop(struct csi2_dev *csi2)
+{
+ /* stop upstream */
+ v4l2_subdev_call(csi2->src_sd, video, s_stream, 0);
+ v4l2_subdev_call(csi2->src_sd, video, post_streamoff);
+
+ csi2_enable(csi2, false);
+ clk_disable_unprepare(csi2->pix_clk);
+}
+
+/*
+ * V4L2 subdev operations.
+ */
+
+static int csi2_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct csi2_dev *csi2 = sd_to_dev(sd);
+ int i, ret = 0;
+
+ mutex_lock(&csi2->lock);
+
+ if (!csi2->src_sd) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ for (i = 0; i < CSI2_NUM_SRC_PADS; i++) {
+ if (csi2->sink_linked[i])
+ break;
+ }
+ if (i >= CSI2_NUM_SRC_PADS) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ /*
+ * enable/disable streaming only if stream_count is
+ * going from 0 to 1 / 1 to 0.
+ */
+ if (csi2->stream_count != !enable)
+ goto update_count;
+
+ dev_dbg(csi2->dev, "stream %s\n", enable ? "ON" : "OFF");
+ if (enable)
+ ret = csi2_start(csi2);
+ else
+ csi2_stop(csi2);
+ if (ret)
+ goto out;
+
+update_count:
+ csi2->stream_count += enable ? 1 : -1;
+ if (csi2->stream_count < 0)
+ csi2->stream_count = 0;
+out:
+ mutex_unlock(&csi2->lock);
+ return ret;
+}
+
+static int csi2_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct csi2_dev *csi2 = sd_to_dev(sd);
+ struct v4l2_subdev *remote_sd;
+ int ret = 0;
+
+ dev_dbg(csi2->dev, "link setup %s -> %s", remote->entity->name,
+ local->entity->name);
+
+ remote_sd = media_entity_to_v4l2_subdev(remote->entity);
+
+ mutex_lock(&csi2->lock);
+
+ if (local->flags & MEDIA_PAD_FL_SOURCE) {
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (csi2->sink_linked[local->index - 1]) {
+ ret = -EBUSY;
+ goto out;
+ }
+ csi2->sink_linked[local->index - 1] = true;
+ } else {
+ csi2->sink_linked[local->index - 1] = false;
+ }
+ } else {
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (csi2->src_sd) {
+ ret = -EBUSY;
+ goto out;
+ }
+ csi2->src_sd = remote_sd;
+ } else {
+ csi2->src_sd = NULL;
+ }
+ }
+
+out:
+ mutex_unlock(&csi2->lock);
+ return ret;
+}
+
+static struct v4l2_mbus_framefmt *
+__csi2_get_fmt(struct csi2_dev *csi2, struct v4l2_subdev_state *sd_state,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(&csi2->sd, sd_state, pad);
+ else
+ return &csi2->format_mbus;
+}
+
+static int csi2_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct csi2_dev *csi2 = sd_to_dev(sd);
+ struct v4l2_mbus_framefmt *fmt;
+
+ mutex_lock(&csi2->lock);
+
+ fmt = __csi2_get_fmt(csi2, sd_state, sdformat->pad, sdformat->which);
+
+ sdformat->format = *fmt;
+
+ mutex_unlock(&csi2->lock);
+
+ return 0;
+}
+
+static int csi2_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct csi2_dev *csi2 = sd_to_dev(sd);
+ struct v4l2_mbus_framefmt *fmt;
+ int ret = 0;
+
+ if (sdformat->pad >= CSI2_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&csi2->lock);
+
+ if (csi2->stream_count > 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* Output pads mirror active input pad, no limits on input pads */
+ if (sdformat->pad != CSI2_SINK_PAD)
+ sdformat->format = csi2->format_mbus;
+
+ fmt = __csi2_get_fmt(csi2, sd_state, sdformat->pad, sdformat->which);
+
+ *fmt = sdformat->format;
+out:
+ mutex_unlock(&csi2->lock);
+ return ret;
+}
+
+static int csi2_registered(struct v4l2_subdev *sd)
+{
+ struct csi2_dev *csi2 = sd_to_dev(sd);
+
+ /* set a default mbus format */
+ return imx_media_init_mbus_fmt(&csi2->format_mbus,
+ IMX_MEDIA_DEF_PIX_WIDTH,
+ IMX_MEDIA_DEF_PIX_HEIGHT, 0,
+ V4L2_FIELD_NONE, NULL);
+}
+
+static const struct media_entity_operations csi2_entity_ops = {
+ .link_setup = csi2_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+ .get_fwnode_pad = v4l2_subdev_get_fwnode_pad_1_to_1,
+};
+
+static const struct v4l2_subdev_video_ops csi2_video_ops = {
+ .s_stream = csi2_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops csi2_pad_ops = {
+ .init_cfg = imx_media_init_cfg,
+ .get_fmt = csi2_get_fmt,
+ .set_fmt = csi2_set_fmt,
+};
+
+static const struct v4l2_subdev_ops csi2_subdev_ops = {
+ .video = &csi2_video_ops,
+ .pad = &csi2_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops csi2_internal_ops = {
+ .registered = csi2_registered,
+};
+
+static int csi2_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
+{
+ struct csi2_dev *csi2 = notifier_to_dev(notifier);
+ struct media_pad *sink = &csi2->sd.entity.pads[CSI2_SINK_PAD];
+ int pad;
+
+ pad = media_entity_get_fwnode_pad(&sd->entity, asd->match.fwnode,
+ MEDIA_PAD_FL_SOURCE);
+ if (pad < 0) {
+ dev_err(csi2->dev, "Failed to find pad for %s\n", sd->name);
+ return pad;
+ }
+
+ csi2->remote = sd;
+ csi2->remote_pad = pad;
+
+ dev_dbg(csi2->dev, "Bound %s pad: %d\n", sd->name, pad);
+
+ return v4l2_create_fwnode_links_to_pad(sd, sink, 0);
+}
+
+static void csi2_notify_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
+{
+ struct csi2_dev *csi2 = notifier_to_dev(notifier);
+
+ csi2->remote = NULL;
+}
+
+static const struct v4l2_async_notifier_operations csi2_notify_ops = {
+ .bound = csi2_notify_bound,
+ .unbind = csi2_notify_unbind,
+};
+
+static int csi2_async_register(struct csi2_dev *csi2)
+{
+ struct v4l2_fwnode_endpoint vep = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY,
+ };
+ struct v4l2_async_subdev *asd;
+ struct fwnode_handle *ep;
+ int ret;
+
+ v4l2_async_nf_init(&csi2->notifier);
+
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(csi2->dev), 0, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
+ if (!ep)
+ return -ENOTCONN;
+
+ ret = v4l2_fwnode_endpoint_parse(ep, &vep);
+ if (ret)
+ goto err_parse;
+
+ csi2->data_lanes = vep.bus.mipi_csi2.num_data_lanes;
+
+ dev_dbg(csi2->dev, "data lanes: %d\n", vep.bus.mipi_csi2.num_data_lanes);
+ dev_dbg(csi2->dev, "flags: 0x%08x\n", vep.bus.mipi_csi2.flags);
+
+ asd = v4l2_async_nf_add_fwnode_remote(&csi2->notifier, ep,
+ struct v4l2_async_subdev);
+ fwnode_handle_put(ep);
+
+ if (IS_ERR(asd))
+ return PTR_ERR(asd);
+
+ csi2->notifier.ops = &csi2_notify_ops;
+
+ ret = v4l2_async_subdev_nf_register(&csi2->sd, &csi2->notifier);
+ if (ret)
+ return ret;
+
+ return v4l2_async_register_subdev(&csi2->sd);
+
+err_parse:
+ fwnode_handle_put(ep);
+ return ret;
+}
+
+static int csi2_probe(struct platform_device *pdev)
+{
+ struct csi2_dev *csi2;
+ struct resource *res;
+ int i, ret;
+
+ csi2 = devm_kzalloc(&pdev->dev, sizeof(*csi2), GFP_KERNEL);
+ if (!csi2)
+ return -ENOMEM;
+
+ csi2->dev = &pdev->dev;
+
+ v4l2_subdev_init(&csi2->sd, &csi2_subdev_ops);
+ v4l2_set_subdevdata(&csi2->sd, &pdev->dev);
+ csi2->sd.internal_ops = &csi2_internal_ops;
+ csi2->sd.entity.ops = &csi2_entity_ops;
+ csi2->sd.dev = &pdev->dev;
+ csi2->sd.owner = THIS_MODULE;
+ csi2->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+ strscpy(csi2->sd.name, DEVICE_NAME, sizeof(csi2->sd.name));
+ csi2->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ csi2->sd.grp_id = IMX_MEDIA_GRP_ID_CSI2;
+
+ for (i = 0; i < CSI2_NUM_PADS; i++) {
+ csi2->pad[i].flags = (i == CSI2_SINK_PAD) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+ }
+
+ ret = media_entity_pads_init(&csi2->sd.entity, CSI2_NUM_PADS,
+ csi2->pad);
+ if (ret)
+ return ret;
+
+ csi2->pllref_clk = devm_clk_get(&pdev->dev, "ref");
+ if (IS_ERR(csi2->pllref_clk)) {
+ v4l2_err(&csi2->sd, "failed to get pll reference clock\n");
+ return PTR_ERR(csi2->pllref_clk);
+ }
+
+ csi2->dphy_clk = devm_clk_get(&pdev->dev, "dphy");
+ if (IS_ERR(csi2->dphy_clk)) {
+ v4l2_err(&csi2->sd, "failed to get dphy clock\n");
+ return PTR_ERR(csi2->dphy_clk);
+ }
+
+ csi2->pix_clk = devm_clk_get(&pdev->dev, "pix");
+ if (IS_ERR(csi2->pix_clk)) {
+ v4l2_err(&csi2->sd, "failed to get pixel clock\n");
+ return PTR_ERR(csi2->pix_clk);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ v4l2_err(&csi2->sd, "failed to get platform resources\n");
+ return -ENODEV;
+ }
+
+ csi2->base = devm_ioremap(&pdev->dev, res->start, PAGE_SIZE);
+ if (!csi2->base)
+ return -ENOMEM;
+
+ mutex_init(&csi2->lock);
+
+ ret = clk_prepare_enable(csi2->pllref_clk);
+ if (ret) {
+ v4l2_err(&csi2->sd, "failed to enable pllref_clk\n");
+ goto rmmutex;
+ }
+
+ ret = clk_prepare_enable(csi2->dphy_clk);
+ if (ret) {
+ v4l2_err(&csi2->sd, "failed to enable dphy_clk\n");
+ goto pllref_off;
+ }
+
+ platform_set_drvdata(pdev, &csi2->sd);
+
+ ret = csi2_async_register(csi2);
+ if (ret)
+ goto clean_notifier;
+
+ return 0;
+
+clean_notifier:
+ v4l2_async_nf_unregister(&csi2->notifier);
+ v4l2_async_nf_cleanup(&csi2->notifier);
+ clk_disable_unprepare(csi2->dphy_clk);
+pllref_off:
+ clk_disable_unprepare(csi2->pllref_clk);
+rmmutex:
+ mutex_destroy(&csi2->lock);
+ return ret;
+}
+
+static int csi2_remove(struct platform_device *pdev)
+{
+ struct v4l2_subdev *sd = platform_get_drvdata(pdev);
+ struct csi2_dev *csi2 = sd_to_dev(sd);
+
+ v4l2_async_nf_unregister(&csi2->notifier);
+ v4l2_async_nf_cleanup(&csi2->notifier);
+ v4l2_async_unregister_subdev(sd);
+ clk_disable_unprepare(csi2->dphy_clk);
+ clk_disable_unprepare(csi2->pllref_clk);
+ mutex_destroy(&csi2->lock);
+ media_entity_cleanup(&sd->entity);
+
+ return 0;
+}
+
+static const struct of_device_id csi2_dt_ids[] = {
+ { .compatible = "fsl,imx6-mipi-csi2", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, csi2_dt_ids);
+
+static struct platform_driver csi2_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = csi2_dt_ids,
+ },
+ .probe = csi2_probe,
+ .remove = csi2_remove,
+};
+
+module_platform_driver(csi2_driver);
+
+MODULE_DESCRIPTION("i.MX5/6 MIPI CSI-2 Receiver driver");
+MODULE_AUTHOR("Steve Longerbeam <steve_longerbeam@mentor.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c
new file mode 100644
index 000000000..5f6376c32
--- /dev/null
+++ b/drivers/staging/media/imx/imx7-media-csi.c
@@ -0,0 +1,2310 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * V4L2 Capture CSI Subdev for Freescale i.MX6UL/L / i.MX7 SOC
+ *
+ * Copyright (c) 2019 Linaro Ltd
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gcd.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-dma-contig.h>
+
+#define IMX7_CSI_PAD_SINK 0
+#define IMX7_CSI_PAD_SRC 1
+#define IMX7_CSI_PADS_NUM 2
+
+/* csi control reg 1 */
+#define BIT_SWAP16_EN BIT(31)
+#define BIT_EXT_VSYNC BIT(30)
+#define BIT_EOF_INT_EN BIT(29)
+#define BIT_PRP_IF_EN BIT(28)
+#define BIT_CCIR_MODE BIT(27)
+#define BIT_COF_INT_EN BIT(26)
+#define BIT_SF_OR_INTEN BIT(25)
+#define BIT_RF_OR_INTEN BIT(24)
+#define BIT_SFF_DMA_DONE_INTEN BIT(22)
+#define BIT_STATFF_INTEN BIT(21)
+#define BIT_FB2_DMA_DONE_INTEN BIT(20)
+#define BIT_FB1_DMA_DONE_INTEN BIT(19)
+#define BIT_RXFF_INTEN BIT(18)
+#define BIT_SOF_POL BIT(17)
+#define BIT_SOF_INTEN BIT(16)
+#define BIT_MCLKDIV(n) ((n) << 12)
+#define BIT_MCLKDIV_MASK (0xf << 12)
+#define BIT_HSYNC_POL BIT(11)
+#define BIT_CCIR_EN BIT(10)
+#define BIT_MCLKEN BIT(9)
+#define BIT_FCC BIT(8)
+#define BIT_PACK_DIR BIT(7)
+#define BIT_CLR_STATFIFO BIT(6)
+#define BIT_CLR_RXFIFO BIT(5)
+#define BIT_GCLK_MODE BIT(4)
+#define BIT_INV_DATA BIT(3)
+#define BIT_INV_PCLK BIT(2)
+#define BIT_REDGE BIT(1)
+#define BIT_PIXEL_BIT BIT(0)
+
+/* control reg 2 */
+#define BIT_DMA_BURST_TYPE_RFF_INCR4 (1 << 30)
+#define BIT_DMA_BURST_TYPE_RFF_INCR8 (2 << 30)
+#define BIT_DMA_BURST_TYPE_RFF_INCR16 (3 << 30)
+#define BIT_DMA_BURST_TYPE_RFF_MASK (3 << 30)
+
+/* control reg 3 */
+#define BIT_FRMCNT(n) ((n) << 16)
+#define BIT_FRMCNT_MASK (0xffff << 16)
+#define BIT_FRMCNT_RST BIT(15)
+#define BIT_DMA_REFLASH_RFF BIT(14)
+#define BIT_DMA_REFLASH_SFF BIT(13)
+#define BIT_DMA_REQ_EN_RFF BIT(12)
+#define BIT_DMA_REQ_EN_SFF BIT(11)
+#define BIT_STATFF_LEVEL(n) ((n) << 8)
+#define BIT_STATFF_LEVEL_MASK (0x7 << 8)
+#define BIT_HRESP_ERR_EN BIT(7)
+#define BIT_RXFF_LEVEL(n) ((n) << 4)
+#define BIT_RXFF_LEVEL_MASK (0x7 << 4)
+#define BIT_TWO_8BIT_SENSOR BIT(3)
+#define BIT_ZERO_PACK_EN BIT(2)
+#define BIT_ECC_INT_EN BIT(1)
+#define BIT_ECC_AUTO_EN BIT(0)
+
+/* csi status reg */
+#define BIT_ADDR_CH_ERR_INT BIT(28)
+#define BIT_FIELD0_INT BIT(27)
+#define BIT_FIELD1_INT BIT(26)
+#define BIT_SFF_OR_INT BIT(25)
+#define BIT_RFF_OR_INT BIT(24)
+#define BIT_DMA_TSF_DONE_SFF BIT(22)
+#define BIT_STATFF_INT BIT(21)
+#define BIT_DMA_TSF_DONE_FB2 BIT(20)
+#define BIT_DMA_TSF_DONE_FB1 BIT(19)
+#define BIT_RXFF_INT BIT(18)
+#define BIT_EOF_INT BIT(17)
+#define BIT_SOF_INT BIT(16)
+#define BIT_F2_INT BIT(15)
+#define BIT_F1_INT BIT(14)
+#define BIT_COF_INT BIT(13)
+#define BIT_HRESP_ERR_INT BIT(7)
+#define BIT_ECC_INT BIT(1)
+#define BIT_DRDY BIT(0)
+
+/* csi image parameter reg */
+#define BIT_IMAGE_WIDTH(n) ((n) << 16)
+#define BIT_IMAGE_HEIGHT(n) (n)
+
+/* csi control reg 18 */
+#define BIT_CSI_HW_ENABLE BIT(31)
+#define BIT_MIPI_DATA_FORMAT_RAW8 (0x2a << 25)
+#define BIT_MIPI_DATA_FORMAT_RAW10 (0x2b << 25)
+#define BIT_MIPI_DATA_FORMAT_RAW12 (0x2c << 25)
+#define BIT_MIPI_DATA_FORMAT_RAW14 (0x2d << 25)
+#define BIT_MIPI_DATA_FORMAT_YUV422_8B (0x1e << 25)
+#define BIT_MIPI_DATA_FORMAT_MASK (0x3f << 25)
+#define BIT_DATA_FROM_MIPI BIT(22)
+#define BIT_MIPI_YU_SWAP BIT(21)
+#define BIT_MIPI_DOUBLE_CMPNT BIT(20)
+#define BIT_MASK_OPTION_FIRST_FRAME (0 << 18)
+#define BIT_MASK_OPTION_CSI_EN (1 << 18)
+#define BIT_MASK_OPTION_SECOND_FRAME (2 << 18)
+#define BIT_MASK_OPTION_ON_DATA (3 << 18)
+#define BIT_BASEADDR_CHG_ERR_EN BIT(9)
+#define BIT_BASEADDR_SWITCH_SEL BIT(5)
+#define BIT_BASEADDR_SWITCH_EN BIT(4)
+#define BIT_PARALLEL24_EN BIT(3)
+#define BIT_DEINTERLACE_EN BIT(2)
+#define BIT_TVDECODER_IN_EN BIT(1)
+#define BIT_NTSC_EN BIT(0)
+
+#define CSI_MCLK_VF 1
+#define CSI_MCLK_ENC 2
+#define CSI_MCLK_RAW 4
+#define CSI_MCLK_I2C 8
+
+#define CSI_CSICR1 0x00
+#define CSI_CSICR2 0x04
+#define CSI_CSICR3 0x08
+#define CSI_STATFIFO 0x0c
+#define CSI_CSIRXFIFO 0x10
+#define CSI_CSIRXCNT 0x14
+#define CSI_CSISR 0x18
+
+#define CSI_CSIDBG 0x1c
+#define CSI_CSIDMASA_STATFIFO 0x20
+#define CSI_CSIDMATS_STATFIFO 0x24
+#define CSI_CSIDMASA_FB1 0x28
+#define CSI_CSIDMASA_FB2 0x2c
+#define CSI_CSIFBUF_PARA 0x30
+#define CSI_CSIIMAG_PARA 0x34
+
+#define CSI_CSICR18 0x48
+#define CSI_CSICR19 0x4c
+
+#define IMX7_CSI_VIDEO_NAME "imx-capture"
+/* In bytes, per queue */
+#define IMX7_CSI_VIDEO_MEM_LIMIT SZ_512M
+#define IMX7_CSI_VIDEO_EOF_TIMEOUT 2000 /* in milliseconds */
+
+#define IMX7_CSI_DEF_MBUS_CODE MEDIA_BUS_FMT_UYVY8_2X8
+#define IMX7_CSI_DEF_PIX_FORMAT V4L2_PIX_FMT_UYVY
+#define IMX7_CSI_DEF_PIX_WIDTH 640
+#define IMX7_CSI_DEF_PIX_HEIGHT 480
+
+enum imx_csi_model {
+ IMX7_CSI_IMX7 = 0,
+ IMX7_CSI_IMX8MQ,
+};
+
+struct imx7_csi_pixfmt {
+ /* the in-memory FourCC pixel format */
+ u32 fourcc;
+ /*
+ * the set of equivalent media bus codes for the fourcc.
+ * NOTE! codes pointer is NULL for in-memory-only formats.
+ */
+ const u32 *codes;
+ int bpp; /* total bpp */
+ bool yuv;
+};
+
+struct imx7_csi_vb2_buffer {
+ struct vb2_v4l2_buffer vbuf;
+ struct list_head list;
+};
+
+static inline struct imx7_csi_vb2_buffer *
+to_imx7_csi_vb2_buffer(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ return container_of(vbuf, struct imx7_csi_vb2_buffer, vbuf);
+}
+
+struct imx7_csi_dma_buf {
+ void *virt;
+ dma_addr_t phys;
+ unsigned long len;
+};
+
+struct imx7_csi {
+ struct device *dev;
+
+ /* Resources and locks */
+ void __iomem *regbase;
+ int irq;
+ struct clk *mclk;
+
+ struct mutex lock; /* Protects is_streaming, format_mbus, cc */
+ spinlock_t irqlock; /* Protects last_eof */
+
+ /* Media and V4L2 device */
+ struct media_device mdev;
+ struct v4l2_device v4l2_dev;
+ struct v4l2_async_notifier notifier;
+ struct media_pipeline pipe;
+
+ struct v4l2_subdev *src_sd;
+ bool is_csi2;
+
+ /* V4L2 subdev */
+ struct v4l2_subdev sd;
+ struct media_pad pad[IMX7_CSI_PADS_NUM];
+
+ struct v4l2_mbus_framefmt format_mbus[IMX7_CSI_PADS_NUM];
+ const struct imx7_csi_pixfmt *cc[IMX7_CSI_PADS_NUM];
+
+ /* Video device */
+ struct video_device *vdev; /* Video device */
+ struct media_pad vdev_pad; /* Video device pad */
+
+ struct v4l2_pix_format vdev_fmt; /* The user format */
+ const struct imx7_csi_pixfmt *vdev_cc;
+ struct v4l2_rect vdev_compose; /* The compose rectangle */
+
+ struct mutex vdev_mutex; /* Protect vdev operations */
+
+ struct vb2_queue q; /* The videobuf2 queue */
+ struct list_head ready_q; /* List of queued buffers */
+ spinlock_t q_lock; /* Protect ready_q */
+
+ /* Buffers and streaming state */
+ struct imx7_csi_vb2_buffer *active_vb2_buf[2];
+ struct imx7_csi_dma_buf underrun_buf;
+
+ bool is_streaming;
+ int buf_num;
+ u32 frame_sequence;
+
+ bool last_eof;
+ struct completion last_eof_completion;
+
+ enum imx_csi_model model;
+};
+
+static struct imx7_csi *
+imx7_csi_notifier_to_dev(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct imx7_csi, notifier);
+}
+
+/* -----------------------------------------------------------------------------
+ * Hardware Configuration
+ */
+
+static u32 imx7_csi_reg_read(struct imx7_csi *csi, unsigned int offset)
+{
+ return readl(csi->regbase + offset);
+}
+
+static void imx7_csi_reg_write(struct imx7_csi *csi, unsigned int value,
+ unsigned int offset)
+{
+ writel(value, csi->regbase + offset);
+}
+
+static u32 imx7_csi_irq_clear(struct imx7_csi *csi)
+{
+ u32 isr;
+
+ isr = imx7_csi_reg_read(csi, CSI_CSISR);
+ imx7_csi_reg_write(csi, isr, CSI_CSISR);
+
+ return isr;
+}
+
+static void imx7_csi_init_default(struct imx7_csi *csi)
+{
+ imx7_csi_reg_write(csi, BIT_SOF_POL | BIT_REDGE | BIT_GCLK_MODE |
+ BIT_HSYNC_POL | BIT_FCC | BIT_MCLKDIV(1) |
+ BIT_MCLKEN, CSI_CSICR1);
+ imx7_csi_reg_write(csi, 0, CSI_CSICR2);
+ imx7_csi_reg_write(csi, BIT_FRMCNT_RST, CSI_CSICR3);
+
+ imx7_csi_reg_write(csi, BIT_IMAGE_WIDTH(IMX7_CSI_DEF_PIX_WIDTH) |
+ BIT_IMAGE_HEIGHT(IMX7_CSI_DEF_PIX_HEIGHT),
+ CSI_CSIIMAG_PARA);
+
+ imx7_csi_reg_write(csi, BIT_DMA_REFLASH_RFF, CSI_CSICR3);
+}
+
+static void imx7_csi_hw_enable_irq(struct imx7_csi *csi)
+{
+ u32 cr1 = imx7_csi_reg_read(csi, CSI_CSICR1);
+
+ cr1 |= BIT_RFF_OR_INT;
+ cr1 |= BIT_FB1_DMA_DONE_INTEN;
+ cr1 |= BIT_FB2_DMA_DONE_INTEN;
+
+ imx7_csi_reg_write(csi, cr1, CSI_CSICR1);
+}
+
+static void imx7_csi_hw_disable_irq(struct imx7_csi *csi)
+{
+ u32 cr1 = imx7_csi_reg_read(csi, CSI_CSICR1);
+
+ cr1 &= ~BIT_RFF_OR_INT;
+ cr1 &= ~BIT_FB1_DMA_DONE_INTEN;
+ cr1 &= ~BIT_FB2_DMA_DONE_INTEN;
+
+ imx7_csi_reg_write(csi, cr1, CSI_CSICR1);
+}
+
+static void imx7_csi_hw_enable(struct imx7_csi *csi)
+{
+ u32 cr = imx7_csi_reg_read(csi, CSI_CSICR18);
+
+ cr |= BIT_CSI_HW_ENABLE;
+
+ imx7_csi_reg_write(csi, cr, CSI_CSICR18);
+}
+
+static void imx7_csi_hw_disable(struct imx7_csi *csi)
+{
+ u32 cr = imx7_csi_reg_read(csi, CSI_CSICR18);
+
+ cr &= ~BIT_CSI_HW_ENABLE;
+
+ imx7_csi_reg_write(csi, cr, CSI_CSICR18);
+}
+
+static void imx7_csi_dma_reflash(struct imx7_csi *csi)
+{
+ u32 cr3;
+
+ cr3 = imx7_csi_reg_read(csi, CSI_CSICR3);
+ cr3 |= BIT_DMA_REFLASH_RFF;
+ imx7_csi_reg_write(csi, cr3, CSI_CSICR3);
+}
+
+static void imx7_csi_rx_fifo_clear(struct imx7_csi *csi)
+{
+ u32 cr1 = imx7_csi_reg_read(csi, CSI_CSICR1) & ~BIT_FCC;
+
+ imx7_csi_reg_write(csi, cr1, CSI_CSICR1);
+ imx7_csi_reg_write(csi, cr1 | BIT_CLR_RXFIFO, CSI_CSICR1);
+ imx7_csi_reg_write(csi, cr1 | BIT_FCC, CSI_CSICR1);
+}
+
+static void imx7_csi_dmareq_rff_enable(struct imx7_csi *csi)
+{
+ u32 cr3 = imx7_csi_reg_read(csi, CSI_CSICR3);
+
+ cr3 |= BIT_DMA_REQ_EN_RFF;
+ cr3 |= BIT_HRESP_ERR_EN;
+ cr3 &= ~BIT_RXFF_LEVEL_MASK;
+ cr3 |= BIT_RXFF_LEVEL(2);
+
+ imx7_csi_reg_write(csi, cr3, CSI_CSICR3);
+}
+
+static void imx7_csi_dmareq_rff_disable(struct imx7_csi *csi)
+{
+ u32 cr3 = imx7_csi_reg_read(csi, CSI_CSICR3);
+
+ cr3 &= ~BIT_DMA_REQ_EN_RFF;
+ cr3 &= ~BIT_HRESP_ERR_EN;
+ imx7_csi_reg_write(csi, cr3, CSI_CSICR3);
+}
+
+static void imx7_csi_update_buf(struct imx7_csi *csi, dma_addr_t phys,
+ int buf_num)
+{
+ if (buf_num == 1)
+ imx7_csi_reg_write(csi, phys, CSI_CSIDMASA_FB2);
+ else
+ imx7_csi_reg_write(csi, phys, CSI_CSIDMASA_FB1);
+}
+
+static struct imx7_csi_vb2_buffer *imx7_csi_video_next_buf(struct imx7_csi *csi);
+
+static void imx7_csi_setup_vb2_buf(struct imx7_csi *csi)
+{
+ struct imx7_csi_vb2_buffer *buf;
+ struct vb2_buffer *vb2_buf;
+ dma_addr_t phys[2];
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ buf = imx7_csi_video_next_buf(csi);
+ if (buf) {
+ csi->active_vb2_buf[i] = buf;
+ vb2_buf = &buf->vbuf.vb2_buf;
+ phys[i] = vb2_dma_contig_plane_dma_addr(vb2_buf, 0);
+ } else {
+ csi->active_vb2_buf[i] = NULL;
+ phys[i] = csi->underrun_buf.phys;
+ }
+
+ imx7_csi_update_buf(csi, phys[i], i);
+ }
+}
+
+static void imx7_csi_dma_unsetup_vb2_buf(struct imx7_csi *csi,
+ enum vb2_buffer_state return_status)
+{
+ struct imx7_csi_vb2_buffer *buf;
+ int i;
+
+ /* return any remaining active frames with return_status */
+ for (i = 0; i < 2; i++) {
+ buf = csi->active_vb2_buf[i];
+ if (buf) {
+ struct vb2_buffer *vb = &buf->vbuf.vb2_buf;
+
+ vb->timestamp = ktime_get_ns();
+ vb2_buffer_done(vb, return_status);
+ csi->active_vb2_buf[i] = NULL;
+ }
+ }
+}
+
+static void imx7_csi_free_dma_buf(struct imx7_csi *csi,
+ struct imx7_csi_dma_buf *buf)
+{
+ if (buf->virt)
+ dma_free_coherent(csi->dev, buf->len, buf->virt, buf->phys);
+
+ buf->virt = NULL;
+ buf->phys = 0;
+}
+
+static int imx7_csi_alloc_dma_buf(struct imx7_csi *csi,
+ struct imx7_csi_dma_buf *buf, int size)
+{
+ imx7_csi_free_dma_buf(csi, buf);
+
+ buf->len = PAGE_ALIGN(size);
+ buf->virt = dma_alloc_coherent(csi->dev, buf->len, &buf->phys,
+ GFP_DMA | GFP_KERNEL);
+ if (!buf->virt)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int imx7_csi_dma_setup(struct imx7_csi *csi)
+{
+ int ret;
+
+ ret = imx7_csi_alloc_dma_buf(csi, &csi->underrun_buf,
+ csi->vdev_fmt.sizeimage);
+ if (ret < 0) {
+ v4l2_warn(&csi->sd, "consider increasing the CMA area\n");
+ return ret;
+ }
+
+ csi->frame_sequence = 0;
+ csi->last_eof = false;
+ init_completion(&csi->last_eof_completion);
+
+ imx7_csi_setup_vb2_buf(csi);
+
+ return 0;
+}
+
+static void imx7_csi_dma_cleanup(struct imx7_csi *csi,
+ enum vb2_buffer_state return_status)
+{
+ imx7_csi_dma_unsetup_vb2_buf(csi, return_status);
+ imx7_csi_free_dma_buf(csi, &csi->underrun_buf);
+}
+
+static void imx7_csi_dma_stop(struct imx7_csi *csi)
+{
+ unsigned long timeout_jiffies;
+ unsigned long flags;
+ int ret;
+
+ /* mark next EOF interrupt as the last before stream off */
+ spin_lock_irqsave(&csi->irqlock, flags);
+ csi->last_eof = true;
+ spin_unlock_irqrestore(&csi->irqlock, flags);
+
+	/* Then wait for the interrupt handler to signal completion. */
+ timeout_jiffies = msecs_to_jiffies(IMX7_CSI_VIDEO_EOF_TIMEOUT);
+ ret = wait_for_completion_timeout(&csi->last_eof_completion,
+ timeout_jiffies);
+ if (ret == 0)
+ v4l2_warn(&csi->sd, "wait last EOF timeout\n");
+
+ imx7_csi_hw_disable_irq(csi);
+}
+
+static void imx7_csi_configure(struct imx7_csi *csi)
+{
+ struct v4l2_pix_format *out_pix = &csi->vdev_fmt;
+ int width = out_pix->width;
+ u32 stride = 0;
+ u32 cr3 = BIT_FRMCNT_RST;
+ u32 cr1, cr18;
+
+ cr18 = imx7_csi_reg_read(csi, CSI_CSICR18);
+
+ cr18 &= ~(BIT_CSI_HW_ENABLE | BIT_MIPI_DATA_FORMAT_MASK |
+ BIT_DATA_FROM_MIPI | BIT_MIPI_DOUBLE_CMPNT |
+ BIT_BASEADDR_CHG_ERR_EN | BIT_BASEADDR_SWITCH_SEL |
+ BIT_BASEADDR_SWITCH_EN | BIT_DEINTERLACE_EN);
+
+ if (out_pix->field == V4L2_FIELD_INTERLACED) {
+ cr18 |= BIT_DEINTERLACE_EN;
+ stride = out_pix->width;
+ }
+
+ if (!csi->is_csi2) {
+ cr1 = BIT_SOF_POL | BIT_REDGE | BIT_GCLK_MODE | BIT_HSYNC_POL
+ | BIT_FCC | BIT_MCLKDIV(1) | BIT_MCLKEN;
+
+ cr18 |= BIT_BASEADDR_SWITCH_EN | BIT_BASEADDR_SWITCH_SEL |
+ BIT_BASEADDR_CHG_ERR_EN;
+
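+		/*
+		 * Editorial note: on the 8-bit parallel bus each YUV 4:2:2
+		 * pixel arrives as two 8-bit samples, so the width programmed
+		 * into the CSI below is expressed in samples, i.e. twice the
+		 * pixel width.
+		 */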
+ if (out_pix->pixelformat == V4L2_PIX_FMT_UYVY ||
+ out_pix->pixelformat == V4L2_PIX_FMT_YUYV)
+ width *= 2;
+ } else {
+ cr1 = BIT_SOF_POL | BIT_REDGE | BIT_HSYNC_POL | BIT_FCC
+ | BIT_MCLKDIV(1) | BIT_MCLKEN;
+
+ cr18 |= BIT_DATA_FROM_MIPI;
+
+ switch (csi->format_mbus[IMX7_CSI_PAD_SINK].code) {
+ case MEDIA_BUS_FMT_Y8_1X8:
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ cr18 |= BIT_MIPI_DATA_FORMAT_RAW8;
+ break;
+ case MEDIA_BUS_FMT_Y10_1X10:
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ cr3 |= BIT_TWO_8BIT_SENSOR;
+ cr18 |= BIT_MIPI_DATA_FORMAT_RAW10;
+ break;
+ case MEDIA_BUS_FMT_Y12_1X12:
+ case MEDIA_BUS_FMT_SBGGR12_1X12:
+ case MEDIA_BUS_FMT_SGBRG12_1X12:
+ case MEDIA_BUS_FMT_SGRBG12_1X12:
+ case MEDIA_BUS_FMT_SRGGB12_1X12:
+ cr3 |= BIT_TWO_8BIT_SENSOR;
+ cr18 |= BIT_MIPI_DATA_FORMAT_RAW12;
+ break;
+ case MEDIA_BUS_FMT_Y14_1X14:
+ case MEDIA_BUS_FMT_SBGGR14_1X14:
+ case MEDIA_BUS_FMT_SGBRG14_1X14:
+ case MEDIA_BUS_FMT_SGRBG14_1X14:
+ case MEDIA_BUS_FMT_SRGGB14_1X14:
+ cr3 |= BIT_TWO_8BIT_SENSOR;
+ cr18 |= BIT_MIPI_DATA_FORMAT_RAW14;
+ break;
+
+ /*
+ * The CSI bridge has a 16-bit input bus. Depending on the
+ * connected source, data may be transmitted with 8 or 10 bits
+ * per clock sample (in bits [9:2] or [9:0] respectively) or
+ * with 16 bits per clock sample (in bits [15:0]). The data is
+ * then packed into a 32-bit FIFO (as shown in figure 13-11 of
+ * the i.MX8MM reference manual rev. 3).
+ *
+ * The data packing in a 32-bit FIFO input word is controlled by
+ * the CR3 TWO_8BIT_SENSOR field (also known as SENSOR_16BITS in
+ * the i.MX8MM reference manual). When set to 0, data packing
+ * groups four 8-bit input samples (bits [9:2]). When set to 1,
+ * data packing groups two 16-bit input samples (bits [15:0]).
+ *
+ * The register field CR18 MIPI_DOUBLE_CMPNT also needs to be
+ * configured according to the input format for YUV 4:2:2 data.
+ * The field controls the gasket between the CSI-2 receiver and
+ * the CSI bridge. On i.MX7 and i.MX8MM, the field must be set
+ * to 1 when the CSIS outputs 16-bit samples. On i.MX8MQ, the
+ * gasket ignores the MIPI_DOUBLE_CMPNT bit and YUV 4:2:2 always
+ * uses 16-bit samples. Setting MIPI_DOUBLE_CMPNT in that case
+ * has no effect, but doesn't cause any issue.
+ */
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ cr18 |= BIT_MIPI_DATA_FORMAT_YUV422_8B;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ cr3 |= BIT_TWO_8BIT_SENSOR;
+ cr18 |= BIT_MIPI_DATA_FORMAT_YUV422_8B |
+ BIT_MIPI_DOUBLE_CMPNT;
+ break;
+ }
+ }
+
+ imx7_csi_reg_write(csi, cr1, CSI_CSICR1);
+ imx7_csi_reg_write(csi, BIT_DMA_BURST_TYPE_RFF_INCR16, CSI_CSICR2);
+ imx7_csi_reg_write(csi, cr3, CSI_CSICR3);
+ imx7_csi_reg_write(csi, cr18, CSI_CSICR18);
+
+ imx7_csi_reg_write(csi, (width * out_pix->height) >> 2, CSI_CSIRXCNT);
+ imx7_csi_reg_write(csi, BIT_IMAGE_WIDTH(width) |
+ BIT_IMAGE_HEIGHT(out_pix->height),
+ CSI_CSIIMAG_PARA);
+ imx7_csi_reg_write(csi, stride, CSI_CSIFBUF_PARA);
+}
+
+static int imx7_csi_init(struct imx7_csi *csi)
+{
+ int ret;
+
+ ret = clk_prepare_enable(csi->mclk);
+ if (ret < 0)
+ return ret;
+
+ imx7_csi_configure(csi);
+
+ ret = imx7_csi_dma_setup(csi);
+ if (ret < 0) {
+ clk_disable_unprepare(csi->mclk);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void imx7_csi_deinit(struct imx7_csi *csi,
+ enum vb2_buffer_state return_status)
+{
+ imx7_csi_dma_cleanup(csi, return_status);
+ imx7_csi_init_default(csi);
+ imx7_csi_dmareq_rff_disable(csi);
+ clk_disable_unprepare(csi->mclk);
+}
+
+static void imx7_csi_baseaddr_switch_on_second_frame(struct imx7_csi *csi)
+{
+ u32 cr18 = imx7_csi_reg_read(csi, CSI_CSICR18);
+
+ cr18 |= BIT_BASEADDR_SWITCH_EN | BIT_BASEADDR_SWITCH_SEL |
+ BIT_BASEADDR_CHG_ERR_EN;
+ cr18 |= BIT_MASK_OPTION_SECOND_FRAME;
+ imx7_csi_reg_write(csi, cr18, CSI_CSICR18);
+}
+
+static void imx7_csi_enable(struct imx7_csi *csi)
+{
+ /* Clear the Rx FIFO and reflash the DMA controller. */
+ imx7_csi_rx_fifo_clear(csi);
+ imx7_csi_dma_reflash(csi);
+
+ usleep_range(2000, 3000);
+
+ /* Clear and enable the interrupts. */
+ imx7_csi_irq_clear(csi);
+ imx7_csi_hw_enable_irq(csi);
+
+ /* Enable the RxFIFO DMA and the CSI. */
+ imx7_csi_dmareq_rff_enable(csi);
+ imx7_csi_hw_enable(csi);
+
+ if (csi->model == IMX7_CSI_IMX8MQ)
+ imx7_csi_baseaddr_switch_on_second_frame(csi);
+}
+
+static void imx7_csi_disable(struct imx7_csi *csi)
+{
+ imx7_csi_dma_stop(csi);
+
+ imx7_csi_dmareq_rff_disable(csi);
+
+ imx7_csi_hw_disable_irq(csi);
+
+ imx7_csi_hw_disable(csi);
+}
+
+/* -----------------------------------------------------------------------------
+ * Interrupt Handling
+ */
+
+static void imx7_csi_error_recovery(struct imx7_csi *csi)
+{
+ imx7_csi_hw_disable(csi);
+
+ imx7_csi_rx_fifo_clear(csi);
+
+ imx7_csi_dma_reflash(csi);
+
+ imx7_csi_hw_enable(csi);
+}
+
+static void imx7_csi_vb2_buf_done(struct imx7_csi *csi)
+{
+ struct imx7_csi_vb2_buffer *done, *next;
+ struct vb2_buffer *vb;
+ dma_addr_t phys;
+
+ done = csi->active_vb2_buf[csi->buf_num];
+ if (done) {
+ done->vbuf.field = csi->vdev_fmt.field;
+ done->vbuf.sequence = csi->frame_sequence;
+ vb = &done->vbuf.vb2_buf;
+ vb->timestamp = ktime_get_ns();
+ vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+ }
+ csi->frame_sequence++;
+
+ /* get next queued buffer */
+ next = imx7_csi_video_next_buf(csi);
+ if (next) {
+ phys = vb2_dma_contig_plane_dma_addr(&next->vbuf.vb2_buf, 0);
+ csi->active_vb2_buf[csi->buf_num] = next;
+ } else {
+ phys = csi->underrun_buf.phys;
+ csi->active_vb2_buf[csi->buf_num] = NULL;
+ }
+
+ imx7_csi_update_buf(csi, phys, csi->buf_num);
+}
+
+static irqreturn_t imx7_csi_irq_handler(int irq, void *data)
+{
+ struct imx7_csi *csi = data;
+ u32 status;
+
+ spin_lock(&csi->irqlock);
+
+ status = imx7_csi_irq_clear(csi);
+
+ if (status & BIT_RFF_OR_INT) {
+ dev_warn(csi->dev, "Rx fifo overflow\n");
+ imx7_csi_error_recovery(csi);
+ }
+
+ if (status & BIT_HRESP_ERR_INT) {
+ dev_warn(csi->dev, "Hresponse error detected\n");
+ imx7_csi_error_recovery(csi);
+ }
+
+ if (status & BIT_ADDR_CH_ERR_INT) {
+ imx7_csi_hw_disable(csi);
+
+ imx7_csi_dma_reflash(csi);
+
+ imx7_csi_hw_enable(csi);
+ }
+
+ if ((status & BIT_DMA_TSF_DONE_FB1) &&
+ (status & BIT_DMA_TSF_DONE_FB2)) {
+ /*
+		 * When both the FB1 and FB2 interrupt bits are set, the CSI
+		 * DMA is busy with one of the two frame buffers, but software
+		 * cannot tell which one. Skip the buffer update in that case,
+		 * so that a new base address is not programmed for a buffer
+		 * the hardware may still be writing to.
+ */
+ } else if (status & BIT_DMA_TSF_DONE_FB1) {
+ csi->buf_num = 0;
+ } else if (status & BIT_DMA_TSF_DONE_FB2) {
+ csi->buf_num = 1;
+ }
+
+ if ((status & BIT_DMA_TSF_DONE_FB1) ||
+ (status & BIT_DMA_TSF_DONE_FB2)) {
+ imx7_csi_vb2_buf_done(csi);
+
+ if (csi->last_eof) {
+ complete(&csi->last_eof_completion);
+ csi->last_eof = false;
+ }
+ }
+
+ spin_unlock(&csi->irqlock);
+
+ return IRQ_HANDLED;
+}
+
+/* -----------------------------------------------------------------------------
+ * Format Helpers
+ */
+
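+/*
+ * Build a zero-terminated array of media bus codes as a compound literal;
+ * used to populate the imx7_csi_pixfmt::codes tables below.
+ */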
+#define IMX_BUS_FMTS(fmt...) (const u32[]) {fmt, 0}
+
+/*
+ * List of supported pixel formats for the subdevs. Keep V4L2_PIX_FMT_UYVY and
+ * MEDIA_BUS_FMT_UYVY8_2X8 first to match IMX7_CSI_DEF_PIX_FORMAT and
+ * IMX7_CSI_DEF_MBUS_CODE.
+ */
+static const struct imx7_csi_pixfmt pixel_formats[] = {
+ /*** YUV formats start here ***/
+ {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .codes = IMX_BUS_FMTS(
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_UYVY8_1X16
+ ),
+ .yuv = true,
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .codes = IMX_BUS_FMTS(
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_YUYV8_1X16
+ ),
+ .yuv = true,
+ .bpp = 16,
+ },
+ /*** raw bayer and grayscale formats start here ***/
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SBGGR8_1X8),
+ .bpp = 8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SGBRG8_1X8),
+ .bpp = 8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SGRBG8_1X8),
+ .bpp = 8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SRGGB8_1X8),
+ .bpp = 8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SBGGR10_1X10),
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG10,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SGBRG10_1X10),
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG10,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SGRBG10_1X10),
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB10,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SRGGB10_1X10),
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR12,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SBGGR12_1X12),
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG12,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SGBRG12_1X12),
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG12,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SGRBG12_1X12),
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB12,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SRGGB12_1X12),
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR14,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SBGGR14_1X14),
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG14,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SGBRG14_1X14),
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG14,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SGRBG14_1X14),
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB14,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_SRGGB14_1X14),
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_Y8_1X8),
+ .bpp = 8,
+ }, {
+ .fourcc = V4L2_PIX_FMT_Y10,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_Y10_1X10),
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_Y12,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_Y12_1X12),
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_Y14,
+ .codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_Y14_1X14),
+ .bpp = 16,
+ },
+};
+
+/*
+ * Search the pixel_formats[] array for an entry with the given fourcc and
+ * return it.
+ */
+static const struct imx7_csi_pixfmt *imx7_csi_find_pixel_format(u32 fourcc)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(pixel_formats); i++) {
+ const struct imx7_csi_pixfmt *fmt = &pixel_formats[i];
+
+ if (fmt->fourcc == fourcc)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+/*
+ * Search in the pixel_formats[] array for an entry with the given media
+ * bus code and return it.
+ */
+static const struct imx7_csi_pixfmt *imx7_csi_find_mbus_format(u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(pixel_formats); i++) {
+ const struct imx7_csi_pixfmt *fmt = &pixel_formats[i];
+ unsigned int j;
+
+ if (!fmt->codes)
+ continue;
+
+ for (j = 0; fmt->codes[j]; j++) {
+ if (code == fmt->codes[j])
+ return fmt;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * Enumerate the media bus codes available in the pixel_formats[] array and
+ * return the one found at the requested match index.
+ *
+ * @code: The returned media bus code at the requested match index.
+ * @index: The requested match index.
+ */
+static int imx7_csi_enum_mbus_formats(u32 *code, u32 index)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(pixel_formats); i++) {
+ const struct imx7_csi_pixfmt *fmt = &pixel_formats[i];
+ unsigned int j;
+
+ if (!fmt->codes)
+ continue;
+
+ for (j = 0; fmt->codes[j]; j++) {
+ if (index == 0) {
+ *code = fmt->codes[j];
+ return 0;
+ }
+
+ index--;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int imx7_csi_mbus_fmt_to_pix_fmt(struct v4l2_pix_format *pix,
+ const struct v4l2_mbus_framefmt *mbus,
+ const struct imx7_csi_pixfmt *cc)
+{
+ u32 width;
+ u32 stride;
+
+ if (!cc) {
+ cc = imx7_csi_find_mbus_format(mbus->code);
+ if (!cc)
+ return -EINVAL;
+ }
+
+ /* Round up width for minimum burst size */
+ width = round_up(mbus->width, 8);
+
+ /* Round up stride for IDMAC line start address alignment */
+ stride = round_up((width * cc->bpp) >> 3, 8);
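+	/*
+	 * Illustrative example: a 642-pixel-wide UYVY frame (16 bpp) is
+	 * rounded up to 648 pixels, giving a stride of (648 * 16) / 8 = 1296
+	 * bytes, which already meets the 8-byte alignment requirement.
+	 */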
+
+ pix->width = width;
+ pix->height = mbus->height;
+ pix->pixelformat = cc->fourcc;
+ pix->colorspace = mbus->colorspace;
+ pix->xfer_func = mbus->xfer_func;
+ pix->ycbcr_enc = mbus->ycbcr_enc;
+ pix->quantization = mbus->quantization;
+ pix->field = mbus->field;
+ pix->bytesperline = stride;
+ pix->sizeimage = stride * pix->height;
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Video Capture Device - IOCTLs
+ */
+
+static int imx7_csi_video_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct imx7_csi *csi = video_drvdata(file);
+
+ strscpy(cap->driver, IMX7_CSI_VIDEO_NAME, sizeof(cap->driver));
+ strscpy(cap->card, IMX7_CSI_VIDEO_NAME, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", dev_name(csi->dev));
+
+ return 0;
+}
+
+static int imx7_csi_video_enum_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ unsigned int index = f->index;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(pixel_formats); i++) {
+ const struct imx7_csi_pixfmt *fmt = &pixel_formats[i];
+
+ /*
+ * If a media bus code is specified, only consider formats that
+ * match it.
+ */
+ if (f->mbus_code) {
+ unsigned int j;
+
+ if (!fmt->codes)
+ continue;
+
+ for (j = 0; fmt->codes[j]; j++) {
+ if (f->mbus_code == fmt->codes[j])
+ break;
+ }
+
+ if (!fmt->codes[j])
+ continue;
+ }
+
+ if (index == 0) {
+ f->pixelformat = fmt->fourcc;
+ return 0;
+ }
+
+ index--;
+ }
+
+ return -EINVAL;
+}
+
+static int imx7_csi_video_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ const struct imx7_csi_pixfmt *cc;
+
+ if (fsize->index > 0)
+ return -EINVAL;
+
+ cc = imx7_csi_find_pixel_format(fsize->pixel_format);
+ if (!cc)
+ return -EINVAL;
+
+ /*
+ * TODO: The constraints are hardware-specific and may depend on the
+ * pixel format. This should come from the driver using
+ * imx_media_capture.
+ */
+ fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
+ fsize->stepwise.min_width = 1;
+ fsize->stepwise.max_width = 65535;
+ fsize->stepwise.min_height = 1;
+ fsize->stepwise.max_height = 65535;
+ fsize->stepwise.step_width = 1;
+ fsize->stepwise.step_height = 1;
+
+ return 0;
+}
+
+static int imx7_csi_video_g_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct imx7_csi *csi = video_drvdata(file);
+
+ f->fmt.pix = csi->vdev_fmt;
+
+ return 0;
+}
+
+static const struct imx7_csi_pixfmt *
+__imx7_csi_video_try_fmt(struct v4l2_pix_format *pixfmt,
+ struct v4l2_rect *compose)
+{
+ struct v4l2_mbus_framefmt fmt_src;
+ const struct imx7_csi_pixfmt *cc;
+
+ /*
+ * Find the pixel format, default to the first supported format if not
+ * found.
+ */
+ cc = imx7_csi_find_pixel_format(pixfmt->pixelformat);
+ if (!cc) {
+ pixfmt->pixelformat = IMX7_CSI_DEF_PIX_FORMAT;
+ cc = imx7_csi_find_pixel_format(pixfmt->pixelformat);
+ }
+
+ /* Allow IDMAC interweave but enforce field order from source. */
+ if (V4L2_FIELD_IS_INTERLACED(pixfmt->field)) {
+ switch (pixfmt->field) {
+ case V4L2_FIELD_SEQ_TB:
+ pixfmt->field = V4L2_FIELD_INTERLACED_TB;
+ break;
+ case V4L2_FIELD_SEQ_BT:
+ pixfmt->field = V4L2_FIELD_INTERLACED_BT;
+ break;
+ default:
+ break;
+ }
+ }
+
+ v4l2_fill_mbus_format(&fmt_src, pixfmt, 0);
+ imx7_csi_mbus_fmt_to_pix_fmt(pixfmt, &fmt_src, cc);
+
+ if (compose) {
+ compose->width = fmt_src.width;
+ compose->height = fmt_src.height;
+ }
+
+ return cc;
+}
+
+static int imx7_csi_video_try_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ __imx7_csi_video_try_fmt(&f->fmt.pix, NULL);
+ return 0;
+}
+
+static int imx7_csi_video_s_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct imx7_csi *csi = video_drvdata(file);
+ const struct imx7_csi_pixfmt *cc;
+
+ if (vb2_is_busy(&csi->q)) {
+ dev_err(csi->dev, "%s queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ cc = __imx7_csi_video_try_fmt(&f->fmt.pix, &csi->vdev_compose);
+
+ csi->vdev_cc = cc;
+	 * The set of equivalent media bus codes for the fourcc.
+	 * NOTE: the codes pointer is NULL for in-memory-only formats.
+ return 0;
+}
+
+static int imx7_csi_video_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct imx7_csi *csi = video_drvdata(file);
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ /* The compose rectangle is fixed to the source format. */
+ s->r = csi->vdev_compose;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_PADDED:
+ /*
+ * The hardware writes with a configurable but fixed DMA burst
+ * size. If the source format width is not burst size aligned,
+ * the written frame contains padding to the right.
+ */
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = csi->vdev_fmt.width;
+ s->r.height = csi->vdev_fmt.height;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops imx7_csi_video_ioctl_ops = {
+ .vidioc_querycap = imx7_csi_video_querycap,
+
+ .vidioc_enum_fmt_vid_cap = imx7_csi_video_enum_fmt_vid_cap,
+ .vidioc_enum_framesizes = imx7_csi_video_enum_framesizes,
+
+ .vidioc_g_fmt_vid_cap = imx7_csi_video_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = imx7_csi_video_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = imx7_csi_video_s_fmt_vid_cap,
+
+ .vidioc_g_selection = imx7_csi_video_g_selection,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+};
+
+/* -----------------------------------------------------------------------------
+ * Video Capture Device - Queue Operations
+ */
+
+static int imx7_csi_video_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers,
+ unsigned int *nplanes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct imx7_csi *csi = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format *pix = &csi->vdev_fmt;
+ unsigned int count = *nbuffers;
+
+ if (vq->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ if (*nplanes) {
+ if (*nplanes != 1 || sizes[0] < pix->sizeimage)
+ return -EINVAL;
+ count += vq->num_buffers;
+ }
+
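+	/*
+	 * Illustrative example: with the default 640x480 UYVY format
+	 * (sizeimage = 614400 bytes), the 512 MiB limit allows roughly 870
+	 * buffers, so it only kicks in for very large buffer counts.
+	 */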
+ count = min_t(__u32, IMX7_CSI_VIDEO_MEM_LIMIT / pix->sizeimage, count);
+
+ if (*nplanes)
+ *nbuffers = (count < vq->num_buffers) ? 0 :
+ count - vq->num_buffers;
+ else
+ *nbuffers = count;
+
+ *nplanes = 1;
+ sizes[0] = pix->sizeimage;
+
+ return 0;
+}
+
+static int imx7_csi_video_buf_init(struct vb2_buffer *vb)
+{
+ struct imx7_csi_vb2_buffer *buf = to_imx7_csi_vb2_buffer(vb);
+
+ INIT_LIST_HEAD(&buf->list);
+
+ return 0;
+}
+
+static int imx7_csi_video_buf_prepare(struct vb2_buffer *vb)
+{
+ struct imx7_csi *csi = vb2_get_drv_priv(vb->vb2_queue);
+ struct v4l2_pix_format *pix = &csi->vdev_fmt;
+
+ if (vb2_plane_size(vb, 0) < pix->sizeimage) {
+ dev_err(csi->dev,
+ "data will not fit into plane (%lu < %lu)\n",
+ vb2_plane_size(vb, 0), (long)pix->sizeimage);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, pix->sizeimage);
+
+ return 0;
+}
+
+static void imx7_csi_video_buf_queue(struct vb2_buffer *vb)
+{
+ struct imx7_csi *csi = vb2_get_drv_priv(vb->vb2_queue);
+ struct imx7_csi_vb2_buffer *buf = to_imx7_csi_vb2_buffer(vb);
+ unsigned long flags;
+
+ spin_lock_irqsave(&csi->q_lock, flags);
+
+ list_add_tail(&buf->list, &csi->ready_q);
+
+ spin_unlock_irqrestore(&csi->q_lock, flags);
+}
+
+static int imx7_csi_video_validate_fmt(struct imx7_csi *csi)
+{
+ struct v4l2_subdev_format fmt_src;
+ const struct imx7_csi_pixfmt *cc;
+ int ret;
+
+ /* Retrieve the media bus format on the source subdev. */
+ fmt_src.pad = IMX7_CSI_PAD_SRC;
+ fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(&csi->sd, pad, get_fmt, NULL, &fmt_src);
+ if (ret)
+ return ret;
+
+ /*
+ * Verify that the media bus size matches the size set on the video
+ * node. It is sufficient to check the compose rectangle size without
+ * checking the rounded size from pix_fmt, as the rounded size is
+ * derived directly from the compose rectangle size, and will thus
+ * always match if the compose rectangle matches.
+ */
+ if (csi->vdev_compose.width != fmt_src.format.width ||
+ csi->vdev_compose.height != fmt_src.format.height)
+ return -EPIPE;
+
+ /*
+ * Verify that the media bus code is compatible with the pixel format
+ * set on the video node.
+ */
+ cc = imx7_csi_find_mbus_format(fmt_src.format.code);
+ if (!cc || csi->vdev_cc->yuv != cc->yuv)
+ return -EPIPE;
+
+ return 0;
+}
+
+static int imx7_csi_video_start_streaming(struct vb2_queue *vq,
+ unsigned int count)
+{
+ struct imx7_csi *csi = vb2_get_drv_priv(vq);
+ struct imx7_csi_vb2_buffer *buf, *tmp;
+ unsigned long flags;
+ int ret;
+
+ ret = imx7_csi_video_validate_fmt(csi);
+ if (ret) {
+ dev_err(csi->dev, "capture format not valid\n");
+ goto err_buffers;
+ }
+
+ mutex_lock(&csi->mdev.graph_mutex);
+
+ ret = __video_device_pipeline_start(csi->vdev, &csi->pipe);
+ if (ret)
+ goto err_unlock;
+
+ ret = v4l2_subdev_call(&csi->sd, video, s_stream, 1);
+ if (ret)
+ goto err_stop;
+
+ mutex_unlock(&csi->mdev.graph_mutex);
+
+ return 0;
+
+err_stop:
+ __video_device_pipeline_stop(csi->vdev);
+err_unlock:
+ mutex_unlock(&csi->mdev.graph_mutex);
+ dev_err(csi->dev, "pipeline start failed with %d\n", ret);
+err_buffers:
+ spin_lock_irqsave(&csi->q_lock, flags);
+ list_for_each_entry_safe(buf, tmp, &csi->ready_q, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vbuf.vb2_buf, VB2_BUF_STATE_QUEUED);
+ }
+ spin_unlock_irqrestore(&csi->q_lock, flags);
+ return ret;
+}
+
+static void imx7_csi_video_stop_streaming(struct vb2_queue *vq)
+{
+ struct imx7_csi *csi = vb2_get_drv_priv(vq);
+ struct imx7_csi_vb2_buffer *frame;
+ struct imx7_csi_vb2_buffer *tmp;
+ unsigned long flags;
+
+ mutex_lock(&csi->mdev.graph_mutex);
+ v4l2_subdev_call(&csi->sd, video, s_stream, 0);
+ __video_device_pipeline_stop(csi->vdev);
+ mutex_unlock(&csi->mdev.graph_mutex);
+
+ /* release all active buffers */
+ spin_lock_irqsave(&csi->q_lock, flags);
+ list_for_each_entry_safe(frame, tmp, &csi->ready_q, list) {
+ list_del(&frame->list);
+ vb2_buffer_done(&frame->vbuf.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+ spin_unlock_irqrestore(&csi->q_lock, flags);
+}
+
+static const struct vb2_ops imx7_csi_video_qops = {
+ .queue_setup = imx7_csi_video_queue_setup,
+ .buf_init = imx7_csi_video_buf_init,
+ .buf_prepare = imx7_csi_video_buf_prepare,
+ .buf_queue = imx7_csi_video_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = imx7_csi_video_start_streaming,
+ .stop_streaming = imx7_csi_video_stop_streaming,
+};
+
+/* -----------------------------------------------------------------------------
+ * Video Capture Device - File Operations
+ */
+
+static int imx7_csi_video_open(struct file *file)
+{
+ struct imx7_csi *csi = video_drvdata(file);
+ int ret;
+
+ if (mutex_lock_interruptible(&csi->vdev_mutex))
+ return -ERESTARTSYS;
+
+ ret = v4l2_fh_open(file);
+ if (ret) {
+ dev_err(csi->dev, "v4l2_fh_open failed\n");
+ goto out;
+ }
+
+ ret = v4l2_pipeline_pm_get(&csi->vdev->entity);
+ if (ret)
+ v4l2_fh_release(file);
+
+out:
+ mutex_unlock(&csi->vdev_mutex);
+ return ret;
+}
+
+static int imx7_csi_video_release(struct file *file)
+{
+ struct imx7_csi *csi = video_drvdata(file);
+ struct vb2_queue *vq = &csi->q;
+
+ mutex_lock(&csi->vdev_mutex);
+
+ if (file->private_data == vq->owner) {
+ vb2_queue_release(vq);
+ vq->owner = NULL;
+ }
+
+ v4l2_pipeline_pm_put(&csi->vdev->entity);
+
+ v4l2_fh_release(file);
+ mutex_unlock(&csi->vdev_mutex);
+ return 0;
+}
+
+static const struct v4l2_file_operations imx7_csi_video_fops = {
+ .owner = THIS_MODULE,
+ .open = imx7_csi_video_open,
+ .release = imx7_csi_video_release,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+};
+
+/* -----------------------------------------------------------------------------
+ * Video Capture Device - Init & Cleanup
+ */
+
+static struct imx7_csi_vb2_buffer *imx7_csi_video_next_buf(struct imx7_csi *csi)
+{
+ struct imx7_csi_vb2_buffer *buf = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&csi->q_lock, flags);
+
+ /* get next queued buffer */
+ if (!list_empty(&csi->ready_q)) {
+ buf = list_entry(csi->ready_q.next, struct imx7_csi_vb2_buffer,
+ list);
+ list_del(&buf->list);
+ }
+
+ spin_unlock_irqrestore(&csi->q_lock, flags);
+
+ return buf;
+}
+
+static int imx7_csi_video_init_format(struct imx7_csi *csi)
+{
+ struct v4l2_subdev_format fmt_src = {
+ .pad = IMX7_CSI_PAD_SRC,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ fmt_src.format.code = IMX7_CSI_DEF_MBUS_CODE;
+ fmt_src.format.width = IMX7_CSI_DEF_PIX_WIDTH;
+ fmt_src.format.height = IMX7_CSI_DEF_PIX_HEIGHT;
+
+ imx7_csi_mbus_fmt_to_pix_fmt(&csi->vdev_fmt, &fmt_src.format, NULL);
+ csi->vdev_compose.width = fmt_src.format.width;
+ csi->vdev_compose.height = fmt_src.format.height;
+
+ csi->vdev_cc = imx7_csi_find_pixel_format(csi->vdev_fmt.pixelformat);
+
+ return 0;
+}
+
+static int imx7_csi_video_register(struct imx7_csi *csi)
+{
+ struct v4l2_subdev *sd = &csi->sd;
+ struct v4l2_device *v4l2_dev = sd->v4l2_dev;
+ struct video_device *vdev = csi->vdev;
+ int ret;
+
+ vdev->v4l2_dev = v4l2_dev;
+
+ /* Initialize the default format and compose rectangle. */
+ ret = imx7_csi_video_init_format(csi);
+ if (ret < 0)
+ return ret;
+
+ /* Register the video device. */
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
+ if (ret) {
+ dev_err(csi->dev, "Failed to register video device\n");
+ return ret;
+ }
+
+ dev_info(csi->dev, "Registered %s as /dev/%s\n", vdev->name,
+ video_device_node_name(vdev));
+
+ /* Create the link from the CSI subdev to the video device. */
+ ret = media_create_pad_link(&sd->entity, IMX7_CSI_PAD_SRC,
+ &vdev->entity, 0, MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ dev_err(csi->dev, "failed to create link to device node\n");
+ video_unregister_device(vdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void imx7_csi_video_unregister(struct imx7_csi *csi)
+{
+ media_entity_cleanup(&csi->vdev->entity);
+ video_unregister_device(csi->vdev);
+}
+
+static int imx7_csi_video_init(struct imx7_csi *csi)
+{
+ struct video_device *vdev;
+ struct vb2_queue *vq;
+ int ret;
+
+ mutex_init(&csi->vdev_mutex);
+ INIT_LIST_HEAD(&csi->ready_q);
+ spin_lock_init(&csi->q_lock);
+
+ /* Allocate and initialize the video device. */
+ vdev = video_device_alloc();
+ if (!vdev)
+ return -ENOMEM;
+
+ vdev->fops = &imx7_csi_video_fops;
+ vdev->ioctl_ops = &imx7_csi_video_ioctl_ops;
+ vdev->minor = -1;
+ vdev->release = video_device_release;
+ vdev->vfl_dir = VFL_DIR_RX;
+ vdev->tvnorms = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM;
+ vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING
+ | V4L2_CAP_IO_MC;
+ vdev->lock = &csi->vdev_mutex;
+ vdev->queue = &csi->q;
+
+ snprintf(vdev->name, sizeof(vdev->name), "%s capture", csi->sd.name);
+
+ video_set_drvdata(vdev, csi);
+ csi->vdev = vdev;
+
+ /* Initialize the video device pad. */
+ csi->vdev_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vdev->entity, 1, &csi->vdev_pad);
+ if (ret) {
+ video_device_release(vdev);
+ return ret;
+ }
+
+ /* Initialize the vb2 queue. */
+ vq = &csi->q;
+ vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ vq->drv_priv = csi;
+ vq->buf_struct_size = sizeof(struct imx7_csi_vb2_buffer);
+ vq->ops = &imx7_csi_video_qops;
+ vq->mem_ops = &vb2_dma_contig_memops;
+ vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ vq->lock = &csi->vdev_mutex;
+ vq->min_buffers_needed = 2;
+ vq->dev = csi->dev;
+
+ ret = vb2_queue_init(vq);
+ if (ret) {
+ dev_err(csi->dev, "vb2_queue_init failed\n");
+ video_device_release(vdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdev Operations
+ */
+
+static int imx7_csi_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct imx7_csi *csi = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ mutex_lock(&csi->lock);
+
+ if (!csi->src_sd) {
+ ret = -EPIPE;
+ goto out_unlock;
+ }
+
+ if (csi->is_streaming == !!enable)
+ goto out_unlock;
+
+ if (enable) {
+ ret = imx7_csi_init(csi);
+ if (ret < 0)
+ goto out_unlock;
+
+ ret = v4l2_subdev_call(csi->src_sd, video, s_stream, 1);
+ if (ret < 0) {
+ imx7_csi_deinit(csi, VB2_BUF_STATE_QUEUED);
+ goto out_unlock;
+ }
+
+ imx7_csi_enable(csi);
+ } else {
+ imx7_csi_disable(csi);
+
+ v4l2_subdev_call(csi->src_sd, video, s_stream, 0);
+
+ imx7_csi_deinit(csi, VB2_BUF_STATE_ERROR);
+ }
+
+ csi->is_streaming = !!enable;
+
+out_unlock:
+ mutex_unlock(&csi->lock);
+
+ return ret;
+}
+
+static struct v4l2_mbus_framefmt *
+imx7_csi_get_format(struct imx7_csi *csi,
+ struct v4l2_subdev_state *sd_state,
+ unsigned int pad,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(&csi->sd, sd_state, pad);
+
+ return &csi->format_mbus[pad];
+}
+
+static int imx7_csi_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
+{
+ const enum v4l2_subdev_format_whence which =
+ sd_state ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ struct imx7_csi *csi = v4l2_get_subdevdata(sd);
+ const struct imx7_csi_pixfmt *cc;
+ int i;
+
+ cc = imx7_csi_find_mbus_format(IMX7_CSI_DEF_MBUS_CODE);
+
+ for (i = 0; i < IMX7_CSI_PADS_NUM; i++) {
+ struct v4l2_mbus_framefmt *mf =
+ imx7_csi_get_format(csi, sd_state, i, which);
+
+ mf->code = IMX7_CSI_DEF_MBUS_CODE;
+ mf->width = IMX7_CSI_DEF_PIX_WIDTH;
+ mf->height = IMX7_CSI_DEF_PIX_HEIGHT;
+ mf->field = V4L2_FIELD_NONE;
+
+ mf->colorspace = V4L2_COLORSPACE_SRGB;
+ mf->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(mf->colorspace);
+ mf->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(mf->colorspace);
+ mf->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(!cc->yuv,
+ mf->colorspace, mf->ycbcr_enc);
+
+ csi->cc[i] = cc;
+ }
+
+ return 0;
+}
+
+static int imx7_csi_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct imx7_csi *csi = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *in_fmt;
+ int ret = 0;
+
+ mutex_lock(&csi->lock);
+
+ in_fmt = imx7_csi_get_format(csi, sd_state, IMX7_CSI_PAD_SINK,
+ code->which);
+
+ switch (code->pad) {
+ case IMX7_CSI_PAD_SINK:
+ ret = imx7_csi_enum_mbus_formats(&code->code, code->index);
+ break;
+ case IMX7_CSI_PAD_SRC:
+ if (code->index != 0) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ code->code = in_fmt->code;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+out_unlock:
+ mutex_unlock(&csi->lock);
+
+ return ret;
+}
+
+static int imx7_csi_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct imx7_csi *csi = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *fmt;
+ int ret = 0;
+
+ mutex_lock(&csi->lock);
+
+ fmt = imx7_csi_get_format(csi, sd_state, sdformat->pad,
+ sdformat->which);
+ if (!fmt) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ sdformat->format = *fmt;
+
+out_unlock:
+ mutex_unlock(&csi->lock);
+
+ return ret;
+}
+
+/*
+ * Default the colorspace in tryfmt to SRGB if set to an unsupported
+ * colorspace or not initialized. Then set the remaining colorimetry
+ * parameters based on the colorspace if they are uninitialized.
+ *
+ * tryfmt->code must be set on entry.
+ */
+static void imx7_csi_try_colorimetry(struct v4l2_mbus_framefmt *tryfmt)
+{
+ const struct imx7_csi_pixfmt *cc;
+ bool is_rgb = false;
+
+ cc = imx7_csi_find_mbus_format(tryfmt->code);
+ if (cc && !cc->yuv)
+ is_rgb = true;
+
+ switch (tryfmt->colorspace) {
+ case V4L2_COLORSPACE_SMPTE170M:
+ case V4L2_COLORSPACE_REC709:
+ case V4L2_COLORSPACE_JPEG:
+ case V4L2_COLORSPACE_SRGB:
+ case V4L2_COLORSPACE_BT2020:
+ case V4L2_COLORSPACE_OPRGB:
+ case V4L2_COLORSPACE_DCI_P3:
+ case V4L2_COLORSPACE_RAW:
+ break;
+ default:
+ tryfmt->colorspace = V4L2_COLORSPACE_SRGB;
+ break;
+ }
+
+ if (tryfmt->xfer_func == V4L2_XFER_FUNC_DEFAULT)
+ tryfmt->xfer_func =
+ V4L2_MAP_XFER_FUNC_DEFAULT(tryfmt->colorspace);
+
+ if (tryfmt->ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT)
+ tryfmt->ycbcr_enc =
+ V4L2_MAP_YCBCR_ENC_DEFAULT(tryfmt->colorspace);
+
+ if (tryfmt->quantization == V4L2_QUANTIZATION_DEFAULT)
+ tryfmt->quantization =
+ V4L2_MAP_QUANTIZATION_DEFAULT(is_rgb,
+ tryfmt->colorspace,
+ tryfmt->ycbcr_enc);
+}
+
+static int imx7_csi_try_fmt(struct imx7_csi *csi,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *sdformat,
+ const struct imx7_csi_pixfmt **cc)
+{
+ const struct imx7_csi_pixfmt *in_cc;
+ struct v4l2_mbus_framefmt *in_fmt;
+ u32 code;
+
+ in_fmt = imx7_csi_get_format(csi, sd_state, IMX7_CSI_PAD_SINK,
+ sdformat->which);
+ if (!in_fmt)
+ return -EINVAL;
+
+ switch (sdformat->pad) {
+ case IMX7_CSI_PAD_SRC:
+ in_cc = imx7_csi_find_mbus_format(in_fmt->code);
+
+ sdformat->format.width = in_fmt->width;
+ sdformat->format.height = in_fmt->height;
+ sdformat->format.code = in_fmt->code;
+ sdformat->format.field = in_fmt->field;
+ *cc = in_cc;
+
+ sdformat->format.colorspace = in_fmt->colorspace;
+ sdformat->format.xfer_func = in_fmt->xfer_func;
+ sdformat->format.quantization = in_fmt->quantization;
+ sdformat->format.ycbcr_enc = in_fmt->ycbcr_enc;
+ break;
+ case IMX7_CSI_PAD_SINK:
+ *cc = imx7_csi_find_mbus_format(sdformat->format.code);
+ if (!*cc) {
+ code = IMX7_CSI_DEF_MBUS_CODE;
+ *cc = imx7_csi_find_mbus_format(code);
+ sdformat->format.code = code;
+ }
+
+ if (sdformat->format.field != V4L2_FIELD_INTERLACED)
+ sdformat->format.field = V4L2_FIELD_NONE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ imx7_csi_try_colorimetry(&sdformat->format);
+
+ return 0;
+}
+
+static int imx7_csi_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct imx7_csi *csi = v4l2_get_subdevdata(sd);
+ const struct imx7_csi_pixfmt *outcc;
+ struct v4l2_mbus_framefmt *outfmt;
+ const struct imx7_csi_pixfmt *cc;
+ struct v4l2_mbus_framefmt *fmt;
+ struct v4l2_subdev_format format;
+ int ret = 0;
+
+ if (sdformat->pad >= IMX7_CSI_PADS_NUM)
+ return -EINVAL;
+
+ mutex_lock(&csi->lock);
+
+ if (csi->is_streaming) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ ret = imx7_csi_try_fmt(csi, sd_state, sdformat, &cc);
+ if (ret < 0)
+ goto out_unlock;
+
+ fmt = imx7_csi_get_format(csi, sd_state, sdformat->pad,
+ sdformat->which);
+ if (!fmt) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ *fmt = sdformat->format;
+
+ if (sdformat->pad == IMX7_CSI_PAD_SINK) {
+ /* propagate format to source pads */
+ format.pad = IMX7_CSI_PAD_SRC;
+ format.which = sdformat->which;
+ format.format = sdformat->format;
+ if (imx7_csi_try_fmt(csi, sd_state, &format, &outcc)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ outfmt = imx7_csi_get_format(csi, sd_state, IMX7_CSI_PAD_SRC,
+ sdformat->which);
+ *outfmt = format.format;
+
+ if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ csi->cc[IMX7_CSI_PAD_SRC] = outcc;
+ }
+
+ if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ csi->cc[sdformat->pad] = cc;
+
+out_unlock:
+ mutex_unlock(&csi->lock);
+
+ return ret;
+}
+
+static int imx7_csi_pad_link_validate(struct v4l2_subdev *sd,
+ struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt)
+{
+ struct imx7_csi *csi = v4l2_get_subdevdata(sd);
+ struct media_pad *pad = NULL;
+ unsigned int i;
+ int ret;
+
+ if (!csi->src_sd)
+ return -EPIPE;
+
+ /*
+ * Validate the source link, and record whether the source uses the
+ * parallel input or the CSI-2 receiver.
+ */
+ ret = v4l2_subdev_link_validate_default(sd, link, source_fmt, sink_fmt);
+ if (ret)
+ return ret;
+
+ switch (csi->src_sd->entity.function) {
+ case MEDIA_ENT_F_VID_IF_BRIDGE:
+ /* The input is the CSI-2 receiver. */
+ csi->is_csi2 = true;
+ break;
+
+ case MEDIA_ENT_F_VID_MUX:
+ /* The input is the mux, check its input. */
+ for (i = 0; i < csi->src_sd->entity.num_pads; i++) {
+ struct media_pad *spad = &csi->src_sd->entity.pads[i];
+
+ if (!(spad->flags & MEDIA_PAD_FL_SINK))
+ continue;
+
+ pad = media_pad_remote_pad_first(spad);
+ if (pad)
+ break;
+ }
+
+ if (!pad)
+ return -ENODEV;
+
+ csi->is_csi2 = pad->entity->function == MEDIA_ENT_F_VID_IF_BRIDGE;
+ break;
+
+ default:
+ /*
+ * The input is an external entity, it must use the parallel
+ * bus.
+ */
+ csi->is_csi2 = false;
+ break;
+ }
+
+ return 0;
+}
+
+static int imx7_csi_registered(struct v4l2_subdev *sd)
+{
+ struct imx7_csi *csi = v4l2_get_subdevdata(sd);
+ int ret;
+
+ ret = imx7_csi_video_init(csi);
+ if (ret)
+ return ret;
+
+ ret = imx7_csi_video_register(csi);
+ if (ret)
+ return ret;
+
+ ret = v4l2_device_register_subdev_nodes(&csi->v4l2_dev);
+ if (ret)
+ goto err_unreg;
+
+ ret = media_device_register(&csi->mdev);
+ if (ret)
+ goto err_unreg;
+
+ return 0;
+
+err_unreg:
+ imx7_csi_video_unregister(csi);
+ return ret;
+}
+
+static void imx7_csi_unregistered(struct v4l2_subdev *sd)
+{
+ struct imx7_csi *csi = v4l2_get_subdevdata(sd);
+
+ imx7_csi_video_unregister(csi);
+}
+
+static const struct v4l2_subdev_video_ops imx7_csi_video_ops = {
+ .s_stream = imx7_csi_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops imx7_csi_pad_ops = {
+ .init_cfg = imx7_csi_init_cfg,
+ .enum_mbus_code = imx7_csi_enum_mbus_code,
+ .get_fmt = imx7_csi_get_fmt,
+ .set_fmt = imx7_csi_set_fmt,
+ .link_validate = imx7_csi_pad_link_validate,
+};
+
+static const struct v4l2_subdev_ops imx7_csi_subdev_ops = {
+ .video = &imx7_csi_video_ops,
+ .pad = &imx7_csi_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops imx7_csi_internal_ops = {
+ .registered = imx7_csi_registered,
+ .unregistered = imx7_csi_unregistered,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media Entity Operations
+ */
+
+static const struct media_entity_operations imx7_csi_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+ .get_fwnode_pad = v4l2_subdev_get_fwnode_pad_1_to_1,
+};
+
+/* -----------------------------------------------------------------------------
+ * Probe & Remove
+ */
+
+static int imx7_csi_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
+{
+ struct imx7_csi *csi = imx7_csi_notifier_to_dev(notifier);
+ struct media_pad *sink = &csi->sd.entity.pads[IMX7_CSI_PAD_SINK];
+
+ csi->src_sd = sd;
+
+ return v4l2_create_fwnode_links_to_pad(sd, sink, MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+}
+
+static int imx7_csi_notify_complete(struct v4l2_async_notifier *notifier)
+{
+ struct imx7_csi *csi = imx7_csi_notifier_to_dev(notifier);
+
+ return v4l2_device_register_subdev_nodes(&csi->v4l2_dev);
+}
+
+static const struct v4l2_async_notifier_operations imx7_csi_notify_ops = {
+ .bound = imx7_csi_notify_bound,
+ .complete = imx7_csi_notify_complete,
+};
+
+static int imx7_csi_async_register(struct imx7_csi *csi)
+{
+ struct v4l2_async_subdev *asd;
+ struct fwnode_handle *ep;
+ int ret;
+
+ v4l2_async_nf_init(&csi->notifier);
+
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(csi->dev), 0, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
+ if (ep) {
+ asd = v4l2_async_nf_add_fwnode_remote(&csi->notifier, ep,
+ struct v4l2_async_subdev);
+
+ fwnode_handle_put(ep);
+
+ if (IS_ERR(asd)) {
+ ret = PTR_ERR(asd);
+ /* OK if asd already exists */
+ if (ret != -EEXIST)
+ return ret;
+ }
+ }
+
+ csi->notifier.ops = &imx7_csi_notify_ops;
+
+ ret = v4l2_async_nf_register(&csi->v4l2_dev, &csi->notifier);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void imx7_csi_media_cleanup(struct imx7_csi *csi)
+{
+ v4l2_device_unregister(&csi->v4l2_dev);
+ media_device_unregister(&csi->mdev);
+ media_device_cleanup(&csi->mdev);
+}
+
+static const struct media_device_ops imx7_csi_media_ops = {
+ .link_notify = v4l2_pipeline_link_notify,
+};
+
+static int imx7_csi_media_dev_init(struct imx7_csi *csi)
+{
+ int ret;
+
+ strscpy(csi->mdev.model, "imx-media", sizeof(csi->mdev.model));
+ csi->mdev.ops = &imx7_csi_media_ops;
+ csi->mdev.dev = csi->dev;
+
+ csi->v4l2_dev.mdev = &csi->mdev;
+ strscpy(csi->v4l2_dev.name, "imx-media",
+ sizeof(csi->v4l2_dev.name));
+ snprintf(csi->mdev.bus_info, sizeof(csi->mdev.bus_info),
+ "platform:%s", dev_name(csi->mdev.dev));
+
+ media_device_init(&csi->mdev);
+
+ ret = v4l2_device_register(csi->dev, &csi->v4l2_dev);
+ if (ret < 0) {
+ v4l2_err(&csi->v4l2_dev,
+ "Failed to register v4l2_device: %d\n", ret);
+ goto cleanup;
+ }
+
+ return 0;
+
+cleanup:
+ media_device_cleanup(&csi->mdev);
+
+ return ret;
+}
+
+static int imx7_csi_media_init(struct imx7_csi *csi)
+{
+ unsigned int i;
+ int ret;
+
+ /* add media device */
+ ret = imx7_csi_media_dev_init(csi);
+ if (ret)
+ return ret;
+
+ v4l2_subdev_init(&csi->sd, &imx7_csi_subdev_ops);
+ v4l2_set_subdevdata(&csi->sd, csi);
+ csi->sd.internal_ops = &imx7_csi_internal_ops;
+ csi->sd.entity.ops = &imx7_csi_entity_ops;
+ csi->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ csi->sd.dev = csi->dev;
+ csi->sd.owner = THIS_MODULE;
+ csi->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(csi->sd.name, sizeof(csi->sd.name), "csi");
+
+ for (i = 0; i < IMX7_CSI_PADS_NUM; i++)
+ csi->pad[i].flags = (i == IMX7_CSI_PAD_SINK) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&csi->sd.entity, IMX7_CSI_PADS_NUM,
+ csi->pad);
+ if (ret)
+ goto error;
+
+ ret = v4l2_device_register_subdev(&csi->v4l2_dev, &csi->sd);
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ imx7_csi_media_cleanup(csi);
+ return ret;
+}
+
+static int imx7_csi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct imx7_csi *csi;
+ int ret;
+
+ csi = devm_kzalloc(&pdev->dev, sizeof(*csi), GFP_KERNEL);
+ if (!csi)
+ return -ENOMEM;
+
+ csi->dev = dev;
+ platform_set_drvdata(pdev, csi);
+
+ spin_lock_init(&csi->irqlock);
+ mutex_init(&csi->lock);
+
+ /* Acquire resources and install interrupt handler. */
+ csi->mclk = devm_clk_get(&pdev->dev, "mclk");
+ if (IS_ERR(csi->mclk)) {
+ ret = PTR_ERR(csi->mclk);
+ dev_err(dev, "Failed to get mclk: %d", ret);
+ goto destroy_mutex;
+ }
+
+ csi->irq = platform_get_irq(pdev, 0);
+ if (csi->irq < 0) {
+ ret = csi->irq;
+ goto destroy_mutex;
+ }
+
+ csi->regbase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(csi->regbase)) {
+ ret = PTR_ERR(csi->regbase);
+ goto destroy_mutex;
+ }
+
+ csi->model = (enum imx_csi_model)(uintptr_t)of_device_get_match_data(&pdev->dev);
+
+ ret = devm_request_irq(dev, csi->irq, imx7_csi_irq_handler, 0, "csi",
+ (void *)csi);
+ if (ret < 0) {
+ dev_err(dev, "Request CSI IRQ failed.\n");
+ goto destroy_mutex;
+ }
+
+ /* Initialize all the media device infrastructure. */
+ ret = imx7_csi_media_init(csi);
+ if (ret)
+ goto destroy_mutex;
+
+ /* Set the default mbus formats. */
+ ret = imx7_csi_init_cfg(&csi->sd, NULL);
+ if (ret)
+ goto media_cleanup;
+
+ ret = imx7_csi_async_register(csi);
+ if (ret)
+ goto subdev_notifier_cleanup;
+
+ return 0;
+
+subdev_notifier_cleanup:
+ v4l2_async_nf_unregister(&csi->notifier);
+ v4l2_async_nf_cleanup(&csi->notifier);
+media_cleanup:
+ imx7_csi_media_cleanup(csi);
+
+destroy_mutex:
+ mutex_destroy(&csi->lock);
+
+ return ret;
+}
+
+static int imx7_csi_remove(struct platform_device *pdev)
+{
+ struct imx7_csi *csi = platform_get_drvdata(pdev);
+
+ imx7_csi_media_cleanup(csi);
+
+ v4l2_async_nf_unregister(&csi->notifier);
+ v4l2_async_nf_cleanup(&csi->notifier);
+ v4l2_async_unregister_subdev(&csi->sd);
+
+ mutex_destroy(&csi->lock);
+
+ return 0;
+}
+
+static const struct of_device_id imx7_csi_of_match[] = {
+ { .compatible = "fsl,imx8mq-csi", .data = (void *)IMX7_CSI_IMX8MQ },
+ { .compatible = "fsl,imx7-csi", .data = (void *)IMX7_CSI_IMX7 },
+ { .compatible = "fsl,imx6ul-csi", .data = (void *)IMX7_CSI_IMX7 },
+ { },
+};
+MODULE_DEVICE_TABLE(of, imx7_csi_of_match);
+
+static struct platform_driver imx7_csi_driver = {
+ .probe = imx7_csi_probe,
+ .remove = imx7_csi_remove,
+ .driver = {
+ .of_match_table = imx7_csi_of_match,
+ .name = "imx7-csi",
+ },
+};
+module_platform_driver(imx7_csi_driver);
+
+MODULE_DESCRIPTION("i.MX7 CSI subdev driver");
+MODULE_AUTHOR("Rui Miguel Silva <rui.silva@linaro.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:imx7-csi");
diff --git a/drivers/staging/media/imx/imx8mq-mipi-csi2.c b/drivers/staging/media/imx/imx8mq-mipi-csi2.c
new file mode 100644
index 000000000..83194328d
--- /dev/null
+++ b/drivers/staging/media/imx/imx8mq-mipi-csi2.c
@@ -0,0 +1,1004 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NXP i.MX8MQ SoC series MIPI-CSI2 receiver driver
+ *
+ * Copyright (C) 2021 Purism SPC
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/interconnect.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-subdev.h>
+
+#define MIPI_CSI2_DRIVER_NAME "imx8mq-mipi-csi2"
+#define MIPI_CSI2_SUBDEV_NAME MIPI_CSI2_DRIVER_NAME
+
+#define MIPI_CSI2_PAD_SINK 0
+#define MIPI_CSI2_PAD_SOURCE 1
+#define MIPI_CSI2_PADS_NUM 2
+
+#define MIPI_CSI2_DEF_PIX_WIDTH 640
+#define MIPI_CSI2_DEF_PIX_HEIGHT 480
+
+/* Register map definition */
+
+/* i.MX8MQ CSI-2 controller CSR */
+#define CSI2RX_CFG_NUM_LANES 0x100
+#define CSI2RX_CFG_DISABLE_DATA_LANES 0x104
+#define CSI2RX_BIT_ERR 0x108
+#define CSI2RX_IRQ_STATUS 0x10c
+#define CSI2RX_IRQ_MASK 0x110
+#define CSI2RX_IRQ_MASK_ALL 0x1ff
+#define CSI2RX_IRQ_MASK_ULPS_STATUS_CHANGE 0x8
+#define CSI2RX_ULPS_STATUS 0x114
+#define CSI2RX_PPI_ERRSOT_HS 0x118
+#define CSI2RX_PPI_ERRSOTSYNC_HS 0x11c
+#define CSI2RX_PPI_ERRESC 0x120
+#define CSI2RX_PPI_ERRSYNCESC 0x124
+#define CSI2RX_PPI_ERRCONTROL 0x128
+#define CSI2RX_CFG_DISABLE_PAYLOAD_0 0x12c
+#define CSI2RX_CFG_VID_VC_IGNORE 0x180
+#define CSI2RX_CFG_VID_VC 0x184
+#define CSI2RX_CFG_VID_P_FIFO_SEND_LEVEL 0x188
+#define CSI2RX_CFG_DISABLE_PAYLOAD_1 0x130
+
+enum {
+ ST_POWERED = 1,
+ ST_STREAMING = 2,
+ ST_SUSPENDED = 4,
+};
+
+enum imx8mq_mipi_csi_clk {
+ CSI2_CLK_CORE,
+ CSI2_CLK_ESC,
+ CSI2_CLK_UI,
+ CSI2_NUM_CLKS,
+};
+
+static const char * const imx8mq_mipi_csi_clk_id[CSI2_NUM_CLKS] = {
+ [CSI2_CLK_CORE] = "core",
+ [CSI2_CLK_ESC] = "esc",
+ [CSI2_CLK_UI] = "ui",
+};
+
+#define CSI2_NUM_CLKS ARRAY_SIZE(imx8mq_mipi_csi_clk_id)
+
+#define GPR_CSI2_1_RX_ENABLE BIT(13)
+#define GPR_CSI2_1_VID_INTFC_ENB BIT(12)
+#define GPR_CSI2_1_HSEL BIT(10)
+#define GPR_CSI2_1_CONT_CLK_MODE BIT(8)
+#define GPR_CSI2_1_S_PRG_RXHS_SETTLE(x) (((x) & 0x3f) << 2)
+
+/*
+ * The send level configures the number of entries that must accumulate in
+ * the Pixel FIFO before the data will be transferred to the video output.
+ * The exact value needed for this configuration is dependent on the rate at
+ * which the sensor transfers data to the CSI-2 Controller and the user
+ * video clock.
+ *
+ * The calculation is the classical rate-in rate-out type of problem: if the
+ * video bandwidth is 10% faster than the incoming MIPI data and the video
+ * line length is 500 pixels, then the FIFO should be allowed to fill up to
+ * 10% of the line length, or 50 pixels. If the gap data is acceptable, the
+ * level can simply be set to 16 and ignored.
+ */
+#define CSI2RX_SEND_LEVEL 64
+
+struct csi_state {
+ struct device *dev;
+ void __iomem *regs;
+ struct clk_bulk_data clks[CSI2_NUM_CLKS];
+ struct reset_control *rst;
+ struct regulator *mipi_phy_regulator;
+
+ struct v4l2_subdev sd;
+ struct media_pad pads[MIPI_CSI2_PADS_NUM];
+ struct v4l2_async_notifier notifier;
+ struct v4l2_subdev *src_sd;
+
+ struct v4l2_mbus_config_mipi_csi2 bus;
+
+ struct mutex lock; /* Protect csi2_fmt, format_mbus, state, hs_settle */
+ const struct csi2_pix_format *csi2_fmt;
+ struct v4l2_mbus_framefmt format_mbus[MIPI_CSI2_PADS_NUM];
+ u32 state;
+ u32 hs_settle;
+
+ struct regmap *phy_gpr;
+ u8 phy_gpr_reg;
+
+ struct icc_path *icc_path;
+ s32 icc_path_bw;
+};
+
+/* -----------------------------------------------------------------------------
+ * Format helpers
+ */
+
+struct csi2_pix_format {
+ u32 code;
+ u8 width;
+};
+
+static const struct csi2_pix_format imx8mq_mipi_csi_formats[] = {
+ /* RAW (Bayer and greyscale) formats. */
+ {
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .width = 8,
+ }, {
+ .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .width = 8,
+ }, {
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .width = 8,
+ }, {
+ .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .width = 8,
+ }, {
+ .code = MEDIA_BUS_FMT_Y8_1X8,
+ .width = 8,
+ }, {
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .width = 10,
+ }, {
+ .code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .width = 10,
+ }, {
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .width = 10,
+ }, {
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .width = 10,
+ }, {
+ .code = MEDIA_BUS_FMT_Y10_1X10,
+ .width = 10,
+ }, {
+ .code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .width = 12,
+ }, {
+ .code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .width = 12,
+ }, {
+ .code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .width = 12,
+ }, {
+ .code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .width = 12,
+ }, {
+ .code = MEDIA_BUS_FMT_Y12_1X12,
+ .width = 12,
+ }, {
+ .code = MEDIA_BUS_FMT_SBGGR14_1X14,
+ .width = 14,
+ }, {
+ .code = MEDIA_BUS_FMT_SGBRG14_1X14,
+ .width = 14,
+ }, {
+ .code = MEDIA_BUS_FMT_SGRBG14_1X14,
+ .width = 14,
+ }, {
+ .code = MEDIA_BUS_FMT_SRGGB14_1X14,
+ .width = 14,
+ },
+ /* YUV formats */
+ {
+ .code = MEDIA_BUS_FMT_YUYV8_1X16,
+ .width = 16,
+ }, {
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .width = 16,
+ }
+};
+
+static const struct csi2_pix_format *find_csi2_format(u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(imx8mq_mipi_csi_formats); i++)
+ if (code == imx8mq_mipi_csi_formats[i].code)
+ return &imx8mq_mipi_csi_formats[i];
+ return NULL;
+}
+
+/* -----------------------------------------------------------------------------
+ * Hardware configuration
+ */
+
+static inline void imx8mq_mipi_csi_write(struct csi_state *state, u32 reg, u32 val)
+{
+ writel(val, state->regs + reg);
+}
+
+static int imx8mq_mipi_csi_sw_reset(struct csi_state *state)
+{
+ int ret;
+
+ /*
+	 * These are most likely self-clearing reset bits. To make this
+	 * clearer, the reset-imx7 driver should implement the
+	 * .reset() operation.
+ */
+ ret = reset_control_assert(state->rst);
+ if (ret < 0) {
+ dev_err(state->dev, "Failed to assert resets: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void imx8mq_mipi_csi_system_enable(struct csi_state *state, int on)
+{
+ if (!on) {
+ imx8mq_mipi_csi_write(state, CSI2RX_CFG_DISABLE_DATA_LANES, 0xf);
+ return;
+ }
+
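+	/* Program the D-PHY control bits in the GPR syscon, including the precomputed hs_settle. */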
+ regmap_update_bits(state->phy_gpr,
+ state->phy_gpr_reg,
+ 0x3fff,
+ GPR_CSI2_1_RX_ENABLE |
+ GPR_CSI2_1_VID_INTFC_ENB |
+ GPR_CSI2_1_HSEL |
+ GPR_CSI2_1_CONT_CLK_MODE |
+ GPR_CSI2_1_S_PRG_RXHS_SETTLE(state->hs_settle));
+}
+
+static void imx8mq_mipi_csi_set_params(struct csi_state *state)
+{
+ int lanes = state->bus.num_data_lanes;
+
+ imx8mq_mipi_csi_write(state, CSI2RX_CFG_NUM_LANES, lanes - 1);
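+	/* Build a mask of the unused data lanes and disable them. */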
+ imx8mq_mipi_csi_write(state, CSI2RX_CFG_DISABLE_DATA_LANES,
+ (0xf << lanes) & 0xf);
+ imx8mq_mipi_csi_write(state, CSI2RX_IRQ_MASK, CSI2RX_IRQ_MASK_ALL);
+	/*
+	 * 0x180 bit 0 controls the Virtual Channel behaviour: when set, the
+	 * interface ignores the Virtual Channel (VC) field in received packets;
+	 * when cleared, the interface only accepts packets whose VC matches the
+	 * value programmed at offset 0x184.
+	 */
+ imx8mq_mipi_csi_write(state, CSI2RX_CFG_VID_VC_IGNORE, 1);
+ imx8mq_mipi_csi_write(state, CSI2RX_CFG_VID_P_FIFO_SEND_LEVEL,
+ CSI2RX_SEND_LEVEL);
+}
+
+static int imx8mq_mipi_csi_clk_enable(struct csi_state *state)
+{
+ return clk_bulk_prepare_enable(CSI2_NUM_CLKS, state->clks);
+}
+
+static void imx8mq_mipi_csi_clk_disable(struct csi_state *state)
+{
+ clk_bulk_disable_unprepare(CSI2_NUM_CLKS, state->clks);
+}
+
+static int imx8mq_mipi_csi_clk_get(struct csi_state *state)
+{
+ unsigned int i;
+
+ for (i = 0; i < CSI2_NUM_CLKS; i++)
+ state->clks[i].id = imx8mq_mipi_csi_clk_id[i];
+
+ return devm_clk_bulk_get(state->dev, CSI2_NUM_CLKS, state->clks);
+}
+
+static int imx8mq_mipi_csi_calc_hs_settle(struct csi_state *state)
+{
+ s64 link_freq;
+ u32 lane_rate;
+ unsigned long esc_clk_rate;
+ u32 min_ths_settle, max_ths_settle, ths_settle_ns, esc_clk_period_ns;
+
+	/* Derive the lane rate from the pixel rate via the link frequency. */
+ link_freq = v4l2_get_link_freq(state->src_sd->ctrl_handler,
+ state->csi2_fmt->width,
+ state->bus.num_data_lanes * 2);
+ if (link_freq < 0) {
+ dev_err(state->dev, "Unable to obtain link frequency: %d\n",
+ (int)link_freq);
+ return link_freq;
+ }
+
+ lane_rate = link_freq * 2;
+ if (lane_rate < 80000000 || lane_rate > 1500000000) {
+ dev_dbg(state->dev, "Out-of-bound lane rate %u\n", lane_rate);
+ return -EINVAL;
+ }
+
+ /*
+ * The D-PHY specification requires Ths-settle to be in the range
+ * 85ns + 6*UI to 140ns + 10*UI, with the unit interval UI being half
+ * the clock period.
+ *
+ * The Ths-settle value is expressed in the hardware as a multiple of
+ * the Esc clock period:
+ *
+ * Ths-settle = (PRG_RXHS_SETTLE + 1) * Tperiod of RxClkInEsc
+ *
+ * Due to the one cycle inaccuracy introduced by rounding, the
+ * documentation recommends picking a value away from the boundaries.
+ * Let's pick the average.
+ */
+ esc_clk_rate = clk_get_rate(state->clks[CSI2_CLK_ESC].clk);
+ if (!esc_clk_rate) {
+ dev_err(state->dev, "Could not get esc clock rate.\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(state->dev, "esc clk rate: %lu\n", esc_clk_rate);
+ esc_clk_period_ns = 1000000000 / esc_clk_rate;
+
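+	/*
+	 * Illustrative arithmetic only (not taken from the reference manual):
+	 * with an 800 Mbps lane rate, UI = 1.25 ns, so the integer math below
+	 * yields a window of 92 ns to 152 ns and an average ths_settle_ns of
+	 * 122 ns.
+	 */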
+ min_ths_settle = 85 + 6 * 1000000 / (lane_rate / 1000);
+ max_ths_settle = 140 + 10 * 1000000 / (lane_rate / 1000);
+ ths_settle_ns = (min_ths_settle + max_ths_settle) / 2;
+
+ state->hs_settle = ths_settle_ns / esc_clk_period_ns - 1;
+
+ dev_dbg(state->dev, "lane rate %u Ths_settle %u hs_settle %u\n",
+ lane_rate, ths_settle_ns, state->hs_settle);
+
+ return 0;
+}
+
+static int imx8mq_mipi_csi_start_stream(struct csi_state *state)
+{
+ int ret;
+
+ ret = imx8mq_mipi_csi_sw_reset(state);
+ if (ret)
+ return ret;
+
+ imx8mq_mipi_csi_set_params(state);
+ ret = imx8mq_mipi_csi_calc_hs_settle(state);
+ if (ret)
+ return ret;
+
+ imx8mq_mipi_csi_system_enable(state, true);
+
+ return 0;
+}
+
+static void imx8mq_mipi_csi_stop_stream(struct csi_state *state)
+{
+ imx8mq_mipi_csi_system_enable(state, false);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 subdev operations
+ */
+
+static struct csi_state *mipi_sd_to_csi2_state(struct v4l2_subdev *sdev)
+{
+ return container_of(sdev, struct csi_state, sd);
+}
+
+static int imx8mq_mipi_csi_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct csi_state *state = mipi_sd_to_csi2_state(sd);
+ int ret = 0;
+
+ if (enable) {
+ ret = pm_runtime_resume_and_get(state->dev);
+ if (ret < 0)
+ return ret;
+ }
+
+ mutex_lock(&state->lock);
+
+ if (enable) {
+ if (state->state & ST_SUSPENDED) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ ret = imx8mq_mipi_csi_start_stream(state);
+ if (ret < 0)
+ goto unlock;
+
+ ret = v4l2_subdev_call(state->src_sd, video, s_stream, 1);
+ if (ret < 0)
+ goto unlock;
+
+ state->state |= ST_STREAMING;
+ } else {
+ v4l2_subdev_call(state->src_sd, video, s_stream, 0);
+ imx8mq_mipi_csi_stop_stream(state);
+ state->state &= ~ST_STREAMING;
+ }
+
+unlock:
+ mutex_unlock(&state->lock);
+
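+	/* Drop the runtime PM reference when stopping or when start-up failed. */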
+ if (!enable || ret < 0)
+ pm_runtime_put(state->dev);
+
+ return ret;
+}
+
+static struct v4l2_mbus_framefmt *
+imx8mq_mipi_csi_get_format(struct csi_state *state,
+ struct v4l2_subdev_state *sd_state,
+ enum v4l2_subdev_format_whence which,
+ unsigned int pad)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(&state->sd, sd_state, pad);
+
+ return &state->format_mbus[pad];
+}
+
+static int imx8mq_mipi_csi_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
+{
+ struct csi_state *state = mipi_sd_to_csi2_state(sd);
+ struct v4l2_mbus_framefmt *fmt_sink;
+ struct v4l2_mbus_framefmt *fmt_source;
+ enum v4l2_subdev_format_whence which;
+
+ which = sd_state ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt_sink = imx8mq_mipi_csi_get_format(state, sd_state, which,
+ MIPI_CSI2_PAD_SINK);
+
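+	/* Default to a 10-bit Bayer format at the default frame size. */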
+ fmt_sink->code = MEDIA_BUS_FMT_SGBRG10_1X10;
+ fmt_sink->width = MIPI_CSI2_DEF_PIX_WIDTH;
+ fmt_sink->height = MIPI_CSI2_DEF_PIX_HEIGHT;
+ fmt_sink->field = V4L2_FIELD_NONE;
+
+ fmt_sink->colorspace = V4L2_COLORSPACE_RAW;
+ fmt_sink->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(fmt_sink->colorspace);
+ fmt_sink->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt_sink->colorspace);
+ fmt_sink->quantization =
+ V4L2_MAP_QUANTIZATION_DEFAULT(false, fmt_sink->colorspace,
+ fmt_sink->ycbcr_enc);
+
+ fmt_source = imx8mq_mipi_csi_get_format(state, sd_state, which,
+ MIPI_CSI2_PAD_SOURCE);
+ *fmt_source = *fmt_sink;
+
+ return 0;
+}
+
+static int imx8mq_mipi_csi_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct csi_state *state = mipi_sd_to_csi2_state(sd);
+ struct v4l2_mbus_framefmt *fmt;
+
+ fmt = imx8mq_mipi_csi_get_format(state, sd_state, sdformat->which,
+ sdformat->pad);
+
+ mutex_lock(&state->lock);
+
+ sdformat->format = *fmt;
+
+ mutex_unlock(&state->lock);
+
+ return 0;
+}
+
+static int imx8mq_mipi_csi_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct csi_state *state = mipi_sd_to_csi2_state(sd);
+
+	/*
+	 * We can't transcode in any way; the source format is identical
+	 * to the sink format.
+	 */
+ if (code->pad == MIPI_CSI2_PAD_SOURCE) {
+ struct v4l2_mbus_framefmt *fmt;
+
+ if (code->index > 0)
+ return -EINVAL;
+
+ fmt = imx8mq_mipi_csi_get_format(state, sd_state, code->which,
+ code->pad);
+ code->code = fmt->code;
+ return 0;
+ }
+
+ if (code->pad != MIPI_CSI2_PAD_SINK)
+ return -EINVAL;
+
+ if (code->index >= ARRAY_SIZE(imx8mq_mipi_csi_formats))
+ return -EINVAL;
+
+ code->code = imx8mq_mipi_csi_formats[code->index].code;
+
+ return 0;
+}
+
+static int imx8mq_mipi_csi_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct csi_state *state = mipi_sd_to_csi2_state(sd);
+ struct csi2_pix_format const *csi2_fmt;
+ struct v4l2_mbus_framefmt *fmt;
+
+	/*
+	 * The device can't transcode in any way; the source format can't be
+	 * modified.
+	 */
+ if (sdformat->pad == MIPI_CSI2_PAD_SOURCE)
+ return imx8mq_mipi_csi_get_fmt(sd, sd_state, sdformat);
+
+ if (sdformat->pad != MIPI_CSI2_PAD_SINK)
+ return -EINVAL;
+
+ csi2_fmt = find_csi2_format(sdformat->format.code);
+ if (!csi2_fmt)
+ csi2_fmt = &imx8mq_mipi_csi_formats[0];
+
+ fmt = imx8mq_mipi_csi_get_format(state, sd_state, sdformat->which,
+ sdformat->pad);
+
+ mutex_lock(&state->lock);
+
+ fmt->code = csi2_fmt->code;
+ fmt->width = sdformat->format.width;
+ fmt->height = sdformat->format.height;
+
+ sdformat->format = *fmt;
+
+ /* Propagate the format from sink to source. */
+ fmt = imx8mq_mipi_csi_get_format(state, sd_state, sdformat->which,
+ MIPI_CSI2_PAD_SOURCE);
+ *fmt = sdformat->format;
+
+ /* Store the CSI2 format descriptor for active formats. */
+ if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ state->csi2_fmt = csi2_fmt;
+
+ mutex_unlock(&state->lock);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops imx8mq_mipi_csi_video_ops = {
+ .s_stream = imx8mq_mipi_csi_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops imx8mq_mipi_csi_pad_ops = {
+ .init_cfg = imx8mq_mipi_csi_init_cfg,
+ .enum_mbus_code = imx8mq_mipi_csi_enum_mbus_code,
+ .get_fmt = imx8mq_mipi_csi_get_fmt,
+ .set_fmt = imx8mq_mipi_csi_set_fmt,
+};
+
+static const struct v4l2_subdev_ops imx8mq_mipi_csi_subdev_ops = {
+ .video = &imx8mq_mipi_csi_video_ops,
+ .pad = &imx8mq_mipi_csi_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media entity operations
+ */
+
+static const struct media_entity_operations imx8mq_mipi_csi_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+ .get_fwnode_pad = v4l2_subdev_get_fwnode_pad_1_to_1,
+};
+
+/* -----------------------------------------------------------------------------
+ * Async subdev notifier
+ */
+
+static struct csi_state *
+mipi_notifier_to_csi2_state(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct csi_state, notifier);
+}
+
+static int imx8mq_mipi_csi_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
+{
+ struct csi_state *state = mipi_notifier_to_csi2_state(notifier);
+ struct media_pad *sink = &state->sd.entity.pads[MIPI_CSI2_PAD_SINK];
+
+ state->src_sd = sd;
+
+ return v4l2_create_fwnode_links_to_pad(sd, sink, MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+}
+
+static const struct v4l2_async_notifier_operations imx8mq_mipi_csi_notify_ops = {
+ .bound = imx8mq_mipi_csi_notify_bound,
+};
+
+static int imx8mq_mipi_csi_async_register(struct csi_state *state)
+{
+ struct v4l2_fwnode_endpoint vep = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY,
+ };
+ struct v4l2_async_subdev *asd;
+ struct fwnode_handle *ep;
+ unsigned int i;
+ int ret;
+
+ v4l2_async_nf_init(&state->notifier);
+
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(state->dev), 0, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
+ if (!ep)
+ return -ENOTCONN;
+
+ ret = v4l2_fwnode_endpoint_parse(ep, &vep);
+ if (ret)
+ goto err_parse;
+
+ for (i = 0; i < vep.bus.mipi_csi2.num_data_lanes; ++i) {
+ if (vep.bus.mipi_csi2.data_lanes[i] != i + 1) {
+ dev_err(state->dev,
+ "data lanes reordering is not supported");
+ ret = -EINVAL;
+ goto err_parse;
+ }
+ }
+
+ state->bus = vep.bus.mipi_csi2;
+
+ dev_dbg(state->dev, "data lanes: %d flags: 0x%08x\n",
+ state->bus.num_data_lanes,
+ state->bus.flags);
+
+ asd = v4l2_async_nf_add_fwnode_remote(&state->notifier, ep,
+ struct v4l2_async_subdev);
+ if (IS_ERR(asd)) {
+ ret = PTR_ERR(asd);
+ goto err_parse;
+ }
+
+ fwnode_handle_put(ep);
+
+ state->notifier.ops = &imx8mq_mipi_csi_notify_ops;
+
+ ret = v4l2_async_subdev_nf_register(&state->sd, &state->notifier);
+ if (ret)
+ return ret;
+
+ return v4l2_async_register_subdev(&state->sd);
+
+err_parse:
+ fwnode_handle_put(ep);
+
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * Suspend/resume
+ */
+
+static void imx8mq_mipi_csi_pm_suspend(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct csi_state *state = mipi_sd_to_csi2_state(sd);
+
+ mutex_lock(&state->lock);
+
+ if (state->state & ST_POWERED) {
+ imx8mq_mipi_csi_stop_stream(state);
+ imx8mq_mipi_csi_clk_disable(state);
+ state->state &= ~ST_POWERED;
+ }
+
+ mutex_unlock(&state->lock);
+}
+
+static int imx8mq_mipi_csi_pm_resume(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct csi_state *state = mipi_sd_to_csi2_state(sd);
+ int ret = 0;
+
+ mutex_lock(&state->lock);
+
+ if (!(state->state & ST_POWERED)) {
+ state->state |= ST_POWERED;
+ ret = imx8mq_mipi_csi_clk_enable(state);
+ }
+ if (state->state & ST_STREAMING) {
+ ret = imx8mq_mipi_csi_start_stream(state);
+ if (ret)
+ goto unlock;
+ }
+
+ state->state &= ~ST_SUSPENDED;
+
+unlock:
+ mutex_unlock(&state->lock);
+
+ return ret ? -EAGAIN : 0;
+}
+
+static int __maybe_unused imx8mq_mipi_csi_suspend(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct csi_state *state = mipi_sd_to_csi2_state(sd);
+
+ imx8mq_mipi_csi_pm_suspend(dev);
+
+ state->state |= ST_SUSPENDED;
+
+ return 0;
+}
+
+static int __maybe_unused imx8mq_mipi_csi_resume(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct csi_state *state = mipi_sd_to_csi2_state(sd);
+
+ if (!(state->state & ST_SUSPENDED))
+ return 0;
+
+ return imx8mq_mipi_csi_pm_resume(dev);
+}
+
+static int __maybe_unused imx8mq_mipi_csi_runtime_suspend(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct csi_state *state = mipi_sd_to_csi2_state(sd);
+ int ret;
+
+ imx8mq_mipi_csi_pm_suspend(dev);
+
+ ret = icc_set_bw(state->icc_path, 0, 0);
+ if (ret)
+ dev_err(dev, "icc_set_bw failed with %d\n", ret);
+
+ return ret;
+}
+
+static int __maybe_unused imx8mq_mipi_csi_runtime_resume(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct csi_state *state = mipi_sd_to_csi2_state(sd);
+ int ret;
+
+ ret = icc_set_bw(state->icc_path, 0, state->icc_path_bw);
+ if (ret) {
+ dev_err(dev, "icc_set_bw failed with %d\n", ret);
+ return ret;
+ }
+
+ return imx8mq_mipi_csi_pm_resume(dev);
+}
+
+static const struct dev_pm_ops imx8mq_mipi_csi_pm_ops = {
+ SET_RUNTIME_PM_OPS(imx8mq_mipi_csi_runtime_suspend,
+ imx8mq_mipi_csi_runtime_resume,
+ NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(imx8mq_mipi_csi_suspend, imx8mq_mipi_csi_resume)
+};
+
+/* -----------------------------------------------------------------------------
+ * Probe/remove & platform driver
+ */
+
+static int imx8mq_mipi_csi_subdev_init(struct csi_state *state)
+{
+ struct v4l2_subdev *sd = &state->sd;
+
+ v4l2_subdev_init(sd, &imx8mq_mipi_csi_subdev_ops);
+ sd->owner = THIS_MODULE;
+ snprintf(sd->name, sizeof(sd->name), "%s %s",
+ MIPI_CSI2_SUBDEV_NAME, dev_name(state->dev));
+
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ sd->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ sd->entity.ops = &imx8mq_mipi_csi_entity_ops;
+
+ sd->dev = state->dev;
+
+ state->csi2_fmt = &imx8mq_mipi_csi_formats[0];
+ imx8mq_mipi_csi_init_cfg(sd, NULL);
+
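+	/* Require an enabled link on both pads for streaming (MEDIA_PAD_FL_MUST_CONNECT). */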
+ state->pads[MIPI_CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK
+ | MEDIA_PAD_FL_MUST_CONNECT;
+ state->pads[MIPI_CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE
+ | MEDIA_PAD_FL_MUST_CONNECT;
+ return media_entity_pads_init(&sd->entity, MIPI_CSI2_PADS_NUM,
+ state->pads);
+}
+
+static void imx8mq_mipi_csi_release_icc(struct platform_device *pdev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(&pdev->dev);
+ struct csi_state *state = mipi_sd_to_csi2_state(sd);
+
+ icc_put(state->icc_path);
+}
+
+static int imx8mq_mipi_csi_init_icc(struct platform_device *pdev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(&pdev->dev);
+ struct csi_state *state = mipi_sd_to_csi2_state(sd);
+
+ /* Optional interconnect request */
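+	/*
+	 * of_icc_get() returns NULL when no "interconnects" DT property is
+	 * present (or the interconnect framework is disabled); that is not an
+	 * error here since the bandwidth request is optional.
+	 */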
+ state->icc_path = of_icc_get(&pdev->dev, "dram");
+ if (IS_ERR_OR_NULL(state->icc_path))
+ return PTR_ERR_OR_ZERO(state->icc_path);
+
+ state->icc_path_bw = MBps_to_icc(700);
+
+ return 0;
+}
+
+static int imx8mq_mipi_csi_parse_dt(struct csi_state *state)
+{
+ struct device *dev = state->dev;
+ struct device_node *np = state->dev->of_node;
+ struct device_node *node;
+ phandle ph;
+ u32 out_val[2];
+ int ret = 0;
+
+ state->rst = devm_reset_control_array_get_exclusive(dev);
+ if (IS_ERR(state->rst)) {
+ dev_err(dev, "Failed to get reset: %pe\n", state->rst);
+ return PTR_ERR(state->rst);
+ }
+
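+	/* "fsl,mipi-phy-gpr" is a <syscon-phandle register-offset> pair. */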
+ ret = of_property_read_u32_array(np, "fsl,mipi-phy-gpr", out_val,
+ ARRAY_SIZE(out_val));
+ if (ret) {
+ dev_err(dev, "no fsl,mipi-phy-gpr property found: %d\n", ret);
+ return ret;
+ }
+
+ ph = *out_val;
+
+ node = of_find_node_by_phandle(ph);
+ if (!node) {
+ dev_err(dev, "Error finding node by phandle\n");
+ return -ENODEV;
+ }
+ state->phy_gpr = syscon_node_to_regmap(node);
+ of_node_put(node);
+ if (IS_ERR(state->phy_gpr)) {
+ dev_err(dev, "failed to get gpr regmap: %pe\n", state->phy_gpr);
+ return PTR_ERR(state->phy_gpr);
+ }
+
+ state->phy_gpr_reg = out_val[1];
+ dev_dbg(dev, "phy gpr register set to 0x%x\n", state->phy_gpr_reg);
+
+ return ret;
+}
+
+static int imx8mq_mipi_csi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct csi_state *state;
+ int ret;
+
+ state = devm_kzalloc(dev, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ state->dev = dev;
+
+ ret = imx8mq_mipi_csi_parse_dt(state);
+ if (ret < 0) {
+ dev_err(dev, "Failed to parse device tree: %d\n", ret);
+ return ret;
+ }
+
+ /* Acquire resources. */
+ state->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(state->regs))
+ return PTR_ERR(state->regs);
+
+ ret = imx8mq_mipi_csi_clk_get(state);
+ if (ret < 0)
+ return ret;
+
+ platform_set_drvdata(pdev, &state->sd);
+
+ mutex_init(&state->lock);
+
+ ret = imx8mq_mipi_csi_subdev_init(state);
+ if (ret < 0)
+ goto mutex;
+
+ ret = imx8mq_mipi_csi_init_icc(pdev);
+ if (ret)
+ goto mutex;
+
+ /* Enable runtime PM. */
+ pm_runtime_enable(dev);
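+	/*
+	 * If runtime PM is not available (CONFIG_PM disabled), power the
+	 * block up manually so the device still works.
+	 */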
+ if (!pm_runtime_enabled(dev)) {
+ ret = imx8mq_mipi_csi_runtime_resume(dev);
+ if (ret < 0)
+ goto icc;
+ }
+
+ ret = imx8mq_mipi_csi_async_register(state);
+ if (ret < 0)
+ goto cleanup;
+
+ return 0;
+
+cleanup:
+ pm_runtime_disable(&pdev->dev);
+ imx8mq_mipi_csi_runtime_suspend(&pdev->dev);
+
+ media_entity_cleanup(&state->sd.entity);
+ v4l2_async_nf_unregister(&state->notifier);
+ v4l2_async_nf_cleanup(&state->notifier);
+ v4l2_async_unregister_subdev(&state->sd);
+icc:
+ imx8mq_mipi_csi_release_icc(pdev);
+mutex:
+ mutex_destroy(&state->lock);
+
+ return ret;
+}
+
+static int imx8mq_mipi_csi_remove(struct platform_device *pdev)
+{
+ struct v4l2_subdev *sd = platform_get_drvdata(pdev);
+ struct csi_state *state = mipi_sd_to_csi2_state(sd);
+
+ v4l2_async_nf_unregister(&state->notifier);
+ v4l2_async_nf_cleanup(&state->notifier);
+ v4l2_async_unregister_subdev(&state->sd);
+
+ pm_runtime_disable(&pdev->dev);
+ imx8mq_mipi_csi_runtime_suspend(&pdev->dev);
+ media_entity_cleanup(&state->sd.entity);
+ mutex_destroy(&state->lock);
+ pm_runtime_set_suspended(&pdev->dev);
+ imx8mq_mipi_csi_release_icc(pdev);
+
+ return 0;
+}
+
+static const struct of_device_id imx8mq_mipi_csi_of_match[] = {
+ { .compatible = "fsl,imx8mq-mipi-csi2", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, imx8mq_mipi_csi_of_match);
+
+static struct platform_driver imx8mq_mipi_csi_driver = {
+ .probe = imx8mq_mipi_csi_probe,
+ .remove = imx8mq_mipi_csi_remove,
+ .driver = {
+ .of_match_table = imx8mq_mipi_csi_of_match,
+ .name = MIPI_CSI2_DRIVER_NAME,
+ .pm = &imx8mq_mipi_csi_pm_ops,
+ },
+};
+
+module_platform_driver(imx8mq_mipi_csi_driver);
+
+MODULE_DESCRIPTION("i.MX8MQ MIPI CSI-2 receiver driver");
+MODULE_AUTHOR("Martin Kepplinger <martin.kepplinger@puri.sm>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:imx8mq-mipi-csi2");