Diffstat (limited to 'drivers/media/platform/renesas')
-rw-r--r--  drivers/media/platform/renesas/Kconfig | 122
-rw-r--r--  drivers/media/platform/renesas/Makefile | 16
-rw-r--r--  drivers/media/platform/renesas/rcar-fcp.c | 180
-rw-r--r--  drivers/media/platform/renesas/rcar-isp.c | 531
-rw-r--r--  drivers/media/platform/renesas/rcar-vin/Kconfig | 32
-rw-r--r--  drivers/media/platform/renesas/rcar-vin/Makefile | 5
-rw-r--r--  drivers/media/platform/renesas/rcar-vin/rcar-core.c | 1456
-rw-r--r--  drivers/media/platform/renesas/rcar-vin/rcar-csi2.c | 1959
-rw-r--r--  drivers/media/platform/renesas/rcar-vin/rcar-dma.c | 1672
-rw-r--r--  drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c | 1148
-rw-r--r--  drivers/media/platform/renesas/rcar-vin/rcar-vin.h | 320
-rw-r--r--  drivers/media/platform/renesas/rcar_drif.c | 1486
-rw-r--r--  drivers/media/platform/renesas/rcar_fdp1.c | 2460
-rw-r--r--  drivers/media/platform/renesas/rcar_jpu.c | 1752
-rw-r--r--  drivers/media/platform/renesas/renesas-ceu.c | 1737
-rw-r--r--  drivers/media/platform/renesas/rzg2l-cru/Kconfig | 33
-rw-r--r--  drivers/media/platform/renesas/rzg2l-cru/Makefile | 6
-rw-r--r--  drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c | 335
-rw-r--r--  drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h | 154
-rw-r--r--  drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c | 872
-rw-r--r--  drivers/media/platform/renesas/rzg2l-cru/rzg2l-ip.c | 255
-rw-r--r--  drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c | 1058
-rw-r--r--  drivers/media/platform/renesas/sh_vou.c | 1374
-rw-r--r--  drivers/media/platform/renesas/vsp1/Makefile | 10
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1.h | 127
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_brx.c | 455
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_brx.h | 44
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_clu.c | 286
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_clu.h | 45
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_dl.c | 1169
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_dl.h | 79
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_drm.c | 1000
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_drm.h | 76
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_drv.c | 1021
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_entity.c | 700
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_entity.h | 192
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_hgo.c | 223
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_hgo.h | 41
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_hgt.c | 215
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_hgt.h | 38
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_histo.c | 593
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_histo.h | 77
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_hsit.c | 178
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_hsit.h | 34
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_lif.c | 178
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_lif.h | 33
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_lut.c | 242
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_lut.h | 42
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_pipe.c | 468
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_pipe.h | 177
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_regs.h | 886
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_rpf.c | 434
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_rwpf.c | 287
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_rwpf.h | 89
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_sru.c | 396
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_sru.h | 38
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_uds.c | 429
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_uds.h | 37
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_uif.c | 268
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_uif.h | 32
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_video.c | 1327
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_video.h | 61
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_wpf.c | 613
63 files changed, 31603 insertions, 0 deletions
diff --git a/drivers/media/platform/renesas/Kconfig b/drivers/media/platform/renesas/Kconfig
new file mode 100644
index 0000000000..ed788e991f
--- /dev/null
+++ b/drivers/media/platform/renesas/Kconfig
@@ -0,0 +1,122 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+comment "Renesas media platform drivers"
+
+# V4L drivers
+
+config VIDEO_RENESAS_CEU
+ tristate "Renesas Capture Engine Unit (CEU) driver"
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV
+ depends on ARCH_SHMOBILE || ARCH_R7S72100 || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_FWNODE
+ help
+ This is a V4L2 driver for the Renesas CEU interface.
+
+config VIDEO_RCAR_ISP
+ tristate "R-Car Image Signal Processor (ISP)"
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV && OF
+ depends on ARCH_RENESAS || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select RESET_CONTROLLER
+ select V4L2_FWNODE
+ help
+ Support for the Renesas R-Car Image Signal Processor (ISP).
+
+ To compile this driver as a module, choose M here: the
+ module will be called rcar-isp.
+
+config VIDEO_SH_VOU
+ tristate "SuperH VOU video output driver"
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV && I2C
+ depends on ARCH_SHMOBILE || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ help
+ Support for the Video Output Unit (VOU) on SuperH SoCs.
+
+source "drivers/media/platform/renesas/rcar-vin/Kconfig"
+source "drivers/media/platform/renesas/rzg2l-cru/Kconfig"
+
+# Mem2mem drivers
+
+config VIDEO_RENESAS_FCP
+ tristate "Renesas Frame Compression Processor"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on OF
+ help
+ This is a driver for the Renesas Frame Compression Processor (FCP).
+ The FCP is a companion module of video processing modules in the
+ Renesas R-Car Gen3 and RZ/G2 SoCs. It handles memory access for
+ the codec, VSP and FDP modules.
+
+ To compile this driver as a module, choose M here: the module
+ will be called rcar-fcp.
+
+config VIDEO_RENESAS_FDP1
+ tristate "Renesas Fine Display Processor"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on VIDEO_DEV
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on (!ARM64 && !VIDEO_RENESAS_FCP) || VIDEO_RENESAS_FCP
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ This is a V4L2 driver for the Renesas Fine Display Processor,
+ providing colour space conversion and de-interlacing features.
+
+ To compile this driver as a module, choose M here: the module
+ will be called rcar_fdp1.
+
+config VIDEO_RENESAS_JPU
+ tristate "Renesas JPEG Processing Unit"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on VIDEO_DEV
+ depends on ARCH_RENESAS || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ This is a V4L2 driver for the Renesas JPEG Processing Unit.
+
+ To compile this driver as a module, choose M here: the module
+ will be called rcar_jpu.
+
+config VIDEO_RENESAS_VSP1
+ tristate "Renesas VSP1 Video Processing Engine"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on VIDEO_DEV
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on (!ARM64 && !VIDEO_RENESAS_FCP) || VIDEO_RENESAS_FCP
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEOBUF2_VMALLOC
+ help
+ This is a V4L2 driver for the Renesas VSP1 video processing engine.
+
+ To compile this driver as a module, choose M here: the module
+ will be called vsp1.
+
+# SDR drivers
+
+config VIDEO_RCAR_DRIF
+ tristate "Renesas Digital Radio Interface (DRIF)"
+ depends on SDR_PLATFORM_DRIVERS
+ depends on VIDEO_DEV
+ depends on ARCH_RENESAS || COMPILE_TEST
+ select VIDEOBUF2_VMALLOC
+ select V4L2_ASYNC
+ help
+ Say Y if you want to enable R-Car Gen3 DRIF support. DRIF is a Digital
+ Radio Interface that interfaces with an RF front-end chip. It is a
+ receiver of digital data which uses DMA to transfer received data to
+ a configured location for an application to use.
+
+ To compile this driver as a module, choose M here; the module
+ will be called rcar_drif.
diff --git a/drivers/media/platform/renesas/Makefile b/drivers/media/platform/renesas/Makefile
new file mode 100644
index 0000000000..55854e8688
--- /dev/null
+++ b/drivers/media/platform/renesas/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Renesas capture/playback device drivers.
+#
+
+obj-y += rcar-vin/
+obj-y += rzg2l-cru/
+obj-y += vsp1/
+
+obj-$(CONFIG_VIDEO_RCAR_DRIF) += rcar_drif.o
+obj-$(CONFIG_VIDEO_RCAR_ISP) += rcar-isp.o
+obj-$(CONFIG_VIDEO_RENESAS_CEU) += renesas-ceu.o
+obj-$(CONFIG_VIDEO_RENESAS_FCP) += rcar-fcp.o
+obj-$(CONFIG_VIDEO_RENESAS_FDP1) += rcar_fdp1.o
+obj-$(CONFIG_VIDEO_RENESAS_JPU) += rcar_jpu.o
+obj-$(CONFIG_VIDEO_SH_VOU) += sh_vou.o
diff --git a/drivers/media/platform/renesas/rcar-fcp.c b/drivers/media/platform/renesas/rcar-fcp.c
new file mode 100644
index 0000000000..bcef7b87da
--- /dev/null
+++ b/drivers/media/platform/renesas/rcar-fcp.c
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * rcar-fcp.c -- R-Car Frame Compression Processor Driver
+ *
+ * Copyright (C) 2016 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include <media/rcar-fcp.h>
+
+struct rcar_fcp_device {
+ struct list_head list;
+ struct device *dev;
+};
+
+static LIST_HEAD(fcp_devices);
+static DEFINE_MUTEX(fcp_lock);
+
+/* -----------------------------------------------------------------------------
+ * Public API
+ */
+
+/**
+ * rcar_fcp_get - Find and acquire a reference to an FCP instance
+ * @np: Device node of the FCP instance
+ *
+ * Search the list of registered FCP instances for the instance corresponding to
+ * the given device node.
+ *
+ * Return a pointer to the FCP instance, or an ERR_PTR if the instance can't be
+ * found.
+ */
+struct rcar_fcp_device *rcar_fcp_get(const struct device_node *np)
+{
+ struct rcar_fcp_device *fcp;
+
+ mutex_lock(&fcp_lock);
+
+ list_for_each_entry(fcp, &fcp_devices, list) {
+ if (fcp->dev->of_node != np)
+ continue;
+
+ get_device(fcp->dev);
+ goto done;
+ }
+
+ fcp = ERR_PTR(-EPROBE_DEFER);
+
+done:
+ mutex_unlock(&fcp_lock);
+ return fcp;
+}
+EXPORT_SYMBOL_GPL(rcar_fcp_get);
+
+/**
+ * rcar_fcp_put - Release a reference to an FCP instance
+ * @fcp: The FCP instance
+ *
+ * Release the FCP instance acquired by a call to rcar_fcp_get().
+ */
+void rcar_fcp_put(struct rcar_fcp_device *fcp)
+{
+ if (fcp)
+ put_device(fcp->dev);
+}
+EXPORT_SYMBOL_GPL(rcar_fcp_put);
+
+struct device *rcar_fcp_get_device(struct rcar_fcp_device *fcp)
+{
+ return fcp->dev;
+}
+EXPORT_SYMBOL_GPL(rcar_fcp_get_device);
+
+/**
+ * rcar_fcp_enable - Enable an FCP
+ * @fcp: The FCP instance
+ *
+ * Before any memory access through an FCP is performed by a module, the FCP
+ * must be enabled by a call to this function. The enable calls are reference
+ * counted, each successful call must be followed by one rcar_fcp_disable()
+ * call when no more memory transfer can occur through the FCP.
+ *
+ * Return 0 on success or a negative error code if an error occurs. The enable
+ * reference count isn't increased when this function returns an error.
+ */
+int rcar_fcp_enable(struct rcar_fcp_device *fcp)
+{
+ if (!fcp)
+ return 0;
+
+ return pm_runtime_resume_and_get(fcp->dev);
+}
+EXPORT_SYMBOL_GPL(rcar_fcp_enable);
+
+/**
+ * rcar_fcp_disable - Disable an FCP
+ * @fcp: The FCP instance
+ *
+ * This function is the counterpart of rcar_fcp_enable(). As enable calls are
+ * reference counted a disable call may not disable the FCP synchronously.
+ */
+void rcar_fcp_disable(struct rcar_fcp_device *fcp)
+{
+ if (fcp)
+ pm_runtime_put(fcp->dev);
+}
+EXPORT_SYMBOL_GPL(rcar_fcp_disable);
+
+/* -----------------------------------------------------------------------------
+ * Platform Driver
+ */
+
+static int rcar_fcp_probe(struct platform_device *pdev)
+{
+ struct rcar_fcp_device *fcp;
+
+ fcp = devm_kzalloc(&pdev->dev, sizeof(*fcp), GFP_KERNEL);
+ if (fcp == NULL)
+ return -ENOMEM;
+
+ fcp->dev = &pdev->dev;
+
+ dma_set_max_seg_size(fcp->dev, UINT_MAX);
+
+ pm_runtime_enable(&pdev->dev);
+
+ mutex_lock(&fcp_lock);
+ list_add_tail(&fcp->list, &fcp_devices);
+ mutex_unlock(&fcp_lock);
+
+ platform_set_drvdata(pdev, fcp);
+
+ return 0;
+}
+
+static void rcar_fcp_remove(struct platform_device *pdev)
+{
+ struct rcar_fcp_device *fcp = platform_get_drvdata(pdev);
+
+ mutex_lock(&fcp_lock);
+ list_del(&fcp->list);
+ mutex_unlock(&fcp_lock);
+
+ pm_runtime_disable(&pdev->dev);
+}
+
+static const struct of_device_id rcar_fcp_of_match[] = {
+ { .compatible = "renesas,fcpf" },
+ { .compatible = "renesas,fcpv" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, rcar_fcp_of_match);
+
+static struct platform_driver rcar_fcp_platform_driver = {
+ .probe = rcar_fcp_probe,
+ .remove_new = rcar_fcp_remove,
+ .driver = {
+ .name = "rcar-fcp",
+ .of_match_table = rcar_fcp_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+module_platform_driver(rcar_fcp_platform_driver);
+
+MODULE_ALIAS("rcar-fcp");
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("Renesas FCP Driver");
+MODULE_LICENSE("GPL");
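
The kernel-doc comments above document the FCP consumer API: rcar_fcp_get() resolves a device node to an FCP instance (returning -EPROBE_DEFER until the FCP has probed), rcar_fcp_put() drops the reference, and rcar_fcp_enable()/rcar_fcp_disable() bracket memory transfers with reference-counted runtime PM. A minimal consumer sketch, assuming a hypothetical driver whose device node carries a "renesas,fcp" phandle (the pattern used by existing users such as the VSP1); the helper names here are illustrative only:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/of.h>

#include <media/rcar-fcp.h>

/* Resolve an optional "renesas,fcp" phandle to an FCP instance. */
static int example_acquire_fcp(struct device *dev, struct rcar_fcp_device **fcpp)
{
	struct device_node *fcp_node;
	struct rcar_fcp_device *fcp;

	fcp_node = of_parse_phandle(dev->of_node, "renesas,fcp", 0);
	if (!fcp_node) {
		*fcpp = NULL;			/* No FCP, nothing to do. */
		return 0;
	}

	fcp = rcar_fcp_get(fcp_node);		/* -EPROBE_DEFER until the FCP probes. */
	of_node_put(fcp_node);
	if (IS_ERR(fcp))
		return PTR_ERR(fcp);

	*fcpp = fcp;
	return 0;
}

/* Bracket memory transfers with the reference-counted enable/disable calls. */
static int example_transfer(struct rcar_fcp_device *fcp)
{
	int ret;

	ret = rcar_fcp_enable(fcp);
	if (ret < 0)
		return ret;

	/* ... perform DMA through the module served by this FCP ... */

	rcar_fcp_disable(fcp);

	return 0;
}

On unbind the reference taken by rcar_fcp_get() would be dropped with rcar_fcp_put(); both rcar_fcp_enable() and rcar_fcp_disable() accept a NULL instance, so the optional-FCP case needs no special handling at the call sites.
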
diff --git a/drivers/media/platform/renesas/rcar-isp.c b/drivers/media/platform/renesas/rcar-isp.c
new file mode 100644
index 0000000000..7360cf3863
--- /dev/null
+++ b/drivers/media/platform/renesas/rcar-isp.c
@@ -0,0 +1,531 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ *
+ * Driver for Renesas R-Car ISP Channel Selector
+ *
+ * The ISP hardware is capable of more than just channel selection, features
+ * such as demosaicing, white balance control and color space conversion are
+ * also possible. These more advanced features are not supported by the driver
+ * due to lack of documentation.
+ */
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include <media/mipi-csi2.h>
+#include <media/v4l2-subdev.h>
+
+#define ISPINPUTSEL0_REG 0x0008
+#define ISPINPUTSEL0_SEL_CSI0 BIT(31)
+
+#define ISPSTART_REG 0x0014
+#define ISPSTART_START 0xffff
+#define ISPSTART_STOP 0x0000
+
+#define ISPPROCMODE_DT_REG(n) (0x1100 + (0x4 * (n)))
+#define ISPPROCMODE_DT_PROC_MODE_VC3(pm) (((pm) & 0x3f) << 24)
+#define ISPPROCMODE_DT_PROC_MODE_VC2(pm) (((pm) & 0x3f) << 16)
+#define ISPPROCMODE_DT_PROC_MODE_VC1(pm) (((pm) & 0x3f) << 8)
+#define ISPPROCMODE_DT_PROC_MODE_VC0(pm) ((pm) & 0x3f)
+
+#define ISPCS_FILTER_ID_CH_REG(n) (0x3000 + (0x0100 * (n)))
+
+#define ISPCS_DT_CODE03_CH_REG(n) (0x3008 + (0x100 * (n)))
+#define ISPCS_DT_CODE03_EN3 BIT(31)
+#define ISPCS_DT_CODE03_DT3(dt) (((dt) & 0x3f) << 24)
+#define ISPCS_DT_CODE03_EN2 BIT(23)
+#define ISPCS_DT_CODE03_DT2(dt) (((dt) & 0x3f) << 16)
+#define ISPCS_DT_CODE03_EN1 BIT(15)
+#define ISPCS_DT_CODE03_DT1(dt) (((dt) & 0x3f) << 8)
+#define ISPCS_DT_CODE03_EN0 BIT(7)
+#define ISPCS_DT_CODE03_DT0(dt) ((dt) & 0x3f)
+
+struct rcar_isp_format {
+ u32 code;
+ unsigned int datatype;
+ unsigned int procmode;
+};
+
+static const struct rcar_isp_format rcar_isp_formats[] = {
+ {
+ .code = MEDIA_BUS_FMT_RGB888_1X24,
+ .datatype = MIPI_CSI2_DT_RGB888,
+ .procmode = 0x15
+ }, {
+ .code = MEDIA_BUS_FMT_Y10_1X10,
+ .datatype = MIPI_CSI2_DT_RAW10,
+ .procmode = 0x10,
+ }, {
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .datatype = MIPI_CSI2_DT_YUV422_8B,
+ .procmode = 0x0c,
+ }, {
+ .code = MEDIA_BUS_FMT_YUYV8_1X16,
+ .datatype = MIPI_CSI2_DT_YUV422_8B,
+ .procmode = 0x0c,
+ }, {
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .datatype = MIPI_CSI2_DT_YUV422_8B,
+ .procmode = 0x0c,
+ }, {
+ .code = MEDIA_BUS_FMT_YUYV10_2X10,
+ .datatype = MIPI_CSI2_DT_YUV422_8B,
+ .procmode = 0x0c,
+ },
+};
+
+static const struct rcar_isp_format *risp_code_to_fmt(unsigned int code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(rcar_isp_formats); i++) {
+ if (rcar_isp_formats[i].code == code)
+ return &rcar_isp_formats[i];
+ }
+
+ return NULL;
+}
+
+enum rcar_isp_input {
+ RISP_CSI_INPUT0,
+ RISP_CSI_INPUT1,
+};
+
+enum rcar_isp_pads {
+ RCAR_ISP_SINK,
+ RCAR_ISP_PORT0,
+ RCAR_ISP_PORT1,
+ RCAR_ISP_PORT2,
+ RCAR_ISP_PORT3,
+ RCAR_ISP_PORT4,
+ RCAR_ISP_PORT5,
+ RCAR_ISP_PORT6,
+ RCAR_ISP_PORT7,
+ RCAR_ISP_NUM_PADS,
+};
+
+struct rcar_isp {
+ struct device *dev;
+ void __iomem *base;
+ struct reset_control *rstc;
+
+ enum rcar_isp_input csi_input;
+
+ struct v4l2_subdev subdev;
+ struct media_pad pads[RCAR_ISP_NUM_PADS];
+
+ struct v4l2_async_notifier notifier;
+ struct v4l2_subdev *remote;
+
+ struct mutex lock; /* Protects mf and stream_count. */
+ struct v4l2_mbus_framefmt mf;
+ int stream_count;
+};
+
+static inline struct rcar_isp *sd_to_isp(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct rcar_isp, subdev);
+}
+
+static inline struct rcar_isp *notifier_to_isp(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct rcar_isp, notifier);
+}
+
+static void risp_write(struct rcar_isp *isp, u32 offset, u32 value)
+{
+ iowrite32(value, isp->base + offset);
+}
+
+static u32 risp_read(struct rcar_isp *isp, u32 offset)
+{
+ return ioread32(isp->base + offset);
+}
+
+static int risp_power_on(struct rcar_isp *isp)
+{
+ int ret;
+
+ ret = pm_runtime_resume_and_get(isp->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = reset_control_deassert(isp->rstc);
+ if (ret < 0) {
+ pm_runtime_put(isp->dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void risp_power_off(struct rcar_isp *isp)
+{
+ reset_control_assert(isp->rstc);
+ pm_runtime_put(isp->dev);
+}
+
+static int risp_start(struct rcar_isp *isp)
+{
+ const struct rcar_isp_format *format;
+ unsigned int vc;
+ u32 sel_csi = 0;
+ int ret;
+
+ format = risp_code_to_fmt(isp->mf.code);
+ if (!format) {
+ dev_err(isp->dev, "Unsupported bus format\n");
+ return -EINVAL;
+ }
+
+ ret = risp_power_on(isp);
+ if (ret) {
+ dev_err(isp->dev, "Failed to power on ISP\n");
+ return ret;
+ }
+
+ /* Select CSI-2 input source. */
+ if (isp->csi_input == RISP_CSI_INPUT1)
+ sel_csi = ISPINPUTSEL0_SEL_CSI0;
+
+ risp_write(isp, ISPINPUTSEL0_REG,
+ risp_read(isp, ISPINPUTSEL0_REG) | sel_csi);
+
+ /* Configure Channel Selector. */
+ for (vc = 0; vc < 4; vc++) {
+ u8 ch = vc + 4;
+ u8 dt = format->datatype;
+
+ risp_write(isp, ISPCS_FILTER_ID_CH_REG(ch), BIT(vc));
+ risp_write(isp, ISPCS_DT_CODE03_CH_REG(ch),
+ ISPCS_DT_CODE03_EN3 | ISPCS_DT_CODE03_DT3(dt) |
+ ISPCS_DT_CODE03_EN2 | ISPCS_DT_CODE03_DT2(dt) |
+ ISPCS_DT_CODE03_EN1 | ISPCS_DT_CODE03_DT1(dt) |
+ ISPCS_DT_CODE03_EN0 | ISPCS_DT_CODE03_DT0(dt));
+ }
+
+ /* Setup processing method. */
+ risp_write(isp, ISPPROCMODE_DT_REG(format->datatype),
+ ISPPROCMODE_DT_PROC_MODE_VC3(format->procmode) |
+ ISPPROCMODE_DT_PROC_MODE_VC2(format->procmode) |
+ ISPPROCMODE_DT_PROC_MODE_VC1(format->procmode) |
+ ISPPROCMODE_DT_PROC_MODE_VC0(format->procmode));
+
+ /* Start ISP. */
+ risp_write(isp, ISPSTART_REG, ISPSTART_START);
+
+ ret = v4l2_subdev_call(isp->remote, video, s_stream, 1);
+ if (ret)
+ risp_power_off(isp);
+
+ return ret;
+}
+
+static void risp_stop(struct rcar_isp *isp)
+{
+ v4l2_subdev_call(isp->remote, video, s_stream, 0);
+
+ /* Stop ISP. */
+ risp_write(isp, ISPSTART_REG, ISPSTART_STOP);
+
+ risp_power_off(isp);
+}
+
+static int risp_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct rcar_isp *isp = sd_to_isp(sd);
+ int ret = 0;
+
+ mutex_lock(&isp->lock);
+
+ if (!isp->remote) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (enable && isp->stream_count == 0) {
+ ret = risp_start(isp);
+ if (ret)
+ goto out;
+ } else if (!enable && isp->stream_count == 1) {
+ risp_stop(isp);
+ }
+
+ isp->stream_count += enable ? 1 : -1;
+out:
+ mutex_unlock(&isp->lock);
+
+ return ret;
+}
+
+static const struct v4l2_subdev_video_ops risp_video_ops = {
+ .s_stream = risp_s_stream,
+};
+
+static int risp_set_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *format)
+{
+ struct rcar_isp *isp = sd_to_isp(sd);
+ struct v4l2_mbus_framefmt *framefmt;
+
+ mutex_lock(&isp->lock);
+
+ if (!risp_code_to_fmt(format->format.code))
+ format->format.code = rcar_isp_formats[0].code;
+
+ if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ isp->mf = format->format;
+ } else {
+ framefmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
+ *framefmt = format->format;
+ }
+
+ mutex_unlock(&isp->lock);
+
+ return 0;
+}
+
+static int risp_get_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *format)
+{
+ struct rcar_isp *isp = sd_to_isp(sd);
+
+ mutex_lock(&isp->lock);
+
+ if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ format->format = isp->mf;
+ else
+ format->format = *v4l2_subdev_get_try_format(sd, sd_state, 0);
+
+ mutex_unlock(&isp->lock);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops risp_pad_ops = {
+ .set_fmt = risp_set_pad_format,
+ .get_fmt = risp_get_pad_format,
+ .link_validate = v4l2_subdev_link_validate_default,
+};
+
+static const struct v4l2_subdev_ops rcar_isp_subdev_ops = {
+ .video = &risp_video_ops,
+ .pad = &risp_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * Async handling and registration of subdevices and links
+ */
+
+static int risp_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_connection *asd)
+{
+ struct rcar_isp *isp = notifier_to_isp(notifier);
+ int pad;
+
+ pad = media_entity_get_fwnode_pad(&subdev->entity, asd->match.fwnode,
+ MEDIA_PAD_FL_SOURCE);
+ if (pad < 0) {
+ dev_err(isp->dev, "Failed to find pad for %s\n", subdev->name);
+ return pad;
+ }
+
+ isp->remote = subdev;
+
+ dev_dbg(isp->dev, "Bound %s pad: %d\n", subdev->name, pad);
+
+ return media_create_pad_link(&subdev->entity, pad,
+ &isp->subdev.entity, 0,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+}
+
+static void risp_notify_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_connection *asd)
+{
+ struct rcar_isp *isp = notifier_to_isp(notifier);
+
+ isp->remote = NULL;
+
+ dev_dbg(isp->dev, "Unbind %s\n", subdev->name);
+}
+
+static const struct v4l2_async_notifier_operations risp_notify_ops = {
+ .bound = risp_notify_bound,
+ .unbind = risp_notify_unbind,
+};
+
+static int risp_parse_dt(struct rcar_isp *isp)
+{
+ struct v4l2_async_connection *asd;
+ struct fwnode_handle *fwnode;
+ struct fwnode_handle *ep;
+ unsigned int id;
+ int ret;
+
+ for (id = 0; id < 2; id++) {
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(isp->dev),
+ 0, id, 0);
+ if (ep)
+ break;
+ }
+
+ if (!ep) {
+ dev_err(isp->dev, "Not connected to subdevice\n");
+ return -EINVAL;
+ }
+
+ if (id == 1)
+ isp->csi_input = RISP_CSI_INPUT1;
+
+ fwnode = fwnode_graph_get_remote_endpoint(ep);
+ fwnode_handle_put(ep);
+
+ dev_dbg(isp->dev, "Found '%pOF'\n", to_of_node(fwnode));
+
+ v4l2_async_subdev_nf_init(&isp->notifier, &isp->subdev);
+ isp->notifier.ops = &risp_notify_ops;
+
+ asd = v4l2_async_nf_add_fwnode(&isp->notifier, fwnode,
+ struct v4l2_async_connection);
+ fwnode_handle_put(fwnode);
+ if (IS_ERR(asd))
+ return PTR_ERR(asd);
+
+ ret = v4l2_async_nf_register(&isp->notifier);
+ if (ret)
+ v4l2_async_nf_cleanup(&isp->notifier);
+
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * Platform Device Driver
+ */
+
+static const struct media_entity_operations risp_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static int risp_probe_resources(struct rcar_isp *isp,
+ struct platform_device *pdev)
+{
+ isp->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(isp->base))
+ return PTR_ERR(isp->base);
+
+ isp->rstc = devm_reset_control_get(&pdev->dev, NULL);
+
+ return PTR_ERR_OR_ZERO(isp->rstc);
+}
+
+static const struct of_device_id risp_of_id_table[] = {
+ { .compatible = "renesas,r8a779a0-isp" },
+ { .compatible = "renesas,r8a779g0-isp" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, risp_of_id_table);
+
+static int risp_probe(struct platform_device *pdev)
+{
+ struct rcar_isp *isp;
+ unsigned int i;
+ int ret;
+
+ isp = devm_kzalloc(&pdev->dev, sizeof(*isp), GFP_KERNEL);
+ if (!isp)
+ return -ENOMEM;
+
+ isp->dev = &pdev->dev;
+
+ mutex_init(&isp->lock);
+
+ ret = risp_probe_resources(isp, pdev);
+ if (ret) {
+ dev_err(isp->dev, "Failed to get resources\n");
+ goto error_mutex;
+ }
+
+ platform_set_drvdata(pdev, isp);
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = risp_parse_dt(isp);
+ if (ret)
+ goto error_pm;
+
+ isp->subdev.owner = THIS_MODULE;
+ isp->subdev.dev = &pdev->dev;
+ v4l2_subdev_init(&isp->subdev, &rcar_isp_subdev_ops);
+ v4l2_set_subdevdata(&isp->subdev, &pdev->dev);
+ snprintf(isp->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s %s",
+ KBUILD_MODNAME, dev_name(&pdev->dev));
+ isp->subdev.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ isp->subdev.entity.function = MEDIA_ENT_F_VID_MUX;
+ isp->subdev.entity.ops = &risp_entity_ops;
+
+ isp->pads[RCAR_ISP_SINK].flags = MEDIA_PAD_FL_SINK;
+ for (i = RCAR_ISP_PORT0; i < RCAR_ISP_NUM_PADS; i++)
+ isp->pads[i].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&isp->subdev.entity, RCAR_ISP_NUM_PADS,
+ isp->pads);
+ if (ret)
+ goto error_notifier;
+
+ ret = v4l2_async_register_subdev(&isp->subdev);
+ if (ret < 0)
+ goto error_notifier;
+
+ dev_info(isp->dev, "Using CSI-2 input: %u\n", isp->csi_input);
+
+ return 0;
+error_notifier:
+ v4l2_async_nf_unregister(&isp->notifier);
+ v4l2_async_nf_cleanup(&isp->notifier);
+error_pm:
+ pm_runtime_disable(&pdev->dev);
+error_mutex:
+ mutex_destroy(&isp->lock);
+
+ return ret;
+}
+
+static void risp_remove(struct platform_device *pdev)
+{
+ struct rcar_isp *isp = platform_get_drvdata(pdev);
+
+ v4l2_async_nf_unregister(&isp->notifier);
+ v4l2_async_nf_cleanup(&isp->notifier);
+
+ v4l2_async_unregister_subdev(&isp->subdev);
+
+ pm_runtime_disable(&pdev->dev);
+
+ mutex_destroy(&isp->lock);
+}
+
+static struct platform_driver rcar_isp_driver = {
+ .driver = {
+ .name = "rcar-isp",
+ .of_match_table = risp_of_id_table,
+ },
+ .probe = risp_probe,
+ .remove_new = risp_remove,
+};
+
+module_platform_driver(rcar_isp_driver);
+
+MODULE_AUTHOR("Niklas Söderlund <niklas.soderlund@ragnatech.se>");
+MODULE_DESCRIPTION("Renesas R-Car ISP Channel Selector driver");
+MODULE_LICENSE("GPL");
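
As a worked illustration of the bit packing performed in risp_start() above, the sketch below restates the ISPPROCMODE_DT_PROC_MODE_VCn() and ISPCS_DT_CODE03_* macros in a standalone userspace program and computes the values for the YUV422 entry of rcar_isp_formats[] (data type MIPI_CSI2_DT_YUV422_8B, assumed here to be 0x1e, and procmode 0x0c). It is only a sketch of the register layout, not part of the driver:

#include <stdio.h>

#define BIT(n)					(1U << (n))

#define ISPPROCMODE_DT_PROC_MODE_VC3(pm)	(((pm) & 0x3f) << 24)
#define ISPPROCMODE_DT_PROC_MODE_VC2(pm)	(((pm) & 0x3f) << 16)
#define ISPPROCMODE_DT_PROC_MODE_VC1(pm)	(((pm) & 0x3f) << 8)
#define ISPPROCMODE_DT_PROC_MODE_VC0(pm)	((pm) & 0x3f)

#define ISPCS_DT_CODE03_EN3			BIT(31)
#define ISPCS_DT_CODE03_DT3(dt)			(((dt) & 0x3f) << 24)
#define ISPCS_DT_CODE03_EN2			BIT(23)
#define ISPCS_DT_CODE03_DT2(dt)			(((dt) & 0x3f) << 16)
#define ISPCS_DT_CODE03_EN1			BIT(15)
#define ISPCS_DT_CODE03_DT1(dt)			(((dt) & 0x3f) << 8)
#define ISPCS_DT_CODE03_EN0			BIT(7)
#define ISPCS_DT_CODE03_DT0(dt)			((dt) & 0x3f)

int main(void)
{
	unsigned int dt = 0x1e;	/* MIPI_CSI2_DT_YUV422_8B (assumed value) */
	unsigned int pm = 0x0c;	/* procmode for YUV422 in rcar_isp_formats[] */

	/* The same processing mode is replicated for all four virtual channels. */
	unsigned int procmode = ISPPROCMODE_DT_PROC_MODE_VC3(pm) |
				ISPPROCMODE_DT_PROC_MODE_VC2(pm) |
				ISPPROCMODE_DT_PROC_MODE_VC1(pm) |
				ISPPROCMODE_DT_PROC_MODE_VC0(pm);

	/* One enable bit plus one 6-bit data type code per slot. */
	unsigned int dtcode = ISPCS_DT_CODE03_EN3 | ISPCS_DT_CODE03_DT3(dt) |
			      ISPCS_DT_CODE03_EN2 | ISPCS_DT_CODE03_DT2(dt) |
			      ISPCS_DT_CODE03_EN1 | ISPCS_DT_CODE03_DT1(dt) |
			      ISPCS_DT_CODE03_EN0 | ISPCS_DT_CODE03_DT0(dt);

	printf("ISPPROCMODE_DT  = 0x%08x\n", procmode);	/* 0x0c0c0c0c */
	printf("ISPCS_DT_CODE03 = 0x%08x\n", dtcode);	/* 0x9e9e9e9e */

	return 0;
}

Both registers replicate the same 6-bit field once per virtual channel, with ISPCS_DT_CODE03 additionally setting an enable bit per slot, which is why the values come out identical in every byte.
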
diff --git a/drivers/media/platform/renesas/rcar-vin/Kconfig b/drivers/media/platform/renesas/rcar-vin/Kconfig
new file mode 100644
index 0000000000..de55fe63d8
--- /dev/null
+++ b/drivers/media/platform/renesas/rcar-vin/Kconfig
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0
+config VIDEO_RCAR_CSI2
+ tristate "R-Car MIPI CSI-2 Receiver"
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV && OF
+ depends on ARCH_RENESAS || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select RESET_CONTROLLER
+ select V4L2_FWNODE
+ help
+ Support for Renesas R-Car MIPI CSI-2 receiver.
+ Supports R-Car Gen3 and RZ/G2 SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rcar-csi2.
+
+config VIDEO_RCAR_VIN
+ tristate "R-Car Video Input (VIN) Driver"
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV && OF
+ depends on ARCH_RENESAS || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_FWNODE
+ help
+ Support for Renesas R-Car Video Input (VIN) driver.
+ Supports R-Car Gen{2,3} and RZ/G{1,2} SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rcar-vin.
diff --git a/drivers/media/platform/renesas/rcar-vin/Makefile b/drivers/media/platform/renesas/rcar-vin/Makefile
new file mode 100644
index 0000000000..00d809f5d2
--- /dev/null
+++ b/drivers/media/platform/renesas/rcar-vin/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+rcar-vin-objs = rcar-core.o rcar-dma.o rcar-v4l2.o
+
+obj-$(CONFIG_VIDEO_RCAR_CSI2) += rcar-csi2.o
+obj-$(CONFIG_VIDEO_RCAR_VIN) += rcar-vin.o
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-core.c b/drivers/media/platform/renesas/rcar-vin/rcar-core.c
new file mode 100644
index 0000000000..809c3a38cc
--- /dev/null
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-core.c
@@ -0,0 +1,1456 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for Renesas R-Car VIN
+ *
+ * Copyright (C) 2016 Renesas Electronics Corp.
+ * Copyright (C) 2011-2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Cogent Embedded, Inc., <source@cogentembedded.com>
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * Based on the soc-camera rcar_vin driver
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mc.h>
+
+#include "rcar-vin.h"
+
+/*
+ * The companion CSI-2 receiver driver (rcar-csi2) is known
+ * and we know it has one sink pad (pad 0) and four source
+ * pads (pads 1-4). So to translate a pad on the remote
+ * CSI-2 receiver to/from the VIN internal channel number simply
+ * subtract/add one from the pad/channel number.
+ */
+#define rvin_group_csi_pad_to_channel(pad) ((pad) - 1)
+#define rvin_group_csi_channel_to_pad(channel) ((channel) + 1)
+
+/*
+ * Not all VINs are created equal, master VINs control the
+ * routing for other VIN's. We can figure out which VIN is
+ * master by looking at a VINs id.
+ */
+#define rvin_group_id_to_master(vin) ((vin) < 4 ? 0 : 4)
+
+#define v4l2_dev_to_vin(d) container_of(d, struct rvin_dev, v4l2_dev)
+
+/* -----------------------------------------------------------------------------
+ * Gen3 Group Allocator
+ */
+
+/* FIXME: If we ever find a system that supports more than one
+ * group for the whole system, this should be replaced with a
+ * linked list of groups. Eventually all of this should be
+ * replaced with a global device allocator API.
+ *
+ * But for now this works, as on all supported systems there will
+ * be only one group for all instances.
+ */
+
+static DEFINE_MUTEX(rvin_group_lock);
+static struct rvin_group *rvin_group_data;
+
+static void rvin_group_cleanup(struct rvin_group *group)
+{
+ media_device_cleanup(&group->mdev);
+ mutex_destroy(&group->lock);
+}
+
+static int rvin_group_init(struct rvin_group *group, struct rvin_dev *vin,
+ int (*link_setup)(struct rvin_dev *),
+ const struct media_device_ops *ops)
+{
+ struct media_device *mdev = &group->mdev;
+ const struct of_device_id *match;
+ struct device_node *np;
+
+ mutex_init(&group->lock);
+
+ /* Count number of VINs in the system */
+ group->count = 0;
+ for_each_matching_node(np, vin->dev->driver->of_match_table)
+ if (of_device_is_available(np))
+ group->count++;
+
+ vin_dbg(vin, "found %u enabled VINs in DT", group->count);
+
+ group->link_setup = link_setup;
+
+ mdev->dev = vin->dev;
+ mdev->ops = ops;
+
+ match = of_match_node(vin->dev->driver->of_match_table,
+ vin->dev->of_node);
+
+ strscpy(mdev->driver_name, KBUILD_MODNAME, sizeof(mdev->driver_name));
+ strscpy(mdev->model, match->compatible, sizeof(mdev->model));
+
+ media_device_init(mdev);
+
+ return 0;
+}
+
+static void rvin_group_release(struct kref *kref)
+{
+ struct rvin_group *group =
+ container_of(kref, struct rvin_group, refcount);
+
+ mutex_lock(&rvin_group_lock);
+
+ rvin_group_data = NULL;
+
+ rvin_group_cleanup(group);
+
+ kfree(group);
+
+ mutex_unlock(&rvin_group_lock);
+}
+
+static int rvin_group_get(struct rvin_dev *vin,
+ int (*link_setup)(struct rvin_dev *),
+ const struct media_device_ops *ops)
+{
+ struct rvin_group *group;
+ u32 id;
+ int ret;
+
+ /* Make sure VIN id is present and sane */
+ ret = of_property_read_u32(vin->dev->of_node, "renesas,id", &id);
+ if (ret) {
+ vin_err(vin, "%pOF: No renesas,id property found\n",
+ vin->dev->of_node);
+ return -EINVAL;
+ }
+
+ if (id >= RCAR_VIN_NUM) {
+ vin_err(vin, "%pOF: Invalid renesas,id '%u'\n",
+ vin->dev->of_node, id);
+ return -EINVAL;
+ }
+
+ /* Join or create a VIN group */
+ mutex_lock(&rvin_group_lock);
+ if (rvin_group_data) {
+ group = rvin_group_data;
+ kref_get(&group->refcount);
+ } else {
+ group = kzalloc(sizeof(*group), GFP_KERNEL);
+ if (!group) {
+ ret = -ENOMEM;
+ goto err_group;
+ }
+
+ ret = rvin_group_init(group, vin, link_setup, ops);
+ if (ret) {
+ kfree(group);
+ vin_err(vin, "Failed to initialize group\n");
+ goto err_group;
+ }
+
+ kref_init(&group->refcount);
+
+ rvin_group_data = group;
+ }
+ mutex_unlock(&rvin_group_lock);
+
+ /* Add VIN to group */
+ mutex_lock(&group->lock);
+
+ if (group->vin[id]) {
+ vin_err(vin, "Duplicate renesas,id property value %u\n", id);
+ mutex_unlock(&group->lock);
+ kref_put(&group->refcount, rvin_group_release);
+ return -EINVAL;
+ }
+
+ group->vin[id] = vin;
+
+ vin->id = id;
+ vin->group = group;
+ vin->v4l2_dev.mdev = &group->mdev;
+
+ mutex_unlock(&group->lock);
+
+ return 0;
+err_group:
+ mutex_unlock(&rvin_group_lock);
+ return ret;
+}
+
+static void rvin_group_put(struct rvin_dev *vin)
+{
+ struct rvin_group *group = vin->group;
+
+ mutex_lock(&group->lock);
+
+ vin->group = NULL;
+ vin->v4l2_dev.mdev = NULL;
+
+ if (WARN_ON(group->vin[vin->id] != vin))
+ goto out;
+
+ group->vin[vin->id] = NULL;
+out:
+ mutex_unlock(&group->lock);
+
+ kref_put(&group->refcount, rvin_group_release);
+}
+
+/* group lock should be held when calling this function. */
+static int rvin_group_entity_to_remote_id(struct rvin_group *group,
+ struct media_entity *entity)
+{
+ struct v4l2_subdev *sd;
+ unsigned int i;
+
+ sd = media_entity_to_v4l2_subdev(entity);
+
+ for (i = 0; i < RVIN_REMOTES_MAX; i++)
+ if (group->remotes[i].subdev == sd)
+ return i;
+
+ return -ENODEV;
+}
+
+static int rvin_group_notify_complete(struct v4l2_async_notifier *notifier)
+{
+ struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
+ unsigned int i;
+ int ret;
+
+ ret = media_device_register(&vin->group->mdev);
+ if (ret)
+ return ret;
+
+ ret = v4l2_device_register_subdev_nodes(&vin->v4l2_dev);
+ if (ret) {
+ vin_err(vin, "Failed to register subdev nodes\n");
+ return ret;
+ }
+
+ /* Register all video nodes for the group. */
+ for (i = 0; i < RCAR_VIN_NUM; i++) {
+ if (vin->group->vin[i] &&
+ !video_is_registered(&vin->group->vin[i]->vdev)) {
+ ret = rvin_v4l2_register(vin->group->vin[i]);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return vin->group->link_setup(vin);
+}
+
+static void rvin_group_notify_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_connection *asc)
+{
+ struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
+ unsigned int i;
+
+ for (i = 0; i < RCAR_VIN_NUM; i++)
+ if (vin->group->vin[i])
+ rvin_v4l2_unregister(vin->group->vin[i]);
+
+ mutex_lock(&vin->group->lock);
+
+ for (i = 0; i < RVIN_CSI_MAX; i++) {
+ if (vin->group->remotes[i].asc != asc)
+ continue;
+ vin->group->remotes[i].subdev = NULL;
+ vin_dbg(vin, "Unbind %s from slot %u\n", subdev->name, i);
+ break;
+ }
+
+ mutex_unlock(&vin->group->lock);
+
+ media_device_unregister(&vin->group->mdev);
+}
+
+static int rvin_group_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_connection *asc)
+{
+ struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
+ unsigned int i;
+
+ mutex_lock(&vin->group->lock);
+
+ for (i = 0; i < RVIN_CSI_MAX; i++) {
+ if (vin->group->remotes[i].asc != asc)
+ continue;
+ vin->group->remotes[i].subdev = subdev;
+ vin_dbg(vin, "Bound %s to slot %u\n", subdev->name, i);
+ break;
+ }
+
+ mutex_unlock(&vin->group->lock);
+
+ return 0;
+}
+
+static const struct v4l2_async_notifier_operations rvin_group_notify_ops = {
+ .bound = rvin_group_notify_bound,
+ .unbind = rvin_group_notify_unbind,
+ .complete = rvin_group_notify_complete,
+};
+
+static int rvin_group_parse_of(struct rvin_dev *vin, unsigned int port,
+ unsigned int id)
+{
+ struct fwnode_handle *ep, *fwnode;
+ struct v4l2_fwnode_endpoint vep = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY,
+ };
+ struct v4l2_async_connection *asc;
+ int ret;
+
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(vin->dev), port, id, 0);
+ if (!ep)
+ return 0;
+
+ fwnode = fwnode_graph_get_remote_endpoint(ep);
+ ret = v4l2_fwnode_endpoint_parse(ep, &vep);
+ fwnode_handle_put(ep);
+ if (ret) {
+ vin_err(vin, "Failed to parse %pOF\n", to_of_node(fwnode));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ asc = v4l2_async_nf_add_fwnode(&vin->group->notifier, fwnode,
+ struct v4l2_async_connection);
+ if (IS_ERR(asc)) {
+ ret = PTR_ERR(asc);
+ goto out;
+ }
+
+ vin->group->remotes[vep.base.id].asc = asc;
+
+ vin_dbg(vin, "Add group OF device %pOF to slot %u\n",
+ to_of_node(fwnode), vep.base.id);
+out:
+ fwnode_handle_put(fwnode);
+
+ return ret;
+}
+
+static void rvin_group_notifier_cleanup(struct rvin_dev *vin)
+{
+ if (&vin->v4l2_dev == vin->group->notifier.v4l2_dev) {
+ v4l2_async_nf_unregister(&vin->group->notifier);
+ v4l2_async_nf_cleanup(&vin->group->notifier);
+ }
+}
+
+static int rvin_group_notifier_init(struct rvin_dev *vin, unsigned int port,
+ unsigned int max_id)
+{
+ unsigned int count = 0, vin_mask = 0;
+ unsigned int i, id;
+ int ret;
+
+ mutex_lock(&vin->group->lock);
+
+ /* If not all VINs are registered, don't register the notifier. */
+ for (i = 0; i < RCAR_VIN_NUM; i++) {
+ if (vin->group->vin[i]) {
+ count++;
+ vin_mask |= BIT(i);
+ }
+ }
+
+ if (vin->group->count != count) {
+ mutex_unlock(&vin->group->lock);
+ return 0;
+ }
+
+ mutex_unlock(&vin->group->lock);
+
+ v4l2_async_nf_init(&vin->group->notifier, &vin->v4l2_dev);
+
+ /*
+ * Some subdevices may overlap but the parser function can handle it and
+ * each subdevice will only be registered once with the group notifier.
+ */
+ for (i = 0; i < RCAR_VIN_NUM; i++) {
+ if (!(vin_mask & BIT(i)))
+ continue;
+
+ for (id = 0; id < max_id; id++) {
+ if (vin->group->remotes[id].asc)
+ continue;
+
+ ret = rvin_group_parse_of(vin->group->vin[i], port, id);
+ if (ret)
+ return ret;
+ }
+ }
+
+ if (list_empty(&vin->group->notifier.waiting_list))
+ return 0;
+
+ vin->group->notifier.ops = &rvin_group_notify_ops;
+ ret = v4l2_async_nf_register(&vin->group->notifier);
+ if (ret < 0) {
+ vin_err(vin, "Notifier registration failed\n");
+ v4l2_async_nf_cleanup(&vin->group->notifier);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Controls
+ */
+
+static int rvin_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct rvin_dev *vin =
+ container_of(ctrl->handler, struct rvin_dev, ctrl_handler);
+
+ switch (ctrl->id) {
+ case V4L2_CID_ALPHA_COMPONENT:
+ rvin_set_alpha(vin, ctrl->val);
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops rvin_ctrl_ops = {
+ .s_ctrl = rvin_s_ctrl,
+};
+
+static void rvin_free_controls(struct rvin_dev *vin)
+{
+ v4l2_ctrl_handler_free(&vin->ctrl_handler);
+ vin->vdev.ctrl_handler = NULL;
+}
+
+static int rvin_create_controls(struct rvin_dev *vin, struct v4l2_subdev *subdev)
+{
+ int ret;
+
+ ret = v4l2_ctrl_handler_init(&vin->ctrl_handler, 16);
+ if (ret < 0)
+ return ret;
+
+ /* The VIN directly deals with alpha component. */
+ v4l2_ctrl_new_std(&vin->ctrl_handler, &rvin_ctrl_ops,
+ V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 255);
+
+ if (vin->ctrl_handler.error) {
+ ret = vin->ctrl_handler.error;
+ rvin_free_controls(vin);
+ return ret;
+ }
+
+ /* For the non-MC mode add controls from the subdevice. */
+ if (subdev) {
+ ret = v4l2_ctrl_add_handler(&vin->ctrl_handler,
+ subdev->ctrl_handler, NULL, true);
+ if (ret < 0) {
+ rvin_free_controls(vin);
+ return ret;
+ }
+ }
+
+ vin->vdev.ctrl_handler = &vin->ctrl_handler;
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Async notifier
+ */
+
+static int rvin_find_pad(struct v4l2_subdev *sd, int direction)
+{
+ unsigned int pad;
+
+ if (sd->entity.num_pads <= 1)
+ return 0;
+
+ for (pad = 0; pad < sd->entity.num_pads; pad++)
+ if (sd->entity.pads[pad].flags & direction)
+ return pad;
+
+ return -EINVAL;
+}
+
+/* -----------------------------------------------------------------------------
+ * Parallel async notifier
+ */
+
+/* The vin lock should be held when calling the subdevice attach and detach */
+static int rvin_parallel_subdevice_attach(struct rvin_dev *vin,
+ struct v4l2_subdev *subdev)
+{
+ struct v4l2_subdev_mbus_code_enum code = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ /* Find source and sink pad of remote subdevice */
+ ret = rvin_find_pad(subdev, MEDIA_PAD_FL_SOURCE);
+ if (ret < 0)
+ return ret;
+ vin->parallel.source_pad = ret;
+
+ ret = rvin_find_pad(subdev, MEDIA_PAD_FL_SINK);
+ vin->parallel.sink_pad = ret < 0 ? 0 : ret;
+
+ if (vin->info->use_mc) {
+ vin->parallel.subdev = subdev;
+ return 0;
+ }
+
+ /* Find a media bus format supported by both the VIN and the subdevice. */
+ vin->mbus_code = 0;
+ code.index = 0;
+ code.pad = vin->parallel.source_pad;
+ while (!vin->mbus_code &&
+ !v4l2_subdev_call(subdev, pad, enum_mbus_code, NULL, &code)) {
+ code.index++;
+ switch (code.code) {
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_UYVY10_2X10:
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ vin->mbus_code = code.code;
+ vin_dbg(vin, "Found media bus format for %s: %d\n",
+ subdev->name, vin->mbus_code);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (!vin->mbus_code) {
+ vin_err(vin, "Unsupported media bus format for %s\n",
+ subdev->name);
+ return -EINVAL;
+ }
+
+ /* Read tvnorms */
+ ret = v4l2_subdev_call(subdev, video, g_tvnorms, &vin->vdev.tvnorms);
+ if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ return ret;
+
+ /* Read standard */
+ vin->std = V4L2_STD_UNKNOWN;
+ ret = v4l2_subdev_call(subdev, video, g_std, &vin->std);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+
+ /* Add the controls */
+ ret = rvin_create_controls(vin, subdev);
+ if (ret < 0)
+ return ret;
+
+ vin->parallel.subdev = subdev;
+
+ return 0;
+}
+
+static void rvin_parallel_subdevice_detach(struct rvin_dev *vin)
+{
+ rvin_v4l2_unregister(vin);
+ vin->parallel.subdev = NULL;
+
+ if (!vin->info->use_mc)
+ rvin_free_controls(vin);
+}
+
+static int rvin_parallel_notify_complete(struct v4l2_async_notifier *notifier)
+{
+ struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
+ struct media_entity *source;
+ struct media_entity *sink;
+ int ret;
+
+ ret = v4l2_device_register_subdev_nodes(&vin->v4l2_dev);
+ if (ret < 0) {
+ vin_err(vin, "Failed to register subdev nodes\n");
+ return ret;
+ }
+
+ if (!video_is_registered(&vin->vdev)) {
+ ret = rvin_v4l2_register(vin);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (!vin->info->use_mc)
+ return 0;
+
+ /* If we're running with media-controller, link the subdevs. */
+ source = &vin->parallel.subdev->entity;
+ sink = &vin->vdev.entity;
+
+ ret = media_create_pad_link(source, vin->parallel.source_pad,
+ sink, vin->parallel.sink_pad, 0);
+ if (ret)
+ vin_err(vin, "Error adding link from %s to %s: %d\n",
+ source->name, sink->name, ret);
+
+ return ret;
+}
+
+static void rvin_parallel_notify_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_connection *asc)
+{
+ struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
+
+ vin_dbg(vin, "unbind parallel subdev %s\n", subdev->name);
+
+ mutex_lock(&vin->lock);
+ rvin_parallel_subdevice_detach(vin);
+ mutex_unlock(&vin->lock);
+}
+
+static int rvin_parallel_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_connection *asc)
+{
+ struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
+ int ret;
+
+ mutex_lock(&vin->lock);
+ ret = rvin_parallel_subdevice_attach(vin, subdev);
+ mutex_unlock(&vin->lock);
+ if (ret)
+ return ret;
+
+ v4l2_set_subdev_hostdata(subdev, vin);
+
+ vin_dbg(vin, "bound subdev %s source pad: %u sink pad: %u\n",
+ subdev->name, vin->parallel.source_pad,
+ vin->parallel.sink_pad);
+
+ return 0;
+}
+
+static const struct v4l2_async_notifier_operations rvin_parallel_notify_ops = {
+ .bound = rvin_parallel_notify_bound,
+ .unbind = rvin_parallel_notify_unbind,
+ .complete = rvin_parallel_notify_complete,
+};
+
+static int rvin_parallel_parse_of(struct rvin_dev *vin)
+{
+ struct fwnode_handle *ep, *fwnode;
+ struct v4l2_fwnode_endpoint vep = {
+ .bus_type = V4L2_MBUS_UNKNOWN,
+ };
+ struct v4l2_async_connection *asc;
+ int ret;
+
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(vin->dev), 0, 0, 0);
+ if (!ep)
+ return 0;
+
+ fwnode = fwnode_graph_get_remote_endpoint(ep);
+ ret = v4l2_fwnode_endpoint_parse(ep, &vep);
+ fwnode_handle_put(ep);
+ if (ret) {
+ vin_err(vin, "Failed to parse %pOF\n", to_of_node(fwnode));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (vep.bus_type) {
+ case V4L2_MBUS_PARALLEL:
+ case V4L2_MBUS_BT656:
+ vin_dbg(vin, "Found %s media bus\n",
+ vep.bus_type == V4L2_MBUS_PARALLEL ?
+ "PARALLEL" : "BT656");
+ vin->parallel.mbus_type = vep.bus_type;
+ vin->parallel.bus = vep.bus.parallel;
+ break;
+ default:
+ vin_err(vin, "Unknown media bus type\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ asc = v4l2_async_nf_add_fwnode(&vin->notifier, fwnode,
+ struct v4l2_async_connection);
+ if (IS_ERR(asc)) {
+ ret = PTR_ERR(asc);
+ goto out;
+ }
+
+ vin->parallel.asc = asc;
+
+ vin_dbg(vin, "Add parallel OF device %pOF\n", to_of_node(fwnode));
+out:
+ fwnode_handle_put(fwnode);
+
+ return ret;
+}
+
+static void rvin_parallel_cleanup(struct rvin_dev *vin)
+{
+ v4l2_async_nf_unregister(&vin->notifier);
+ v4l2_async_nf_cleanup(&vin->notifier);
+}
+
+static int rvin_parallel_init(struct rvin_dev *vin)
+{
+ int ret;
+
+ v4l2_async_nf_init(&vin->notifier, &vin->v4l2_dev);
+
+ ret = rvin_parallel_parse_of(vin);
+ if (ret)
+ return ret;
+
+ if (!vin->parallel.asc)
+ return -ENODEV;
+
+ vin_dbg(vin, "Found parallel subdevice %pOF\n",
+ to_of_node(vin->parallel.asc->match.fwnode));
+
+ vin->notifier.ops = &rvin_parallel_notify_ops;
+ ret = v4l2_async_nf_register(&vin->notifier);
+ if (ret < 0) {
+ vin_err(vin, "Notifier registration failed\n");
+ v4l2_async_nf_cleanup(&vin->notifier);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * CSI-2
+ */
+
+/*
+ * Link setup for the links between a VIN and a CSI-2 receiver is a bit
+ * complex. The reason for this is that the register controlling routing
+ * is not present in each VIN instance. There are special VINs which
+ * control routing for themselves and other VINs. There are not many
+ * different possible link combinations that can be enabled at the same
+ * time, therefore all already enabled links which are controlled by a
+ * master VIN need to be taken into account when making the decision
+ * if a new link can be enabled or not.
+ *
+ * 1. Find out which VIN the link the user tries to enable is connected to.
+ * 2. Look up which master VIN controls the links for this VIN.
+ * 3. Verify that every VIN controlled by that master is either unconnected
+ * or already connected to the same CSI-2 receiver as the new link.
+ * 4. Look up the CHSEL value for the master VIN and CSI-2 receiver pair
+ * in the per-SoC route table.
+ * 5. If a CHSEL value is found, program it into the master VIN and report
+ * that the link can be enabled.
+ *
+ * Please note that no link can be enabled while any stream in the group
+ * is active, as changing the CHSEL register fields can disrupt running
+ * streams.
+ */
+static int rvin_csi2_link_notify(struct media_link *link, u32 flags,
+ unsigned int notification)
+{
+ struct rvin_group *group = container_of(link->graph_obj.mdev,
+ struct rvin_group, mdev);
+ struct media_entity *entity;
+ struct video_device *vdev;
+ struct rvin_dev *vin;
+ unsigned int i;
+ int csi_id, ret;
+
+ ret = v4l2_pipeline_link_notify(link, flags, notification);
+ if (ret)
+ return ret;
+
+ /* Only care about link enablement for VIN nodes. */
+ if (!(flags & MEDIA_LNK_FL_ENABLED) ||
+ !is_media_entity_v4l2_video_device(link->sink->entity))
+ return 0;
+
+ /*
+ * Don't allow link changes if any stream in the graph is active as
+ * modifying the CHSEL register fields can disrupt running streams.
+ */
+ media_device_for_each_entity(entity, &group->mdev)
+ if (media_entity_is_streaming(entity))
+ return -EBUSY;
+
+ /* Find the master VIN that controls the routes. */
+ vdev = media_entity_to_video_device(link->sink->entity);
+ vin = container_of(vdev, struct rvin_dev, vdev);
+
+ mutex_lock(&group->lock);
+
+ csi_id = rvin_group_entity_to_remote_id(group, link->source->entity);
+ if (csi_id == -ENODEV) {
+ struct v4l2_subdev *sd;
+
+ /*
+ * Make sure the source entity subdevice is registered as
+ * a parallel input of one of the enabled VINs if it is not
+ * one of the CSI-2 subdevices.
+ *
+ * No hardware configuration is required for parallel inputs,
+ * so we can return here.
+ */
+ sd = media_entity_to_v4l2_subdev(link->source->entity);
+ for (i = 0; i < RCAR_VIN_NUM; i++) {
+ if (group->vin[i] &&
+ group->vin[i]->parallel.subdev == sd) {
+ group->vin[i]->is_csi = false;
+ ret = 0;
+ goto out;
+ }
+ }
+
+ vin_err(vin, "Subdevice %s not registered to any VIN\n",
+ link->source->entity->name);
+ ret = -ENODEV;
+ } else {
+ const struct rvin_group_route *route;
+ unsigned int chsel = UINT_MAX;
+ unsigned int master_id;
+
+ master_id = rvin_group_id_to_master(vin->id);
+
+ if (WARN_ON(!group->vin[master_id])) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* Make sure group is connected to same CSI-2 */
+ for (i = master_id; i < master_id + 4; i++) {
+ struct media_pad *csi_pad;
+
+ if (!group->vin[i])
+ continue;
+
+ /* Get remote CSI-2, if any. */
+ csi_pad = media_pad_remote_pad_first(
+ &group->vin[i]->vdev.entity.pads[0]);
+ if (!csi_pad)
+ continue;
+
+ if (csi_pad->entity != link->source->entity) {
+ vin_dbg(vin, "Already attached to %s\n",
+ csi_pad->entity->name);
+ ret = -EBUSY;
+ goto out;
+ }
+ }
+
+ for (route = vin->info->routes; route->chsel; route++) {
+ if (route->master == master_id && route->csi == csi_id) {
+ chsel = route->chsel;
+ break;
+ }
+ }
+
+ if (chsel == UINT_MAX) {
+ vin_err(vin, "No CHSEL value found\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = rvin_set_channel_routing(group->vin[master_id], chsel);
+ if (ret)
+ goto out;
+
+ vin->is_csi = true;
+ }
+out:
+ mutex_unlock(&group->lock);
+
+ return ret;
+}
+
+static const struct media_device_ops rvin_csi2_media_ops = {
+ .link_notify = rvin_csi2_link_notify,
+};
+
+static int rvin_csi2_create_link(struct rvin_group *group, unsigned int id,
+ const struct rvin_group_route *route)
+{
+ struct media_entity *source = &group->remotes[route->csi].subdev->entity;
+ struct media_entity *sink = &group->vin[id]->vdev.entity;
+ struct media_pad *sink_pad = &sink->pads[0];
+ unsigned int channel;
+ int ret;
+
+ for (channel = 0; channel < 4; channel++) {
+ unsigned int source_idx = rvin_group_csi_channel_to_pad(channel);
+ struct media_pad *source_pad = &source->pads[source_idx];
+
+ /* Skip if link already exists. */
+ if (media_entity_find_link(source_pad, sink_pad))
+ continue;
+
+ ret = media_create_pad_link(source, source_idx, sink, 0, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rvin_csi2_setup_links(struct rvin_dev *vin)
+{
+ const struct rvin_group_route *route;
+ unsigned int id;
+ int ret = -EINVAL;
+
+ /* Create all media device links between VINs and CSI-2 receivers. */
+ mutex_lock(&vin->group->lock);
+ for (route = vin->info->routes; route->chsel; route++) {
+ /* Check that the VIN's master is part of the group. */
+ if (!vin->group->vin[route->master])
+ continue;
+
+ /* Check that CSI-2 is part of the group. */
+ if (!vin->group->remotes[route->csi].subdev)
+ continue;
+
+ for (id = route->master; id < route->master + 4; id++) {
+ /* Check that VIN is part of the group. */
+ if (!vin->group->vin[id])
+ continue;
+
+ ret = rvin_csi2_create_link(vin->group, id, route);
+ if (ret)
+ goto out;
+ }
+ }
+out:
+ mutex_unlock(&vin->group->lock);
+
+ return ret;
+}
+
+static void rvin_csi2_cleanup(struct rvin_dev *vin)
+{
+ rvin_parallel_cleanup(vin);
+ rvin_group_notifier_cleanup(vin);
+ rvin_group_put(vin);
+ rvin_free_controls(vin);
+}
+
+static int rvin_csi2_init(struct rvin_dev *vin)
+{
+ int ret;
+
+ vin->pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vin->vdev.entity, 1, &vin->pad);
+ if (ret)
+ return ret;
+
+ ret = rvin_create_controls(vin, NULL);
+ if (ret < 0)
+ return ret;
+
+ ret = rvin_group_get(vin, rvin_csi2_setup_links, &rvin_csi2_media_ops);
+ if (ret)
+ goto err_controls;
+
+ /* It's OK to not have a parallel subdevice. */
+ ret = rvin_parallel_init(vin);
+ if (ret && ret != -ENODEV)
+ goto err_group;
+
+ ret = rvin_group_notifier_init(vin, 1, RVIN_CSI_MAX);
+ if (ret)
+ goto err_parallel;
+
+ return 0;
+err_parallel:
+ rvin_parallel_cleanup(vin);
+err_group:
+ rvin_group_put(vin);
+err_controls:
+ rvin_free_controls(vin);
+
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * ISP
+ */
+
+static int rvin_isp_setup_links(struct rvin_dev *vin)
+{
+ unsigned int i;
+ int ret = -EINVAL;
+
+ /* Create all media device links between VINs and ISPs. */
+ mutex_lock(&vin->group->lock);
+ for (i = 0; i < RCAR_VIN_NUM; i++) {
+ struct media_pad *source_pad, *sink_pad;
+ struct media_entity *source, *sink;
+ unsigned int source_slot = i / 8;
+ unsigned int source_idx = i % 8 + 1;
+
+ if (!vin->group->vin[i])
+ continue;
+
+ /* Check that ISP is part of the group. */
+ if (!vin->group->remotes[source_slot].subdev)
+ continue;
+
+ source = &vin->group->remotes[source_slot].subdev->entity;
+ source_pad = &source->pads[source_idx];
+
+ sink = &vin->group->vin[i]->vdev.entity;
+ sink_pad = &sink->pads[0];
+
+ /* Skip if link already exists. */
+ if (media_entity_find_link(source_pad, sink_pad))
+ continue;
+
+ ret = media_create_pad_link(source, source_idx, sink, 0,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret) {
+ vin_err(vin, "Error adding link from %s to %s\n",
+ source->name, sink->name);
+ break;
+ }
+ }
+ mutex_unlock(&vin->group->lock);
+
+ return ret;
+}
+
+static void rvin_isp_cleanup(struct rvin_dev *vin)
+{
+ rvin_group_notifier_cleanup(vin);
+ rvin_group_put(vin);
+ rvin_free_controls(vin);
+}
+
+static int rvin_isp_init(struct rvin_dev *vin)
+{
+ int ret;
+
+ vin->pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vin->vdev.entity, 1, &vin->pad);
+ if (ret)
+ return ret;
+
+ ret = rvin_create_controls(vin, NULL);
+ if (ret < 0)
+ return ret;
+
+ ret = rvin_group_get(vin, rvin_isp_setup_links, NULL);
+ if (ret)
+ goto err_controls;
+
+ ret = rvin_group_notifier_init(vin, 2, RVIN_ISP_MAX);
+ if (ret)
+ goto err_group;
+
+ return 0;
+err_group:
+ rvin_group_put(vin);
+err_controls:
+ rvin_free_controls(vin);
+
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * Suspend / Resume
+ */
+
+static int __maybe_unused rvin_suspend(struct device *dev)
+{
+ struct rvin_dev *vin = dev_get_drvdata(dev);
+
+ if (vin->state != RUNNING)
+ return 0;
+
+ rvin_stop_streaming(vin);
+
+ vin->state = SUSPENDED;
+
+ return 0;
+}
+
+static int __maybe_unused rvin_resume(struct device *dev)
+{
+ struct rvin_dev *vin = dev_get_drvdata(dev);
+
+ if (vin->state != SUSPENDED)
+ return 0;
+
+ /*
+ * Restore group master CHSEL setting.
+ *
+ * This needs to be done by every resuming VIN, not only the master,
+ * as we don't know if, and in which order, the master VINs will
+ * be resumed.
+ */
+ if (vin->info->use_mc) {
+ unsigned int master_id = rvin_group_id_to_master(vin->id);
+ struct rvin_dev *master = vin->group->vin[master_id];
+ int ret;
+
+ if (WARN_ON(!master))
+ return -ENODEV;
+
+ ret = rvin_set_channel_routing(master, master->chsel);
+ if (ret)
+ return ret;
+ }
+
+ return rvin_start_streaming(vin);
+}
+
+/* -----------------------------------------------------------------------------
+ * Platform Device Driver
+ */
+
+static const struct rvin_info rcar_info_h1 = {
+ .model = RCAR_H1,
+ .use_mc = false,
+ .max_width = 2048,
+ .max_height = 2048,
+ .scaler = rvin_scaler_gen2,
+};
+
+static const struct rvin_info rcar_info_m1 = {
+ .model = RCAR_M1,
+ .use_mc = false,
+ .max_width = 2048,
+ .max_height = 2048,
+ .scaler = rvin_scaler_gen2,
+};
+
+static const struct rvin_info rcar_info_gen2 = {
+ .model = RCAR_GEN2,
+ .use_mc = false,
+ .max_width = 2048,
+ .max_height = 2048,
+ .scaler = rvin_scaler_gen2,
+};
+
+static const struct rvin_group_route rcar_info_r8a774e1_routes[] = {
+ { .master = 0, .csi = RVIN_CSI20, .chsel = 0x04 },
+ { .master = 0, .csi = RVIN_CSI40, .chsel = 0x03 },
+ { .master = 4, .csi = RVIN_CSI20, .chsel = 0x04 },
+ { /* Sentinel */ }
+};
+
+static const struct rvin_info rcar_info_r8a774e1 = {
+ .model = RCAR_GEN3,
+ .use_mc = true,
+ .max_width = 4096,
+ .max_height = 4096,
+ .routes = rcar_info_r8a774e1_routes,
+};
+
+static const struct rvin_group_route rcar_info_r8a7795_routes[] = {
+ { .master = 0, .csi = RVIN_CSI20, .chsel = 0x04 },
+ { .master = 0, .csi = RVIN_CSI40, .chsel = 0x03 },
+ { .master = 4, .csi = RVIN_CSI20, .chsel = 0x04 },
+ { .master = 4, .csi = RVIN_CSI41, .chsel = 0x03 },
+ { /* Sentinel */ }
+};
+
+static const struct rvin_info rcar_info_r8a7795 = {
+ .model = RCAR_GEN3,
+ .use_mc = true,
+ .nv12 = true,
+ .max_width = 4096,
+ .max_height = 4096,
+ .routes = rcar_info_r8a7795_routes,
+ .scaler = rvin_scaler_gen3,
+};
+
+static const struct rvin_group_route rcar_info_r8a7796_routes[] = {
+ { .master = 0, .csi = RVIN_CSI20, .chsel = 0x04 },
+ { .master = 0, .csi = RVIN_CSI40, .chsel = 0x03 },
+ { .master = 4, .csi = RVIN_CSI20, .chsel = 0x04 },
+ { .master = 4, .csi = RVIN_CSI40, .chsel = 0x03 },
+ { /* Sentinel */ }
+};
+
+static const struct rvin_info rcar_info_r8a7796 = {
+ .model = RCAR_GEN3,
+ .use_mc = true,
+ .nv12 = true,
+ .max_width = 4096,
+ .max_height = 4096,
+ .routes = rcar_info_r8a7796_routes,
+ .scaler = rvin_scaler_gen3,
+};
+
+static const struct rvin_group_route rcar_info_r8a77965_routes[] = {
+ { .master = 0, .csi = RVIN_CSI20, .chsel = 0x04 },
+ { .master = 0, .csi = RVIN_CSI40, .chsel = 0x03 },
+ { .master = 4, .csi = RVIN_CSI20, .chsel = 0x04 },
+ { .master = 4, .csi = RVIN_CSI40, .chsel = 0x03 },
+ { /* Sentinel */ }
+};
+
+static const struct rvin_info rcar_info_r8a77965 = {
+ .model = RCAR_GEN3,
+ .use_mc = true,
+ .nv12 = true,
+ .max_width = 4096,
+ .max_height = 4096,
+ .routes = rcar_info_r8a77965_routes,
+ .scaler = rvin_scaler_gen3,
+};
+
+static const struct rvin_group_route rcar_info_r8a77970_routes[] = {
+ { .master = 0, .csi = RVIN_CSI40, .chsel = 0x03 },
+ { /* Sentinel */ }
+};
+
+static const struct rvin_info rcar_info_r8a77970 = {
+ .model = RCAR_GEN3,
+ .use_mc = true,
+ .max_width = 4096,
+ .max_height = 4096,
+ .routes = rcar_info_r8a77970_routes,
+};
+
+static const struct rvin_group_route rcar_info_r8a77980_routes[] = {
+ { .master = 0, .csi = RVIN_CSI40, .chsel = 0x03 },
+ { .master = 4, .csi = RVIN_CSI41, .chsel = 0x03 },
+ { /* Sentinel */ }
+};
+
+static const struct rvin_info rcar_info_r8a77980 = {
+ .model = RCAR_GEN3,
+ .use_mc = true,
+ .nv12 = true,
+ .max_width = 4096,
+ .max_height = 4096,
+ .routes = rcar_info_r8a77980_routes,
+};
+
+static const struct rvin_group_route rcar_info_r8a77990_routes[] = {
+ { .master = 4, .csi = RVIN_CSI40, .chsel = 0x03 },
+ { /* Sentinel */ }
+};
+
+static const struct rvin_info rcar_info_r8a77990 = {
+ .model = RCAR_GEN3,
+ .use_mc = true,
+ .nv12 = true,
+ .max_width = 4096,
+ .max_height = 4096,
+ .routes = rcar_info_r8a77990_routes,
+ .scaler = rvin_scaler_gen3,
+};
+
+static const struct rvin_group_route rcar_info_r8a77995_routes[] = {
+ { /* Sentinel */ }
+};
+
+static const struct rvin_info rcar_info_r8a77995 = {
+ .model = RCAR_GEN3,
+ .use_mc = true,
+ .nv12 = true,
+ .max_width = 4096,
+ .max_height = 4096,
+ .routes = rcar_info_r8a77995_routes,
+ .scaler = rvin_scaler_gen3,
+};
+
+static const struct rvin_info rcar_info_r8a779a0 = {
+ .model = RCAR_GEN3,
+ .use_mc = true,
+ .use_isp = true,
+ .nv12 = true,
+ .max_width = 4096,
+ .max_height = 4096,
+};
+
+static const struct rvin_info rcar_info_r8a779g0 = {
+ .model = RCAR_GEN3,
+ .use_mc = true,
+ .use_isp = true,
+ .nv12 = true,
+ .max_width = 4096,
+ .max_height = 4096,
+};
+
+static const struct of_device_id rvin_of_id_table[] = {
+ {
+ .compatible = "renesas,vin-r8a774a1",
+ .data = &rcar_info_r8a7796,
+ },
+ {
+ .compatible = "renesas,vin-r8a774b1",
+ .data = &rcar_info_r8a77965,
+ },
+ {
+ .compatible = "renesas,vin-r8a774c0",
+ .data = &rcar_info_r8a77990,
+ },
+ {
+ .compatible = "renesas,vin-r8a774e1",
+ .data = &rcar_info_r8a774e1,
+ },
+ {
+ .compatible = "renesas,vin-r8a7778",
+ .data = &rcar_info_m1,
+ },
+ {
+ .compatible = "renesas,vin-r8a7779",
+ .data = &rcar_info_h1,
+ },
+ {
+ .compatible = "renesas,rcar-gen2-vin",
+ .data = &rcar_info_gen2,
+ },
+ {
+ .compatible = "renesas,vin-r8a7795",
+ .data = &rcar_info_r8a7795,
+ },
+ {
+ .compatible = "renesas,vin-r8a7796",
+ .data = &rcar_info_r8a7796,
+ },
+ {
+ .compatible = "renesas,vin-r8a77961",
+ .data = &rcar_info_r8a7796,
+ },
+ {
+ .compatible = "renesas,vin-r8a77965",
+ .data = &rcar_info_r8a77965,
+ },
+ {
+ .compatible = "renesas,vin-r8a77970",
+ .data = &rcar_info_r8a77970,
+ },
+ {
+ .compatible = "renesas,vin-r8a77980",
+ .data = &rcar_info_r8a77980,
+ },
+ {
+ .compatible = "renesas,vin-r8a77990",
+ .data = &rcar_info_r8a77990,
+ },
+ {
+ .compatible = "renesas,vin-r8a77995",
+ .data = &rcar_info_r8a77995,
+ },
+ {
+ .compatible = "renesas,vin-r8a779a0",
+ .data = &rcar_info_r8a779a0,
+ },
+ {
+ .compatible = "renesas,vin-r8a779g0",
+ .data = &rcar_info_r8a779g0,
+ },
+ { /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, rvin_of_id_table);
+
+static int rcar_vin_probe(struct platform_device *pdev)
+{
+ struct rvin_dev *vin;
+ int irq, ret;
+
+ vin = devm_kzalloc(&pdev->dev, sizeof(*vin), GFP_KERNEL);
+ if (!vin)
+ return -ENOMEM;
+
+ vin->dev = &pdev->dev;
+ vin->info = of_device_get_match_data(&pdev->dev);
+ vin->alpha = 0xff;
+
+ vin->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(vin->base))
+ return PTR_ERR(vin->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = rvin_dma_register(vin, irq);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, vin);
+
+ if (vin->info->use_isp) {
+ ret = rvin_isp_init(vin);
+ } else if (vin->info->use_mc) {
+ ret = rvin_csi2_init(vin);
+
+ if (vin->info->scaler &&
+ rvin_group_id_to_master(vin->id) == vin->id)
+ vin->scaler = vin->info->scaler;
+ } else {
+ ret = rvin_parallel_init(vin);
+
+ if (vin->info->scaler)
+ vin->scaler = vin->info->scaler;
+ }
+
+ if (ret) {
+ rvin_dma_unregister(vin);
+ return ret;
+ }
+
+ pm_suspend_ignore_children(&pdev->dev, true);
+ pm_runtime_enable(&pdev->dev);
+
+ return 0;
+}
+
+static void rcar_vin_remove(struct platform_device *pdev)
+{
+ struct rvin_dev *vin = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+
+ rvin_v4l2_unregister(vin);
+
+ if (vin->info->use_isp)
+ rvin_isp_cleanup(vin);
+ else if (vin->info->use_mc)
+ rvin_csi2_cleanup(vin);
+ else
+ rvin_parallel_cleanup(vin);
+
+ rvin_dma_unregister(vin);
+}
+
+static SIMPLE_DEV_PM_OPS(rvin_pm_ops, rvin_suspend, rvin_resume);
+
+static struct platform_driver rcar_vin_driver = {
+ .driver = {
+ .name = "rcar-vin",
+ .suppress_bind_attrs = true,
+ .pm = &rvin_pm_ops,
+ .of_match_table = rvin_of_id_table,
+ },
+ .probe = rcar_vin_probe,
+ .remove_new = rcar_vin_remove,
+};
+
+module_platform_driver(rcar_vin_driver);
+
+MODULE_AUTHOR("Niklas Söderlund <niklas.soderlund@ragnatech.se>");
+MODULE_DESCRIPTION("Renesas R-Car VIN camera host driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-csi2.c b/drivers/media/platform/renesas/rcar-vin/rcar-csi2.c
new file mode 100644
index 0000000000..f6326df0b0
--- /dev/null
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-csi2.c
@@ -0,0 +1,1959 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Renesas R-Car MIPI CSI-2 Receiver
+ *
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/sys_soc.h>
+
+#include <media/mipi-csi2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-subdev.h>
+
+struct rcar_csi2;
+
+/* Register offsets and bits */
+
+/* Control Timing Select */
+#define TREF_REG 0x00
+#define TREF_TREF BIT(0)
+
+/* Software Reset */
+#define SRST_REG 0x04
+#define SRST_SRST BIT(0)
+
+/* PHY Operation Control */
+#define PHYCNT_REG 0x08
+#define PHYCNT_SHUTDOWNZ BIT(17)
+#define PHYCNT_RSTZ BIT(16)
+#define PHYCNT_ENABLECLK BIT(4)
+#define PHYCNT_ENABLE_3 BIT(3)
+#define PHYCNT_ENABLE_2 BIT(2)
+#define PHYCNT_ENABLE_1 BIT(1)
+#define PHYCNT_ENABLE_0 BIT(0)
+
+/* Checksum Control */
+#define CHKSUM_REG 0x0c
+#define CHKSUM_ECC_EN BIT(1)
+#define CHKSUM_CRC_EN BIT(0)
+
+/*
+ * Channel Data Type Select
+ * VCDT[0-15]: Channel 0 VCDT[16-31]: Channel 1
+ * VCDT2[0-15]: Channel 2 VCDT2[16-31]: Channel 3
+ */
+#define VCDT_REG 0x10
+#define VCDT2_REG 0x14
+#define VCDT_VCDTN_EN BIT(15)
+#define VCDT_SEL_VC(n) (((n) & 0x3) << 8)
+#define VCDT_SEL_DTN_ON BIT(6)
+#define VCDT_SEL_DT(n) (((n) & 0x3f) << 0)
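+
+/*
+ * Example (illustrative only): routing virtual channel 0 carrying YUV422
+ * 8-bit (MIPI data type 0x1e) to channel 0 programs the low half of VCDT
+ * with VCDT_VCDTN_EN | VCDT_SEL_VC(0) | VCDT_SEL_DTN_ON | VCDT_SEL_DT(0x1e).
+ */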
+
+/* Frame Data Type Select */
+#define FRDT_REG 0x18
+
+/* Field Detection Control */
+#define FLD_REG 0x1c
+#define FLD_FLD_NUM(n) (((n) & 0xff) << 16)
+#define FLD_DET_SEL(n) (((n) & 0x3) << 4)
+#define FLD_FLD_EN4 BIT(3)
+#define FLD_FLD_EN3 BIT(2)
+#define FLD_FLD_EN2 BIT(1)
+#define FLD_FLD_EN BIT(0)
+
+/* Automatic Standby Control */
+#define ASTBY_REG 0x20
+
+/* Long Data Type Setting 0 */
+#define LNGDT0_REG 0x28
+
+/* Long Data Type Setting 1 */
+#define LNGDT1_REG 0x2c
+
+/* Interrupt Enable */
+#define INTEN_REG 0x30
+#define INTEN_INT_AFIFO_OF BIT(27)
+#define INTEN_INT_ERRSOTHS BIT(4)
+#define INTEN_INT_ERRSOTSYNCHS BIT(3)
+
+/* Interrupt Source Mask */
+#define INTCLOSE_REG 0x34
+
+/* Interrupt Status Monitor */
+#define INTSTATE_REG 0x38
+#define INTSTATE_INT_ULPS_START BIT(7)
+#define INTSTATE_INT_ULPS_END BIT(6)
+
+/* Interrupt Error Status Monitor */
+#define INTERRSTATE_REG 0x3c
+
+/* Short Packet Data */
+#define SHPDAT_REG 0x40
+
+/* Short Packet Count */
+#define SHPCNT_REG 0x44
+
+/* LINK Operation Control */
+#define LINKCNT_REG 0x48
+#define LINKCNT_MONITOR_EN BIT(31)
+#define LINKCNT_REG_MONI_PACT_EN BIT(25)
+#define LINKCNT_ICLK_NONSTOP BIT(24)
+
+/* Lane Swap */
+#define LSWAP_REG 0x4c
+#define LSWAP_L3SEL(n) (((n) & 0x3) << 6)
+#define LSWAP_L2SEL(n) (((n) & 0x3) << 4)
+#define LSWAP_L1SEL(n) (((n) & 0x3) << 2)
+#define LSWAP_L0SEL(n) (((n) & 0x3) << 0)
+
+/* PHY Test Interface Write Register */
+#define PHTW_REG 0x50
+#define PHTW_DWEN BIT(24)
+#define PHTW_TESTDIN_DATA(n) (((n) & 0xff) << 16)
+#define PHTW_CWEN BIT(8)
+#define PHTW_TESTDIN_CODE(n) ((n) & 0xff)
+
+#define PHYFRX_REG 0x64
+#define PHYFRX_FORCERX_MODE_3 BIT(3)
+#define PHYFRX_FORCERX_MODE_2 BIT(2)
+#define PHYFRX_FORCERX_MODE_1 BIT(1)
+#define PHYFRX_FORCERX_MODE_0 BIT(0)
+
+/* V4H BASE registers */
+#define V4H_N_LANES_REG 0x0004
+#define V4H_CSI2_RESETN_REG 0x0008
+#define V4H_PHY_MODE_REG 0x001c
+#define V4H_PHY_SHUTDOWNZ_REG 0x0040
+#define V4H_DPHY_RSTZ_REG 0x0044
+#define V4H_FLDC_REG 0x0804
+#define V4H_FLDD_REG 0x0808
+#define V4H_IDIC_REG 0x0810
+#define V4H_PHY_EN_REG 0x2000
+
+#define V4H_ST_PHYST_REG 0x2814
+#define V4H_ST_PHYST_ST_PHY_READY BIT(31)
+#define V4H_ST_PHYST_ST_STOPSTATE_3 BIT(3)
+#define V4H_ST_PHYST_ST_STOPSTATE_2 BIT(2)
+#define V4H_ST_PHYST_ST_STOPSTATE_1 BIT(1)
+#define V4H_ST_PHYST_ST_STOPSTATE_0 BIT(0)
+
+/* V4H PPI registers */
+#define V4H_PPI_STARTUP_RW_COMMON_DPHY_REG(n) (0x21800 + ((n) * 2)) /* n = 0 - 9 */
+#define V4H_PPI_STARTUP_RW_COMMON_STARTUP_1_1_REG 0x21822
+#define V4H_PPI_CALIBCTRL_RW_COMMON_BG_0_REG 0x2184c
+#define V4H_PPI_RW_LPDCOCAL_TIMEBASE_REG 0x21c02
+#define V4H_PPI_RW_LPDCOCAL_NREF_REG 0x21c04
+#define V4H_PPI_RW_LPDCOCAL_NREF_RANGE_REG 0x21c06
+#define V4H_PPI_RW_LPDCOCAL_TWAIT_CONFIG_REG 0x21c0a
+#define V4H_PPI_RW_LPDCOCAL_VT_CONFIG_REG 0x21c0c
+#define V4H_PPI_RW_LPDCOCAL_COARSE_CFG_REG 0x21c10
+#define V4H_PPI_RW_COMMON_CFG_REG 0x21c6c
+#define V4H_PPI_RW_TERMCAL_CFG_0_REG 0x21c80
+#define V4H_PPI_RW_OFFSETCAL_CFG_0_REG 0x21ca0
+
+/* V4H CORE registers */
+#define V4H_CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_REG(n) (0x22040 + ((n) * 2)) /* n = 0 - 15 */
+#define V4H_CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_REG(n) (0x22440 + ((n) * 2)) /* n = 0 - 15 */
+#define V4H_CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_REG(n) (0x22840 + ((n) * 2)) /* n = 0 - 15 */
+#define V4H_CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_REG(n) (0x22c40 + ((n) * 2)) /* n = 0 - 15 */
+#define V4H_CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_REG(n) (0x23040 + ((n) * 2)) /* n = 0 - 15 */
+#define V4H_CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_REG(n) (0x23840 + ((n) * 2)) /* n = 0 - 11 */
+#define V4H_CORE_DIG_RW_COMMON_REG(n) (0x23880 + ((n) * 2)) /* n = 0 - 15 */
+#define V4H_CORE_DIG_ANACTRL_RW_COMMON_ANACTRL_REG(n) (0x239e0 + ((n) * 2)) /* n = 0 - 3 */
+#define V4H_CORE_DIG_CLANE_1_RW_CFG_0_REG 0x2a400
+#define V4H_CORE_DIG_CLANE_1_RW_HS_TX_6_REG 0x2a60c
+
+/* V4H C-PHY */
+#define V4H_CORE_DIG_RW_TRIO0_REG(n) (0x22100 + ((n) * 2)) /* n = 0 - 3 */
+#define V4H_CORE_DIG_RW_TRIO1_REG(n) (0x22500 + ((n) * 2)) /* n = 0 - 3 */
+#define V4H_CORE_DIG_RW_TRIO2_REG(n) (0x22900 + ((n) * 2)) /* n = 0 - 3 */
+#define V4H_CORE_DIG_CLANE_0_RW_LP_0_REG 0x2a080
+#define V4H_CORE_DIG_CLANE_0_RW_HS_RX_REG(n) (0x2a100 + ((n) * 2)) /* n = 0 - 6 */
+#define V4H_CORE_DIG_CLANE_1_RW_LP_0_REG 0x2a480
+#define V4H_CORE_DIG_CLANE_1_RW_HS_RX_REG(n) (0x2a500 + ((n) * 2)) /* n = 0 - 6 */
+#define V4H_CORE_DIG_CLANE_2_RW_LP_0_REG 0x2a880
+#define V4H_CORE_DIG_CLANE_2_RW_HS_RX_REG(n) (0x2a900 + ((n) * 2)) /* n = 0 - 6 */
+
+struct rcsi2_cphy_setting {
+ u16 msps;
+ u16 rx2;
+ u16 trio0;
+ u16 trio1;
+ u16 trio2;
+ u16 lane27;
+ u16 lane29;
+};
+
+static const struct rcsi2_cphy_setting cphy_setting_table_r8a779g0[] = {
+ { .msps = 80, .rx2 = 0x38, .trio0 = 0x024a, .trio1 = 0x0134, .trio2 = 0x6a, .lane27 = 0x0000, .lane29 = 0x0a24 },
+ { .msps = 100, .rx2 = 0x38, .trio0 = 0x024a, .trio1 = 0x00f5, .trio2 = 0x55, .lane27 = 0x0000, .lane29 = 0x0a24 },
+ { .msps = 200, .rx2 = 0x38, .trio0 = 0x024a, .trio1 = 0x0077, .trio2 = 0x2b, .lane27 = 0x0000, .lane29 = 0x0a44 },
+ { .msps = 300, .rx2 = 0x38, .trio0 = 0x024a, .trio1 = 0x004d, .trio2 = 0x1d, .lane27 = 0x0000, .lane29 = 0x0a44 },
+ { .msps = 400, .rx2 = 0x38, .trio0 = 0x024a, .trio1 = 0x0038, .trio2 = 0x16, .lane27 = 0x0000, .lane29 = 0x0a64 },
+ { .msps = 500, .rx2 = 0x38, .trio0 = 0x024a, .trio1 = 0x002b, .trio2 = 0x12, .lane27 = 0x0000, .lane29 = 0x0a64 },
+ { .msps = 600, .rx2 = 0x38, .trio0 = 0x024a, .trio1 = 0x0023, .trio2 = 0x0f, .lane27 = 0x0000, .lane29 = 0x0a64 },
+ { .msps = 700, .rx2 = 0x38, .trio0 = 0x024a, .trio1 = 0x001d, .trio2 = 0x0d, .lane27 = 0x0000, .lane29 = 0x0a84 },
+ { .msps = 800, .rx2 = 0x38, .trio0 = 0x024a, .trio1 = 0x0018, .trio2 = 0x0c, .lane27 = 0x0000, .lane29 = 0x0a84 },
+ { .msps = 900, .rx2 = 0x38, .trio0 = 0x024a, .trio1 = 0x0015, .trio2 = 0x0b, .lane27 = 0x0000, .lane29 = 0x0a84 },
+ { .msps = 1000, .rx2 = 0x3e, .trio0 = 0x024a, .trio1 = 0x0012, .trio2 = 0x0a, .lane27 = 0x0400, .lane29 = 0x0a84 },
+ { .msps = 1100, .rx2 = 0x44, .trio0 = 0x024a, .trio1 = 0x000f, .trio2 = 0x09, .lane27 = 0x0800, .lane29 = 0x0a84 },
+ { .msps = 1200, .rx2 = 0x4a, .trio0 = 0x024a, .trio1 = 0x000e, .trio2 = 0x08, .lane27 = 0x0c00, .lane29 = 0x0a84 },
+ { .msps = 1300, .rx2 = 0x51, .trio0 = 0x024a, .trio1 = 0x000c, .trio2 = 0x08, .lane27 = 0x0c00, .lane29 = 0x0aa4 },
+ { .msps = 1400, .rx2 = 0x57, .trio0 = 0x024a, .trio1 = 0x000b, .trio2 = 0x07, .lane27 = 0x1000, .lane29 = 0x0aa4 },
+ { .msps = 1500, .rx2 = 0x5d, .trio0 = 0x044a, .trio1 = 0x0009, .trio2 = 0x07, .lane27 = 0x1000, .lane29 = 0x0aa4 },
+ { .msps = 1600, .rx2 = 0x63, .trio0 = 0x044a, .trio1 = 0x0008, .trio2 = 0x07, .lane27 = 0x1400, .lane29 = 0x0aa4 },
+ { .msps = 1700, .rx2 = 0x6a, .trio0 = 0x044a, .trio1 = 0x0007, .trio2 = 0x06, .lane27 = 0x1400, .lane29 = 0x0aa4 },
+ { .msps = 1800, .rx2 = 0x70, .trio0 = 0x044a, .trio1 = 0x0007, .trio2 = 0x06, .lane27 = 0x1400, .lane29 = 0x0aa4 },
+ { .msps = 1900, .rx2 = 0x76, .trio0 = 0x044a, .trio1 = 0x0006, .trio2 = 0x06, .lane27 = 0x1400, .lane29 = 0x0aa4 },
+ { .msps = 2000, .rx2 = 0x7c, .trio0 = 0x044a, .trio1 = 0x0005, .trio2 = 0x06, .lane27 = 0x1800, .lane29 = 0x0aa4 },
+ { .msps = 2100, .rx2 = 0x83, .trio0 = 0x044a, .trio1 = 0x0005, .trio2 = 0x05, .lane27 = 0x1800, .lane29 = 0x0aa4 },
+ { .msps = 2200, .rx2 = 0x89, .trio0 = 0x064a, .trio1 = 0x0004, .trio2 = 0x05, .lane27 = 0x1800, .lane29 = 0x0aa4 },
+ { .msps = 2300, .rx2 = 0x8f, .trio0 = 0x064a, .trio1 = 0x0003, .trio2 = 0x05, .lane27 = 0x1800, .lane29 = 0x0aa4 },
+ { .msps = 2400, .rx2 = 0x95, .trio0 = 0x064a, .trio1 = 0x0003, .trio2 = 0x05, .lane27 = 0x1800, .lane29 = 0x0aa4 },
+ { .msps = 2500, .rx2 = 0x9c, .trio0 = 0x064a, .trio1 = 0x0003, .trio2 = 0x05, .lane27 = 0x1c00, .lane29 = 0x0aa4 },
+ { .msps = 2600, .rx2 = 0xa2, .trio0 = 0x064a, .trio1 = 0x0002, .trio2 = 0x05, .lane27 = 0x1c00, .lane29 = 0x0ad4 },
+ { .msps = 2700, .rx2 = 0xa8, .trio0 = 0x064a, .trio1 = 0x0002, .trio2 = 0x05, .lane27 = 0x1c00, .lane29 = 0x0ad4 },
+ { .msps = 2800, .rx2 = 0xae, .trio0 = 0x064a, .trio1 = 0x0002, .trio2 = 0x04, .lane27 = 0x1c00, .lane29 = 0x0ad4 },
+ { .msps = 2900, .rx2 = 0xb5, .trio0 = 0x084a, .trio1 = 0x0001, .trio2 = 0x04, .lane27 = 0x1c00, .lane29 = 0x0ad4 },
+ { .msps = 3000, .rx2 = 0xbb, .trio0 = 0x084a, .trio1 = 0x0001, .trio2 = 0x04, .lane27 = 0x1c00, .lane29 = 0x0ad4 },
+ { .msps = 3100, .rx2 = 0xc1, .trio0 = 0x084a, .trio1 = 0x0001, .trio2 = 0x04, .lane27 = 0x1c00, .lane29 = 0x0ad4 },
+ { .msps = 3200, .rx2 = 0xc7, .trio0 = 0x084a, .trio1 = 0x0001, .trio2 = 0x04, .lane27 = 0x1c00, .lane29 = 0x0ad4 },
+ { .msps = 3300, .rx2 = 0xce, .trio0 = 0x084a, .trio1 = 0x0001, .trio2 = 0x04, .lane27 = 0x1c00, .lane29 = 0x0ad4 },
+ { .msps = 3400, .rx2 = 0xd4, .trio0 = 0x084a, .trio1 = 0x0001, .trio2 = 0x04, .lane27 = 0x1c00, .lane29 = 0x0ad4 },
+ { .msps = 3500, .rx2 = 0xda, .trio0 = 0x084a, .trio1 = 0x0001, .trio2 = 0x04, .lane27 = 0x1c00, .lane29 = 0x0ad4 },
+ { /* sentinel */ },
+};
+
+struct phtw_value {
+ u16 data;
+ u16 code;
+};
+
+struct rcsi2_mbps_reg {
+ u16 mbps;
+ u16 reg;
+};
+
+static const struct rcsi2_mbps_reg phtw_mbps_v3u[] = {
+ { .mbps = 1500, .reg = 0xcc },
+ { .mbps = 1550, .reg = 0x1d },
+ { .mbps = 1600, .reg = 0x27 },
+ { .mbps = 1650, .reg = 0x30 },
+ { .mbps = 1700, .reg = 0x39 },
+ { .mbps = 1750, .reg = 0x42 },
+ { .mbps = 1800, .reg = 0x4b },
+ { .mbps = 1850, .reg = 0x55 },
+ { .mbps = 1900, .reg = 0x5e },
+ { .mbps = 1950, .reg = 0x67 },
+ { .mbps = 2000, .reg = 0x71 },
+ { .mbps = 2050, .reg = 0x79 },
+ { .mbps = 2100, .reg = 0x83 },
+ { .mbps = 2150, .reg = 0x8c },
+ { .mbps = 2200, .reg = 0x95 },
+ { .mbps = 2250, .reg = 0x9e },
+ { .mbps = 2300, .reg = 0xa7 },
+ { .mbps = 2350, .reg = 0xb0 },
+ { .mbps = 2400, .reg = 0xba },
+ { .mbps = 2450, .reg = 0xc3 },
+ { .mbps = 2500, .reg = 0xcc },
+ { /* sentinel */ },
+};
+
+static const struct rcsi2_mbps_reg phtw_mbps_h3_v3h_m3n[] = {
+ { .mbps = 80, .reg = 0x86 },
+ { .mbps = 90, .reg = 0x86 },
+ { .mbps = 100, .reg = 0x87 },
+ { .mbps = 110, .reg = 0x87 },
+ { .mbps = 120, .reg = 0x88 },
+ { .mbps = 130, .reg = 0x88 },
+ { .mbps = 140, .reg = 0x89 },
+ { .mbps = 150, .reg = 0x89 },
+ { .mbps = 160, .reg = 0x8a },
+ { .mbps = 170, .reg = 0x8a },
+ { .mbps = 180, .reg = 0x8b },
+ { .mbps = 190, .reg = 0x8b },
+ { .mbps = 205, .reg = 0x8c },
+ { .mbps = 220, .reg = 0x8d },
+ { .mbps = 235, .reg = 0x8e },
+ { .mbps = 250, .reg = 0x8e },
+ { /* sentinel */ },
+};
+
+static const struct rcsi2_mbps_reg phtw_mbps_v3m_e3[] = {
+ { .mbps = 80, .reg = 0x00 },
+ { .mbps = 90, .reg = 0x20 },
+ { .mbps = 100, .reg = 0x40 },
+ { .mbps = 110, .reg = 0x02 },
+ { .mbps = 130, .reg = 0x22 },
+ { .mbps = 140, .reg = 0x42 },
+ { .mbps = 150, .reg = 0x04 },
+ { .mbps = 170, .reg = 0x24 },
+ { .mbps = 180, .reg = 0x44 },
+ { .mbps = 200, .reg = 0x06 },
+ { .mbps = 220, .reg = 0x26 },
+ { .mbps = 240, .reg = 0x46 },
+ { .mbps = 250, .reg = 0x08 },
+ { .mbps = 270, .reg = 0x28 },
+ { .mbps = 300, .reg = 0x0a },
+ { .mbps = 330, .reg = 0x2a },
+ { .mbps = 360, .reg = 0x4a },
+ { .mbps = 400, .reg = 0x0c },
+ { .mbps = 450, .reg = 0x2c },
+ { .mbps = 500, .reg = 0x0e },
+ { .mbps = 550, .reg = 0x2e },
+ { .mbps = 600, .reg = 0x10 },
+ { .mbps = 650, .reg = 0x30 },
+ { .mbps = 700, .reg = 0x12 },
+ { .mbps = 750, .reg = 0x32 },
+ { .mbps = 800, .reg = 0x52 },
+ { .mbps = 850, .reg = 0x72 },
+ { .mbps = 900, .reg = 0x14 },
+ { .mbps = 950, .reg = 0x34 },
+ { .mbps = 1000, .reg = 0x54 },
+ { .mbps = 1050, .reg = 0x74 },
+ { .mbps = 1125, .reg = 0x16 },
+ { /* sentinel */ },
+};
+
+/* PHY Test Interface Clear */
+#define PHTC_REG 0x58
+#define PHTC_TESTCLR BIT(0)
+
+/* PHY Frequency Control */
+#define PHYPLL_REG 0x68
+#define PHYPLL_HSFREQRANGE(n) ((n) << 16)
+
+static const struct rcsi2_mbps_reg hsfreqrange_v3u[] = {
+ { .mbps = 80, .reg = 0x00 },
+ { .mbps = 90, .reg = 0x10 },
+ { .mbps = 100, .reg = 0x20 },
+ { .mbps = 110, .reg = 0x30 },
+ { .mbps = 120, .reg = 0x01 },
+ { .mbps = 130, .reg = 0x11 },
+ { .mbps = 140, .reg = 0x21 },
+ { .mbps = 150, .reg = 0x31 },
+ { .mbps = 160, .reg = 0x02 },
+ { .mbps = 170, .reg = 0x12 },
+ { .mbps = 180, .reg = 0x22 },
+ { .mbps = 190, .reg = 0x32 },
+ { .mbps = 205, .reg = 0x03 },
+ { .mbps = 220, .reg = 0x13 },
+ { .mbps = 235, .reg = 0x23 },
+ { .mbps = 250, .reg = 0x33 },
+ { .mbps = 275, .reg = 0x04 },
+ { .mbps = 300, .reg = 0x14 },
+ { .mbps = 325, .reg = 0x25 },
+ { .mbps = 350, .reg = 0x35 },
+ { .mbps = 400, .reg = 0x05 },
+ { .mbps = 450, .reg = 0x16 },
+ { .mbps = 500, .reg = 0x26 },
+ { .mbps = 550, .reg = 0x37 },
+ { .mbps = 600, .reg = 0x07 },
+ { .mbps = 650, .reg = 0x18 },
+ { .mbps = 700, .reg = 0x28 },
+ { .mbps = 750, .reg = 0x39 },
+ { .mbps = 800, .reg = 0x09 },
+ { .mbps = 850, .reg = 0x19 },
+ { .mbps = 900, .reg = 0x29 },
+ { .mbps = 950, .reg = 0x3a },
+ { .mbps = 1000, .reg = 0x0a },
+ { .mbps = 1050, .reg = 0x1a },
+ { .mbps = 1100, .reg = 0x2a },
+ { .mbps = 1150, .reg = 0x3b },
+ { .mbps = 1200, .reg = 0x0b },
+ { .mbps = 1250, .reg = 0x1b },
+ { .mbps = 1300, .reg = 0x2b },
+ { .mbps = 1350, .reg = 0x3c },
+ { .mbps = 1400, .reg = 0x0c },
+ { .mbps = 1450, .reg = 0x1c },
+ { .mbps = 1500, .reg = 0x2c },
+ { .mbps = 1550, .reg = 0x3d },
+ { .mbps = 1600, .reg = 0x0d },
+ { .mbps = 1650, .reg = 0x1d },
+ { .mbps = 1700, .reg = 0x2e },
+ { .mbps = 1750, .reg = 0x3e },
+ { .mbps = 1800, .reg = 0x0e },
+ { .mbps = 1850, .reg = 0x1e },
+ { .mbps = 1900, .reg = 0x2f },
+ { .mbps = 1950, .reg = 0x3f },
+ { .mbps = 2000, .reg = 0x0f },
+ { .mbps = 2050, .reg = 0x40 },
+ { .mbps = 2100, .reg = 0x41 },
+ { .mbps = 2150, .reg = 0x42 },
+ { .mbps = 2200, .reg = 0x43 },
+ { .mbps = 2300, .reg = 0x45 },
+ { .mbps = 2350, .reg = 0x46 },
+ { .mbps = 2400, .reg = 0x47 },
+ { .mbps = 2450, .reg = 0x48 },
+ { .mbps = 2500, .reg = 0x49 },
+ { /* sentinel */ },
+};
+
+static const struct rcsi2_mbps_reg hsfreqrange_h3_v3h_m3n[] = {
+ { .mbps = 80, .reg = 0x00 },
+ { .mbps = 90, .reg = 0x10 },
+ { .mbps = 100, .reg = 0x20 },
+ { .mbps = 110, .reg = 0x30 },
+ { .mbps = 120, .reg = 0x01 },
+ { .mbps = 130, .reg = 0x11 },
+ { .mbps = 140, .reg = 0x21 },
+ { .mbps = 150, .reg = 0x31 },
+ { .mbps = 160, .reg = 0x02 },
+ { .mbps = 170, .reg = 0x12 },
+ { .mbps = 180, .reg = 0x22 },
+ { .mbps = 190, .reg = 0x32 },
+ { .mbps = 205, .reg = 0x03 },
+ { .mbps = 220, .reg = 0x13 },
+ { .mbps = 235, .reg = 0x23 },
+ { .mbps = 250, .reg = 0x33 },
+ { .mbps = 275, .reg = 0x04 },
+ { .mbps = 300, .reg = 0x14 },
+ { .mbps = 325, .reg = 0x25 },
+ { .mbps = 350, .reg = 0x35 },
+ { .mbps = 400, .reg = 0x05 },
+ { .mbps = 450, .reg = 0x16 },
+ { .mbps = 500, .reg = 0x26 },
+ { .mbps = 550, .reg = 0x37 },
+ { .mbps = 600, .reg = 0x07 },
+ { .mbps = 650, .reg = 0x18 },
+ { .mbps = 700, .reg = 0x28 },
+ { .mbps = 750, .reg = 0x39 },
+ { .mbps = 800, .reg = 0x09 },
+ { .mbps = 850, .reg = 0x19 },
+ { .mbps = 900, .reg = 0x29 },
+ { .mbps = 950, .reg = 0x3a },
+ { .mbps = 1000, .reg = 0x0a },
+ { .mbps = 1050, .reg = 0x1a },
+ { .mbps = 1100, .reg = 0x2a },
+ { .mbps = 1150, .reg = 0x3b },
+ { .mbps = 1200, .reg = 0x0b },
+ { .mbps = 1250, .reg = 0x1b },
+ { .mbps = 1300, .reg = 0x2b },
+ { .mbps = 1350, .reg = 0x3c },
+ { .mbps = 1400, .reg = 0x0c },
+ { .mbps = 1450, .reg = 0x1c },
+ { .mbps = 1500, .reg = 0x2c },
+ { /* sentinel */ },
+};
+
+static const struct rcsi2_mbps_reg hsfreqrange_m3w[] = {
+ { .mbps = 80, .reg = 0x00 },
+ { .mbps = 90, .reg = 0x10 },
+ { .mbps = 100, .reg = 0x20 },
+ { .mbps = 110, .reg = 0x30 },
+ { .mbps = 120, .reg = 0x01 },
+ { .mbps = 130, .reg = 0x11 },
+ { .mbps = 140, .reg = 0x21 },
+ { .mbps = 150, .reg = 0x31 },
+ { .mbps = 160, .reg = 0x02 },
+ { .mbps = 170, .reg = 0x12 },
+ { .mbps = 180, .reg = 0x22 },
+ { .mbps = 190, .reg = 0x32 },
+ { .mbps = 205, .reg = 0x03 },
+ { .mbps = 220, .reg = 0x13 },
+ { .mbps = 235, .reg = 0x23 },
+ { .mbps = 250, .reg = 0x33 },
+ { .mbps = 275, .reg = 0x04 },
+ { .mbps = 300, .reg = 0x14 },
+ { .mbps = 325, .reg = 0x05 },
+ { .mbps = 350, .reg = 0x15 },
+ { .mbps = 400, .reg = 0x25 },
+ { .mbps = 450, .reg = 0x06 },
+ { .mbps = 500, .reg = 0x16 },
+ { .mbps = 550, .reg = 0x07 },
+ { .mbps = 600, .reg = 0x17 },
+ { .mbps = 650, .reg = 0x08 },
+ { .mbps = 700, .reg = 0x18 },
+ { .mbps = 750, .reg = 0x09 },
+ { .mbps = 800, .reg = 0x19 },
+ { .mbps = 850, .reg = 0x29 },
+ { .mbps = 900, .reg = 0x39 },
+ { .mbps = 950, .reg = 0x0a },
+ { .mbps = 1000, .reg = 0x1a },
+ { .mbps = 1050, .reg = 0x2a },
+ { .mbps = 1100, .reg = 0x3a },
+ { .mbps = 1150, .reg = 0x0b },
+ { .mbps = 1200, .reg = 0x1b },
+ { .mbps = 1250, .reg = 0x2b },
+ { .mbps = 1300, .reg = 0x3b },
+ { .mbps = 1350, .reg = 0x0c },
+ { .mbps = 1400, .reg = 0x1c },
+ { .mbps = 1450, .reg = 0x2c },
+ { .mbps = 1500, .reg = 0x3c },
+ { /* sentinel */ },
+};
+
+/* PHY ESC Error Monitor */
+#define PHEERM_REG 0x74
+
+/* PHY Clock Lane Monitor */
+#define PHCLM_REG 0x78
+#define PHCLM_STOPSTATECKL BIT(0)
+
+/* PHY Data Lane Monitor */
+#define PHDLM_REG 0x7c
+
+/* CSI0CLK Frequency Configuration Preset Register */
+#define CSI0CLKFCPR_REG 0x260
+#define CSI0CLKFREQRANGE(n) (((n) & 0x3f) << 16)
+
+struct rcar_csi2_format {
+ u32 code;
+ unsigned int datatype;
+ unsigned int bpp;
+};
+
+static const struct rcar_csi2_format rcar_csi2_formats[] = {
+ {
+ .code = MEDIA_BUS_FMT_RGB888_1X24,
+ .datatype = MIPI_CSI2_DT_RGB888,
+ .bpp = 24,
+ }, {
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .datatype = MIPI_CSI2_DT_YUV422_8B,
+ .bpp = 16,
+ }, {
+ .code = MEDIA_BUS_FMT_YUYV8_1X16,
+ .datatype = MIPI_CSI2_DT_YUV422_8B,
+ .bpp = 16,
+ }, {
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .datatype = MIPI_CSI2_DT_YUV422_8B,
+ .bpp = 16,
+ }, {
+ .code = MEDIA_BUS_FMT_YUYV10_2X10,
+ .datatype = MIPI_CSI2_DT_YUV422_8B,
+ .bpp = 20,
+ }, {
+ .code = MEDIA_BUS_FMT_Y10_1X10,
+ .datatype = MIPI_CSI2_DT_RAW10,
+ .bpp = 10,
+ }, {
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .datatype = MIPI_CSI2_DT_RAW8,
+ .bpp = 8,
+ }, {
+ .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .datatype = MIPI_CSI2_DT_RAW8,
+ .bpp = 8,
+ }, {
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .datatype = MIPI_CSI2_DT_RAW8,
+ .bpp = 8,
+ }, {
+ .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .datatype = MIPI_CSI2_DT_RAW8,
+ .bpp = 8,
+ }, {
+ .code = MEDIA_BUS_FMT_Y8_1X8,
+ .datatype = MIPI_CSI2_DT_RAW8,
+ .bpp = 8,
+ },
+};
+
+static const struct rcar_csi2_format *rcsi2_code_to_fmt(unsigned int code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(rcar_csi2_formats); i++)
+ if (rcar_csi2_formats[i].code == code)
+ return &rcar_csi2_formats[i];
+
+ return NULL;
+}
+
+enum rcar_csi2_pads {
+ RCAR_CSI2_SINK,
+ RCAR_CSI2_SOURCE_VC0,
+ RCAR_CSI2_SOURCE_VC1,
+ RCAR_CSI2_SOURCE_VC2,
+ RCAR_CSI2_SOURCE_VC3,
+ NR_OF_RCAR_CSI2_PAD,
+};
+
+struct rcar_csi2_info {
+ int (*init_phtw)(struct rcar_csi2 *priv, unsigned int mbps);
+ int (*phy_post_init)(struct rcar_csi2 *priv);
+ int (*start_receiver)(struct rcar_csi2 *priv);
+ void (*enter_standby)(struct rcar_csi2 *priv);
+ const struct rcsi2_mbps_reg *hsfreqrange;
+ unsigned int csi0clkfreqrange;
+ unsigned int num_channels;
+ bool clear_ulps;
+ bool use_isp;
+ bool support_dphy;
+ bool support_cphy;
+};
+
+struct rcar_csi2 {
+ struct device *dev;
+ void __iomem *base;
+ const struct rcar_csi2_info *info;
+ struct reset_control *rstc;
+
+ struct v4l2_subdev subdev;
+ struct media_pad pads[NR_OF_RCAR_CSI2_PAD];
+
+ struct v4l2_async_notifier notifier;
+ struct v4l2_subdev *remote;
+ unsigned int remote_pad;
+
+ int channel_vc[4];
+
+ struct mutex lock; /* Protects mf and stream_count. */
+ struct v4l2_mbus_framefmt mf;
+ int stream_count;
+
+ bool cphy;
+ unsigned short lanes;
+ unsigned char lane_swap[4];
+};
+
+static inline struct rcar_csi2 *sd_to_csi2(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct rcar_csi2, subdev);
+}
+
+static inline struct rcar_csi2 *notifier_to_csi2(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct rcar_csi2, notifier);
+}
+
+static u32 rcsi2_read(struct rcar_csi2 *priv, unsigned int reg)
+{
+ return ioread32(priv->base + reg);
+}
+
+static void rcsi2_write(struct rcar_csi2 *priv, unsigned int reg, u32 data)
+{
+ iowrite32(data, priv->base + reg);
+}
+
+static void rcsi2_write16(struct rcar_csi2 *priv, unsigned int reg, u16 data)
+{
+ iowrite16(data, priv->base + reg);
+}
+
+static void rcsi2_enter_standby_gen3(struct rcar_csi2 *priv)
+{
+ rcsi2_write(priv, PHYCNT_REG, 0);
+ rcsi2_write(priv, PHTC_REG, PHTC_TESTCLR);
+}
+
+static void rcsi2_enter_standby(struct rcar_csi2 *priv)
+{
+ if (priv->info->enter_standby)
+ priv->info->enter_standby(priv);
+
+ reset_control_assert(priv->rstc);
+ usleep_range(100, 150);
+ pm_runtime_put(priv->dev);
+}
+
+static int rcsi2_exit_standby(struct rcar_csi2 *priv)
+{
+ int ret;
+
+ ret = pm_runtime_resume_and_get(priv->dev);
+ if (ret < 0)
+ return ret;
+
+ reset_control_deassert(priv->rstc);
+
+ return 0;
+}
+
+static int rcsi2_wait_phy_start(struct rcar_csi2 *priv,
+ unsigned int lanes)
+{
+ unsigned int timeout;
+
+ /* Wait for the clock and data lanes to enter LP-11 state. */
+ for (timeout = 0; timeout <= 20; timeout++) {
+ const u32 lane_mask = (1 << lanes) - 1;
+
+ if ((rcsi2_read(priv, PHCLM_REG) & PHCLM_STOPSTATECKL) &&
+ (rcsi2_read(priv, PHDLM_REG) & lane_mask) == lane_mask)
+ return 0;
+
+ usleep_range(1000, 2000);
+ }
+
+ dev_err(priv->dev, "Timeout waiting for LP-11 state\n");
+
+ return -ETIMEDOUT;
+}
+
+static int rcsi2_set_phypll(struct rcar_csi2 *priv, unsigned int mbps)
+{
+ const struct rcsi2_mbps_reg *hsfreq;
+ const struct rcsi2_mbps_reg *hsfreq_prev = NULL;
+
+ if (mbps < priv->info->hsfreqrange->mbps)
+ dev_warn(priv->dev, "%u Mbps less than min PHY speed %u Mbps",
+ mbps, priv->info->hsfreqrange->mbps);
+
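+	/*
+	 * Select the table entry closest to the requested rate; e.g. (purely
+	 * illustrative) 360 Mbps picks the 350 Mbps entry as it is closer
+	 * than 400 Mbps.
+	 */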
+ for (hsfreq = priv->info->hsfreqrange; hsfreq->mbps != 0; hsfreq++) {
+ if (hsfreq->mbps >= mbps)
+ break;
+ hsfreq_prev = hsfreq;
+ }
+
+ if (!hsfreq->mbps) {
+ dev_err(priv->dev, "Unsupported PHY speed (%u Mbps)", mbps);
+ return -ERANGE;
+ }
+
+ if (hsfreq_prev &&
+ ((mbps - hsfreq_prev->mbps) <= (hsfreq->mbps - mbps)))
+ hsfreq = hsfreq_prev;
+
+ rcsi2_write(priv, PHYPLL_REG, PHYPLL_HSFREQRANGE(hsfreq->reg));
+
+ return 0;
+}
+
+static int rcsi2_calc_mbps(struct rcar_csi2 *priv, unsigned int bpp,
+ unsigned int lanes)
+{
+ struct v4l2_subdev *source;
+ struct v4l2_ctrl *ctrl;
+ u64 mbps;
+
+ if (!priv->remote)
+ return -ENODEV;
+
+ source = priv->remote;
+
+ /* Read the pixel rate control from remote. */
+ ctrl = v4l2_ctrl_find(source->ctrl_handler, V4L2_CID_PIXEL_RATE);
+ if (!ctrl) {
+ dev_err(priv->dev, "no pixel rate control in subdev %s\n",
+ source->name);
+ return -EINVAL;
+ }
+
+ /*
+ * Calculate the phypll in mbps.
+ * link_freq = (pixel_rate * bits_per_sample) / (2 * nr_of_lanes)
+ * bps = link_freq * 2
+ */
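+	/*
+	 * Worked example with illustrative numbers: a 200 MHz pixel rate at
+	 * 24 bpp over 4 D-PHY lanes gives 200000000 * 24 / (4 * 1000000) =
+	 * 1200 Mbps.
+	 */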
+ mbps = v4l2_ctrl_g_ctrl_int64(ctrl) * bpp;
+ do_div(mbps, lanes * 1000000);
+
+	/* Adjust for C-PHY, divide by 2.8 (i.e. multiply by 5/14). */
+ if (priv->cphy)
+ mbps = div_u64(mbps * 5, 14);
+
+ return mbps;
+}
+
+static int rcsi2_get_active_lanes(struct rcar_csi2 *priv,
+ unsigned int *lanes)
+{
+ struct v4l2_mbus_config mbus_config = { 0 };
+ int ret;
+
+ *lanes = priv->lanes;
+
+ ret = v4l2_subdev_call(priv->remote, pad, get_mbus_config,
+ priv->remote_pad, &mbus_config);
+ if (ret == -ENOIOCTLCMD) {
+ dev_dbg(priv->dev, "No remote mbus configuration available\n");
+ return 0;
+ }
+
+ if (ret) {
+ dev_err(priv->dev, "Failed to get remote mbus configuration\n");
+ return ret;
+ }
+
+ switch (mbus_config.type) {
+ case V4L2_MBUS_CSI2_CPHY:
+ if (!priv->cphy)
+ return -EINVAL;
+ break;
+ case V4L2_MBUS_CSI2_DPHY:
+ if (priv->cphy)
+ return -EINVAL;
+ break;
+ default:
+ dev_err(priv->dev, "Unsupported media bus type %u\n",
+ mbus_config.type);
+ return -EINVAL;
+ }
+
+ if (mbus_config.bus.mipi_csi2.num_data_lanes > priv->lanes) {
+ dev_err(priv->dev,
+ "Unsupported mbus config: too many data lanes %u\n",
+ mbus_config.bus.mipi_csi2.num_data_lanes);
+ return -EINVAL;
+ }
+
+ *lanes = mbus_config.bus.mipi_csi2.num_data_lanes;
+
+ return 0;
+}
+
+static int rcsi2_start_receiver_gen3(struct rcar_csi2 *priv)
+{
+ const struct rcar_csi2_format *format;
+ u32 phycnt, vcdt = 0, vcdt2 = 0, fld = 0;
+ unsigned int lanes;
+ unsigned int i;
+ int mbps, ret;
+
+ dev_dbg(priv->dev, "Input size (%ux%u%c)\n",
+ priv->mf.width, priv->mf.height,
+ priv->mf.field == V4L2_FIELD_NONE ? 'p' : 'i');
+
+ /* Code is validated in set_fmt. */
+ format = rcsi2_code_to_fmt(priv->mf.code);
+ if (!format)
+ return -EINVAL;
+
+ /*
+ * Enable all supported CSI-2 channels with virtual channel and
+ * data type matching.
+ *
+	 * NOTE: It's not possible to get an individual data type for each
+	 * source virtual channel. Once this is possible in V4L2 it should
+	 * be used here.
+ */
+ for (i = 0; i < priv->info->num_channels; i++) {
+ u32 vcdt_part;
+
+ if (priv->channel_vc[i] < 0)
+ continue;
+
+ vcdt_part = VCDT_SEL_VC(priv->channel_vc[i]) | VCDT_VCDTN_EN |
+ VCDT_SEL_DTN_ON | VCDT_SEL_DT(format->datatype);
+
+ /* Store in correct reg and offset. */
+ if (i < 2)
+ vcdt |= vcdt_part << ((i % 2) * 16);
+ else
+ vcdt2 |= vcdt_part << ((i % 2) * 16);
+ }
+
+ if (priv->mf.field == V4L2_FIELD_ALTERNATE) {
+ fld = FLD_DET_SEL(1) | FLD_FLD_EN4 | FLD_FLD_EN3 | FLD_FLD_EN2
+ | FLD_FLD_EN;
+
+ if (priv->mf.height == 240)
+ fld |= FLD_FLD_NUM(0);
+ else
+ fld |= FLD_FLD_NUM(1);
+ }
+
+ /*
+ * Get the number of active data lanes inspecting the remote mbus
+ * configuration.
+ */
+ ret = rcsi2_get_active_lanes(priv, &lanes);
+ if (ret)
+ return ret;
+
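+	/* Enable the clock lane and one PHYCNT_ENABLE_n bit per active data lane. */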
+ phycnt = PHYCNT_ENABLECLK;
+ phycnt |= (1 << lanes) - 1;
+
+ mbps = rcsi2_calc_mbps(priv, format->bpp, lanes);
+ if (mbps < 0)
+ return mbps;
+
+ /* Enable interrupts. */
+ rcsi2_write(priv, INTEN_REG, INTEN_INT_AFIFO_OF | INTEN_INT_ERRSOTHS
+ | INTEN_INT_ERRSOTSYNCHS);
+
+ /* Init */
+ rcsi2_write(priv, TREF_REG, TREF_TREF);
+ rcsi2_write(priv, PHTC_REG, 0);
+
+ /* Configure */
+ if (!priv->info->use_isp) {
+ rcsi2_write(priv, VCDT_REG, vcdt);
+ if (vcdt2)
+ rcsi2_write(priv, VCDT2_REG, vcdt2);
+ }
+
+	/* LSWAP lane selects are zero-indexed, while DT data-lanes count from 1. */
+ rcsi2_write(priv, LSWAP_REG,
+ LSWAP_L0SEL(priv->lane_swap[0] - 1) |
+ LSWAP_L1SEL(priv->lane_swap[1] - 1) |
+ LSWAP_L2SEL(priv->lane_swap[2] - 1) |
+ LSWAP_L3SEL(priv->lane_swap[3] - 1));
+
+ /* Start */
+ if (priv->info->init_phtw) {
+ ret = priv->info->init_phtw(priv, mbps);
+ if (ret)
+ return ret;
+ }
+
+ if (priv->info->hsfreqrange) {
+ ret = rcsi2_set_phypll(priv, mbps);
+ if (ret)
+ return ret;
+ }
+
+ if (priv->info->csi0clkfreqrange)
+ rcsi2_write(priv, CSI0CLKFCPR_REG,
+ CSI0CLKFREQRANGE(priv->info->csi0clkfreqrange));
+
+ if (priv->info->use_isp)
+ rcsi2_write(priv, PHYFRX_REG,
+ PHYFRX_FORCERX_MODE_3 | PHYFRX_FORCERX_MODE_2 |
+ PHYFRX_FORCERX_MODE_1 | PHYFRX_FORCERX_MODE_0);
+
+ rcsi2_write(priv, PHYCNT_REG, phycnt);
+ rcsi2_write(priv, LINKCNT_REG, LINKCNT_MONITOR_EN |
+ LINKCNT_REG_MONI_PACT_EN | LINKCNT_ICLK_NONSTOP);
+ rcsi2_write(priv, FLD_REG, fld);
+ rcsi2_write(priv, PHYCNT_REG, phycnt | PHYCNT_SHUTDOWNZ);
+ rcsi2_write(priv, PHYCNT_REG, phycnt | PHYCNT_SHUTDOWNZ | PHYCNT_RSTZ);
+
+ ret = rcsi2_wait_phy_start(priv, lanes);
+ if (ret)
+ return ret;
+
+ if (priv->info->use_isp)
+ rcsi2_write(priv, PHYFRX_REG, 0);
+
+ /* Run post PHY start initialization, if needed. */
+ if (priv->info->phy_post_init) {
+ ret = priv->info->phy_post_init(priv);
+ if (ret)
+ return ret;
+ }
+
+ /* Clear Ultra Low Power interrupt. */
+ if (priv->info->clear_ulps)
+ rcsi2_write(priv, INTSTATE_REG,
+ INTSTATE_INT_ULPS_START |
+ INTSTATE_INT_ULPS_END);
+ return 0;
+}
+
+static int rcsi2_wait_phy_start_v4h(struct rcar_csi2 *priv, u32 match)
+{
+ unsigned int timeout;
+ u32 status;
+
+ for (timeout = 0; timeout <= 10; timeout++) {
+ status = rcsi2_read(priv, V4H_ST_PHYST_REG);
+ if ((status & match) == match)
+ return 0;
+
+ usleep_range(1000, 2000);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int rcsi2_c_phy_setting_v4h(struct rcar_csi2 *priv, int msps)
+{
+ const struct rcsi2_cphy_setting *conf;
+
+ for (conf = cphy_setting_table_r8a779g0; conf->msps != 0; conf++) {
+ if (conf->msps > msps)
+ break;
+ }
+
+ if (!conf->msps) {
+ dev_err(priv->dev, "Unsupported PHY speed for msps setting (%u Msps)", msps);
+ return -ERANGE;
+ }
+
+ /* C-PHY specific */
+ rcsi2_write16(priv, V4H_CORE_DIG_RW_COMMON_REG(7), 0x0155);
+ rcsi2_write16(priv, V4H_PPI_STARTUP_RW_COMMON_DPHY_REG(7), 0x0068);
+ rcsi2_write16(priv, V4H_PPI_STARTUP_RW_COMMON_DPHY_REG(8), 0x0010);
+
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_0_RW_LP_0_REG, 0x463c);
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_1_RW_LP_0_REG, 0x463c);
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_2_RW_LP_0_REG, 0x463c);
+
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_0_RW_HS_RX_REG(0), 0x00d5);
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_1_RW_HS_RX_REG(0), 0x00d5);
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_2_RW_HS_RX_REG(0), 0x00d5);
+
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_0_RW_HS_RX_REG(1), 0x0013);
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_1_RW_HS_RX_REG(1), 0x0013);
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_2_RW_HS_RX_REG(1), 0x0013);
+
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_0_RW_HS_RX_REG(5), 0x0013);
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_1_RW_HS_RX_REG(5), 0x0013);
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_2_RW_HS_RX_REG(5), 0x0013);
+
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_0_RW_HS_RX_REG(6), 0x000a);
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_1_RW_HS_RX_REG(6), 0x000a);
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_2_RW_HS_RX_REG(6), 0x000a);
+
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_0_RW_HS_RX_REG(2), conf->rx2);
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_1_RW_HS_RX_REG(2), conf->rx2);
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_2_RW_HS_RX_REG(2), conf->rx2);
+
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_REG(2), 0x0001);
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_REG(2), 0);
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_REG(2), 0x0001);
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_REG(2), 0x0001);
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_REG(2), 0);
+
+ rcsi2_write16(priv, V4H_CORE_DIG_RW_TRIO0_REG(0), conf->trio0);
+ rcsi2_write16(priv, V4H_CORE_DIG_RW_TRIO1_REG(0), conf->trio0);
+ rcsi2_write16(priv, V4H_CORE_DIG_RW_TRIO2_REG(0), conf->trio0);
+
+ rcsi2_write16(priv, V4H_CORE_DIG_RW_TRIO0_REG(2), conf->trio2);
+ rcsi2_write16(priv, V4H_CORE_DIG_RW_TRIO1_REG(2), conf->trio2);
+ rcsi2_write16(priv, V4H_CORE_DIG_RW_TRIO2_REG(2), conf->trio2);
+
+ rcsi2_write16(priv, V4H_CORE_DIG_RW_TRIO0_REG(1), conf->trio1);
+ rcsi2_write16(priv, V4H_CORE_DIG_RW_TRIO1_REG(1), conf->trio1);
+ rcsi2_write16(priv, V4H_CORE_DIG_RW_TRIO2_REG(1), conf->trio1);
+
+ /*
+ * Configure pin-swap.
+	 * TODO: These registers are not documented yet; the values should depend
+ * on the 'clock-lanes' and 'data-lanes' devicetree properties.
+ */
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_1_RW_CFG_0_REG, 0xf5);
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_1_RW_HS_TX_6_REG, 0x5000);
+
+ /* Leave Shutdown mode */
+ rcsi2_write(priv, V4H_DPHY_RSTZ_REG, BIT(0));
+ rcsi2_write(priv, V4H_PHY_SHUTDOWNZ_REG, BIT(0));
+
+ /* Wait for calibration */
+ if (rcsi2_wait_phy_start_v4h(priv, V4H_ST_PHYST_ST_PHY_READY)) {
+ dev_err(priv->dev, "PHY calibration failed\n");
+ return -ETIMEDOUT;
+ }
+
+	/* C-PHY setting - analog programming */
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_REG(9), conf->lane29);
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_REG(7), conf->lane27);
+
+ return 0;
+}
+
+static int rcsi2_start_receiver_v4h(struct rcar_csi2 *priv)
+{
+ const struct rcar_csi2_format *format;
+ unsigned int lanes;
+ int msps;
+ int ret;
+
+ /* Calculate parameters */
+ format = rcsi2_code_to_fmt(priv->mf.code);
+ if (!format)
+ return -EINVAL;
+
+ ret = rcsi2_get_active_lanes(priv, &lanes);
+ if (ret)
+ return ret;
+
+ msps = rcsi2_calc_mbps(priv, format->bpp, lanes);
+ if (msps < 0)
+ return msps;
+
+	/* Reset LINK and PHY */
+ rcsi2_write(priv, V4H_CSI2_RESETN_REG, 0);
+ rcsi2_write(priv, V4H_DPHY_RSTZ_REG, 0);
+ rcsi2_write(priv, V4H_PHY_SHUTDOWNZ_REG, 0);
+
+ /* PHY static setting */
+ rcsi2_write(priv, V4H_PHY_EN_REG, BIT(0));
+ rcsi2_write(priv, V4H_FLDC_REG, 0);
+ rcsi2_write(priv, V4H_FLDD_REG, 0);
+ rcsi2_write(priv, V4H_IDIC_REG, 0);
+ rcsi2_write(priv, V4H_PHY_MODE_REG, BIT(0));
+ rcsi2_write(priv, V4H_N_LANES_REG, lanes - 1);
+
+ /* Reset CSI2 */
+ rcsi2_write(priv, V4H_CSI2_RESETN_REG, BIT(0));
+
+	/* Static register settings through APB */
+ /* Common setting */
+ rcsi2_write16(priv, V4H_CORE_DIG_ANACTRL_RW_COMMON_ANACTRL_REG(0), 0x1bfd);
+ rcsi2_write16(priv, V4H_PPI_STARTUP_RW_COMMON_STARTUP_1_1_REG, 0x0233);
+ rcsi2_write16(priv, V4H_PPI_STARTUP_RW_COMMON_DPHY_REG(6), 0x0027);
+ rcsi2_write16(priv, V4H_PPI_CALIBCTRL_RW_COMMON_BG_0_REG, 0x01f4);
+ rcsi2_write16(priv, V4H_PPI_RW_TERMCAL_CFG_0_REG, 0x0013);
+ rcsi2_write16(priv, V4H_PPI_RW_OFFSETCAL_CFG_0_REG, 0x0003);
+ rcsi2_write16(priv, V4H_PPI_RW_LPDCOCAL_TIMEBASE_REG, 0x004f);
+ rcsi2_write16(priv, V4H_PPI_RW_LPDCOCAL_NREF_REG, 0x0320);
+ rcsi2_write16(priv, V4H_PPI_RW_LPDCOCAL_NREF_RANGE_REG, 0x000f);
+ rcsi2_write16(priv, V4H_PPI_RW_LPDCOCAL_TWAIT_CONFIG_REG, 0xfe18);
+ rcsi2_write16(priv, V4H_PPI_RW_LPDCOCAL_VT_CONFIG_REG, 0x0c3c);
+ rcsi2_write16(priv, V4H_PPI_RW_LPDCOCAL_COARSE_CFG_REG, 0x0105);
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_REG(6), 0x1000);
+ rcsi2_write16(priv, V4H_PPI_RW_COMMON_CFG_REG, 0x0003);
+
+ /* C-PHY settings */
+ ret = rcsi2_c_phy_setting_v4h(priv, msps);
+ if (ret)
+ return ret;
+
+ rcsi2_wait_phy_start_v4h(priv, V4H_ST_PHYST_ST_STOPSTATE_0 |
+ V4H_ST_PHYST_ST_STOPSTATE_1 |
+ V4H_ST_PHYST_ST_STOPSTATE_2);
+
+ return 0;
+}
+
+static int rcsi2_start(struct rcar_csi2 *priv)
+{
+ int ret;
+
+ ret = rcsi2_exit_standby(priv);
+ if (ret < 0)
+ return ret;
+
+ ret = priv->info->start_receiver(priv);
+ if (ret) {
+ rcsi2_enter_standby(priv);
+ return ret;
+ }
+
+ ret = v4l2_subdev_call(priv->remote, video, s_stream, 1);
+ if (ret) {
+ rcsi2_enter_standby(priv);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void rcsi2_stop(struct rcar_csi2 *priv)
+{
+ rcsi2_enter_standby(priv);
+ v4l2_subdev_call(priv->remote, video, s_stream, 0);
+}
+
+static int rcsi2_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct rcar_csi2 *priv = sd_to_csi2(sd);
+ int ret = 0;
+
+ mutex_lock(&priv->lock);
+
+ if (!priv->remote) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (enable && priv->stream_count == 0) {
+ ret = rcsi2_start(priv);
+ if (ret)
+ goto out;
+ } else if (!enable && priv->stream_count == 1) {
+ rcsi2_stop(priv);
+ }
+
+ priv->stream_count += enable ? 1 : -1;
+out:
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static int rcsi2_set_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *format)
+{
+ struct rcar_csi2 *priv = sd_to_csi2(sd);
+ struct v4l2_mbus_framefmt *framefmt;
+
+ mutex_lock(&priv->lock);
+
+ if (!rcsi2_code_to_fmt(format->format.code))
+ format->format.code = rcar_csi2_formats[0].code;
+
+ if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ priv->mf = format->format;
+ } else {
+ framefmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
+ *framefmt = format->format;
+ }
+
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int rcsi2_get_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *format)
+{
+ struct rcar_csi2 *priv = sd_to_csi2(sd);
+
+ mutex_lock(&priv->lock);
+
+ if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ format->format = priv->mf;
+ else
+ format->format = *v4l2_subdev_get_try_format(sd, sd_state, 0);
+
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops rcar_csi2_video_ops = {
+ .s_stream = rcsi2_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops rcar_csi2_pad_ops = {
+ .set_fmt = rcsi2_set_pad_format,
+ .get_fmt = rcsi2_get_pad_format,
+};
+
+static const struct v4l2_subdev_ops rcar_csi2_subdev_ops = {
+ .video = &rcar_csi2_video_ops,
+ .pad = &rcar_csi2_pad_ops,
+};
+
+static irqreturn_t rcsi2_irq(int irq, void *data)
+{
+ struct rcar_csi2 *priv = data;
+ u32 status, err_status;
+
+ status = rcsi2_read(priv, INTSTATE_REG);
+ err_status = rcsi2_read(priv, INTERRSTATE_REG);
+
+ if (!status)
+ return IRQ_HANDLED;
+
+ rcsi2_write(priv, INTSTATE_REG, status);
+
+ if (!err_status)
+ return IRQ_HANDLED;
+
+ rcsi2_write(priv, INTERRSTATE_REG, err_status);
+
+ dev_info(priv->dev, "Transfer error, restarting CSI-2 receiver\n");
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t rcsi2_irq_thread(int irq, void *data)
+{
+ struct rcar_csi2 *priv = data;
+
+ mutex_lock(&priv->lock);
+ rcsi2_stop(priv);
+ usleep_range(1000, 2000);
+ if (rcsi2_start(priv))
+ dev_warn(priv->dev, "Failed to restart CSI-2 receiver\n");
+ mutex_unlock(&priv->lock);
+
+ return IRQ_HANDLED;
+}
+
+/* -----------------------------------------------------------------------------
+ * Async handling and registration of subdevices and links.
+ */
+
+static int rcsi2_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_connection *asc)
+{
+ struct rcar_csi2 *priv = notifier_to_csi2(notifier);
+ int pad;
+
+ pad = media_entity_get_fwnode_pad(&subdev->entity, asc->match.fwnode,
+ MEDIA_PAD_FL_SOURCE);
+ if (pad < 0) {
+ dev_err(priv->dev, "Failed to find pad for %s\n", subdev->name);
+ return pad;
+ }
+
+ priv->remote = subdev;
+ priv->remote_pad = pad;
+
+ dev_dbg(priv->dev, "Bound %s pad: %d\n", subdev->name, pad);
+
+ return media_create_pad_link(&subdev->entity, pad,
+ &priv->subdev.entity, 0,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+}
+
+static void rcsi2_notify_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_connection *asc)
+{
+ struct rcar_csi2 *priv = notifier_to_csi2(notifier);
+
+ priv->remote = NULL;
+
+ dev_dbg(priv->dev, "Unbind %s\n", subdev->name);
+}
+
+static const struct v4l2_async_notifier_operations rcar_csi2_notify_ops = {
+ .bound = rcsi2_notify_bound,
+ .unbind = rcsi2_notify_unbind,
+};
+
+static int rcsi2_parse_v4l2(struct rcar_csi2 *priv,
+ struct v4l2_fwnode_endpoint *vep)
+{
+ unsigned int i;
+
+ /* Only port 0 endpoint 0 is valid. */
+ if (vep->base.port || vep->base.id)
+ return -ENOTCONN;
+
+ priv->lanes = vep->bus.mipi_csi2.num_data_lanes;
+
+ switch (vep->bus_type) {
+ case V4L2_MBUS_CSI2_DPHY:
+ if (!priv->info->support_dphy) {
+ dev_err(priv->dev, "D-PHY not supported\n");
+ return -EINVAL;
+ }
+
+ if (priv->lanes != 1 && priv->lanes != 2 && priv->lanes != 4) {
+ dev_err(priv->dev,
+ "Unsupported number of data-lanes for D-PHY: %u\n",
+ priv->lanes);
+ return -EINVAL;
+ }
+
+ priv->cphy = false;
+ break;
+ case V4L2_MBUS_CSI2_CPHY:
+ if (!priv->info->support_cphy) {
+ dev_err(priv->dev, "C-PHY not supported\n");
+ return -EINVAL;
+ }
+
+ if (priv->lanes != 3) {
+ dev_err(priv->dev,
+ "Unsupported number of data-lanes for C-PHY: %u\n",
+ priv->lanes);
+ return -EINVAL;
+ }
+
+ priv->cphy = true;
+ break;
+ default:
+ dev_err(priv->dev, "Unsupported bus: %u\n", vep->bus_type);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(priv->lane_swap); i++) {
+ priv->lane_swap[i] = i < priv->lanes ?
+ vep->bus.mipi_csi2.data_lanes[i] : i;
+
+ /* Check for valid lane number. */
+ if (priv->lane_swap[i] < 1 || priv->lane_swap[i] > 4) {
+ dev_err(priv->dev, "data-lanes must be in 1-4 range\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int rcsi2_parse_dt(struct rcar_csi2 *priv)
+{
+ struct v4l2_async_connection *asc;
+ struct fwnode_handle *fwnode;
+ struct fwnode_handle *ep;
+ struct v4l2_fwnode_endpoint v4l2_ep = {
+ .bus_type = V4L2_MBUS_UNKNOWN,
+ };
+ int ret;
+
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(priv->dev), 0, 0, 0);
+ if (!ep) {
+ dev_err(priv->dev, "Not connected to subdevice\n");
+ return -EINVAL;
+ }
+
+ ret = v4l2_fwnode_endpoint_parse(ep, &v4l2_ep);
+ if (ret) {
+ dev_err(priv->dev, "Could not parse v4l2 endpoint\n");
+ fwnode_handle_put(ep);
+ return -EINVAL;
+ }
+
+ ret = rcsi2_parse_v4l2(priv, &v4l2_ep);
+ if (ret) {
+ fwnode_handle_put(ep);
+ return ret;
+ }
+
+ fwnode = fwnode_graph_get_remote_endpoint(ep);
+ fwnode_handle_put(ep);
+
+ dev_dbg(priv->dev, "Found '%pOF'\n", to_of_node(fwnode));
+
+ v4l2_async_subdev_nf_init(&priv->notifier, &priv->subdev);
+ priv->notifier.ops = &rcar_csi2_notify_ops;
+
+ asc = v4l2_async_nf_add_fwnode(&priv->notifier, fwnode,
+ struct v4l2_async_connection);
+ fwnode_handle_put(fwnode);
+ if (IS_ERR(asc))
+ return PTR_ERR(asc);
+
+ ret = v4l2_async_nf_register(&priv->notifier);
+ if (ret)
+ v4l2_async_nf_cleanup(&priv->notifier);
+
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * PHTW initialization sequences.
+ *
+ * NOTE: Magic values are from the datasheet and lack documentation.
+ */
+
+static int rcsi2_phtw_write(struct rcar_csi2 *priv, u16 data, u16 code)
+{
+ unsigned int timeout;
+
+ rcsi2_write(priv, PHTW_REG,
+ PHTW_DWEN | PHTW_TESTDIN_DATA(data) |
+ PHTW_CWEN | PHTW_TESTDIN_CODE(code));
+
+ /* Wait for DWEN and CWEN to be cleared by hardware. */
+ for (timeout = 0; timeout <= 20; timeout++) {
+ if (!(rcsi2_read(priv, PHTW_REG) & (PHTW_DWEN | PHTW_CWEN)))
+ return 0;
+
+ usleep_range(1000, 2000);
+ }
+
+ dev_err(priv->dev, "Timeout waiting for PHTW_DWEN and/or PHTW_CWEN\n");
+
+ return -ETIMEDOUT;
+}
+
+static int rcsi2_phtw_write_array(struct rcar_csi2 *priv,
+ const struct phtw_value *values)
+{
+ const struct phtw_value *value;
+ int ret;
+
+ for (value = values; value->data || value->code; value++) {
+ ret = rcsi2_phtw_write(priv, value->data, value->code);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rcsi2_phtw_write_mbps(struct rcar_csi2 *priv, unsigned int mbps,
+ const struct rcsi2_mbps_reg *values, u16 code)
+{
+ const struct rcsi2_mbps_reg *value;
+ const struct rcsi2_mbps_reg *prev_value = NULL;
+
+ for (value = values; value->mbps; value++) {
+ if (value->mbps >= mbps)
+ break;
+ prev_value = value;
+ }
+
+ if (prev_value &&
+ ((mbps - prev_value->mbps) <= (value->mbps - mbps)))
+ value = prev_value;
+
+ if (!value->mbps) {
+ dev_err(priv->dev, "Unsupported PHY speed (%u Mbps)", mbps);
+ return -ERANGE;
+ }
+
+ return rcsi2_phtw_write(priv, value->reg, code);
+}
+
+static int __rcsi2_init_phtw_h3_v3h_m3n(struct rcar_csi2 *priv,
+ unsigned int mbps)
+{
+ static const struct phtw_value step1[] = {
+ { .data = 0xcc, .code = 0xe2 },
+ { .data = 0x01, .code = 0xe3 },
+ { .data = 0x11, .code = 0xe4 },
+ { .data = 0x01, .code = 0xe5 },
+ { .data = 0x10, .code = 0x04 },
+ { /* sentinel */ },
+ };
+
+ static const struct phtw_value step2[] = {
+ { .data = 0x38, .code = 0x08 },
+ { .data = 0x01, .code = 0x00 },
+ { .data = 0x4b, .code = 0xac },
+ { .data = 0x03, .code = 0x00 },
+ { .data = 0x80, .code = 0x07 },
+ { /* sentinel */ },
+ };
+
+ int ret;
+
+ ret = rcsi2_phtw_write_array(priv, step1);
+ if (ret)
+ return ret;
+
+ if (mbps != 0 && mbps <= 250) {
+ ret = rcsi2_phtw_write(priv, 0x39, 0x05);
+ if (ret)
+ return ret;
+
+ ret = rcsi2_phtw_write_mbps(priv, mbps, phtw_mbps_h3_v3h_m3n,
+ 0xf1);
+ if (ret)
+ return ret;
+ }
+
+ return rcsi2_phtw_write_array(priv, step2);
+}
+
+static int rcsi2_init_phtw_h3_v3h_m3n(struct rcar_csi2 *priv, unsigned int mbps)
+{
+ return __rcsi2_init_phtw_h3_v3h_m3n(priv, mbps);
+}
+
+static int rcsi2_init_phtw_h3es2(struct rcar_csi2 *priv, unsigned int mbps)
+{
+ return __rcsi2_init_phtw_h3_v3h_m3n(priv, 0);
+}
+
+static int rcsi2_init_phtw_v3m_e3(struct rcar_csi2 *priv, unsigned int mbps)
+{
+ return rcsi2_phtw_write_mbps(priv, mbps, phtw_mbps_v3m_e3, 0x44);
+}
+
+static int rcsi2_phy_post_init_v3m_e3(struct rcar_csi2 *priv)
+{
+ static const struct phtw_value step1[] = {
+ { .data = 0xee, .code = 0x34 },
+ { .data = 0xee, .code = 0x44 },
+ { .data = 0xee, .code = 0x54 },
+ { .data = 0xee, .code = 0x84 },
+ { .data = 0xee, .code = 0x94 },
+ { /* sentinel */ },
+ };
+
+ return rcsi2_phtw_write_array(priv, step1);
+}
+
+static int rcsi2_init_phtw_v3u(struct rcar_csi2 *priv,
+ unsigned int mbps)
+{
+	/* In case of 1500 Mbps or less */
+ static const struct phtw_value step1[] = {
+ { .data = 0xcc, .code = 0xe2 },
+ { /* sentinel */ },
+ };
+
+ static const struct phtw_value step2[] = {
+ { .data = 0x01, .code = 0xe3 },
+ { .data = 0x11, .code = 0xe4 },
+ { .data = 0x01, .code = 0xe5 },
+ { /* sentinel */ },
+ };
+
+	/* In case of 1500 Mbps or less */
+ static const struct phtw_value step3[] = {
+ { .data = 0x38, .code = 0x08 },
+ { /* sentinel */ },
+ };
+
+ static const struct phtw_value step4[] = {
+ { .data = 0x01, .code = 0x00 },
+ { .data = 0x4b, .code = 0xac },
+ { .data = 0x03, .code = 0x00 },
+ { .data = 0x80, .code = 0x07 },
+ { /* sentinel */ },
+ };
+
+ int ret;
+
+ if (mbps != 0 && mbps <= 1500)
+ ret = rcsi2_phtw_write_array(priv, step1);
+ else
+ ret = rcsi2_phtw_write_mbps(priv, mbps, phtw_mbps_v3u, 0xe2);
+ if (ret)
+ return ret;
+
+ ret = rcsi2_phtw_write_array(priv, step2);
+ if (ret)
+ return ret;
+
+ if (mbps != 0 && mbps <= 1500) {
+ ret = rcsi2_phtw_write_array(priv, step3);
+ if (ret)
+ return ret;
+ }
+
+	return rcsi2_phtw_write_array(priv, step4);
+}
+
+/* -----------------------------------------------------------------------------
+ * Platform Device Driver.
+ */
+
+static int rcsi2_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct rcar_csi2 *priv = sd_to_csi2(sd);
+ struct video_device *vdev;
+ int channel, vc;
+ u32 id;
+
+ if (!is_media_entity_v4l2_video_device(remote->entity)) {
+ dev_err(priv->dev, "Remote is not a video device\n");
+ return -EINVAL;
+ }
+
+ vdev = media_entity_to_video_device(remote->entity);
+
+ if (of_property_read_u32(vdev->dev_parent->of_node, "renesas,id", &id)) {
+ dev_err(priv->dev, "No renesas,id, can't configure routing\n");
+ return -EINVAL;
+ }
+
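+	/*
+	 * Each CSI-2 receiver feeds a group of four VINs, so the output
+	 * channel within that group is assumed to be the VIN id modulo four.
+	 */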
+ channel = id % 4;
+
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (media_pad_remote_pad_first(local)) {
+ dev_dbg(priv->dev,
+ "Each VC can only be routed to one output channel\n");
+ return -EINVAL;
+ }
+
+ vc = local->index - 1;
+
+ dev_dbg(priv->dev, "Route VC%d to VIN%u on output channel %d\n",
+ vc, id, channel);
+ } else {
+ vc = -1;
+ }
+
+ priv->channel_vc[channel] = vc;
+
+ return 0;
+}
+
+static const struct media_entity_operations rcar_csi2_entity_ops = {
+ .link_setup = rcsi2_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static int rcsi2_probe_resources(struct rcar_csi2 *priv,
+ struct platform_device *pdev)
+{
+ int irq, ret;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq, rcsi2_irq,
+ rcsi2_irq_thread, IRQF_SHARED,
+ KBUILD_MODNAME, priv);
+ if (ret)
+ return ret;
+
+ priv->rstc = devm_reset_control_get(&pdev->dev, NULL);
+
+ return PTR_ERR_OR_ZERO(priv->rstc);
+}
+
+static const struct rcar_csi2_info rcar_csi2_info_r8a7795 = {
+ .init_phtw = rcsi2_init_phtw_h3_v3h_m3n,
+ .start_receiver = rcsi2_start_receiver_gen3,
+ .enter_standby = rcsi2_enter_standby_gen3,
+ .hsfreqrange = hsfreqrange_h3_v3h_m3n,
+ .csi0clkfreqrange = 0x20,
+ .num_channels = 4,
+ .clear_ulps = true,
+ .support_dphy = true,
+};
+
+static const struct rcar_csi2_info rcar_csi2_info_r8a7795es2 = {
+ .init_phtw = rcsi2_init_phtw_h3es2,
+ .start_receiver = rcsi2_start_receiver_gen3,
+ .enter_standby = rcsi2_enter_standby_gen3,
+ .hsfreqrange = hsfreqrange_h3_v3h_m3n,
+ .csi0clkfreqrange = 0x20,
+ .num_channels = 4,
+ .clear_ulps = true,
+ .support_dphy = true,
+};
+
+static const struct rcar_csi2_info rcar_csi2_info_r8a7796 = {
+ .start_receiver = rcsi2_start_receiver_gen3,
+ .enter_standby = rcsi2_enter_standby_gen3,
+ .hsfreqrange = hsfreqrange_m3w,
+ .num_channels = 4,
+ .support_dphy = true,
+};
+
+static const struct rcar_csi2_info rcar_csi2_info_r8a77961 = {
+ .start_receiver = rcsi2_start_receiver_gen3,
+ .enter_standby = rcsi2_enter_standby_gen3,
+ .hsfreqrange = hsfreqrange_m3w,
+ .num_channels = 4,
+ .support_dphy = true,
+};
+
+static const struct rcar_csi2_info rcar_csi2_info_r8a77965 = {
+ .init_phtw = rcsi2_init_phtw_h3_v3h_m3n,
+ .start_receiver = rcsi2_start_receiver_gen3,
+ .enter_standby = rcsi2_enter_standby_gen3,
+ .hsfreqrange = hsfreqrange_h3_v3h_m3n,
+ .csi0clkfreqrange = 0x20,
+ .num_channels = 4,
+ .clear_ulps = true,
+ .support_dphy = true,
+};
+
+static const struct rcar_csi2_info rcar_csi2_info_r8a77970 = {
+ .init_phtw = rcsi2_init_phtw_v3m_e3,
+ .phy_post_init = rcsi2_phy_post_init_v3m_e3,
+ .start_receiver = rcsi2_start_receiver_gen3,
+ .enter_standby = rcsi2_enter_standby_gen3,
+ .num_channels = 4,
+ .support_dphy = true,
+};
+
+static const struct rcar_csi2_info rcar_csi2_info_r8a77980 = {
+ .init_phtw = rcsi2_init_phtw_h3_v3h_m3n,
+ .start_receiver = rcsi2_start_receiver_gen3,
+ .enter_standby = rcsi2_enter_standby_gen3,
+ .hsfreqrange = hsfreqrange_h3_v3h_m3n,
+ .csi0clkfreqrange = 0x20,
+ .clear_ulps = true,
+ .support_dphy = true,
+};
+
+static const struct rcar_csi2_info rcar_csi2_info_r8a77990 = {
+ .init_phtw = rcsi2_init_phtw_v3m_e3,
+ .phy_post_init = rcsi2_phy_post_init_v3m_e3,
+ .start_receiver = rcsi2_start_receiver_gen3,
+ .enter_standby = rcsi2_enter_standby_gen3,
+ .num_channels = 2,
+ .support_dphy = true,
+};
+
+static const struct rcar_csi2_info rcar_csi2_info_r8a779a0 = {
+ .init_phtw = rcsi2_init_phtw_v3u,
+ .start_receiver = rcsi2_start_receiver_gen3,
+ .enter_standby = rcsi2_enter_standby_gen3,
+ .hsfreqrange = hsfreqrange_v3u,
+ .csi0clkfreqrange = 0x20,
+ .clear_ulps = true,
+ .use_isp = true,
+ .support_dphy = true,
+};
+
+static const struct rcar_csi2_info rcar_csi2_info_r8a779g0 = {
+ .start_receiver = rcsi2_start_receiver_v4h,
+ .use_isp = true,
+ .support_cphy = true,
+};
+
+static const struct of_device_id rcar_csi2_of_table[] = {
+ {
+ .compatible = "renesas,r8a774a1-csi2",
+ .data = &rcar_csi2_info_r8a7796,
+ },
+ {
+ .compatible = "renesas,r8a774b1-csi2",
+ .data = &rcar_csi2_info_r8a77965,
+ },
+ {
+ .compatible = "renesas,r8a774c0-csi2",
+ .data = &rcar_csi2_info_r8a77990,
+ },
+ {
+ .compatible = "renesas,r8a774e1-csi2",
+ .data = &rcar_csi2_info_r8a7795,
+ },
+ {
+ .compatible = "renesas,r8a7795-csi2",
+ .data = &rcar_csi2_info_r8a7795,
+ },
+ {
+ .compatible = "renesas,r8a7796-csi2",
+ .data = &rcar_csi2_info_r8a7796,
+ },
+ {
+ .compatible = "renesas,r8a77961-csi2",
+ .data = &rcar_csi2_info_r8a77961,
+ },
+ {
+ .compatible = "renesas,r8a77965-csi2",
+ .data = &rcar_csi2_info_r8a77965,
+ },
+ {
+ .compatible = "renesas,r8a77970-csi2",
+ .data = &rcar_csi2_info_r8a77970,
+ },
+ {
+ .compatible = "renesas,r8a77980-csi2",
+ .data = &rcar_csi2_info_r8a77980,
+ },
+ {
+ .compatible = "renesas,r8a77990-csi2",
+ .data = &rcar_csi2_info_r8a77990,
+ },
+ {
+ .compatible = "renesas,r8a779a0-csi2",
+ .data = &rcar_csi2_info_r8a779a0,
+ },
+ {
+ .compatible = "renesas,r8a779g0-csi2",
+ .data = &rcar_csi2_info_r8a779g0,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, rcar_csi2_of_table);
+
+static const struct soc_device_attribute r8a7795[] = {
+ {
+ .soc_id = "r8a7795", .revision = "ES2.*",
+ .data = &rcar_csi2_info_r8a7795es2,
+ },
+ { /* sentinel */ }
+};
+
+static int rcsi2_probe(struct platform_device *pdev)
+{
+ const struct soc_device_attribute *attr;
+ struct rcar_csi2 *priv;
+ unsigned int i, num_pads;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->info = of_device_get_match_data(&pdev->dev);
+
+ /*
+ * The different ES versions of r8a7795 (H3) behave differently but
+ * share the same compatible string.
+ */
+ attr = soc_device_match(r8a7795);
+ if (attr)
+ priv->info = attr->data;
+
+ priv->dev = &pdev->dev;
+
+ mutex_init(&priv->lock);
+ priv->stream_count = 0;
+
+ ret = rcsi2_probe_resources(priv, pdev);
+ if (ret) {
+ dev_err(priv->dev, "Failed to get resources\n");
+ goto error_mutex;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = rcsi2_parse_dt(priv);
+ if (ret)
+ goto error_mutex;
+
+ priv->subdev.owner = THIS_MODULE;
+ priv->subdev.dev = &pdev->dev;
+ v4l2_subdev_init(&priv->subdev, &rcar_csi2_subdev_ops);
+ v4l2_set_subdevdata(&priv->subdev, &pdev->dev);
+ snprintf(priv->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s %s",
+ KBUILD_MODNAME, dev_name(&pdev->dev));
+ priv->subdev.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ priv->subdev.entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
+ priv->subdev.entity.ops = &rcar_csi2_entity_ops;
+
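+ /*
+ * When the CSI-2 feeds an ISP there is a single source pad,
+ * otherwise there is one source pad per virtual channel.
+ */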
+ num_pads = priv->info->use_isp ? 2 : NR_OF_RCAR_CSI2_PAD;
+
+ priv->pads[RCAR_CSI2_SINK].flags = MEDIA_PAD_FL_SINK;
+ for (i = RCAR_CSI2_SOURCE_VC0; i < num_pads; i++)
+ priv->pads[i].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&priv->subdev.entity, num_pads,
+ priv->pads);
+ if (ret)
+ goto error_async;
+
+ for (i = 0; i < ARRAY_SIZE(priv->channel_vc); i++)
+ priv->channel_vc[i] = -1;
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = v4l2_async_register_subdev(&priv->subdev);
+ if (ret < 0)
+ goto error_async;
+
+ dev_info(priv->dev, "%d lanes found\n", priv->lanes);
+
+ return 0;
+
+error_async:
+ v4l2_async_nf_unregister(&priv->notifier);
+ v4l2_async_nf_cleanup(&priv->notifier);
+error_mutex:
+ mutex_destroy(&priv->lock);
+
+ return ret;
+}
+
+static void rcsi2_remove(struct platform_device *pdev)
+{
+ struct rcar_csi2 *priv = platform_get_drvdata(pdev);
+
+ v4l2_async_nf_unregister(&priv->notifier);
+ v4l2_async_nf_cleanup(&priv->notifier);
+ v4l2_async_unregister_subdev(&priv->subdev);
+
+ pm_runtime_disable(&pdev->dev);
+
+ mutex_destroy(&priv->lock);
+}
+
+static struct platform_driver rcar_csi2_pdrv = {
+ .remove_new = rcsi2_remove,
+ .probe = rcsi2_probe,
+ .driver = {
+ .name = "rcar-csi2",
+ .suppress_bind_attrs = true,
+ .of_match_table = rcar_csi2_of_table,
+ },
+};
+
+module_platform_driver(rcar_csi2_pdrv);
+
+MODULE_AUTHOR("Niklas Söderlund <niklas.soderlund@ragnatech.se>");
+MODULE_DESCRIPTION("Renesas R-Car MIPI CSI-2 receiver driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-dma.c b/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
new file mode 100644
index 0000000000..2a77353f10
--- /dev/null
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
@@ -0,0 +1,1672 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for Renesas R-Car VIN
+ *
+ * Copyright (C) 2016 Renesas Electronics Corp.
+ * Copyright (C) 2011-2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Cogent Embedded, Inc., <source@cogentembedded.com>
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * Based on the soc-camera rcar_vin driver
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
+
+#include <media/videobuf2-dma-contig.h>
+
+#include "rcar-vin.h"
+
+/* -----------------------------------------------------------------------------
+ * HW Functions
+ */
+
+/* Register offsets for R-Car VIN */
+#define VNMC_REG 0x00 /* Video n Main Control Register */
+#define VNMS_REG 0x04 /* Video n Module Status Register */
+#define VNFC_REG 0x08 /* Video n Frame Capture Register */
+#define VNSLPRC_REG 0x0C /* Video n Start Line Pre-Clip Register */
+#define VNELPRC_REG 0x10 /* Video n End Line Pre-Clip Register */
+#define VNSPPRC_REG 0x14 /* Video n Start Pixel Pre-Clip Register */
+#define VNEPPRC_REG 0x18 /* Video n End Pixel Pre-Clip Register */
+#define VNIS_REG 0x2C /* Video n Image Stride Register */
+#define VNMB_REG(m) (0x30 + ((m) << 2)) /* Video n Memory Base m Register */
+#define VNIE_REG 0x40 /* Video n Interrupt Enable Register */
+#define VNINTS_REG 0x44 /* Video n Interrupt Status Register */
+#define VNSI_REG 0x48 /* Video n Scanline Interrupt Register */
+#define VNMTC_REG 0x4C /* Video n Memory Transfer Control Register */
+#define VNDMR_REG 0x58 /* Video n Data Mode Register */
+#define VNDMR2_REG 0x5C /* Video n Data Mode Register 2 */
+#define VNUVAOF_REG 0x60 /* Video n UV Address Offset Register */
+
+/* Register offsets specific for Gen2 */
+#define VNSLPOC_REG 0x1C /* Video n Start Line Post-Clip Register */
+#define VNELPOC_REG 0x20 /* Video n End Line Post-Clip Register */
+#define VNSPPOC_REG 0x24 /* Video n Start Pixel Post-Clip Register */
+#define VNEPPOC_REG 0x28 /* Video n End Pixel Post-Clip Register */
+#define VNYS_REG 0x50 /* Video n Y Scale Register */
+#define VNXS_REG 0x54 /* Video n X Scale Register */
+#define VNC1A_REG 0x80 /* Video n Coefficient Set C1A Register */
+#define VNC1B_REG 0x84 /* Video n Coefficient Set C1B Register */
+#define VNC1C_REG 0x88 /* Video n Coefficient Set C1C Register */
+#define VNC2A_REG 0x90 /* Video n Coefficient Set C2A Register */
+#define VNC2B_REG 0x94 /* Video n Coefficient Set C2B Register */
+#define VNC2C_REG 0x98 /* Video n Coefficient Set C2C Register */
+#define VNC3A_REG 0xA0 /* Video n Coefficient Set C3A Register */
+#define VNC3B_REG 0xA4 /* Video n Coefficient Set C3B Register */
+#define VNC3C_REG 0xA8 /* Video n Coefficient Set C3C Register */
+#define VNC4A_REG 0xB0 /* Video n Coefficient Set C4A Register */
+#define VNC4B_REG 0xB4 /* Video n Coefficient Set C4B Register */
+#define VNC4C_REG 0xB8 /* Video n Coefficient Set C4C Register */
+#define VNC5A_REG 0xC0 /* Video n Coefficient Set C5A Register */
+#define VNC5B_REG 0xC4 /* Video n Coefficient Set C5B Register */
+#define VNC5C_REG 0xC8 /* Video n Coefficient Set C5C Register */
+#define VNC6A_REG 0xD0 /* Video n Coefficient Set C6A Register */
+#define VNC6B_REG 0xD4 /* Video n Coefficient Set C6B Register */
+#define VNC6C_REG 0xD8 /* Video n Coefficient Set C6C Register */
+#define VNC7A_REG 0xE0 /* Video n Coefficient Set C7A Register */
+#define VNC7B_REG 0xE4 /* Video n Coefficient Set C7B Register */
+#define VNC7C_REG 0xE8 /* Video n Coefficient Set C7C Register */
+#define VNC8A_REG 0xF0 /* Video n Coefficient Set C8A Register */
+#define VNC8B_REG 0xF4 /* Video n Coefficient Set C8B Register */
+#define VNC8C_REG 0xF8 /* Video n Coefficient Set C8C Register */
+
+/* Register offsets specific for Gen3 */
+#define VNCSI_IFMD_REG 0x20 /* Video n CSI2 Interface Mode Register */
+#define VNUDS_CTRL_REG 0x80 /* Video n scaling control register */
+#define VNUDS_SCALE_REG 0x84 /* Video n scaling factor register */
+#define VNUDS_PASS_BWIDTH_REG 0x90 /* Video n passband register */
+#define VNUDS_CLIP_SIZE_REG 0xa4 /* Video n UDS output size clipping reg */
+
+/* Register bit fields for R-Car VIN */
+/* Video n Main Control Register bits */
+#define VNMC_INF_MASK (7 << 16)
+#define VNMC_DPINE (1 << 27) /* Gen3 specific */
+#define VNMC_SCLE (1 << 26) /* Gen3 specific */
+#define VNMC_FOC (1 << 21)
+#define VNMC_YCAL (1 << 19)
+#define VNMC_INF_YUV8_BT656 (0 << 16)
+#define VNMC_INF_YUV8_BT601 (1 << 16)
+#define VNMC_INF_YUV10_BT656 (2 << 16)
+#define VNMC_INF_YUV10_BT601 (3 << 16)
+#define VNMC_INF_RAW8 (4 << 16)
+#define VNMC_INF_YUV16 (5 << 16)
+#define VNMC_INF_RGB888 (6 << 16)
+#define VNMC_INF_RGB666 (7 << 16)
+#define VNMC_VUP (1 << 10)
+#define VNMC_IM_ODD (0 << 3)
+#define VNMC_IM_ODD_EVEN (1 << 3)
+#define VNMC_IM_EVEN (2 << 3)
+#define VNMC_IM_FULL (3 << 3)
+#define VNMC_BPS (1 << 1)
+#define VNMC_ME (1 << 0)
+
+/* Video n Module Status Register bits */
+#define VNMS_FBS_MASK (3 << 3)
+#define VNMS_FBS_SHIFT 3
+#define VNMS_FS (1 << 2)
+#define VNMS_AV (1 << 1)
+#define VNMS_CA (1 << 0)
+
+/* Video n Frame Capture Register bits */
+#define VNFC_C_FRAME (1 << 1)
+#define VNFC_S_FRAME (1 << 0)
+
+/* Video n Interrupt Enable Register bits */
+#define VNIE_FIE (1 << 4)
+#define VNIE_EFE (1 << 1)
+
+/* Video n Interrupt Status Register bits */
+#define VNINTS_FIS (1 << 4)
+
+/* Video n Data Mode Register bits */
+#define VNDMR_A8BIT(n) (((n) & 0xff) << 24)
+#define VNDMR_A8BIT_MASK (0xff << 24)
+#define VNDMR_YMODE_Y8 (1 << 12)
+#define VNDMR_EXRGB (1 << 8)
+#define VNDMR_BPSM (1 << 4)
+#define VNDMR_ABIT (1 << 2)
+#define VNDMR_DTMD_YCSEP (1 << 1)
+#define VNDMR_DTMD_ARGB (1 << 0)
+#define VNDMR_DTMD_YCSEP_420 (3 << 0)
+
+/* Video n Data Mode Register 2 bits */
+#define VNDMR2_VPS (1 << 30)
+#define VNDMR2_HPS (1 << 29)
+#define VNDMR2_CES (1 << 28)
+#define VNDMR2_YDS (1 << 22)
+#define VNDMR2_FTEV (1 << 17)
+#define VNDMR2_VLV(n) ((n & 0xf) << 12)
+
+/* Video n CSI2 Interface Mode Register (Gen3) */
+#define VNCSI_IFMD_DES1 (1 << 26)
+#define VNCSI_IFMD_DES0 (1 << 25)
+#define VNCSI_IFMD_CSI_CHSEL(n) (((n) & 0xf) << 0)
+
+/* Video n scaling control register (Gen3) */
+#define VNUDS_CTRL_AMD (1 << 30)
+
+struct rvin_buffer {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+};
+
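+/*
+ * Resolve a queued vb2_v4l2_buffer pointer to the list head of the
+ * struct rvin_buffer that embeds it, so the buffer can be linked on
+ * and removed from vin->buf_list.
+ */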
+#define to_buf_list(vb2_buffer) (&container_of(vb2_buffer, \
+ struct rvin_buffer, \
+ vb)->list)
+
+static void rvin_write(struct rvin_dev *vin, u32 value, u32 offset)
+{
+ iowrite32(value, vin->base + offset);
+}
+
+static u32 rvin_read(struct rvin_dev *vin, u32 offset)
+{
+ return ioread32(vin->base + offset);
+}
+
+/* -----------------------------------------------------------------------------
+ * Crop and Scaling
+ */
+
+static bool rvin_scaler_needed(const struct rvin_dev *vin)
+{
+ return !(vin->crop.width == vin->format.width &&
+ vin->compose.width == vin->format.width &&
+ vin->crop.height == vin->format.height &&
+ vin->compose.height == vin->format.height);
+}
+
+struct vin_coeff {
+ unsigned short xs_value;
+ u32 coeff_set[24];
+};
+
+static const struct vin_coeff vin_coeff_set[] = {
+ { 0x0000, {
+ 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000 },
+ },
+ { 0x1000, {
+ 0x000fa400, 0x000fa400, 0x09625902,
+ 0x000003f8, 0x00000403, 0x3de0d9f0,
+ 0x001fffed, 0x00000804, 0x3cc1f9c3,
+ 0x001003de, 0x00000c01, 0x3cb34d7f,
+ 0x002003d2, 0x00000c00, 0x3d24a92d,
+ 0x00200bca, 0x00000bff, 0x3df600d2,
+ 0x002013cc, 0x000007ff, 0x3ed70c7e,
+ 0x00100fde, 0x00000000, 0x3f87c036 },
+ },
+ { 0x1200, {
+ 0x002ffff1, 0x002ffff1, 0x02a0a9c8,
+ 0x002003e7, 0x001ffffa, 0x000185bc,
+ 0x002007dc, 0x000003ff, 0x3e52859c,
+ 0x00200bd4, 0x00000002, 0x3d53996b,
+ 0x00100fd0, 0x00000403, 0x3d04ad2d,
+ 0x00000bd5, 0x00000403, 0x3d35ace7,
+ 0x3ff003e4, 0x00000801, 0x3dc674a1,
+ 0x3fffe800, 0x00000800, 0x3e76f461 },
+ },
+ { 0x1400, {
+ 0x00100be3, 0x00100be3, 0x04d1359a,
+ 0x00000fdb, 0x002003ed, 0x0211fd93,
+ 0x00000fd6, 0x002003f4, 0x0002d97b,
+ 0x000007d6, 0x002ffffb, 0x3e93b956,
+ 0x3ff003da, 0x001003ff, 0x3db49926,
+ 0x3fffefe9, 0x00100001, 0x3d655cee,
+ 0x3fffd400, 0x00000003, 0x3d65f4b6,
+ 0x000fb421, 0x00000402, 0x3dc6547e },
+ },
+ { 0x1600, {
+ 0x00000bdd, 0x00000bdd, 0x06519578,
+ 0x3ff007da, 0x00000be3, 0x03c24973,
+ 0x3ff003d9, 0x00000be9, 0x01b30d5f,
+ 0x3ffff7df, 0x001003f1, 0x0003c542,
+ 0x000fdfec, 0x001003f7, 0x3ec4711d,
+ 0x000fc400, 0x002ffffd, 0x3df504f1,
+ 0x001fa81a, 0x002ffc00, 0x3d957cc2,
+ 0x002f8c3c, 0x00100000, 0x3db5c891 },
+ },
+ { 0x1800, {
+ 0x3ff003dc, 0x3ff003dc, 0x0791e558,
+ 0x000ff7dd, 0x3ff007de, 0x05328554,
+ 0x000fe7e3, 0x3ff00be2, 0x03232546,
+ 0x000fd7ee, 0x000007e9, 0x0143bd30,
+ 0x001fb800, 0x000007ee, 0x00044511,
+ 0x002fa015, 0x000007f4, 0x3ef4bcee,
+ 0x002f8832, 0x001003f9, 0x3e4514c7,
+ 0x001f7853, 0x001003fd, 0x3de54c9f },
+ },
+ { 0x1a00, {
+ 0x000fefe0, 0x000fefe0, 0x08721d3c,
+ 0x001fdbe7, 0x000ffbde, 0x0652a139,
+ 0x001fcbf0, 0x000003df, 0x0463292e,
+ 0x002fb3ff, 0x3ff007e3, 0x0293a91d,
+ 0x002f9c12, 0x3ff00be7, 0x01241905,
+ 0x001f8c29, 0x000007ed, 0x3fe470eb,
+ 0x000f7c46, 0x000007f2, 0x3f04b8ca,
+ 0x3fef7865, 0x000007f6, 0x3e74e4a8 },
+ },
+ { 0x1c00, {
+ 0x001fd3e9, 0x001fd3e9, 0x08f23d26,
+ 0x002fbff3, 0x001fe3e4, 0x0712ad23,
+ 0x002fa800, 0x000ff3e0, 0x05631d1b,
+ 0x001f9810, 0x000ffbe1, 0x03b3890d,
+ 0x000f8c23, 0x000003e3, 0x0233e8fa,
+ 0x3fef843b, 0x000003e7, 0x00f430e4,
+ 0x3fbf8456, 0x3ff00bea, 0x00046cc8,
+ 0x3f8f8c72, 0x3ff00bef, 0x3f3490ac },
+ },
+ { 0x1e00, {
+ 0x001fbbf4, 0x001fbbf4, 0x09425112,
+ 0x001fa800, 0x002fc7ed, 0x0792b110,
+ 0x000f980e, 0x001fdbe6, 0x0613110a,
+ 0x3fff8c20, 0x001fe7e3, 0x04a368fd,
+ 0x3fcf8c33, 0x000ff7e2, 0x0343b8ed,
+ 0x3f9f8c4a, 0x000fffe3, 0x0203f8da,
+ 0x3f5f9c61, 0x000003e6, 0x00e428c5,
+ 0x3f1fb07b, 0x000003eb, 0x3fe440af },
+ },
+ { 0x2000, {
+ 0x000fa400, 0x000fa400, 0x09625902,
+ 0x3fff980c, 0x001fb7f5, 0x0812b0ff,
+ 0x3fdf901c, 0x001fc7ed, 0x06b2fcfa,
+ 0x3faf902d, 0x001fd3e8, 0x055348f1,
+ 0x3f7f983f, 0x001fe3e5, 0x04038ce3,
+ 0x3f3fa454, 0x001fefe3, 0x02e3c8d1,
+ 0x3f0fb86a, 0x001ff7e4, 0x01c3e8c0,
+ 0x3ecfd880, 0x000fffe6, 0x00c404ac },
+ },
+ { 0x2200, {
+ 0x3fdf9c0b, 0x3fdf9c0b, 0x09725cf4,
+ 0x3fbf9818, 0x3fffa400, 0x0842a8f1,
+ 0x3f8f9827, 0x000fb3f7, 0x0702f0ec,
+ 0x3f5fa037, 0x000fc3ef, 0x05d330e4,
+ 0x3f2fac49, 0x001fcfea, 0x04a364d9,
+ 0x3effc05c, 0x001fdbe7, 0x038394ca,
+ 0x3ecfdc6f, 0x001fe7e6, 0x0273b0bb,
+ 0x3ea00083, 0x001fefe6, 0x0183c0a9 },
+ },
+ { 0x2400, {
+ 0x3f9fa014, 0x3f9fa014, 0x098260e6,
+ 0x3f7f9c23, 0x3fcf9c0a, 0x08629ce5,
+ 0x3f4fa431, 0x3fefa400, 0x0742d8e1,
+ 0x3f1fb440, 0x3fffb3f8, 0x062310d9,
+ 0x3eefc850, 0x000fbbf2, 0x050340d0,
+ 0x3ecfe062, 0x000fcbec, 0x041364c2,
+ 0x3ea00073, 0x001fd3ea, 0x03037cb5,
+ 0x3e902086, 0x001fdfe8, 0x022388a5 },
+ },
+ { 0x2600, {
+ 0x3f5fa81e, 0x3f5fa81e, 0x096258da,
+ 0x3f3fac2b, 0x3f8fa412, 0x088290d8,
+ 0x3f0fbc38, 0x3fafa408, 0x0772c8d5,
+ 0x3eefcc47, 0x3fcfa800, 0x0672f4ce,
+ 0x3ecfe456, 0x3fefaffa, 0x05531cc6,
+ 0x3eb00066, 0x3fffbbf3, 0x047334bb,
+ 0x3ea01c77, 0x000fc7ee, 0x039348ae,
+ 0x3ea04486, 0x000fd3eb, 0x02b350a1 },
+ },
+ { 0x2800, {
+ 0x3f2fb426, 0x3f2fb426, 0x094250ce,
+ 0x3f0fc032, 0x3f4fac1b, 0x086284cd,
+ 0x3eefd040, 0x3f7fa811, 0x0782acc9,
+ 0x3ecfe84c, 0x3f9fa807, 0x06a2d8c4,
+ 0x3eb0005b, 0x3fbfac00, 0x05b2f4bc,
+ 0x3eb0186a, 0x3fdfb3fa, 0x04c308b4,
+ 0x3eb04077, 0x3fefbbf4, 0x03f31ca8,
+ 0x3ec06884, 0x000fbff2, 0x03031c9e },
+ },
+ { 0x2a00, {
+ 0x3f0fc42d, 0x3f0fc42d, 0x090240c4,
+ 0x3eefd439, 0x3f2fb822, 0x08526cc2,
+ 0x3edfe845, 0x3f4fb018, 0x078294bf,
+ 0x3ec00051, 0x3f6fac0f, 0x06b2b4bb,
+ 0x3ec0185f, 0x3f8fac07, 0x05e2ccb4,
+ 0x3ec0386b, 0x3fafac00, 0x0502e8ac,
+ 0x3ed05c77, 0x3fcfb3fb, 0x0432f0a3,
+ 0x3ef08482, 0x3fdfbbf6, 0x0372f898 },
+ },
+ { 0x2c00, {
+ 0x3eefdc31, 0x3eefdc31, 0x08e238b8,
+ 0x3edfec3d, 0x3f0fc828, 0x082258b9,
+ 0x3ed00049, 0x3f1fc01e, 0x077278b6,
+ 0x3ed01455, 0x3f3fb815, 0x06c294b2,
+ 0x3ed03460, 0x3f5fb40d, 0x0602acac,
+ 0x3ef0506c, 0x3f7fb006, 0x0542c0a4,
+ 0x3f107476, 0x3f9fb400, 0x0472c89d,
+ 0x3f309c80, 0x3fbfb7fc, 0x03b2cc94 },
+ },
+ { 0x2e00, {
+ 0x3eefec37, 0x3eefec37, 0x088220b0,
+ 0x3ee00041, 0x3effdc2d, 0x07f244ae,
+ 0x3ee0144c, 0x3f0fd023, 0x07625cad,
+ 0x3ef02c57, 0x3f1fc81a, 0x06c274a9,
+ 0x3f004861, 0x3f3fbc13, 0x060288a6,
+ 0x3f20686b, 0x3f5fb80c, 0x05529c9e,
+ 0x3f408c74, 0x3f6fb805, 0x04b2ac96,
+ 0x3f80ac7e, 0x3f8fb800, 0x0402ac8e },
+ },
+ { 0x3000, {
+ 0x3ef0003a, 0x3ef0003a, 0x084210a6,
+ 0x3ef01045, 0x3effec32, 0x07b228a7,
+ 0x3f00284e, 0x3f0fdc29, 0x073244a4,
+ 0x3f104058, 0x3f0fd420, 0x06a258a2,
+ 0x3f305c62, 0x3f2fc818, 0x0612689d,
+ 0x3f508069, 0x3f3fc011, 0x05728496,
+ 0x3f80a072, 0x3f4fc00a, 0x04d28c90,
+ 0x3fc0c07b, 0x3f6fbc04, 0x04429088 },
+ },
+ { 0x3200, {
+ 0x3f00103e, 0x3f00103e, 0x07f1fc9e,
+ 0x3f102447, 0x3f000035, 0x0782149d,
+ 0x3f203c4f, 0x3f0ff02c, 0x07122c9c,
+ 0x3f405458, 0x3f0fe424, 0x06924099,
+ 0x3f607061, 0x3f1fd41d, 0x06024c97,
+ 0x3f909068, 0x3f2fcc16, 0x05726490,
+ 0x3fc0b070, 0x3f3fc80f, 0x04f26c8a,
+ 0x0000d077, 0x3f4fc409, 0x04627484 },
+ },
+ { 0x3400, {
+ 0x3f202040, 0x3f202040, 0x07a1e898,
+ 0x3f303449, 0x3f100c38, 0x0741fc98,
+ 0x3f504c50, 0x3f10002f, 0x06e21495,
+ 0x3f706459, 0x3f1ff028, 0x06722492,
+ 0x3fa08060, 0x3f1fe421, 0x05f2348f,
+ 0x3fd09c67, 0x3f1fdc19, 0x05824c89,
+ 0x0000bc6e, 0x3f2fd014, 0x04f25086,
+ 0x0040dc74, 0x3f3fcc0d, 0x04825c7f },
+ },
+ { 0x3600, {
+ 0x3f403042, 0x3f403042, 0x0761d890,
+ 0x3f504848, 0x3f301c3b, 0x0701f090,
+ 0x3f805c50, 0x3f200c33, 0x06a2008f,
+ 0x3fa07458, 0x3f10002b, 0x06520c8d,
+ 0x3fd0905e, 0x3f1ff424, 0x05e22089,
+ 0x0000ac65, 0x3f1fe81d, 0x05823483,
+ 0x0030cc6a, 0x3f2fdc18, 0x04f23c81,
+ 0x0080e871, 0x3f2fd412, 0x0482407c },
+ },
+ { 0x3800, {
+ 0x3f604043, 0x3f604043, 0x0721c88a,
+ 0x3f80544a, 0x3f502c3c, 0x06d1d88a,
+ 0x3fb06851, 0x3f301c35, 0x0681e889,
+ 0x3fd08456, 0x3f30082f, 0x0611fc88,
+ 0x00009c5d, 0x3f200027, 0x05d20884,
+ 0x0030b863, 0x3f2ff421, 0x05621880,
+ 0x0070d468, 0x3f2fe81b, 0x0502247c,
+ 0x00c0ec6f, 0x3f2fe015, 0x04a22877 },
+ },
+ { 0x3a00, {
+ 0x3f904c44, 0x3f904c44, 0x06e1b884,
+ 0x3fb0604a, 0x3f70383e, 0x0691c885,
+ 0x3fe07451, 0x3f502c36, 0x0661d483,
+ 0x00009055, 0x3f401831, 0x0601ec81,
+ 0x0030a85b, 0x3f300c2a, 0x05b1f480,
+ 0x0070c061, 0x3f300024, 0x0562047a,
+ 0x00b0d867, 0x3f3ff41e, 0x05020c77,
+ 0x00f0f46b, 0x3f2fec19, 0x04a21474 },
+ },
+ { 0x3c00, {
+ 0x3fb05c43, 0x3fb05c43, 0x06c1b07e,
+ 0x3fe06c4b, 0x3f902c3f, 0x0681c081,
+ 0x0000844f, 0x3f703838, 0x0631cc7d,
+ 0x00309855, 0x3f602433, 0x05d1d47e,
+ 0x0060b459, 0x3f50142e, 0x0581e47b,
+ 0x00a0c85f, 0x3f400828, 0x0531f078,
+ 0x00e0e064, 0x3f300021, 0x0501fc73,
+ 0x00b0fc6a, 0x3f3ff41d, 0x04a20873 },
+ },
+ { 0x3e00, {
+ 0x3fe06444, 0x3fe06444, 0x0681a07a,
+ 0x00007849, 0x3fc0503f, 0x0641b07a,
+ 0x0020904d, 0x3fa0403a, 0x05f1c07a,
+ 0x0060a453, 0x3f803034, 0x05c1c878,
+ 0x0090b858, 0x3f70202f, 0x0571d477,
+ 0x00d0d05d, 0x3f501829, 0x0531e073,
+ 0x0110e462, 0x3f500825, 0x04e1e471,
+ 0x01510065, 0x3f40001f, 0x04a1f06d },
+ },
+ { 0x4000, {
+ 0x00007044, 0x00007044, 0x06519476,
+ 0x00208448, 0x3fe05c3f, 0x0621a476,
+ 0x0050984d, 0x3fc04c3a, 0x05e1b075,
+ 0x0080ac52, 0x3fa03c35, 0x05a1b875,
+ 0x00c0c056, 0x3f803030, 0x0561c473,
+ 0x0100d45b, 0x3f70202b, 0x0521d46f,
+ 0x0140e860, 0x3f601427, 0x04d1d46e,
+ 0x01810064, 0x3f500822, 0x0491dc6b },
+ },
+ { 0x5000, {
+ 0x0110a442, 0x0110a442, 0x0551545e,
+ 0x0140b045, 0x00e0983f, 0x0531585f,
+ 0x0160c047, 0x00c08c3c, 0x0511645e,
+ 0x0190cc4a, 0x00908039, 0x04f1685f,
+ 0x01c0dc4c, 0x00707436, 0x04d1705e,
+ 0x0200e850, 0x00506833, 0x04b1785b,
+ 0x0230f453, 0x00305c30, 0x0491805a,
+ 0x02710056, 0x0010542d, 0x04718059 },
+ },
+ { 0x6000, {
+ 0x01c0bc40, 0x01c0bc40, 0x04c13052,
+ 0x01e0c841, 0x01a0b43d, 0x04c13851,
+ 0x0210cc44, 0x0180a83c, 0x04a13453,
+ 0x0230d845, 0x0160a03a, 0x04913c52,
+ 0x0260e047, 0x01409838, 0x04714052,
+ 0x0280ec49, 0x01208c37, 0x04514c50,
+ 0x02b0f44b, 0x01008435, 0x04414c50,
+ 0x02d1004c, 0x00e07c33, 0x0431544f },
+ },
+ { 0x7000, {
+ 0x0230c83e, 0x0230c83e, 0x04711c4c,
+ 0x0250d03f, 0x0210c43c, 0x0471204b,
+ 0x0270d840, 0x0200b83c, 0x0451244b,
+ 0x0290dc42, 0x01e0b43a, 0x0441244c,
+ 0x02b0e443, 0x01c0b038, 0x0441284b,
+ 0x02d0ec44, 0x01b0a438, 0x0421304a,
+ 0x02f0f445, 0x0190a036, 0x04213449,
+ 0x0310f847, 0x01709c34, 0x04213848 },
+ },
+ { 0x8000, {
+ 0x0280d03d, 0x0280d03d, 0x04310c48,
+ 0x02a0d43e, 0x0270c83c, 0x04311047,
+ 0x02b0dc3e, 0x0250c83a, 0x04311447,
+ 0x02d0e040, 0x0240c03a, 0x04211446,
+ 0x02e0e840, 0x0220bc39, 0x04111847,
+ 0x0300e842, 0x0210b438, 0x04012445,
+ 0x0310f043, 0x0200b037, 0x04012045,
+ 0x0330f444, 0x01e0ac36, 0x03f12445 },
+ },
+ { 0xefff, {
+ 0x0340dc3a, 0x0340dc3a, 0x03b0ec40,
+ 0x0340e03a, 0x0330e039, 0x03c0f03e,
+ 0x0350e03b, 0x0330dc39, 0x03c0ec3e,
+ 0x0350e43a, 0x0320dc38, 0x03c0f43e,
+ 0x0360e43b, 0x0320d839, 0x03b0f03e,
+ 0x0360e83b, 0x0310d838, 0x03c0fc3b,
+ 0x0370e83b, 0x0310d439, 0x03a0f83d,
+ 0x0370e83c, 0x0300d438, 0x03b0fc3c },
+ }
+};
+
+static void rvin_set_coeff(struct rvin_dev *vin, unsigned short xs)
+{
+ int i;
+ const struct vin_coeff *p_prev_set = NULL;
+ const struct vin_coeff *p_set = NULL;
+
+ /* Look for suitable coefficient values */
+ for (i = 0; i < ARRAY_SIZE(vin_coeff_set); i++) {
+ p_prev_set = p_set;
+ p_set = &vin_coeff_set[i];
+
+ if (xs < p_set->xs_value)
+ break;
+ }
+
+ /* Use previous value if its XS value is closer */
+ if (p_prev_set &&
+ xs - p_prev_set->xs_value < p_set->xs_value - xs)
+ p_set = p_prev_set;
+
+ /* Set coefficient registers */
+ rvin_write(vin, p_set->coeff_set[0], VNC1A_REG);
+ rvin_write(vin, p_set->coeff_set[1], VNC1B_REG);
+ rvin_write(vin, p_set->coeff_set[2], VNC1C_REG);
+
+ rvin_write(vin, p_set->coeff_set[3], VNC2A_REG);
+ rvin_write(vin, p_set->coeff_set[4], VNC2B_REG);
+ rvin_write(vin, p_set->coeff_set[5], VNC2C_REG);
+
+ rvin_write(vin, p_set->coeff_set[6], VNC3A_REG);
+ rvin_write(vin, p_set->coeff_set[7], VNC3B_REG);
+ rvin_write(vin, p_set->coeff_set[8], VNC3C_REG);
+
+ rvin_write(vin, p_set->coeff_set[9], VNC4A_REG);
+ rvin_write(vin, p_set->coeff_set[10], VNC4B_REG);
+ rvin_write(vin, p_set->coeff_set[11], VNC4C_REG);
+
+ rvin_write(vin, p_set->coeff_set[12], VNC5A_REG);
+ rvin_write(vin, p_set->coeff_set[13], VNC5B_REG);
+ rvin_write(vin, p_set->coeff_set[14], VNC5C_REG);
+
+ rvin_write(vin, p_set->coeff_set[15], VNC6A_REG);
+ rvin_write(vin, p_set->coeff_set[16], VNC6B_REG);
+ rvin_write(vin, p_set->coeff_set[17], VNC6C_REG);
+
+ rvin_write(vin, p_set->coeff_set[18], VNC7A_REG);
+ rvin_write(vin, p_set->coeff_set[19], VNC7B_REG);
+ rvin_write(vin, p_set->coeff_set[20], VNC7C_REG);
+
+ rvin_write(vin, p_set->coeff_set[21], VNC8A_REG);
+ rvin_write(vin, p_set->coeff_set[22], VNC8B_REG);
+ rvin_write(vin, p_set->coeff_set[23], VNC8C_REG);
+}
+
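+/*
+ * Gen2 scaling factors are expressed in units of 1/4096, e.g. cropping
+ * 1280 pixels and composing to 640 programs XS = 1280 * 4096 / 640 =
+ * 0x2000, i.e. a 2:1 horizontal downscale.
+ */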
+void rvin_scaler_gen2(struct rvin_dev *vin)
+{
+ unsigned int crop_height;
+ u32 xs, ys;
+
+ /* Set scaling coefficient */
+ crop_height = vin->crop.height;
+ if (V4L2_FIELD_HAS_BOTH(vin->format.field))
+ crop_height *= 2;
+
+ ys = 0;
+ if (crop_height != vin->compose.height)
+ ys = (4096 * crop_height) / vin->compose.height;
+ rvin_write(vin, ys, VNYS_REG);
+
+ xs = 0;
+ if (vin->crop.width != vin->compose.width)
+ xs = (4096 * vin->crop.width) / vin->compose.width;
+
+ /* Horizontal upscaling is up to double size */
+ if (xs > 0 && xs < 2048)
+ xs = 2048;
+
+ rvin_write(vin, xs, VNXS_REG);
+
+ /* Horizontal upscaling is carried out by scaling down from double size */
+ if (xs < 4096)
+ xs *= 2;
+
+ rvin_set_coeff(vin, xs);
+
+ /* Set Start/End Pixel/Line Post-Clip */
+ rvin_write(vin, 0, VNSPPOC_REG);
+ rvin_write(vin, 0, VNSLPOC_REG);
+ rvin_write(vin, vin->format.width - 1, VNEPPOC_REG);
+
+ if (V4L2_FIELD_HAS_BOTH(vin->format.field))
+ rvin_write(vin, vin->format.height / 2 - 1, VNELPOC_REG);
+ else
+ rvin_write(vin, vin->format.height - 1, VNELPOC_REG);
+
+ vin_dbg(vin,
+ "Pre-Clip: %ux%u@%u:%u YS: %d XS: %d Post-Clip: %ux%u@%u:%u\n",
+ vin->crop.width, vin->crop.height, vin->crop.left,
+ vin->crop.top, ys, xs, vin->format.width, vin->format.height,
+ 0, 0);
+}
+
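+/*
+ * The scale ratio uses 12 fractional bits, e.g. scaling a 1920 pixel wide
+ * crop down to a 1280 pixel compose gives 1920 * 4096 / 1280 = 0x1800
+ * (a ratio of 1.5); values are clamped to 0xffff.
+ */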
+static unsigned int rvin_uds_scale_ratio(unsigned int in, unsigned int out)
+{
+ unsigned int ratio;
+
+ ratio = in * 4096 / out;
+ return ratio >= 0x10000 ? 0xffff : ratio;
+}
+
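+/*
+ * The passband width shrinks as the downscale ratio grows, e.g. a ratio
+ * of 0x1800 gives 64 * 0x1000 / 0x1800 = 42; ratios below 0x1000
+ * (upscaling) keep the full width of 64.
+ */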
+static unsigned int rvin_uds_filter_width(unsigned int ratio)
+{
+ if (ratio >= 0x1000)
+ return 64 * (ratio & 0xf000) / ratio;
+
+ return 64;
+}
+
+void rvin_scaler_gen3(struct rvin_dev *vin)
+{
+ unsigned int ratio_h, ratio_v;
+ unsigned int bwidth_h, bwidth_v;
+ u32 vnmc, clip_size;
+
+ vnmc = rvin_read(vin, VNMC_REG);
+
+ /* Disable scaler if not needed. */
+ if (!rvin_scaler_needed(vin)) {
+ rvin_write(vin, vnmc & ~VNMC_SCLE, VNMC_REG);
+ return;
+ }
+
+ ratio_h = rvin_uds_scale_ratio(vin->crop.width, vin->compose.width);
+ bwidth_h = rvin_uds_filter_width(ratio_h);
+
+ ratio_v = rvin_uds_scale_ratio(vin->crop.height, vin->compose.height);
+ bwidth_v = rvin_uds_filter_width(ratio_v);
+
+ clip_size = vin->compose.width << 16;
+
+ switch (vin->format.field) {
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ case V4L2_FIELD_INTERLACED:
+ case V4L2_FIELD_SEQ_TB:
+ case V4L2_FIELD_SEQ_BT:
+ clip_size |= vin->compose.height / 2;
+ break;
+ default:
+ clip_size |= vin->compose.height;
+ break;
+ }
+
+ rvin_write(vin, vnmc | VNMC_SCLE, VNMC_REG);
+ rvin_write(vin, VNUDS_CTRL_AMD, VNUDS_CTRL_REG);
+ rvin_write(vin, (ratio_h << 16) | ratio_v, VNUDS_SCALE_REG);
+ rvin_write(vin, (bwidth_h << 16) | bwidth_v, VNUDS_PASS_BWIDTH_REG);
+ rvin_write(vin, clip_size, VNUDS_CLIP_SIZE_REG);
+
+ vin_dbg(vin, "Pre-Clip: %ux%u@%u:%u Post-Clip: %ux%u@%u:%u\n",
+ vin->crop.width, vin->crop.height, vin->crop.left,
+ vin->crop.top, vin->compose.width, vin->compose.height,
+ vin->compose.left, vin->compose.top);
+}
+
+void rvin_crop_scale_comp(struct rvin_dev *vin)
+{
+ const struct rvin_video_format *fmt;
+ u32 stride;
+
+ /* Set Start/End Pixel/Line Pre-Clip */
+ rvin_write(vin, vin->crop.left, VNSPPRC_REG);
+ rvin_write(vin, vin->crop.left + vin->crop.width - 1, VNEPPRC_REG);
+ rvin_write(vin, vin->crop.top, VNSLPRC_REG);
+ rvin_write(vin, vin->crop.top + vin->crop.height - 1, VNELPRC_REG);
+
+ if (vin->scaler)
+ vin->scaler(vin);
+
+ fmt = rvin_format_from_pixel(vin, vin->format.pixelformat);
+ stride = vin->format.bytesperline / fmt->bpp;
+
+ /*
+ * For RAW8 formats bpp is 1, but the hardware processes RAW8 in
+ * 2-pixel units, hence configure VNIS_REG as stride / 2.
+ */
+ switch (vin->format.pixelformat) {
+ case V4L2_PIX_FMT_SBGGR8:
+ case V4L2_PIX_FMT_SGBRG8:
+ case V4L2_PIX_FMT_SGRBG8:
+ case V4L2_PIX_FMT_SRGGB8:
+ case V4L2_PIX_FMT_GREY:
+ stride /= 2;
+ break;
+ default:
+ break;
+ }
+
+ rvin_write(vin, stride, VNIS_REG);
+}
+
+/* -----------------------------------------------------------------------------
+ * Hardware setup
+ */
+
+static int rvin_setup(struct rvin_dev *vin)
+{
+ u32 vnmc, dmr, dmr2, interrupts;
+ bool progressive = false, output_is_yuv = false, input_is_yuv = false;
+
+ switch (vin->format.field) {
+ case V4L2_FIELD_TOP:
+ vnmc = VNMC_IM_ODD;
+ break;
+ case V4L2_FIELD_BOTTOM:
+ vnmc = VNMC_IM_EVEN;
+ break;
+ case V4L2_FIELD_INTERLACED:
+ /* Default to TB */
+ vnmc = VNMC_IM_FULL;
+ /* Use BT if the video standard can be read and is a 60 Hz format */
+ if (!vin->info->use_mc && vin->std & V4L2_STD_525_60)
+ vnmc = VNMC_IM_FULL | VNMC_FOC;
+ break;
+ case V4L2_FIELD_INTERLACED_TB:
+ vnmc = VNMC_IM_FULL;
+ break;
+ case V4L2_FIELD_INTERLACED_BT:
+ vnmc = VNMC_IM_FULL | VNMC_FOC;
+ break;
+ case V4L2_FIELD_SEQ_TB:
+ case V4L2_FIELD_SEQ_BT:
+ case V4L2_FIELD_NONE:
+ case V4L2_FIELD_ALTERNATE:
+ vnmc = VNMC_IM_ODD_EVEN;
+ progressive = true;
+ break;
+ default:
+ vnmc = VNMC_IM_ODD;
+ break;
+ }
+
+ /*
+ * Input interface
+ */
+ switch (vin->mbus_code) {
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ /* BT.601/BT.1358 16bit YCbCr422 */
+ vnmc |= VNMC_INF_YUV16;
+ input_is_yuv = true;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ vnmc |= VNMC_INF_YUV16 | VNMC_YCAL;
+ input_is_yuv = true;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ /* BT.656 8bit YCbCr422 or BT.601 8bit YCbCr422 */
+ if (!vin->is_csi &&
+ vin->parallel.mbus_type == V4L2_MBUS_BT656)
+ vnmc |= VNMC_INF_YUV8_BT656;
+ else
+ vnmc |= VNMC_INF_YUV8_BT601;
+
+ input_is_yuv = true;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ vnmc |= VNMC_INF_RGB888;
+ break;
+ case MEDIA_BUS_FMT_UYVY10_2X10:
+ /* BT.656 10bit YCbCr422 or BT.601 10bit YCbCr422 */
+ if (!vin->is_csi &&
+ vin->parallel.mbus_type == V4L2_MBUS_BT656)
+ vnmc |= VNMC_INF_YUV10_BT656;
+ else
+ vnmc |= VNMC_INF_YUV10_BT601;
+
+ input_is_yuv = true;
+ break;
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ case MEDIA_BUS_FMT_Y8_1X8:
+ vnmc |= VNMC_INF_RAW8;
+ break;
+ default:
+ break;
+ }
+
+ /* Make sure input interface and input format are valid. */
+ if (vin->info->model == RCAR_GEN3) {
+ switch (vnmc & VNMC_INF_MASK) {
+ case VNMC_INF_YUV8_BT656:
+ case VNMC_INF_YUV10_BT656:
+ case VNMC_INF_YUV16:
+ case VNMC_INF_RGB666:
+ if (vin->is_csi) {
+ vin_err(vin, "Invalid setting in MIPI CSI2\n");
+ return -EINVAL;
+ }
+ break;
+ case VNMC_INF_RAW8:
+ if (!vin->is_csi) {
+ vin_err(vin, "Invalid setting in Digital Pins\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Enable VSYNC Field Toggle mode after one VSYNC input */
+ if (vin->info->model == RCAR_GEN3)
+ dmr2 = VNDMR2_FTEV;
+ else
+ dmr2 = VNDMR2_FTEV | VNDMR2_VLV(1);
+
+ if (!vin->is_csi) {
+ /* Hsync Signal Polarity Select */
+ if (!(vin->parallel.bus.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW))
+ dmr2 |= VNDMR2_HPS;
+
+ /* Vsync Signal Polarity Select */
+ if (!(vin->parallel.bus.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW))
+ dmr2 |= VNDMR2_VPS;
+
+ /* Data Enable Polarity Select */
+ if (vin->parallel.bus.flags & V4L2_MBUS_DATA_ENABLE_LOW)
+ dmr2 |= VNDMR2_CES;
+
+ switch (vin->mbus_code) {
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ if (vin->parallel.bus.bus_width == 8 &&
+ vin->parallel.bus.data_shift == 8)
+ dmr2 |= VNDMR2_YDS;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /*
+ * Output format
+ */
+ switch (vin->format.pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
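+ /* The CbCr plane follows the Y plane; its offset is aligned to 128 bytes. */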
+ rvin_write(vin,
+ ALIGN(vin->format.bytesperline * vin->format.height,
+ 0x80), VNUVAOF_REG);
+ dmr = vin->format.pixelformat == V4L2_PIX_FMT_NV12 ?
+ VNDMR_DTMD_YCSEP_420 : VNDMR_DTMD_YCSEP;
+ output_is_yuv = true;
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ dmr = VNDMR_BPSM;
+ output_is_yuv = true;
+ break;
+ case V4L2_PIX_FMT_UYVY:
+ dmr = 0;
+ output_is_yuv = true;
+ break;
+ case V4L2_PIX_FMT_XRGB555:
+ dmr = VNDMR_DTMD_ARGB;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ dmr = 0;
+ break;
+ case V4L2_PIX_FMT_XBGR32:
+ /* Note: not supported on M1 */
+ dmr = VNDMR_EXRGB;
+ break;
+ case V4L2_PIX_FMT_ARGB555:
+ dmr = (vin->alpha ? VNDMR_ABIT : 0) | VNDMR_DTMD_ARGB;
+ break;
+ case V4L2_PIX_FMT_ABGR32:
+ dmr = VNDMR_A8BIT(vin->alpha) | VNDMR_EXRGB | VNDMR_DTMD_ARGB;
+ break;
+ case V4L2_PIX_FMT_SBGGR8:
+ case V4L2_PIX_FMT_SGBRG8:
+ case V4L2_PIX_FMT_SGRBG8:
+ case V4L2_PIX_FMT_SRGGB8:
+ dmr = 0;
+ break;
+ case V4L2_PIX_FMT_GREY:
+ if (input_is_yuv) {
+ dmr = VNDMR_DTMD_YCSEP | VNDMR_YMODE_Y8;
+ output_is_yuv = true;
+ } else {
+ dmr = 0;
+ }
+ break;
+ default:
+ vin_err(vin, "Invalid pixelformat (0x%x)\n",
+ vin->format.pixelformat);
+ return -EINVAL;
+ }
+
+ /* Always update on field change */
+ vnmc |= VNMC_VUP;
+
+ if (!vin->info->use_isp) {
+ /* If input and output use the same colorspace, use bypass mode */
+ if (input_is_yuv == output_is_yuv)
+ vnmc |= VNMC_BPS;
+
+ if (vin->info->model == RCAR_GEN3) {
+ /* Select between CSI-2 and parallel input */
+ if (vin->is_csi)
+ vnmc &= ~VNMC_DPINE;
+ else
+ vnmc |= VNMC_DPINE;
+ }
+ }
+
+ /* Progressive or interlaced mode */
+ interrupts = progressive ? VNIE_FIE : VNIE_EFE;
+
+ /* Ack interrupts */
+ rvin_write(vin, interrupts, VNINTS_REG);
+ /* Enable interrupts */
+ rvin_write(vin, interrupts, VNIE_REG);
+ /* Start capturing */
+ rvin_write(vin, dmr, VNDMR_REG);
+ rvin_write(vin, dmr2, VNDMR2_REG);
+
+ /* Enable module */
+ rvin_write(vin, vnmc | VNMC_ME, VNMC_REG);
+
+ return 0;
+}
+
+static void rvin_disable_interrupts(struct rvin_dev *vin)
+{
+ rvin_write(vin, 0, VNIE_REG);
+}
+
+static u32 rvin_get_interrupt_status(struct rvin_dev *vin)
+{
+ return rvin_read(vin, VNINTS_REG);
+}
+
+static void rvin_ack_interrupt(struct rvin_dev *vin)
+{
+ rvin_write(vin, rvin_read(vin, VNINTS_REG), VNINTS_REG);
+}
+
+static bool rvin_capture_active(struct rvin_dev *vin)
+{
+ return rvin_read(vin, VNMS_REG) & VNMS_CA;
+}
+
+static enum v4l2_field rvin_get_active_field(struct rvin_dev *vin, u32 vnms)
+{
+ if (vin->format.field == V4L2_FIELD_ALTERNATE) {
+ /* If FS is set it is an Even field. */
+ if (vnms & VNMS_FS)
+ return V4L2_FIELD_BOTTOM;
+ return V4L2_FIELD_TOP;
+ }
+
+ return vin->format.field;
+}
+
+static void rvin_set_slot_addr(struct rvin_dev *vin, int slot, dma_addr_t addr)
+{
+ const struct rvin_video_format *fmt;
+ int offsetx, offsety;
+ dma_addr_t offset;
+
+ fmt = rvin_format_from_pixel(vin, vin->format.pixelformat);
+
+ /*
+ * There is no HW support for composition, so do the best we can
+ * by modifying the buffer offset.
+ */
+ offsetx = vin->compose.left * fmt->bpp;
+ offsety = vin->compose.top * vin->format.bytesperline;
+ offset = addr + offsetx + offsety;
+
+ /*
+ * The address needs to be 128-byte aligned. The driver should never
+ * have accepted settings that do not satisfy this in the first place...
+ */
+ if (WARN_ON((offsetx | offsety | offset) & HW_BUFFER_MASK))
+ return;
+
+ rvin_write(vin, offset, VNMB_REG(slot));
+}
+
+/*
+ * Move a buffer from the queue to a HW slot. If no buffer is available
+ * the scratch buffer is used. The scratch buffer is never returned to
+ * userspace; its only purpose is to keep the capture loop running.
+ */
+static void rvin_fill_hw_slot(struct rvin_dev *vin, int slot)
+{
+ struct rvin_buffer *buf;
+ struct vb2_v4l2_buffer *vbuf;
+ dma_addr_t phys_addr;
+ int prev;
+
+ /* An already populated slot shall never be overwritten. */
+ if (WARN_ON(vin->buf_hw[slot].buffer))
+ return;
+
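+ /*
+ * If the previous slot holds the top half of a sequential-field frame,
+ * this slot must receive the bottom half of the same vb2 buffer, at an
+ * offset of half the image size (a quarter for NV12/NV16).
+ */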
+ prev = (slot == 0 ? HW_BUFFER_NUM : slot) - 1;
+
+ if (vin->buf_hw[prev].type == HALF_TOP) {
+ vbuf = vin->buf_hw[prev].buffer;
+ vin->buf_hw[slot].buffer = vbuf;
+ vin->buf_hw[slot].type = HALF_BOTTOM;
+ switch (vin->format.pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
+ phys_addr = vin->buf_hw[prev].phys +
+ vin->format.sizeimage / 4;
+ break;
+ default:
+ phys_addr = vin->buf_hw[prev].phys +
+ vin->format.sizeimage / 2;
+ break;
+ }
+ } else if ((vin->state != STOPPED && vin->state != RUNNING) ||
+ list_empty(&vin->buf_list)) {
+ vin->buf_hw[slot].buffer = NULL;
+ vin->buf_hw[slot].type = FULL;
+ phys_addr = vin->scratch_phys;
+ } else {
+ /* Keep track of buffer we give to HW */
+ buf = list_entry(vin->buf_list.next, struct rvin_buffer, list);
+ vbuf = &buf->vb;
+ list_del_init(to_buf_list(vbuf));
+ vin->buf_hw[slot].buffer = vbuf;
+
+ vin->buf_hw[slot].type =
+ V4L2_FIELD_IS_SEQUENTIAL(vin->format.field) ?
+ HALF_TOP : FULL;
+
+ /* Setup DMA */
+ phys_addr = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 0);
+ }
+
+ vin_dbg(vin, "Filling HW slot: %d type: %d buffer: %p\n",
+ slot, vin->buf_hw[slot].type, vin->buf_hw[slot].buffer);
+
+ vin->buf_hw[slot].phys = phys_addr;
+ rvin_set_slot_addr(vin, slot, phys_addr);
+}
+
+static int rvin_capture_start(struct rvin_dev *vin)
+{
+ int slot, ret;
+
+ for (slot = 0; slot < HW_BUFFER_NUM; slot++) {
+ vin->buf_hw[slot].buffer = NULL;
+ vin->buf_hw[slot].type = FULL;
+ }
+
+ for (slot = 0; slot < HW_BUFFER_NUM; slot++)
+ rvin_fill_hw_slot(vin, slot);
+
+ ret = rvin_setup(vin);
+ if (ret)
+ return ret;
+
+ rvin_crop_scale_comp(vin);
+
+ vin_dbg(vin, "Starting to capture\n");
+
+ /* Continuous Frame Capture Mode */
+ rvin_write(vin, VNFC_C_FRAME, VNFC_REG);
+
+ vin->state = STARTING;
+
+ return 0;
+}
+
+static void rvin_capture_stop(struct rvin_dev *vin)
+{
+ /* Set continuous & single transfer off */
+ rvin_write(vin, 0, VNFC_REG);
+
+ /* Disable module */
+ rvin_write(vin, rvin_read(vin, VNMC_REG) & ~VNMC_ME, VNMC_REG);
+}
+
+/* -----------------------------------------------------------------------------
+ * DMA Functions
+ */
+
+#define RVIN_TIMEOUT_MS 100
+#define RVIN_RETRIES 10
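+/*
+ * Each wait loop in rvin_stop_streaming() polls up to RVIN_RETRIES times,
+ * sleeping RVIN_TIMEOUT_MS between tries, i.e. for at most about one second.
+ */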
+
+static irqreturn_t rvin_irq(int irq, void *data)
+{
+ struct rvin_dev *vin = data;
+ u32 int_status, vnms;
+ int slot;
+ unsigned int handled = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vin->qlock, flags);
+
+ int_status = rvin_get_interrupt_status(vin);
+ if (!int_status)
+ goto done;
+
+ rvin_ack_interrupt(vin);
+ handled = 1;
+
+ /* Nothing to do if nothing was captured. */
+ if (!(int_status & VNINTS_FIS))
+ goto done;
+
+ /* Nothing to do if capture status is 'STOPPED' */
+ if (vin->state == STOPPED) {
+ vin_dbg(vin, "IRQ while state stopped\n");
+ goto done;
+ }
+
+ /* Prepare for capture and update state */
+ vnms = rvin_read(vin, VNMS_REG);
+ slot = (vnms & VNMS_FBS_MASK) >> VNMS_FBS_SHIFT;
+
+ /*
+ * To hand buffers back to userspace in a known order, start
+ * capturing from slot 0.
+ */
+ if (vin->state == STARTING) {
+ if (slot != 0) {
+ vin_dbg(vin, "Starting sync slot: %d\n", slot);
+ goto done;
+ }
+
+ vin_dbg(vin, "Capture start synced!\n");
+ vin->state = RUNNING;
+ }
+
+ /* Capture frame */
+ if (vin->buf_hw[slot].buffer) {
+ /*
+ * Nothing to do but refill the hardware slot if
+ * capture only filled first half of vb2 buffer.
+ */
+ if (vin->buf_hw[slot].type == HALF_TOP) {
+ vin->buf_hw[slot].buffer = NULL;
+ rvin_fill_hw_slot(vin, slot);
+ goto done;
+ }
+
+ vin->buf_hw[slot].buffer->field =
+ rvin_get_active_field(vin, vnms);
+ vin->buf_hw[slot].buffer->sequence = vin->sequence;
+ vin->buf_hw[slot].buffer->vb2_buf.timestamp = ktime_get_ns();
+ vb2_buffer_done(&vin->buf_hw[slot].buffer->vb2_buf,
+ VB2_BUF_STATE_DONE);
+ vin->buf_hw[slot].buffer = NULL;
+ } else {
+ /* Scratch buffer was used, dropping frame. */
+ vin_dbg(vin, "Dropping frame %u\n", vin->sequence);
+ }
+
+ vin->sequence++;
+
+ /* Prepare for next frame */
+ rvin_fill_hw_slot(vin, slot);
+done:
+ spin_unlock_irqrestore(&vin->qlock, flags);
+
+ return IRQ_RETVAL(handled);
+}
+
+static void return_unused_buffers(struct rvin_dev *vin,
+ enum vb2_buffer_state state)
+{
+ struct rvin_buffer *buf, *node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vin->qlock, flags);
+
+ list_for_each_entry_safe(buf, node, &vin->buf_list, list) {
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ list_del(&buf->list);
+ }
+
+ spin_unlock_irqrestore(&vin->qlock, flags);
+}
+
+static int rvin_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
+
+{
+ struct rvin_dev *vin = vb2_get_drv_priv(vq);
+
+ /* Make sure the image size is large enough. */
+ if (*nplanes)
+ return sizes[0] < vin->format.sizeimage ? -EINVAL : 0;
+
+ *nplanes = 1;
+ sizes[0] = vin->format.sizeimage;
+
+ return 0;
+};
+
+static int rvin_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct rvin_dev *vin = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long size = vin->format.sizeimage;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ vin_err(vin, "buffer too small (%lu < %lu)\n",
+ vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, size);
+
+ return 0;
+}
+
+static void rvin_buffer_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct rvin_dev *vin = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vin->qlock, flags);
+
+ list_add_tail(to_buf_list(vbuf), &vin->buf_list);
+
+ spin_unlock_irqrestore(&vin->qlock, flags);
+}
+
+static int rvin_mc_validate_format(struct rvin_dev *vin, struct v4l2_subdev *sd,
+ struct media_pad *pad)
+{
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ fmt.pad = pad->index;
+ if (v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt))
+ return -EPIPE;
+
+ switch (fmt.format.code) {
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_UYVY10_2X10:
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ break;
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ if (vin->format.pixelformat != V4L2_PIX_FMT_SBGGR8)
+ return -EPIPE;
+ break;
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ if (vin->format.pixelformat != V4L2_PIX_FMT_SGBRG8)
+ return -EPIPE;
+ break;
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ if (vin->format.pixelformat != V4L2_PIX_FMT_SGRBG8)
+ return -EPIPE;
+ break;
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ if (vin->format.pixelformat != V4L2_PIX_FMT_SRGGB8)
+ return -EPIPE;
+ break;
+ case MEDIA_BUS_FMT_Y8_1X8:
+ if (vin->format.pixelformat != V4L2_PIX_FMT_GREY)
+ return -EPIPE;
+ break;
+ default:
+ return -EPIPE;
+ }
+ vin->mbus_code = fmt.format.code;
+
+ switch (fmt.format.field) {
+ case V4L2_FIELD_TOP:
+ case V4L2_FIELD_BOTTOM:
+ case V4L2_FIELD_NONE:
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ case V4L2_FIELD_INTERLACED:
+ case V4L2_FIELD_SEQ_TB:
+ case V4L2_FIELD_SEQ_BT:
+ /* Supported natively */
+ break;
+ case V4L2_FIELD_ALTERNATE:
+ switch (vin->format.field) {
+ case V4L2_FIELD_TOP:
+ case V4L2_FIELD_BOTTOM:
+ case V4L2_FIELD_NONE:
+ case V4L2_FIELD_ALTERNATE:
+ break;
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ case V4L2_FIELD_INTERLACED:
+ case V4L2_FIELD_SEQ_TB:
+ case V4L2_FIELD_SEQ_BT:
+ /* Use VIN hardware to combine the two fields */
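+ /*
+ * The subdevice reports the height of a single field for
+ * ALTERNATE, so double it to match the frame height that is
+ * checked further down.
+ */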
+ fmt.format.height *= 2;
+ break;
+ default:
+ return -EPIPE;
+ }
+ break;
+ default:
+ return -EPIPE;
+ }
+
+ if (rvin_scaler_needed(vin)) {
+ /* Gen3 can't scale NV12 */
+ if (vin->info->model == RCAR_GEN3 &&
+ vin->format.pixelformat == V4L2_PIX_FMT_NV12)
+ return -EPIPE;
+
+ if (!vin->scaler)
+ return -EPIPE;
+ } else {
+ if (vin->format.pixelformat == V4L2_PIX_FMT_NV12) {
+ if (ALIGN(fmt.format.width, 32) != vin->format.width ||
+ ALIGN(fmt.format.height, 32) != vin->format.height)
+ return -EPIPE;
+ } else {
+ if (fmt.format.width != vin->format.width ||
+ fmt.format.height != vin->format.height)
+ return -EPIPE;
+ }
+ }
+
+ if (fmt.format.code != vin->mbus_code)
+ return -EPIPE;
+
+ return 0;
+}
+
+static int rvin_set_stream(struct rvin_dev *vin, int on)
+{
+ struct v4l2_subdev *sd;
+ struct media_pad *pad;
+ int ret;
+
+ /* No media controller used, simply pass operation to subdevice. */
+ if (!vin->info->use_mc) {
+ ret = v4l2_subdev_call(vin->parallel.subdev, video, s_stream,
+ on);
+
+ return ret == -ENOIOCTLCMD ? 0 : ret;
+ }
+
+ pad = media_pad_remote_pad_first(&vin->pad);
+ if (!pad)
+ return -EPIPE;
+
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+
+ if (!on) {
+ video_device_pipeline_stop(&vin->vdev);
+ return v4l2_subdev_call(sd, video, s_stream, 0);
+ }
+
+ ret = rvin_mc_validate_format(vin, sd, pad);
+ if (ret)
+ return ret;
+
+ ret = video_device_pipeline_alloc_start(&vin->vdev);
+ if (ret)
+ return ret;
+
+ ret = v4l2_subdev_call(sd, video, s_stream, 1);
+ if (ret == -ENOIOCTLCMD)
+ ret = 0;
+ if (ret)
+ video_device_pipeline_stop(&vin->vdev);
+
+ return ret;
+}
+
+int rvin_start_streaming(struct rvin_dev *vin)
+{
+ unsigned long flags;
+ int ret;
+
+ ret = rvin_set_stream(vin, 1);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&vin->qlock, flags);
+
+ vin->sequence = 0;
+
+ ret = rvin_capture_start(vin);
+ if (ret)
+ rvin_set_stream(vin, 0);
+
+ spin_unlock_irqrestore(&vin->qlock, flags);
+
+ return ret;
+}
+
+static int rvin_start_streaming_vq(struct vb2_queue *vq, unsigned int count)
+{
+ struct rvin_dev *vin = vb2_get_drv_priv(vq);
+ int ret = -ENOMEM;
+
+ /* Allocate scratch buffer. */
+ vin->scratch = dma_alloc_coherent(vin->dev, vin->format.sizeimage,
+ &vin->scratch_phys, GFP_KERNEL);
+ if (!vin->scratch)
+ goto err_scratch;
+
+ ret = rvin_start_streaming(vin);
+ if (ret)
+ goto err_start;
+
+ return 0;
+err_start:
+ dma_free_coherent(vin->dev, vin->format.sizeimage, vin->scratch,
+ vin->scratch_phys);
+err_scratch:
+ return_unused_buffers(vin, VB2_BUF_STATE_QUEUED);
+
+ return ret;
+}
+
+void rvin_stop_streaming(struct rvin_dev *vin)
+{
+ unsigned int i, retries;
+ unsigned long flags;
+ bool buffersFreed;
+
+ spin_lock_irqsave(&vin->qlock, flags);
+
+ if (vin->state == STOPPED) {
+ spin_unlock_irqrestore(&vin->qlock, flags);
+ return;
+ }
+
+ vin->state = STOPPING;
+
+ /*
+ * Wait until only the scratch buffer is used by the hardware. In the
+ * STOPPING state refilled slots get the scratch buffer, so any vb2
+ * buffers still owned by the hardware drain within a few more frame
+ * interrupts.
+ */
+ retries = 0;
+ while (retries++ < RVIN_RETRIES) {
+ buffersFreed = true;
+ for (i = 0; i < HW_BUFFER_NUM; i++)
+ if (vin->buf_hw[i].buffer)
+ buffersFreed = false;
+
+ if (buffersFreed)
+ break;
+
+ spin_unlock_irqrestore(&vin->qlock, flags);
+ msleep(RVIN_TIMEOUT_MS);
+ spin_lock_irqsave(&vin->qlock, flags);
+ }
+
+ /* Wait for streaming to stop */
+ retries = 0;
+ while (retries++ < RVIN_RETRIES) {
+
+ rvin_capture_stop(vin);
+
+ /* Check if HW is stopped */
+ if (!rvin_capture_active(vin)) {
+ vin->state = STOPPED;
+ break;
+ }
+
+ spin_unlock_irqrestore(&vin->qlock, flags);
+ msleep(RVIN_TIMEOUT_MS);
+ spin_lock_irqsave(&vin->qlock, flags);
+ }
+
+ if (!buffersFreed || vin->state != STOPPED) {
+ /*
+ * If this happens something has gone horribly wrong.
+ * Set state to stopped to prevent the interrupt handler
+ * from making things worse...
+ */
+ vin_err(vin, "Failed stop HW, something is seriously broken\n");
+ vin->state = STOPPED;
+ }
+
+ spin_unlock_irqrestore(&vin->qlock, flags);
+
+ /* If something went wrong, free buffers with an error. */
+ if (!buffersFreed) {
+ return_unused_buffers(vin, VB2_BUF_STATE_ERROR);
+ for (i = 0; i < HW_BUFFER_NUM; i++) {
+ if (vin->buf_hw[i].buffer)
+ vb2_buffer_done(&vin->buf_hw[i].buffer->vb2_buf,
+ VB2_BUF_STATE_ERROR);
+ }
+ }
+
+ rvin_set_stream(vin, 0);
+
+ /* disable interrupts */
+ rvin_disable_interrupts(vin);
+}
+
+static void rvin_stop_streaming_vq(struct vb2_queue *vq)
+{
+ struct rvin_dev *vin = vb2_get_drv_priv(vq);
+
+ rvin_stop_streaming(vin);
+
+ /* Free scratch buffer. */
+ dma_free_coherent(vin->dev, vin->format.sizeimage, vin->scratch,
+ vin->scratch_phys);
+
+ return_unused_buffers(vin, VB2_BUF_STATE_ERROR);
+}
+
+static const struct vb2_ops rvin_qops = {
+ .queue_setup = rvin_queue_setup,
+ .buf_prepare = rvin_buffer_prepare,
+ .buf_queue = rvin_buffer_queue,
+ .start_streaming = rvin_start_streaming_vq,
+ .stop_streaming = rvin_stop_streaming_vq,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+void rvin_dma_unregister(struct rvin_dev *vin)
+{
+ mutex_destroy(&vin->lock);
+
+ v4l2_device_unregister(&vin->v4l2_dev);
+}
+
+int rvin_dma_register(struct rvin_dev *vin, int irq)
+{
+ struct vb2_queue *q = &vin->queue;
+ int i, ret;
+
+ /* Initialize the top-level structure */
+ ret = v4l2_device_register(vin->dev, &vin->v4l2_dev);
+ if (ret)
+ return ret;
+
+ mutex_init(&vin->lock);
+ INIT_LIST_HEAD(&vin->buf_list);
+
+ spin_lock_init(&vin->qlock);
+
+ vin->state = STOPPED;
+
+ for (i = 0; i < HW_BUFFER_NUM; i++)
+ vin->buf_hw[i].buffer = NULL;
+
+ /* buffer queue */
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_READ | VB2_DMABUF;
+ q->lock = &vin->lock;
+ q->drv_priv = vin;
+ q->buf_struct_size = sizeof(struct rvin_buffer);
+ q->ops = &rvin_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 4;
+ q->dev = vin->dev;
+
+ ret = vb2_queue_init(q);
+ if (ret < 0) {
+ vin_err(vin, "failed to initialize VB2 queue\n");
+ goto error;
+ }
+
+ /* irq */
+ ret = devm_request_irq(vin->dev, irq, rvin_irq, IRQF_SHARED,
+ KBUILD_MODNAME, vin);
+ if (ret) {
+ vin_err(vin, "failed to request irq\n");
+ goto error;
+ }
+
+ return 0;
+error:
+ rvin_dma_unregister(vin);
+
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * Gen3 CHSEL manipulation
+ */
+
+/*
+ * There is no need for locking around changing the routing, as it is
+ * only possible to do so when no VIN in the group is streaming, so
+ * nothing can race with the VNMC register.
+ */
+int rvin_set_channel_routing(struct rvin_dev *vin, u8 chsel)
+{
+ const struct rvin_group_route *route;
+ u32 ifmd = 0;
+ u32 vnmc;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(vin->dev);
+ if (ret < 0)
+ return ret;
+
+ /* Make register writes take effect immediately. */
+ vnmc = rvin_read(vin, VNMC_REG);
+ rvin_write(vin, vnmc & ~VNMC_VUP, VNMC_REG);
+
+ /*
+ * Set data expansion mode to "pad with 0s" by inspecting the routes
+ * table to find out which bit fields are available in the IFMD
+ * register. IFMD_DES1 controls data expansion mode for CSI20/21,
+ * IFMD_DES0 controls data expansion mode for CSI40/41.
+ */
+ for (route = vin->info->routes; route->chsel; route++) {
+ if (route->csi == RVIN_CSI20 || route->csi == RVIN_CSI21)
+ ifmd |= VNCSI_IFMD_DES1;
+ else
+ ifmd |= VNCSI_IFMD_DES0;
+
+ if (ifmd == (VNCSI_IFMD_DES0 | VNCSI_IFMD_DES1))
+ break;
+ }
+
+ if (ifmd) {
+ ifmd |= VNCSI_IFMD_CSI_CHSEL(chsel);
+ rvin_write(vin, ifmd, VNCSI_IFMD_REG);
+ }
+
+ vin_dbg(vin, "Set IFMD 0x%x\n", ifmd);
+
+ vin->chsel = chsel;
+
+ /* Restore VNMC. */
+ rvin_write(vin, vnmc, VNMC_REG);
+
+ pm_runtime_put(vin->dev);
+
+ return 0;
+}
+
+void rvin_set_alpha(struct rvin_dev *vin, unsigned int alpha)
+{
+ unsigned long flags;
+ u32 dmr;
+
+ spin_lock_irqsave(&vin->qlock, flags);
+
+ vin->alpha = alpha;
+
+ if (vin->state == STOPPED)
+ goto out;
+
+ switch (vin->format.pixelformat) {
+ case V4L2_PIX_FMT_ARGB555:
+ dmr = rvin_read(vin, VNDMR_REG) & ~VNDMR_ABIT;
+ if (vin->alpha)
+ dmr |= VNDMR_ABIT;
+ break;
+ case V4L2_PIX_FMT_ABGR32:
+ dmr = rvin_read(vin, VNDMR_REG) & ~VNDMR_A8BIT_MASK;
+ dmr |= VNDMR_A8BIT(vin->alpha);
+ break;
+ default:
+ goto out;
+ }
+
+ rvin_write(vin, dmr, VNDMR_REG);
+out:
+ spin_unlock_irqrestore(&vin->qlock, flags);
+}
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c b/drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c
new file mode 100644
index 0000000000..073f70c6ac
--- /dev/null
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c
@@ -0,0 +1,1148 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for Renesas R-Car VIN
+ *
+ * Copyright (C) 2016 Renesas Electronics Corp.
+ * Copyright (C) 2011-2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Cogent Embedded, Inc., <source@cogentembedded.com>
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * Based on the soc-camera rcar_vin driver
+ */
+
+#include <linux/pm_runtime.h>
+
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-rect.h>
+
+#include "rcar-vin.h"
+
+#define RVIN_DEFAULT_FORMAT V4L2_PIX_FMT_YUYV
+#define RVIN_DEFAULT_WIDTH 800
+#define RVIN_DEFAULT_HEIGHT 600
+#define RVIN_DEFAULT_FIELD V4L2_FIELD_NONE
+#define RVIN_DEFAULT_COLORSPACE V4L2_COLORSPACE_SRGB
+
+/* -----------------------------------------------------------------------------
+ * Format Conversions
+ */
+
+static const struct rvin_video_format rvin_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .bpp = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .bpp = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .bpp = 2,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .bpp = 2,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .bpp = 2,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_XRGB555,
+ .bpp = 2,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_XBGR32,
+ .bpp = 4,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB555,
+ .bpp = 2,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ABGR32,
+ .bpp = 4,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .bpp = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ .bpp = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .bpp = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ .bpp = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .bpp = 1,
+ },
+};
+
+const struct rvin_video_format *rvin_format_from_pixel(struct rvin_dev *vin,
+ u32 pixelformat)
+{
+ int i;
+
+ switch (pixelformat) {
+ case V4L2_PIX_FMT_XBGR32:
+ if (vin->info->model == RCAR_M1)
+ return NULL;
+ break;
+ case V4L2_PIX_FMT_NV12:
+ /*
+ * If NV12 is supported it's only supported on channels 0, 1, 4,
+ * 5, 8, 9, 12 and 13.
+ */
+ if (!vin->info->nv12 || !(BIT(vin->id) & 0x3333))
+ return NULL;
+ break;
+ default:
+ break;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(rvin_formats); i++)
+ if (rvin_formats[i].fourcc == pixelformat)
+ return rvin_formats + i;
+
+ return NULL;
+}
+
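+/*
+ * The line stride is the width aligned to the hardware requirement for
+ * the format, times bytes per pixel, e.g. a 1286 pixel wide YUYV image
+ * gives ALIGN(1286, 0x10) * 2 = 2592 bytes per line.
+ */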
+static u32 rvin_format_bytesperline(struct rvin_dev *vin,
+ struct v4l2_pix_format *pix)
+{
+ const struct rvin_video_format *fmt;
+ u32 align;
+
+ fmt = rvin_format_from_pixel(vin, pix->pixelformat);
+
+ if (WARN_ON(!fmt))
+ return -EINVAL;
+
+ switch (pix->pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
+ align = 0x20;
+ break;
+ default:
+ align = 0x10;
+ break;
+ }
+
+ if (V4L2_FIELD_IS_SEQUENTIAL(pix->field))
+ align = 0x80;
+
+ return ALIGN(pix->width, align) * fmt->bpp;
+}
+
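+/*
+ * For the semi-planar formats the luma plane size (bytesperline * height)
+ * is scaled to also cover the chroma plane, e.g. a 1280x720 NV12 buffer
+ * with a 1280 byte stride needs 1280 * 720 * 3 / 2 = 1382400 bytes.
+ */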
+static u32 rvin_format_sizeimage(struct v4l2_pix_format *pix)
+{
+ switch (pix->pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ return pix->bytesperline * pix->height * 3 / 2;
+ case V4L2_PIX_FMT_NV16:
+ return pix->bytesperline * pix->height * 2;
+ default:
+ return pix->bytesperline * pix->height;
+ }
+}
+
+static void rvin_format_align(struct rvin_dev *vin, struct v4l2_pix_format *pix)
+{
+ u32 walign;
+
+ if (!rvin_format_from_pixel(vin, pix->pixelformat))
+ pix->pixelformat = RVIN_DEFAULT_FORMAT;
+
+ switch (pix->field) {
+ case V4L2_FIELD_TOP:
+ case V4L2_FIELD_BOTTOM:
+ case V4L2_FIELD_NONE:
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ case V4L2_FIELD_INTERLACED:
+ case V4L2_FIELD_ALTERNATE:
+ case V4L2_FIELD_SEQ_TB:
+ case V4L2_FIELD_SEQ_BT:
+ break;
+ default:
+ pix->field = RVIN_DEFAULT_FIELD;
+ break;
+ }
+
+ /* Hardware limits width alignment based on format. */
+ switch (pix->pixelformat) {
+ /* Multiple of 32 (2^5) for NV12/16. */
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
+ walign = 5;
+ break;
+ /* Multiple of 2 (2^1) for YUV. */
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ walign = 1;
+ break;
+ /* No multiple for RGB. */
+ default:
+ walign = 0;
+ break;
+ }
+
+ /* Limit to VIN capabilities */
+ v4l_bound_align_image(&pix->width, 5, vin->info->max_width, walign,
+ &pix->height, 2, vin->info->max_height, 0, 0);
+
+ pix->bytesperline = rvin_format_bytesperline(vin, pix);
+ pix->sizeimage = rvin_format_sizeimage(pix);
+
+ vin_dbg(vin, "Format %ux%u bpl: %u size: %u\n",
+ pix->width, pix->height, pix->bytesperline, pix->sizeimage);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2
+ */
+
+static int rvin_reset_format(struct rvin_dev *vin)
+{
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .pad = vin->parallel.source_pad,
+ };
+ int ret;
+
+ ret = v4l2_subdev_call(vin_to_source(vin), pad, get_fmt, NULL, &fmt);
+ if (ret)
+ return ret;
+
+ v4l2_fill_pix_format(&vin->format, &fmt.format);
+
+ vin->crop.top = 0;
+ vin->crop.left = 0;
+ vin->crop.width = vin->format.width;
+ vin->crop.height = vin->format.height;
+
+ /* Make use of the hardware interlacer by default. */
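+	/* E.g. 720x240 ALTERNATE fields become a 720x480 INTERLACED format. */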
+ if (vin->format.field == V4L2_FIELD_ALTERNATE) {
+ vin->format.field = V4L2_FIELD_INTERLACED;
+ vin->format.height *= 2;
+ }
+
+ rvin_format_align(vin, &vin->format);
+
+ vin->compose.top = 0;
+ vin->compose.left = 0;
+ vin->compose.width = vin->format.width;
+ vin->compose.height = vin->format.height;
+
+ return 0;
+}
+
+static int rvin_try_format(struct rvin_dev *vin, u32 which,
+ struct v4l2_pix_format *pix,
+ struct v4l2_rect *src_rect)
+{
+ struct v4l2_subdev *sd = vin_to_source(vin);
+ struct v4l2_subdev_state *sd_state;
+ static struct lock_class_key key;
+ struct v4l2_subdev_format format = {
+ .which = which,
+ .pad = vin->parallel.source_pad,
+ };
+ enum v4l2_field field;
+ u32 width, height;
+ int ret;
+
+ /*
+ * FIXME: Drop this call, drivers are not supposed to use
+ * __v4l2_subdev_state_alloc().
+ */
+ sd_state = __v4l2_subdev_state_alloc(sd, "rvin:state->lock", &key);
+ if (IS_ERR(sd_state))
+ return PTR_ERR(sd_state);
+
+ if (!rvin_format_from_pixel(vin, pix->pixelformat))
+ pix->pixelformat = RVIN_DEFAULT_FORMAT;
+
+ v4l2_fill_mbus_format(&format.format, pix, vin->mbus_code);
+
+ /* Allow the video device to override field and to scale */
+ field = pix->field;
+ width = pix->width;
+ height = pix->height;
+
+ ret = v4l2_subdev_call(sd, pad, set_fmt, sd_state, &format);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ goto done;
+ ret = 0;
+
+ v4l2_fill_pix_format(pix, &format.format);
+
+ if (src_rect) {
+ src_rect->top = 0;
+ src_rect->left = 0;
+ src_rect->width = pix->width;
+ src_rect->height = pix->height;
+ }
+
+ if (field != V4L2_FIELD_ANY)
+ pix->field = field;
+
+ pix->width = width;
+ pix->height = height;
+
+ rvin_format_align(vin, pix);
+done:
+ __v4l2_subdev_state_free(sd_state);
+
+ return ret;
+}
+
+static int rvin_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
+ strscpy(cap->card, "R_Car_VIN", sizeof(cap->card));
+ return 0;
+}
+
+static int rvin_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+
+ return rvin_try_format(vin, V4L2_SUBDEV_FORMAT_TRY, &f->fmt.pix, NULL);
+}
+
+static int rvin_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_rect fmt_rect, src_rect;
+ int ret;
+
+ if (vb2_is_busy(&vin->queue))
+ return -EBUSY;
+
+ ret = rvin_try_format(vin, V4L2_SUBDEV_FORMAT_ACTIVE, &f->fmt.pix,
+ &src_rect);
+ if (ret)
+ return ret;
+
+ vin->format = f->fmt.pix;
+
+ fmt_rect.top = 0;
+ fmt_rect.left = 0;
+ fmt_rect.width = vin->format.width;
+ fmt_rect.height = vin->format.height;
+
+ v4l2_rect_map_inside(&vin->crop, &src_rect);
+ v4l2_rect_map_inside(&vin->compose, &fmt_rect);
+
+ return 0;
+}
+
+static int rvin_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+
+ f->fmt.pix = vin->format;
+
+ return 0;
+}
+
+static int rvin_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ unsigned int i;
+ int matched;
+
+ /*
+	 * If mbus_code is set, only enumerate the pixel formats supported for
+	 * that bus code. Converting from YCbCr to RGB and RGB to YCbCr is
+	 * possible with the VIN, so all supported YCbCr and RGB media bus
+	 * codes can produce all of the related pixel formats. If mbus_code is
+	 * not set, enumerate all possible pixel formats.
+	 *
+	 * TODO: Once the raw MEDIA_BUS_FMT_SRGGB12_1X12 format is added to the
+	 * driver this needs to be extended so raw media bus codes only result
+	 * in raw pixel formats.
+ */
+ switch (f->mbus_code) {
+ case 0:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_UYVY10_2X10:
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ break;
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ if (f->index)
+ return -EINVAL;
+ f->pixelformat = V4L2_PIX_FMT_SBGGR8;
+ return 0;
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ if (f->index)
+ return -EINVAL;
+ f->pixelformat = V4L2_PIX_FMT_SGBRG8;
+ return 0;
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ if (f->index)
+ return -EINVAL;
+ f->pixelformat = V4L2_PIX_FMT_SGRBG8;
+ return 0;
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ if (f->index)
+ return -EINVAL;
+ f->pixelformat = V4L2_PIX_FMT_SRGGB8;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
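+	/*
+	 * Count only the pixel formats this VIN instance supports so that
+	 * f->index enumerates the supported formats, not the raw entries of
+	 * rvin_formats[].
+	 */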
+ matched = -1;
+ for (i = 0; i < ARRAY_SIZE(rvin_formats); i++) {
+ if (rvin_format_from_pixel(vin, rvin_formats[i].fourcc))
+ matched++;
+
+ if (matched == f->index) {
+ f->pixelformat = rvin_formats[i].fourcc;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int rvin_remote_rectangle(struct rvin_dev *vin, struct v4l2_rect *rect)
+{
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ struct v4l2_subdev *sd;
+ unsigned int index;
+ int ret;
+
+ if (vin->info->use_mc) {
+ struct media_pad *pad = media_pad_remote_pad_first(&vin->pad);
+
+ if (!pad)
+ return -EINVAL;
+
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+ index = pad->index;
+ } else {
+ sd = vin_to_source(vin);
+ index = vin->parallel.source_pad;
+ }
+
+ fmt.pad = index;
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt);
+ if (ret)
+ return ret;
+
+ rect->left = rect->top = 0;
+ rect->width = fmt.format.width;
+ rect->height = fmt.format.height;
+
+ if (fmt.format.field == V4L2_FIELD_ALTERNATE) {
+ switch (vin->format.field) {
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ case V4L2_FIELD_INTERLACED:
+ case V4L2_FIELD_SEQ_TB:
+ case V4L2_FIELD_SEQ_BT:
+ rect->height *= 2;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int rvin_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ int ret;
+
+ if (!vin->scaler)
+ return -ENOIOCTLCMD;
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ ret = rvin_remote_rectangle(vin, &s->r);
+ if (ret)
+ return ret;
+
+ break;
+ case V4L2_SEL_TGT_CROP:
+ s->r = vin->crop;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ s->r.left = s->r.top = 0;
+ s->r.width = vin->format.width;
+ s->r.height = vin->format.height;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ s->r = vin->compose;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rvin_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ const struct rvin_video_format *fmt;
+ struct v4l2_rect r = s->r;
+ struct v4l2_rect max_rect;
+ struct v4l2_rect min_rect = {
+ .width = 6,
+ .height = 2,
+ };
+ int ret;
+
+ if (!vin->scaler)
+ return -ENOIOCTLCMD;
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ v4l2_rect_set_min_size(&r, &min_rect);
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP:
+ /* Can't crop outside of source input */
+ ret = rvin_remote_rectangle(vin, &max_rect);
+ if (ret)
+ return ret;
+
+ v4l2_rect_map_inside(&r, &max_rect);
+
+ v4l_bound_align_image(&r.width, 6, max_rect.width, 0,
+ &r.height, 2, max_rect.height, 0, 0);
+
+ r.top = clamp_t(s32, r.top, 0, max_rect.height - r.height);
+ r.left = clamp_t(s32, r.left, 0, max_rect.width - r.width);
+
+ vin->crop = s->r = r;
+
+ vin_dbg(vin, "Cropped %dx%d@%d:%d of %dx%d\n",
+ r.width, r.height, r.left, r.top,
+ max_rect.width, max_rect.height);
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ /* Make sure compose rect fits inside output format */
+ max_rect.top = max_rect.left = 0;
+ max_rect.width = vin->format.width;
+ max_rect.height = vin->format.height;
+ v4l2_rect_map_inside(&r, &max_rect);
+
+ /*
+	 * Composing is done by adding an offset to the buffer address;
+	 * the hardware wants this address to be aligned to HW_BUFFER_MASK.
+	 * Make sure the top and left values meet this requirement.
+ */
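+	/*
+	 * For example, top is stepped down until top * bytesperline is a
+	 * multiple of 128 bytes (HW_BUFFER_MASK + 1), and with a
+	 * 2 bytes-per-pixel format such as YUYV, left until left * 2 is a
+	 * multiple of 128 bytes.
+	 */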
+ while ((r.top * vin->format.bytesperline) & HW_BUFFER_MASK)
+ r.top--;
+
+ fmt = rvin_format_from_pixel(vin, vin->format.pixelformat);
+ while ((r.left * fmt->bpp) & HW_BUFFER_MASK)
+ r.left--;
+
+ vin->compose = s->r = r;
+
+ vin_dbg(vin, "Compose %dx%d@%d:%d in %dx%d\n",
+ r.width, r.height, r.left, r.top,
+ vin->format.width, vin->format.height);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* HW supports modifying configuration while running */
+ rvin_crop_scale_comp(vin);
+
+ return 0;
+}
+
+static int rvin_g_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+
+ return v4l2_g_parm_cap(&vin->vdev, sd, parm);
+}
+
+static int rvin_s_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+
+ return v4l2_s_parm_cap(&vin->vdev, sd, parm);
+}
+
+static int rvin_g_pixelaspect(struct file *file, void *priv,
+ int type, struct v4l2_fract *f)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+
+ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ return v4l2_subdev_call(sd, video, g_pixelaspect, f);
+}
+
+static int rvin_enum_input(struct file *file, void *priv,
+ struct v4l2_input *i)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+ int ret;
+
+ if (i->index != 0)
+ return -EINVAL;
+
+ ret = v4l2_subdev_call(sd, video, g_input_status, &i->status);
+ if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ return ret;
+
+ i->type = V4L2_INPUT_TYPE_CAMERA;
+
+ if (v4l2_subdev_has_op(sd, pad, dv_timings_cap)) {
+ i->capabilities = V4L2_IN_CAP_DV_TIMINGS;
+ i->std = 0;
+ } else {
+ i->capabilities = V4L2_IN_CAP_STD;
+ i->std = vin->vdev.tvnorms;
+ }
+
+ strscpy(i->name, "Camera", sizeof(i->name));
+
+ return 0;
+}
+
+static int rvin_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int rvin_s_input(struct file *file, void *priv, unsigned int i)
+{
+ if (i > 0)
+ return -EINVAL;
+ return 0;
+}
+
+static int rvin_querystd(struct file *file, void *priv, v4l2_std_id *a)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+
+ return v4l2_subdev_call(sd, video, querystd, a);
+}
+
+static int rvin_s_std(struct file *file, void *priv, v4l2_std_id a)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ int ret;
+
+ ret = v4l2_subdev_call(vin_to_source(vin), video, s_std, a);
+ if (ret < 0)
+ return ret;
+
+ vin->std = a;
+
+ /* Changing the standard will change the width/height */
+ return rvin_reset_format(vin);
+}
+
+static int rvin_g_std(struct file *file, void *priv, v4l2_std_id *a)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+
+ if (v4l2_subdev_has_op(vin_to_source(vin), pad, dv_timings_cap))
+ return -ENOIOCTLCMD;
+
+ *a = vin->std;
+
+ return 0;
+}
+
+static int rvin_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_SOURCE_CHANGE:
+ return v4l2_event_subscribe(fh, sub, 4, NULL);
+ }
+ return v4l2_ctrl_subscribe_event(fh, sub);
+}
+
+static int rvin_enum_dv_timings(struct file *file, void *priv_fh,
+ struct v4l2_enum_dv_timings *timings)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+ int ret;
+
+ if (timings->pad)
+ return -EINVAL;
+
+ timings->pad = vin->parallel.sink_pad;
+
+ ret = v4l2_subdev_call(sd, pad, enum_dv_timings, timings);
+
+ timings->pad = 0;
+
+ return ret;
+}
+
+static int rvin_s_dv_timings(struct file *file, void *priv_fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+ int ret;
+
+ ret = v4l2_subdev_call(sd, video, s_dv_timings, timings);
+ if (ret)
+ return ret;
+
+ /* Changing the timings will change the width/height */
+ return rvin_reset_format(vin);
+}
+
+static int rvin_g_dv_timings(struct file *file, void *priv_fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+
+ return v4l2_subdev_call(sd, video, g_dv_timings, timings);
+}
+
+static int rvin_query_dv_timings(struct file *file, void *priv_fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+
+ return v4l2_subdev_call(sd, video, query_dv_timings, timings);
+}
+
+static int rvin_dv_timings_cap(struct file *file, void *priv_fh,
+ struct v4l2_dv_timings_cap *cap)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+ int ret;
+
+ if (cap->pad)
+ return -EINVAL;
+
+ cap->pad = vin->parallel.sink_pad;
+
+ ret = v4l2_subdev_call(sd, pad, dv_timings_cap, cap);
+
+ cap->pad = 0;
+
+ return ret;
+}
+
+static int rvin_g_edid(struct file *file, void *fh, struct v4l2_edid *edid)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+ int ret;
+
+ if (edid->pad)
+ return -EINVAL;
+
+ edid->pad = vin->parallel.sink_pad;
+
+ ret = v4l2_subdev_call(sd, pad, get_edid, edid);
+
+ edid->pad = 0;
+
+ return ret;
+}
+
+static int rvin_s_edid(struct file *file, void *fh, struct v4l2_edid *edid)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+ int ret;
+
+ if (edid->pad)
+ return -EINVAL;
+
+ edid->pad = vin->parallel.sink_pad;
+
+ ret = v4l2_subdev_call(sd, pad, set_edid, edid);
+
+ edid->pad = 0;
+
+ return ret;
+}
+
+static const struct v4l2_ioctl_ops rvin_ioctl_ops = {
+ .vidioc_querycap = rvin_querycap,
+ .vidioc_try_fmt_vid_cap = rvin_try_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = rvin_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = rvin_s_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_cap = rvin_enum_fmt_vid_cap,
+
+ .vidioc_g_selection = rvin_g_selection,
+ .vidioc_s_selection = rvin_s_selection,
+
+ .vidioc_g_parm = rvin_g_parm,
+ .vidioc_s_parm = rvin_s_parm,
+
+ .vidioc_g_pixelaspect = rvin_g_pixelaspect,
+
+ .vidioc_enum_input = rvin_enum_input,
+ .vidioc_g_input = rvin_g_input,
+ .vidioc_s_input = rvin_s_input,
+
+ .vidioc_dv_timings_cap = rvin_dv_timings_cap,
+ .vidioc_enum_dv_timings = rvin_enum_dv_timings,
+ .vidioc_g_dv_timings = rvin_g_dv_timings,
+ .vidioc_s_dv_timings = rvin_s_dv_timings,
+ .vidioc_query_dv_timings = rvin_query_dv_timings,
+
+ .vidioc_g_edid = rvin_g_edid,
+ .vidioc_s_edid = rvin_s_edid,
+
+ .vidioc_querystd = rvin_querystd,
+ .vidioc_g_std = rvin_g_std,
+ .vidioc_s_std = rvin_s_std,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = rvin_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Media Controller
+ */
+
+static void rvin_mc_try_format(struct rvin_dev *vin,
+ struct v4l2_pix_format *pix)
+{
+ /*
+ * The V4L2 specification clearly documents the colorspace fields
+ * as being set by drivers for capture devices. Using the values
+ * supplied by userspace thus wouldn't comply with the API. Until
+	 * the API is updated, force fixed values.
+ */
+ pix->colorspace = RVIN_DEFAULT_COLORSPACE;
+ pix->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(pix->colorspace);
+ pix->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(pix->colorspace);
+ pix->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true, pix->colorspace,
+ pix->ycbcr_enc);
+
+ rvin_format_align(vin, pix);
+}
+
+static int rvin_mc_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+
+ rvin_mc_try_format(vin, &f->fmt.pix);
+
+ return 0;
+}
+
+static int rvin_mc_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+
+ if (vb2_is_busy(&vin->queue))
+ return -EBUSY;
+
+ rvin_mc_try_format(vin, &f->fmt.pix);
+
+ vin->format = f->fmt.pix;
+
+ vin->crop.top = 0;
+ vin->crop.left = 0;
+ vin->crop.width = vin->format.width;
+ vin->crop.height = vin->format.height;
+ vin->compose = vin->crop;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops rvin_mc_ioctl_ops = {
+ .vidioc_querycap = rvin_querycap,
+ .vidioc_try_fmt_vid_cap = rvin_mc_try_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = rvin_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = rvin_mc_s_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_cap = rvin_enum_fmt_vid_cap,
+
+ .vidioc_g_selection = rvin_g_selection,
+ .vidioc_s_selection = rvin_s_selection,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = rvin_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/* -----------------------------------------------------------------------------
+ * File Operations
+ */
+
+static int rvin_power_parallel(struct rvin_dev *vin, bool on)
+{
+ struct v4l2_subdev *sd = vin_to_source(vin);
+ int power = on ? 1 : 0;
+ int ret;
+
+ ret = v4l2_subdev_call(sd, core, s_power, power);
+ if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ return ret;
+
+ return 0;
+}
+
+static int rvin_open(struct file *file)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(vin->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = mutex_lock_interruptible(&vin->lock);
+ if (ret)
+ goto err_pm;
+
+ file->private_data = vin;
+
+ ret = v4l2_fh_open(file);
+ if (ret)
+ goto err_unlock;
+
+ if (vin->info->use_mc)
+ ret = v4l2_pipeline_pm_get(&vin->vdev.entity);
+ else if (v4l2_fh_is_singular_file(file))
+ ret = rvin_power_parallel(vin, true);
+
+ if (ret < 0)
+ goto err_open;
+
+ ret = v4l2_ctrl_handler_setup(&vin->ctrl_handler);
+ if (ret)
+ goto err_power;
+
+ mutex_unlock(&vin->lock);
+
+ return 0;
+err_power:
+ if (vin->info->use_mc)
+ v4l2_pipeline_pm_put(&vin->vdev.entity);
+ else if (v4l2_fh_is_singular_file(file))
+ rvin_power_parallel(vin, false);
+err_open:
+ v4l2_fh_release(file);
+err_unlock:
+ mutex_unlock(&vin->lock);
+err_pm:
+ pm_runtime_put(vin->dev);
+
+ return ret;
+}
+
+static int rvin_release(struct file *file)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ bool fh_singular;
+ int ret;
+
+ mutex_lock(&vin->lock);
+
+ /* Save the singular status before we call the clean-up helper */
+ fh_singular = v4l2_fh_is_singular_file(file);
+
+	/* The release helper will clean up any ongoing streaming. */
+ ret = _vb2_fop_release(file, NULL);
+
+ if (vin->info->use_mc) {
+ v4l2_pipeline_pm_put(&vin->vdev.entity);
+ } else {
+ if (fh_singular)
+ rvin_power_parallel(vin, false);
+ }
+
+ mutex_unlock(&vin->lock);
+
+ pm_runtime_put(vin->dev);
+
+ return ret;
+}
+
+static const struct v4l2_file_operations rvin_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = video_ioctl2,
+ .open = rvin_open,
+ .release = rvin_release,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+ .read = vb2_fop_read,
+};
+
+void rvin_v4l2_unregister(struct rvin_dev *vin)
+{
+ if (!video_is_registered(&vin->vdev))
+ return;
+
+ v4l2_info(&vin->v4l2_dev, "Removing %s\n",
+ video_device_node_name(&vin->vdev));
+
+	/* Checks internally whether vdev has been initialized. */
+ video_unregister_device(&vin->vdev);
+}
+
+static void rvin_notify_video_device(struct rvin_dev *vin,
+ unsigned int notification, void *arg)
+{
+ switch (notification) {
+ case V4L2_DEVICE_NOTIFY_EVENT:
+ v4l2_event_queue(&vin->vdev, arg);
+ break;
+ default:
+ break;
+ }
+}
+
+static void rvin_notify(struct v4l2_subdev *sd,
+ unsigned int notification, void *arg)
+{
+ struct v4l2_subdev *remote;
+ struct rvin_group *group;
+ struct media_pad *pad;
+ struct rvin_dev *vin =
+ container_of(sd->v4l2_dev, struct rvin_dev, v4l2_dev);
+ unsigned int i;
+
+ /* If no media controller, no need to route the event. */
+ if (!vin->info->use_mc) {
+ rvin_notify_video_device(vin, notification, arg);
+ return;
+ }
+
+ group = vin->group;
+
+ for (i = 0; i < RCAR_VIN_NUM; i++) {
+ vin = group->vin[i];
+ if (!vin)
+ continue;
+
+ pad = media_pad_remote_pad_first(&vin->pad);
+ if (!pad)
+ continue;
+
+ remote = media_entity_to_v4l2_subdev(pad->entity);
+ if (remote != sd)
+ continue;
+
+ rvin_notify_video_device(vin, notification, arg);
+ }
+}
+
+int rvin_v4l2_register(struct rvin_dev *vin)
+{
+ struct video_device *vdev = &vin->vdev;
+ int ret;
+
+ vin->v4l2_dev.notify = rvin_notify;
+
+ /* video node */
+ vdev->v4l2_dev = &vin->v4l2_dev;
+ vdev->queue = &vin->queue;
+ snprintf(vdev->name, sizeof(vdev->name), "VIN%u output", vin->id);
+ vdev->release = video_device_release_empty;
+ vdev->lock = &vin->lock;
+ vdev->fops = &rvin_fops;
+ vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE;
+
+ /* Set a default format */
+ vin->format.pixelformat = RVIN_DEFAULT_FORMAT;
+ vin->format.width = RVIN_DEFAULT_WIDTH;
+ vin->format.height = RVIN_DEFAULT_HEIGHT;
+ vin->format.field = RVIN_DEFAULT_FIELD;
+ vin->format.colorspace = RVIN_DEFAULT_COLORSPACE;
+
+ if (vin->info->use_mc) {
+ vdev->device_caps |= V4L2_CAP_IO_MC;
+ vdev->ioctl_ops = &rvin_mc_ioctl_ops;
+ } else {
+ vdev->ioctl_ops = &rvin_ioctl_ops;
+ rvin_reset_format(vin);
+ }
+
+ rvin_format_align(vin, &vin->format);
+
+ ret = video_register_device(&vin->vdev, VFL_TYPE_VIDEO, -1);
+ if (ret) {
+ vin_err(vin, "Failed to register video device\n");
+ return ret;
+ }
+
+ video_set_drvdata(&vin->vdev, vin);
+
+ v4l2_info(&vin->v4l2_dev, "Device registered as %s\n",
+ video_device_node_name(&vin->vdev));
+
+ return ret;
+}
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-vin.h b/drivers/media/platform/renesas/rcar-vin/rcar-vin.h
new file mode 100644
index 0000000000..792336dada
--- /dev/null
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-vin.h
@@ -0,0 +1,320 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Driver for Renesas R-Car VIN
+ *
+ * Copyright (C) 2016 Renesas Electronics Corp.
+ * Copyright (C) 2011-2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Cogent Embedded, Inc., <source@cogentembedded.com>
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * Based on the soc-camera rcar_vin driver
+ */
+
+#ifndef __RCAR_VIN__
+#define __RCAR_VIN__
+
+#include <linux/kref.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/videobuf2-v4l2.h>
+
+/* Number of HW buffers */
+#define HW_BUFFER_NUM 3
+
+/* Address alignment mask for HW buffers */
+#define HW_BUFFER_MASK 0x7f
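+/* A mask of 0x7f means hardware buffer addresses are aligned to 128 bytes. */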
+
+/* Max number of VIN instances that can be in a system */
+#define RCAR_VIN_NUM 32
+
+struct rvin_dev;
+struct rvin_group;
+
+enum model_id {
+ RCAR_H1,
+ RCAR_M1,
+ RCAR_GEN2,
+ RCAR_GEN3,
+};
+
+enum rvin_csi_id {
+ RVIN_CSI20,
+ RVIN_CSI21,
+ RVIN_CSI40,
+ RVIN_CSI41,
+ RVIN_CSI_MAX,
+};
+
+enum rvin_isp_id {
+ RVIN_ISP0,
+ RVIN_ISP1,
+ RVIN_ISP2,
+ RVIN_ISP4,
+ RVIN_ISP_MAX,
+};
+
+#define RVIN_REMOTES_MAX \
+ (((unsigned int)RVIN_CSI_MAX) > ((unsigned int)RVIN_ISP_MAX) ? \
+ RVIN_CSI_MAX : RVIN_ISP_MAX)
+
+/**
+ * enum rvin_dma_state - DMA states
+ * @STOPPED: No operation in progress
+ * @STARTING: Capture starting up
+ * @RUNNING:	Operation in progress, buffers are available
+ * @STOPPING: Stopping operation
+ * @SUSPENDED: Capture is suspended
+ */
+enum rvin_dma_state {
+ STOPPED = 0,
+ STARTING,
+ RUNNING,
+ STOPPING,
+ SUSPENDED,
+};
+
+/**
+ * enum rvin_buffer_type - How a buffer is given to the hardware
+ *
+ * To capture SEQ_TB/BT the same vb2 buffer needs to be captured to
+ * twice, so the type of each buffer needs to be tracked.
+ *
+ * @FULL: One capture fills the whole vb2 buffer
+ * @HALF_TOP: One capture fills the top half of the vb2 buffer
+ * @HALF_BOTTOM: One capture fills the bottom half of the vb2 buffer
+ */
+enum rvin_buffer_type {
+ FULL,
+ HALF_TOP,
+ HALF_BOTTOM,
+};
+
+/**
+ * struct rvin_video_format - Data format stored in memory
+ * @fourcc: Pixelformat
+ * @bpp: Bytes per pixel
+ */
+struct rvin_video_format {
+ u32 fourcc;
+ u8 bpp;
+};
+
+/**
+ * struct rvin_parallel_entity - Parallel video input endpoint descriptor
+ * @asc: async connection descriptor for async framework
+ * @subdev: subdevice matched using async framework
+ * @mbus_type: media bus type
+ * @bus: media bus parallel configuration
+ * @source_pad: source pad of remote subdevice
+ * @sink_pad: sink pad of remote subdevice
+ */
+struct rvin_parallel_entity {
+ struct v4l2_async_connection *asc;
+ struct v4l2_subdev *subdev;
+
+ enum v4l2_mbus_type mbus_type;
+ struct v4l2_mbus_config_parallel bus;
+
+ unsigned int source_pad;
+ unsigned int sink_pad;
+};
+
+/**
+ * struct rvin_group_route - describes a route from a channel of a
+ * CSI-2 receiver to a VIN
+ *
+ * @master: VIN group master ID.
+ * @csi: CSI-2 receiver ID.
+ * @chsel: CHSEL register values that connect the VIN group to the CSI-2 receiver.
+ *
+ * .. note::
+ * Each R-Car CSI-2 receiver has four output channels facing the VIN
+ * devices, each channel can carry one CSI-2 Virtual Channel (VC).
+ * There is no correlation between channel number and CSI-2 VC. It's
+ * up to the CSI-2 receiver driver to configure which VC is output
+ * on which channel, the VIN devices only care about output channels.
+ */
+struct rvin_group_route {
+ unsigned int master;
+ enum rvin_csi_id csi;
+ unsigned int chsel;
+};
+
+/**
+ * struct rvin_info - Information about the particular VIN implementation
+ * @model: VIN model
+ * @use_mc: use media controller instead of controlling subdevice
+ * @use_isp: the VIN is connected to the ISP and not to the CSI-2
+ * @nv12: support outputting NV12 pixel format
+ * @max_width: max input width the VIN supports
+ * @max_height: max input height the VIN supports
+ * @routes: list of possible routes from the CSI-2 receivers to
+ * all VINs. The list must be NULL terminated.
+ * @scaler: Optional scaler
+ */
+struct rvin_info {
+ enum model_id model;
+ bool use_mc;
+ bool use_isp;
+ bool nv12;
+
+ unsigned int max_width;
+ unsigned int max_height;
+ const struct rvin_group_route *routes;
+ void (*scaler)(struct rvin_dev *vin);
+};
+
+/**
+ * struct rvin_dev - Renesas VIN device structure
+ * @dev: (OF) device
+ * @base: device I/O register space remapped to virtual memory
+ * @info: info about VIN instance
+ *
+ * @vdev: V4L2 video device associated with VIN
+ * @v4l2_dev: V4L2 device
+ * @ctrl_handler: V4L2 control handler
+ * @notifier: V4L2 asynchronous subdevs notifier
+ *
+ * @parallel: parallel input subdevice descriptor
+ *
+ * @group: Gen3 CSI group
+ * @id: Gen3 group id for this VIN
+ * @pad: media pad for the video device entity
+ *
+ * @lock: protects @queue
+ * @queue: vb2 buffers queue
+ * @scratch: cpu address for scratch buffer
+ * @scratch_phys: physical address of the scratch buffer
+ *
+ * @qlock: protects @buf_hw, @buf_list, @sequence and @state
+ * @buf_hw: Keeps track of buffers given to HW slot
+ * @buf_list: list of queued buffers
+ * @sequence: V4L2 buffers sequence number
+ * @state: keeps track of operation state
+ *
+ * @is_csi: flag to mark the VIN as using a CSI-2 subdevice
+ * @chsel: Cached value of the current CSI-2 channel selection
+ *
+ * @mbus_code: media bus format code
+ * @format: active V4L2 pixel format
+ *
+ * @crop: active cropping
+ * @compose: active composing
+ * @scaler: Optional scaler
+ * @std: active video standard of the video source
+ *
+ * @alpha: Alpha component to fill in for supported pixel formats
+ */
+struct rvin_dev {
+ struct device *dev;
+ void __iomem *base;
+ const struct rvin_info *info;
+
+ struct video_device vdev;
+ struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_async_notifier notifier;
+
+ struct rvin_parallel_entity parallel;
+
+ struct rvin_group *group;
+ unsigned int id;
+ struct media_pad pad;
+
+ struct mutex lock;
+ struct vb2_queue queue;
+ void *scratch;
+ dma_addr_t scratch_phys;
+
+ spinlock_t qlock;
+ struct {
+ struct vb2_v4l2_buffer *buffer;
+ enum rvin_buffer_type type;
+ dma_addr_t phys;
+ } buf_hw[HW_BUFFER_NUM];
+ struct list_head buf_list;
+ unsigned int sequence;
+ enum rvin_dma_state state;
+
+ bool is_csi;
+ unsigned int chsel;
+
+ u32 mbus_code;
+ struct v4l2_pix_format format;
+
+ struct v4l2_rect crop;
+ struct v4l2_rect compose;
+ void (*scaler)(struct rvin_dev *vin);
+ v4l2_std_id std;
+
+ unsigned int alpha;
+};
+
+#define vin_to_source(vin) ((vin)->parallel.subdev)
+
+/* Debug */
+#define vin_dbg(d, fmt, arg...) dev_dbg(d->dev, fmt, ##arg)
+#define vin_info(d, fmt, arg...) dev_info(d->dev, fmt, ##arg)
+#define vin_warn(d, fmt, arg...) dev_warn(d->dev, fmt, ##arg)
+#define vin_err(d, fmt, arg...) dev_err(d->dev, fmt, ##arg)
+
+/**
+ * struct rvin_group - VIN CSI2 group information
+ * @refcount: number of VIN instances using the group
+ *
+ * @mdev: media device which represents the group
+ *
+ * @lock: protects the count, notifier, vin and remotes members
+ * @count: number of enabled VIN instances found in DT
+ * @notifier: group notifier for CSI-2 async connections
+ * @vin: VIN instances which are part of the group
+ * @link_setup: Callback to create all links for the media graph
+ * @remotes: array of pairs of async connection and subdev pointers
+ * to all remote subdevices.
+ */
+struct rvin_group {
+ struct kref refcount;
+
+ struct media_device mdev;
+
+ struct mutex lock;
+ unsigned int count;
+ struct v4l2_async_notifier notifier;
+ struct rvin_dev *vin[RCAR_VIN_NUM];
+
+ int (*link_setup)(struct rvin_dev *vin);
+
+ struct {
+ struct v4l2_async_connection *asc;
+ struct v4l2_subdev *subdev;
+ } remotes[RVIN_REMOTES_MAX];
+};
+
+int rvin_dma_register(struct rvin_dev *vin, int irq);
+void rvin_dma_unregister(struct rvin_dev *vin);
+
+int rvin_v4l2_register(struct rvin_dev *vin);
+void rvin_v4l2_unregister(struct rvin_dev *vin);
+
+const struct rvin_video_format *rvin_format_from_pixel(struct rvin_dev *vin,
+ u32 pixelformat);
+
+/* Cropping, composing and scaling */
+void rvin_scaler_gen2(struct rvin_dev *vin);
+void rvin_scaler_gen3(struct rvin_dev *vin);
+void rvin_crop_scale_comp(struct rvin_dev *vin);
+
+int rvin_set_channel_routing(struct rvin_dev *vin, u8 chsel);
+void rvin_set_alpha(struct rvin_dev *vin, unsigned int alpha);
+
+int rvin_start_streaming(struct rvin_dev *vin);
+void rvin_stop_streaming(struct rvin_dev *vin);
+
+#endif
diff --git a/drivers/media/platform/renesas/rcar_drif.c b/drivers/media/platform/renesas/rcar_drif.c
new file mode 100644
index 0000000000..163a4ba61c
--- /dev/null
+++ b/drivers/media/platform/renesas/rcar_drif.c
@@ -0,0 +1,1486 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * R-Car Gen3 Digital Radio Interface (DRIF) driver
+ *
+ * Copyright (C) 2017 Renesas Electronics Corporation
+ */
+
+/*
+ * The R-Car DRIF is a receive-only MSIOF-like controller with an
+ * external master device driving the SCK. It receives data into a FIFO,
+ * then this driver uses the SYS-DMAC engine to move the data from
+ * the device to memory.
+ *
+ * Each DRIF channel DRIFx (as per the datasheet) contains two internal
+ * channels, DRIFx0 & DRIFx1, each having its own resources such as the
+ * module clock, register set, irq and dma. These internal channels share
+ * a common CLK & SYNC from the master. The two data pins D0 & D1 shall be
+ * considered to represent the two internal channels. This internal split
+ * is not visible to the master device.
+ *
+ * Depending on the master device, a DRIF channel can use
+ * (1) both internal channels (D0 & D1) to receive data in parallel (or)
+ * (2) one internal channel (D0 or D1) to receive data
+ *
+ * The primary design goal of this controller is to act as a Digital Radio
+ * Interface that receives digital samples from a tuner device. Hence the
+ * driver exposes the device as a V4L2 SDR device. In order to qualify as
+ * a V4L2 SDR device, it should possess a tuner interface as mandated by the
+ * framework. This driver expects a tuner driver (sub-device) to bind
+ * asynchronously with this device and the combined drivers shall expose
+ * a V4L2 compliant SDR device. The DRIF driver is independent of the
+ * tuner vendor.
+ *
+ * The DRIF h/w can support I2S mode and Frame start synchronization pulse mode.
+ * This driver is tested for I2S mode only because of the availability of
+ * suitable master devices. Hence, not all configurable options of the DRIF
+ * h/w (lsb/msb first, syncdl, dtdl, etc.) are exposed via DT, and I2S
+ * defaults are used. These can be exposed later if needed after testing.
+ */
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/ioctl.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-vmalloc.h>
+
+/* DRIF register offsets */
+#define RCAR_DRIF_SITMDR1 0x00
+#define RCAR_DRIF_SITMDR2 0x04
+#define RCAR_DRIF_SITMDR3 0x08
+#define RCAR_DRIF_SIRMDR1 0x10
+#define RCAR_DRIF_SIRMDR2 0x14
+#define RCAR_DRIF_SIRMDR3 0x18
+#define RCAR_DRIF_SICTR 0x28
+#define RCAR_DRIF_SIFCTR 0x30
+#define RCAR_DRIF_SISTR 0x40
+#define RCAR_DRIF_SIIER 0x44
+#define RCAR_DRIF_SIRFDR 0x60
+
+#define RCAR_DRIF_RFOVF BIT(3) /* Receive FIFO overflow */
+#define RCAR_DRIF_RFUDF BIT(4) /* Receive FIFO underflow */
+#define RCAR_DRIF_RFSERR BIT(5) /* Receive frame sync error */
+#define RCAR_DRIF_REOF BIT(7) /* Frame reception end */
+#define RCAR_DRIF_RDREQ BIT(12) /* Receive data xfer req */
+#define RCAR_DRIF_RFFUL BIT(13) /* Receive FIFO full */
+
+/* SIRMDR1 */
+#define RCAR_DRIF_SIRMDR1_SYNCMD_FRAME (0 << 28)
+#define RCAR_DRIF_SIRMDR1_SYNCMD_LR (3 << 28)
+
+#define RCAR_DRIF_SIRMDR1_SYNCAC_POL_HIGH (0 << 25)
+#define RCAR_DRIF_SIRMDR1_SYNCAC_POL_LOW (1 << 25)
+
+#define RCAR_DRIF_SIRMDR1_MSB_FIRST (0 << 24)
+#define RCAR_DRIF_SIRMDR1_LSB_FIRST (1 << 24)
+
+#define RCAR_DRIF_SIRMDR1_DTDL_0 (0 << 20)
+#define RCAR_DRIF_SIRMDR1_DTDL_1 (1 << 20)
+#define RCAR_DRIF_SIRMDR1_DTDL_2 (2 << 20)
+#define RCAR_DRIF_SIRMDR1_DTDL_0PT5 (5 << 20)
+#define RCAR_DRIF_SIRMDR1_DTDL_1PT5 (6 << 20)
+
+#define RCAR_DRIF_SIRMDR1_SYNCDL_0 (0 << 20)
+#define RCAR_DRIF_SIRMDR1_SYNCDL_1 (1 << 20)
+#define RCAR_DRIF_SIRMDR1_SYNCDL_2 (2 << 20)
+#define RCAR_DRIF_SIRMDR1_SYNCDL_3 (3 << 20)
+#define RCAR_DRIF_SIRMDR1_SYNCDL_0PT5 (5 << 20)
+#define RCAR_DRIF_SIRMDR1_SYNCDL_1PT5 (6 << 20)
+
+#define RCAR_DRIF_MDR_GRPCNT(n) (((n) - 1) << 30)
+#define RCAR_DRIF_MDR_BITLEN(n) (((n) - 1) << 24)
+#define RCAR_DRIF_MDR_WDCNT(n) (((n) - 1) << 16)
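+
+/*
+ * For the 16-bit stream format, for example, SIRMDR2 ends up as
+ * RCAR_DRIF_MDR_GRPCNT(2) | RCAR_DRIF_MDR_BITLEN(16) |
+ * RCAR_DRIF_MDR_WDCNT(1) = 0x4f000000 (see rcar_drif_set_format()).
+ */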
+
+/* Hidden Transmit register that controls CLK & SYNC */
+#define RCAR_DRIF_SITMDR1_PCON BIT(30)
+
+#define RCAR_DRIF_SICTR_RX_RISING_EDGE BIT(26)
+#define RCAR_DRIF_SICTR_RX_EN BIT(8)
+#define RCAR_DRIF_SICTR_RESET BIT(0)
+
+/* Constants */
+#define RCAR_DRIF_NUM_HWBUFS 32
+#define RCAR_DRIF_MAX_DEVS 4
+#define RCAR_DRIF_DEFAULT_NUM_HWBUFS 16
+#define RCAR_DRIF_DEFAULT_HWBUF_SIZE (4 * PAGE_SIZE)
+#define RCAR_DRIF_MAX_CHANNEL 2
+#define RCAR_SDR_BUFFER_SIZE SZ_64K
+
+/* Internal buffer status flags */
+#define RCAR_DRIF_BUF_DONE BIT(0) /* DMA completed */
+#define RCAR_DRIF_BUF_OVERFLOW BIT(1) /* Overflow detected */
+
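+/*
+ * ch_num is 0 or 1, so !(ch_num) selects the buffer with the same index on
+ * the other internal channel of the pair.
+ */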
+#define to_rcar_drif_buf_pair(sdr, ch_num, idx) \
+ (&((sdr)->ch[!(ch_num)]->buf[(idx)]))
+
+#define for_each_rcar_drif_channel(ch, ch_mask) \
+ for_each_set_bit(ch, ch_mask, RCAR_DRIF_MAX_CHANNEL)
+
+/* Debug */
+#define rdrif_dbg(sdr, fmt, arg...) \
+ dev_dbg(sdr->v4l2_dev.dev, fmt, ## arg)
+
+#define rdrif_err(sdr, fmt, arg...) \
+ dev_err(sdr->v4l2_dev.dev, fmt, ## arg)
+
+/* Stream formats */
+struct rcar_drif_format {
+ u32 pixelformat;
+ u32 buffersize;
+ u32 bitlen;
+ u32 wdcnt;
+ u32 num_ch;
+};
+
+/* Format descriptions for capture */
+static const struct rcar_drif_format formats[] = {
+ {
+ .pixelformat = V4L2_SDR_FMT_PCU16BE,
+ .buffersize = RCAR_SDR_BUFFER_SIZE,
+ .bitlen = 16,
+ .wdcnt = 1,
+ .num_ch = 2,
+ },
+ {
+ .pixelformat = V4L2_SDR_FMT_PCU18BE,
+ .buffersize = RCAR_SDR_BUFFER_SIZE,
+ .bitlen = 18,
+ .wdcnt = 1,
+ .num_ch = 2,
+ },
+ {
+ .pixelformat = V4L2_SDR_FMT_PCU20BE,
+ .buffersize = RCAR_SDR_BUFFER_SIZE,
+ .bitlen = 20,
+ .wdcnt = 1,
+ .num_ch = 2,
+ },
+};
+
+/* Buffer for a received frame from one or both internal channels */
+struct rcar_drif_frame_buf {
+ /* Common v4l buffer stuff -- must be first */
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+};
+
+/* OF graph endpoint's V4L2 async data */
+struct rcar_drif_graph_ep {
+ struct v4l2_subdev *subdev; /* Async matched subdev */
+};
+
+/* DMA buffer */
+struct rcar_drif_hwbuf {
+ void *addr; /* CPU-side address */
+ unsigned int status; /* Buffer status flags */
+};
+
+/* Internal channel */
+struct rcar_drif {
+ struct rcar_drif_sdr *sdr; /* Group device */
+ struct platform_device *pdev; /* Channel's pdev */
+ void __iomem *base; /* Base register address */
+ resource_size_t start; /* I/O resource offset */
+ struct dma_chan *dmach; /* Reserved DMA channel */
+ struct clk *clk; /* Module clock */
+ struct rcar_drif_hwbuf buf[RCAR_DRIF_NUM_HWBUFS]; /* H/W bufs */
+ dma_addr_t dma_handle; /* Handle for all bufs */
+ unsigned int num; /* Channel number */
+ bool acting_sdr; /* Channel acting as SDR device */
+};
+
+/* DRIF V4L2 SDR */
+struct rcar_drif_sdr {
+ struct device *dev; /* Platform device */
+ struct video_device *vdev; /* V4L2 SDR device */
+ struct v4l2_device v4l2_dev; /* V4L2 device */
+
+ /* Videobuf2 queue and queued buffers list */
+ struct vb2_queue vb_queue;
+ struct list_head queued_bufs;
+ spinlock_t queued_bufs_lock; /* Protects queued_bufs */
+ spinlock_t dma_lock; /* To serialize DMA cb of channels */
+
+ struct mutex v4l2_mutex; /* To serialize ioctls */
+ struct mutex vb_queue_mutex; /* To serialize streaming ioctls */
+ struct v4l2_ctrl_handler ctrl_hdl; /* SDR control handler */
+ struct v4l2_async_notifier notifier; /* For subdev (tuner) */
+ struct rcar_drif_graph_ep ep; /* Endpoint V4L2 async data */
+
+ /* Current V4L2 SDR format ptr */
+ const struct rcar_drif_format *fmt;
+
+ /* Device tree SYNC properties */
+ u32 mdr1;
+
+ /* Internals */
+ struct rcar_drif *ch[RCAR_DRIF_MAX_CHANNEL]; /* DRIFx0,1 */
+ unsigned long hw_ch_mask; /* Enabled channels per DT */
+ unsigned long cur_ch_mask; /* Used channels for an SDR FMT */
+ u32 num_hw_ch; /* Num of DT enabled channels */
+ u32 num_cur_ch; /* Num of used channels */
+ u32 hwbuf_size; /* Each DMA buffer size */
+ u32 produced; /* Buffers produced by sdr dev */
+};
+
+/* Register access functions */
+static void rcar_drif_write(struct rcar_drif *ch, u32 offset, u32 data)
+{
+ writel(data, ch->base + offset);
+}
+
+static u32 rcar_drif_read(struct rcar_drif *ch, u32 offset)
+{
+ return readl(ch->base + offset);
+}
+
+/* Release DMA channels */
+static void rcar_drif_release_dmachannels(struct rcar_drif_sdr *sdr)
+{
+ unsigned int i;
+
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask)
+ if (sdr->ch[i]->dmach) {
+ dma_release_channel(sdr->ch[i]->dmach);
+ sdr->ch[i]->dmach = NULL;
+ }
+}
+
+/* Allocate DMA channels */
+static int rcar_drif_alloc_dmachannels(struct rcar_drif_sdr *sdr)
+{
+ struct dma_slave_config dma_cfg;
+ unsigned int i;
+ int ret;
+
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
+ struct rcar_drif *ch = sdr->ch[i];
+
+ ch->dmach = dma_request_chan(&ch->pdev->dev, "rx");
+ if (IS_ERR(ch->dmach)) {
+ ret = PTR_ERR(ch->dmach);
+ if (ret != -EPROBE_DEFER)
+ rdrif_err(sdr,
+ "ch%u: dma channel req failed: %pe\n",
+ i, ch->dmach);
+ ch->dmach = NULL;
+ goto dmach_error;
+ }
+
+ /* Configure slave */
+ memset(&dma_cfg, 0, sizeof(dma_cfg));
+ dma_cfg.src_addr = (phys_addr_t)(ch->start + RCAR_DRIF_SIRFDR);
+ dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ ret = dmaengine_slave_config(ch->dmach, &dma_cfg);
+ if (ret) {
+ rdrif_err(sdr, "ch%u: dma slave config failed\n", i);
+ goto dmach_error;
+ }
+ }
+ return 0;
+
+dmach_error:
+ rcar_drif_release_dmachannels(sdr);
+ return ret;
+}
+
+/* Release queued vb2 buffers */
+static void rcar_drif_release_queued_bufs(struct rcar_drif_sdr *sdr,
+ enum vb2_buffer_state state)
+{
+ struct rcar_drif_frame_buf *fbuf, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sdr->queued_bufs_lock, flags);
+ list_for_each_entry_safe(fbuf, tmp, &sdr->queued_bufs, list) {
+ list_del(&fbuf->list);
+ vb2_buffer_done(&fbuf->vb.vb2_buf, state);
+ }
+ spin_unlock_irqrestore(&sdr->queued_bufs_lock, flags);
+}
+
+/* Set MDR defaults */
+static inline void rcar_drif_set_mdr1(struct rcar_drif_sdr *sdr)
+{
+ unsigned int i;
+
+ /* Set defaults for enabled internal channels */
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
+		/* Refer to the MSIOF section in the manual for this register setting */
+ rcar_drif_write(sdr->ch[i], RCAR_DRIF_SITMDR1,
+ RCAR_DRIF_SITMDR1_PCON);
+
+ /* Setup MDR1 value */
+ rcar_drif_write(sdr->ch[i], RCAR_DRIF_SIRMDR1, sdr->mdr1);
+
+ rdrif_dbg(sdr, "ch%u: mdr1 = 0x%08x",
+ i, rcar_drif_read(sdr->ch[i], RCAR_DRIF_SIRMDR1));
+ }
+}
+
+/* Set DRIF receive format */
+static int rcar_drif_set_format(struct rcar_drif_sdr *sdr)
+{
+ unsigned int i;
+
+ rdrif_dbg(sdr, "setfmt: bitlen %u wdcnt %u num_ch %u\n",
+ sdr->fmt->bitlen, sdr->fmt->wdcnt, sdr->fmt->num_ch);
+
+ /* Sanity check */
+ if (sdr->fmt->num_ch > sdr->num_cur_ch) {
+ rdrif_err(sdr, "fmt num_ch %u cur_ch %u mismatch\n",
+ sdr->fmt->num_ch, sdr->num_cur_ch);
+ return -EINVAL;
+ }
+
+ /* Setup group, bitlen & wdcnt */
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
+ u32 mdr;
+
+ /* Two groups */
+ mdr = RCAR_DRIF_MDR_GRPCNT(2) |
+ RCAR_DRIF_MDR_BITLEN(sdr->fmt->bitlen) |
+ RCAR_DRIF_MDR_WDCNT(sdr->fmt->wdcnt);
+ rcar_drif_write(sdr->ch[i], RCAR_DRIF_SIRMDR2, mdr);
+
+ mdr = RCAR_DRIF_MDR_BITLEN(sdr->fmt->bitlen) |
+ RCAR_DRIF_MDR_WDCNT(sdr->fmt->wdcnt);
+ rcar_drif_write(sdr->ch[i], RCAR_DRIF_SIRMDR3, mdr);
+
+ rdrif_dbg(sdr, "ch%u: new mdr[2,3] = 0x%08x, 0x%08x\n",
+ i, rcar_drif_read(sdr->ch[i], RCAR_DRIF_SIRMDR2),
+ rcar_drif_read(sdr->ch[i], RCAR_DRIF_SIRMDR3));
+ }
+ return 0;
+}
+
+/* Release DMA buffers */
+static void rcar_drif_release_buf(struct rcar_drif_sdr *sdr)
+{
+ unsigned int i;
+
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
+ struct rcar_drif *ch = sdr->ch[i];
+
+ /* First entry contains the dma buf ptr */
+ if (ch->buf[0].addr) {
+ dma_free_coherent(&ch->pdev->dev,
+ sdr->hwbuf_size * RCAR_DRIF_NUM_HWBUFS,
+ ch->buf[0].addr, ch->dma_handle);
+ ch->buf[0].addr = NULL;
+ }
+ }
+}
+
+/* Request DMA buffers */
+static int rcar_drif_request_buf(struct rcar_drif_sdr *sdr)
+{
+ int ret = -ENOMEM;
+ unsigned int i, j;
+ void *addr;
+
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
+ struct rcar_drif *ch = sdr->ch[i];
+
+ /* Allocate DMA buffers */
+ addr = dma_alloc_coherent(&ch->pdev->dev,
+ sdr->hwbuf_size * RCAR_DRIF_NUM_HWBUFS,
+ &ch->dma_handle, GFP_KERNEL);
+ if (!addr) {
+ rdrif_err(sdr,
+ "ch%u: dma alloc failed. num hwbufs %u size %u\n",
+ i, RCAR_DRIF_NUM_HWBUFS, sdr->hwbuf_size);
+ goto error;
+ }
+
+ /* Split the chunk and populate bufctxt */
+ for (j = 0; j < RCAR_DRIF_NUM_HWBUFS; j++) {
+ ch->buf[j].addr = addr + (j * sdr->hwbuf_size);
+ ch->buf[j].status = 0;
+ }
+ }
+ return 0;
+error:
+ return ret;
+}
+
+/* Setup vb_queue minimum buffer requirements */
+static int rcar_drif_queue_setup(struct vb2_queue *vq,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct rcar_drif_sdr *sdr = vb2_get_drv_priv(vq);
+
+ /* Need at least 16 buffers */
+ if (vq->num_buffers + *num_buffers < 16)
+ *num_buffers = 16 - vq->num_buffers;
+
+ *num_planes = 1;
+ sizes[0] = PAGE_ALIGN(sdr->fmt->buffersize);
+ rdrif_dbg(sdr, "num_bufs %d sizes[0] %d\n", *num_buffers, sizes[0]);
+
+ return 0;
+}
+
+/* Enqueue buffer */
+static void rcar_drif_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct rcar_drif_sdr *sdr = vb2_get_drv_priv(vb->vb2_queue);
+ struct rcar_drif_frame_buf *fbuf =
+ container_of(vbuf, struct rcar_drif_frame_buf, vb);
+ unsigned long flags;
+
+ rdrif_dbg(sdr, "buf_queue idx %u\n", vb->index);
+ spin_lock_irqsave(&sdr->queued_bufs_lock, flags);
+ list_add_tail(&fbuf->list, &sdr->queued_bufs);
+ spin_unlock_irqrestore(&sdr->queued_bufs_lock, flags);
+}
+
+/* Get a frame buf from list */
+static struct rcar_drif_frame_buf *
+rcar_drif_get_fbuf(struct rcar_drif_sdr *sdr)
+{
+ struct rcar_drif_frame_buf *fbuf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sdr->queued_bufs_lock, flags);
+ fbuf = list_first_entry_or_null(&sdr->queued_bufs, struct
+ rcar_drif_frame_buf, list);
+ if (!fbuf) {
+ /*
+		 * The application is late in enqueueing buffers. Samples are
+		 * lost and there will be a gap in sequence numbers when the
+		 * application recovers.
+ */
+ rdrif_dbg(sdr, "\napp late: prod %u\n", sdr->produced);
+ spin_unlock_irqrestore(&sdr->queued_bufs_lock, flags);
+ return NULL;
+ }
+ list_del(&fbuf->list);
+ spin_unlock_irqrestore(&sdr->queued_bufs_lock, flags);
+
+ return fbuf;
+}
+
+/* Helpers to set/clear buf pair status */
+static inline bool rcar_drif_bufs_done(struct rcar_drif_hwbuf **buf)
+{
+ return (buf[0]->status & buf[1]->status & RCAR_DRIF_BUF_DONE);
+}
+
+static inline bool rcar_drif_bufs_overflow(struct rcar_drif_hwbuf **buf)
+{
+ return ((buf[0]->status | buf[1]->status) & RCAR_DRIF_BUF_OVERFLOW);
+}
+
+static inline void rcar_drif_bufs_clear(struct rcar_drif_hwbuf **buf,
+ unsigned int bit)
+{
+ unsigned int i;
+
+ for (i = 0; i < RCAR_DRIF_MAX_CHANNEL; i++)
+ buf[i]->status &= ~bit;
+}
+
+/* Channel DMA complete */
+static void rcar_drif_channel_complete(struct rcar_drif *ch, u32 idx)
+{
+ u32 str;
+
+ ch->buf[idx].status |= RCAR_DRIF_BUF_DONE;
+
+ /* Check for DRIF errors */
+ str = rcar_drif_read(ch, RCAR_DRIF_SISTR);
+ if (unlikely(str & RCAR_DRIF_RFOVF)) {
+ /* Writing the same clears it */
+ rcar_drif_write(ch, RCAR_DRIF_SISTR, str);
+
+ /* Overflow: some samples are lost */
+ ch->buf[idx].status |= RCAR_DRIF_BUF_OVERFLOW;
+ }
+}
+
+/* DMA callback for each stage */
+static void rcar_drif_dma_complete(void *dma_async_param)
+{
+ struct rcar_drif *ch = dma_async_param;
+ struct rcar_drif_sdr *sdr = ch->sdr;
+ struct rcar_drif_hwbuf *buf[RCAR_DRIF_MAX_CHANNEL];
+ struct rcar_drif_frame_buf *fbuf;
+ bool overflow = false;
+ u32 idx, produced;
+ unsigned int i;
+
+ spin_lock(&sdr->dma_lock);
+
+	/* DMA may have been terminated while the callback was waiting on the lock */
+ if (!vb2_is_streaming(&sdr->vb_queue)) {
+ spin_unlock(&sdr->dma_lock);
+ return;
+ }
+
+ idx = sdr->produced % RCAR_DRIF_NUM_HWBUFS;
+ rcar_drif_channel_complete(ch, idx);
+
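+	/*
+	 * With both internal channels in use, buf[0] always refers to
+	 * DRIFx0's buffer and buf[1] to DRIFx1's, regardless of which
+	 * channel's DMA completed.
+	 */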
+ if (sdr->num_cur_ch == RCAR_DRIF_MAX_CHANNEL) {
+ buf[0] = ch->num ? to_rcar_drif_buf_pair(sdr, ch->num, idx) :
+ &ch->buf[idx];
+ buf[1] = ch->num ? &ch->buf[idx] :
+ to_rcar_drif_buf_pair(sdr, ch->num, idx);
+
+ /* Check if both DMA buffers are done */
+ if (!rcar_drif_bufs_done(buf)) {
+ spin_unlock(&sdr->dma_lock);
+ return;
+ }
+
+ /* Clear buf done status */
+ rcar_drif_bufs_clear(buf, RCAR_DRIF_BUF_DONE);
+
+ if (rcar_drif_bufs_overflow(buf)) {
+ overflow = true;
+ /* Clear the flag in status */
+ rcar_drif_bufs_clear(buf, RCAR_DRIF_BUF_OVERFLOW);
+ }
+ } else {
+ buf[0] = &ch->buf[idx];
+ if (buf[0]->status & RCAR_DRIF_BUF_OVERFLOW) {
+ overflow = true;
+ /* Clear the flag in status */
+ buf[0]->status &= ~RCAR_DRIF_BUF_OVERFLOW;
+ }
+ }
+
+ /* Buffer produced for consumption */
+ produced = sdr->produced++;
+ spin_unlock(&sdr->dma_lock);
+
+ rdrif_dbg(sdr, "ch%u: prod %u\n", ch->num, produced);
+
+ /* Get fbuf */
+ fbuf = rcar_drif_get_fbuf(sdr);
+ if (!fbuf)
+ return;
+
+ for (i = 0; i < RCAR_DRIF_MAX_CHANNEL; i++)
+ memcpy(vb2_plane_vaddr(&fbuf->vb.vb2_buf, 0) +
+ i * sdr->hwbuf_size, buf[i]->addr, sdr->hwbuf_size);
+
+ fbuf->vb.field = V4L2_FIELD_NONE;
+ fbuf->vb.sequence = produced;
+ fbuf->vb.vb2_buf.timestamp = ktime_get_ns();
+ vb2_set_plane_payload(&fbuf->vb.vb2_buf, 0, sdr->fmt->buffersize);
+
+ /* Set error state on overflow */
+ vb2_buffer_done(&fbuf->vb.vb2_buf,
+ overflow ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+}
+
+static int rcar_drif_qbuf(struct rcar_drif *ch)
+{
+ struct rcar_drif_sdr *sdr = ch->sdr;
+ dma_addr_t addr = ch->dma_handle;
+ struct dma_async_tx_descriptor *rxd;
+ dma_cookie_t cookie;
+ int ret = -EIO;
+
+ /* Setup cyclic DMA with given buffers */
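+	/*
+	 * The whole RCAR_DRIF_NUM_HWBUFS * hwbuf_size area is mapped as a
+	 * single cyclic transfer with a period of hwbuf_size, so
+	 * rcar_drif_dma_complete() runs once per hardware buffer.
+	 */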
+ rxd = dmaengine_prep_dma_cyclic(ch->dmach, addr,
+ sdr->hwbuf_size * RCAR_DRIF_NUM_HWBUFS,
+ sdr->hwbuf_size, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!rxd) {
+ rdrif_err(sdr, "ch%u: prep dma cyclic failed\n", ch->num);
+ return ret;
+ }
+
+ /* Submit descriptor */
+ rxd->callback = rcar_drif_dma_complete;
+ rxd->callback_param = ch;
+ cookie = dmaengine_submit(rxd);
+ if (dma_submit_error(cookie)) {
+ rdrif_err(sdr, "ch%u: dma submit failed\n", ch->num);
+ return ret;
+ }
+
+ dma_async_issue_pending(ch->dmach);
+ return 0;
+}
+
+/* Enable reception */
+static int rcar_drif_enable_rx(struct rcar_drif_sdr *sdr)
+{
+ unsigned int i;
+ u32 ctr;
+ int ret = -EINVAL;
+
+ /*
+ * When both internal channels are enabled, they can be synchronized
+ * only by the master
+ */
+
+ /* Enable receive */
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
+ ctr = rcar_drif_read(sdr->ch[i], RCAR_DRIF_SICTR);
+ ctr |= (RCAR_DRIF_SICTR_RX_RISING_EDGE |
+ RCAR_DRIF_SICTR_RX_EN);
+ rcar_drif_write(sdr->ch[i], RCAR_DRIF_SICTR, ctr);
+ }
+
+ /* Check receive enabled */
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
+ ret = readl_poll_timeout(sdr->ch[i]->base + RCAR_DRIF_SICTR,
+ ctr, ctr & RCAR_DRIF_SICTR_RX_EN, 7, 100000);
+ if (ret) {
+ rdrif_err(sdr, "ch%u: rx en failed. ctr 0x%08x\n", i,
+ rcar_drif_read(sdr->ch[i], RCAR_DRIF_SICTR));
+ break;
+ }
+ }
+ return ret;
+}
+
+/* Disable reception */
+static void rcar_drif_disable_rx(struct rcar_drif_sdr *sdr)
+{
+ unsigned int i;
+ u32 ctr;
+ int ret;
+
+ /* Disable receive */
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
+ ctr = rcar_drif_read(sdr->ch[i], RCAR_DRIF_SICTR);
+ ctr &= ~RCAR_DRIF_SICTR_RX_EN;
+ rcar_drif_write(sdr->ch[i], RCAR_DRIF_SICTR, ctr);
+ }
+
+ /* Check receive disabled */
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
+ ret = readl_poll_timeout(sdr->ch[i]->base + RCAR_DRIF_SICTR,
+ ctr, !(ctr & RCAR_DRIF_SICTR_RX_EN), 7, 100000);
+ if (ret)
+ dev_warn(&sdr->vdev->dev,
+ "ch%u: failed to disable rx. ctr 0x%08x\n",
+ i, rcar_drif_read(sdr->ch[i], RCAR_DRIF_SICTR));
+ }
+}
+
+/* Stop channel */
+static void rcar_drif_stop_channel(struct rcar_drif *ch)
+{
+ /* Disable DMA receive interrupt */
+ rcar_drif_write(ch, RCAR_DRIF_SIIER, 0x00000000);
+
+ /* Terminate all DMA transfers */
+ dmaengine_terminate_sync(ch->dmach);
+}
+
+/* Stop receive operation */
+static void rcar_drif_stop(struct rcar_drif_sdr *sdr)
+{
+ unsigned int i;
+
+ /* Disable Rx */
+ rcar_drif_disable_rx(sdr);
+
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask)
+ rcar_drif_stop_channel(sdr->ch[i]);
+}
+
+/* Start channel */
+static int rcar_drif_start_channel(struct rcar_drif *ch)
+{
+ struct rcar_drif_sdr *sdr = ch->sdr;
+ u32 ctr, str;
+ int ret;
+
+ /* Reset receive */
+ rcar_drif_write(ch, RCAR_DRIF_SICTR, RCAR_DRIF_SICTR_RESET);
+ ret = readl_poll_timeout(ch->base + RCAR_DRIF_SICTR, ctr,
+ !(ctr & RCAR_DRIF_SICTR_RESET), 7, 100000);
+ if (ret) {
+ rdrif_err(sdr, "ch%u: failed to reset rx. ctr 0x%08x\n",
+ ch->num, rcar_drif_read(ch, RCAR_DRIF_SICTR));
+ return ret;
+ }
+
+ /* Queue buffers for DMA */
+ ret = rcar_drif_qbuf(ch);
+ if (ret)
+ return ret;
+
+ /* Clear status register flags */
+ str = RCAR_DRIF_RFFUL | RCAR_DRIF_REOF | RCAR_DRIF_RFSERR |
+ RCAR_DRIF_RFUDF | RCAR_DRIF_RFOVF;
+ rcar_drif_write(ch, RCAR_DRIF_SISTR, str);
+
+ /* Enable DMA receive interrupt */
+ rcar_drif_write(ch, RCAR_DRIF_SIIER, 0x00009000);
+
+ return ret;
+}
+
+/* Start receive operation */
+static int rcar_drif_start(struct rcar_drif_sdr *sdr)
+{
+ unsigned long enabled = 0;
+ unsigned int i;
+ int ret;
+
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
+ ret = rcar_drif_start_channel(sdr->ch[i]);
+ if (ret)
+ goto start_error;
+ enabled |= BIT(i);
+ }
+
+ ret = rcar_drif_enable_rx(sdr);
+ if (ret)
+ goto enable_error;
+
+ sdr->produced = 0;
+ return ret;
+
+enable_error:
+ rcar_drif_disable_rx(sdr);
+start_error:
+ for_each_rcar_drif_channel(i, &enabled)
+ rcar_drif_stop_channel(sdr->ch[i]);
+
+ return ret;
+}
+
+/* Start streaming */
+static int rcar_drif_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct rcar_drif_sdr *sdr = vb2_get_drv_priv(vq);
+ unsigned long enabled = 0;
+ unsigned int i;
+ int ret;
+
+ mutex_lock(&sdr->v4l2_mutex);
+
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
+ ret = clk_prepare_enable(sdr->ch[i]->clk);
+ if (ret)
+ goto error;
+ enabled |= BIT(i);
+ }
+
+ /* Set default MDRx settings */
+ rcar_drif_set_mdr1(sdr);
+
+ /* Set new format */
+ ret = rcar_drif_set_format(sdr);
+ if (ret)
+ goto error;
+
+ if (sdr->num_cur_ch == RCAR_DRIF_MAX_CHANNEL)
+ sdr->hwbuf_size = sdr->fmt->buffersize / RCAR_DRIF_MAX_CHANNEL;
+ else
+ sdr->hwbuf_size = sdr->fmt->buffersize;
+
+ rdrif_dbg(sdr, "num hwbufs %u, hwbuf_size %u\n",
+ RCAR_DRIF_NUM_HWBUFS, sdr->hwbuf_size);
+
+ /* Alloc DMA channel */
+ ret = rcar_drif_alloc_dmachannels(sdr);
+ if (ret)
+ goto error;
+
+ /* Request buffers */
+ ret = rcar_drif_request_buf(sdr);
+ if (ret)
+ goto error;
+
+ /* Start Rx */
+ ret = rcar_drif_start(sdr);
+ if (ret)
+ goto error;
+
+ mutex_unlock(&sdr->v4l2_mutex);
+
+ return ret;
+
+error:
+ rcar_drif_release_queued_bufs(sdr, VB2_BUF_STATE_QUEUED);
+ rcar_drif_release_buf(sdr);
+ rcar_drif_release_dmachannels(sdr);
+ for_each_rcar_drif_channel(i, &enabled)
+ clk_disable_unprepare(sdr->ch[i]->clk);
+
+ mutex_unlock(&sdr->v4l2_mutex);
+
+ return ret;
+}
+
+/* Stop streaming */
+static void rcar_drif_stop_streaming(struct vb2_queue *vq)
+{
+ struct rcar_drif_sdr *sdr = vb2_get_drv_priv(vq);
+ unsigned int i;
+
+ mutex_lock(&sdr->v4l2_mutex);
+
+ /* Stop hardware streaming */
+ rcar_drif_stop(sdr);
+
+ /* Return all queued buffers to vb2 */
+ rcar_drif_release_queued_bufs(sdr, VB2_BUF_STATE_ERROR);
+
+ /* Release buf */
+ rcar_drif_release_buf(sdr);
+
+ /* Release DMA channel resources */
+ rcar_drif_release_dmachannels(sdr);
+
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask)
+ clk_disable_unprepare(sdr->ch[i]->clk);
+
+ mutex_unlock(&sdr->v4l2_mutex);
+}
+
+/* Vb2 ops */
+static const struct vb2_ops rcar_drif_vb2_ops = {
+ .queue_setup = rcar_drif_queue_setup,
+ .buf_queue = rcar_drif_buf_queue,
+ .start_streaming = rcar_drif_start_streaming,
+ .stop_streaming = rcar_drif_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int rcar_drif_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct rcar_drif_sdr *sdr = video_drvdata(file);
+
+ strscpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
+ strscpy(cap->card, sdr->vdev->name, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ sdr->vdev->name);
+
+ return 0;
+}
+
+static int rcar_drif_set_default_format(struct rcar_drif_sdr *sdr)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ /* The format whose channel count matches the enabled channels is the default */
+ if (sdr->num_hw_ch == formats[i].num_ch) {
+ sdr->fmt = &formats[i];
+ sdr->cur_ch_mask = sdr->hw_ch_mask;
+ sdr->num_cur_ch = sdr->num_hw_ch;
+ dev_dbg(sdr->dev, "default fmt[%u]: mask %lu num %u\n",
+ i, sdr->cur_ch_mask, sdr->num_cur_ch);
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static int rcar_drif_enum_fmt_sdr_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index >= ARRAY_SIZE(formats))
+ return -EINVAL;
+
+ f->pixelformat = formats[f->index].pixelformat;
+
+ return 0;
+}
+
+static int rcar_drif_g_fmt_sdr_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct rcar_drif_sdr *sdr = video_drvdata(file);
+
+ f->fmt.sdr.pixelformat = sdr->fmt->pixelformat;
+ f->fmt.sdr.buffersize = sdr->fmt->buffersize;
+
+ return 0;
+}
+
+static int rcar_drif_s_fmt_sdr_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct rcar_drif_sdr *sdr = video_drvdata(file);
+ struct vb2_queue *q = &sdr->vb_queue;
+ unsigned int i;
+
+ if (vb2_is_busy(q))
+ return -EBUSY;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ if (formats[i].pixelformat == f->fmt.sdr.pixelformat)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(formats))
+ i = 0; /* Fall back to the first format when there is no match */
+
+ sdr->fmt = &formats[i];
+ f->fmt.sdr.pixelformat = sdr->fmt->pixelformat;
+ f->fmt.sdr.buffersize = formats[i].buffersize;
+ memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
+
+ /*
+ * If a format demands one channel only out of two
+ * enabled channels, pick the 0th channel.
+ */
+ if (formats[i].num_ch < sdr->num_hw_ch) {
+ sdr->cur_ch_mask = BIT(0);
+ sdr->num_cur_ch = formats[i].num_ch;
+ } else {
+ sdr->cur_ch_mask = sdr->hw_ch_mask;
+ sdr->num_cur_ch = sdr->num_hw_ch;
+ }
+
+ rdrif_dbg(sdr, "cur: idx %u mask %lu num %u\n",
+ i, sdr->cur_ch_mask, sdr->num_cur_ch);
+
+ return 0;
+}
+
+static int rcar_drif_try_fmt_sdr_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ if (formats[i].pixelformat == f->fmt.sdr.pixelformat) {
+ f->fmt.sdr.buffersize = formats[i].buffersize;
+ return 0;
+ }
+ }
+
+ f->fmt.sdr.pixelformat = formats[0].pixelformat;
+ f->fmt.sdr.buffersize = formats[0].buffersize;
+ memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
+
+ return 0;
+}
+
+/* Tuner subdev ioctls */
+static int rcar_drif_enum_freq_bands(struct file *file, void *priv,
+ struct v4l2_frequency_band *band)
+{
+ struct rcar_drif_sdr *sdr = video_drvdata(file);
+
+ return v4l2_subdev_call(sdr->ep.subdev, tuner, enum_freq_bands, band);
+}
+
+static int rcar_drif_g_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *f)
+{
+ struct rcar_drif_sdr *sdr = video_drvdata(file);
+
+ return v4l2_subdev_call(sdr->ep.subdev, tuner, g_frequency, f);
+}
+
+static int rcar_drif_s_frequency(struct file *file, void *priv,
+ const struct v4l2_frequency *f)
+{
+ struct rcar_drif_sdr *sdr = video_drvdata(file);
+
+ return v4l2_subdev_call(sdr->ep.subdev, tuner, s_frequency, f);
+}
+
+static int rcar_drif_g_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *vt)
+{
+ struct rcar_drif_sdr *sdr = video_drvdata(file);
+
+ return v4l2_subdev_call(sdr->ep.subdev, tuner, g_tuner, vt);
+}
+
+static int rcar_drif_s_tuner(struct file *file, void *priv,
+ const struct v4l2_tuner *vt)
+{
+ struct rcar_drif_sdr *sdr = video_drvdata(file);
+
+ return v4l2_subdev_call(sdr->ep.subdev, tuner, s_tuner, vt);
+}
+
+static const struct v4l2_ioctl_ops rcar_drif_ioctl_ops = {
+ .vidioc_querycap = rcar_drif_querycap,
+
+ .vidioc_enum_fmt_sdr_cap = rcar_drif_enum_fmt_sdr_cap,
+ .vidioc_g_fmt_sdr_cap = rcar_drif_g_fmt_sdr_cap,
+ .vidioc_s_fmt_sdr_cap = rcar_drif_s_fmt_sdr_cap,
+ .vidioc_try_fmt_sdr_cap = rcar_drif_try_fmt_sdr_cap,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_s_frequency = rcar_drif_s_frequency,
+ .vidioc_g_frequency = rcar_drif_g_frequency,
+ .vidioc_s_tuner = rcar_drif_s_tuner,
+ .vidioc_g_tuner = rcar_drif_g_tuner,
+ .vidioc_enum_freq_bands = rcar_drif_enum_freq_bands,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+};
+
+static const struct v4l2_file_operations rcar_drif_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+ .unlocked_ioctl = video_ioctl2,
+};
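
For context, here is a minimal userspace sketch (not part of the driver or of this patch) of how the file operations and ioctls above are typically exercised by an SDR application. The device node path and the read size are assumptions for illustration, and V4L2_SDR_FMT_PCU16BE is assumed to be one of the formats the driver advertises; a real application would also tune the bound receiver subdevice through VIDIOC_S_FREQUENCY and would normally use streaming I/O rather than read().

	/* Hypothetical userspace example -- not part of the driver. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/videodev2.h>

	int main(void)
	{
		struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_SDR_CAPTURE };
		char buf[16384];
		ssize_t n;
		int fd;

		fd = open("/dev/swradio0", O_RDONLY);	/* assumed node name */
		if (fd < 0)
			return 1;

		/* Select one of the SDR sample formats the driver advertises. */
		fmt.fmt.sdr.pixelformat = V4L2_SDR_FMT_PCU16BE;
		if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0)
			return 1;

		/* The read() path is backed by vb2_fop_read() above. */
		n = read(fd, buf, sizeof(buf));
		if (n > 0)
			fprintf(stderr, "captured %zd bytes of SDR samples\n", n);

		close(fd);
		return 0;
	}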
+
+static int rcar_drif_sdr_register(struct rcar_drif_sdr *sdr)
+{
+ int ret;
+
+ /* Init video_device structure */
+ sdr->vdev = video_device_alloc();
+ if (!sdr->vdev)
+ return -ENOMEM;
+
+ snprintf(sdr->vdev->name, sizeof(sdr->vdev->name), "R-Car DRIF");
+ sdr->vdev->fops = &rcar_drif_fops;
+ sdr->vdev->ioctl_ops = &rcar_drif_ioctl_ops;
+ sdr->vdev->release = video_device_release;
+ sdr->vdev->lock = &sdr->v4l2_mutex;
+ sdr->vdev->queue = &sdr->vb_queue;
+ sdr->vdev->queue->lock = &sdr->vb_queue_mutex;
+ sdr->vdev->ctrl_handler = &sdr->ctrl_hdl;
+ sdr->vdev->v4l2_dev = &sdr->v4l2_dev;
+ sdr->vdev->device_caps = V4L2_CAP_SDR_CAPTURE | V4L2_CAP_TUNER |
+ V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
+ video_set_drvdata(sdr->vdev, sdr);
+
+ /* Register V4L2 SDR device */
+ ret = video_register_device(sdr->vdev, VFL_TYPE_SDR, -1);
+ if (ret) {
+ video_device_release(sdr->vdev);
+ sdr->vdev = NULL;
+ dev_err(sdr->dev, "failed video_register_device (%d)\n", ret);
+ }
+
+ return ret;
+}
+
+static void rcar_drif_sdr_unregister(struct rcar_drif_sdr *sdr)
+{
+ video_unregister_device(sdr->vdev);
+ sdr->vdev = NULL;
+}
+
+/* Sub-device bound callback */
+static int rcar_drif_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_connection *asd)
+{
+ struct rcar_drif_sdr *sdr =
+ container_of(notifier, struct rcar_drif_sdr, notifier);
+
+ v4l2_set_subdev_hostdata(subdev, sdr);
+ sdr->ep.subdev = subdev;
+ rdrif_dbg(sdr, "bound asd %s\n", subdev->name);
+
+ return 0;
+}
+
+/* Sub-device unbind callback */
+static void rcar_drif_notify_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_connection *asd)
+{
+ struct rcar_drif_sdr *sdr =
+ container_of(notifier, struct rcar_drif_sdr, notifier);
+
+ if (sdr->ep.subdev != subdev) {
+ rdrif_err(sdr, "subdev %s is not bound\n", subdev->name);
+ return;
+ }
+
+ /* Free ctrl handler if initialized */
+ v4l2_ctrl_handler_free(&sdr->ctrl_hdl);
+ sdr->v4l2_dev.ctrl_handler = NULL;
+ sdr->ep.subdev = NULL;
+
+ rcar_drif_sdr_unregister(sdr);
+ rdrif_dbg(sdr, "unbind asd %s\n", subdev->name);
+}
+
+/* Sub-device registered notification callback */
+static int rcar_drif_notify_complete(struct v4l2_async_notifier *notifier)
+{
+ struct rcar_drif_sdr *sdr =
+ container_of(notifier, struct rcar_drif_sdr, notifier);
+ int ret;
+
+ /*
+ * The subdev tested at this point uses 4 controls. Use 10 as a
+ * worst-case hint. When fewer controls are needed there will be some
+ * unused memory, and when more controls are needed the framework uses
+ * a hash to manage controls within this number.
+ */
+ ret = v4l2_ctrl_handler_init(&sdr->ctrl_hdl, 10);
+ if (ret)
+ return -ENOMEM;
+
+ sdr->v4l2_dev.ctrl_handler = &sdr->ctrl_hdl;
+ ret = v4l2_device_register_subdev_nodes(&sdr->v4l2_dev);
+ if (ret) {
+ rdrif_err(sdr, "failed: register subdev nodes ret %d\n", ret);
+ goto error;
+ }
+
+ ret = v4l2_ctrl_add_handler(&sdr->ctrl_hdl,
+ sdr->ep.subdev->ctrl_handler, NULL, true);
+ if (ret) {
+ rdrif_err(sdr, "failed: ctrl add hdlr ret %d\n", ret);
+ goto error;
+ }
+
+ ret = rcar_drif_sdr_register(sdr);
+ if (ret)
+ goto error;
+
+ return ret;
+
+error:
+ v4l2_ctrl_handler_free(&sdr->ctrl_hdl);
+
+ return ret;
+}
+
+static const struct v4l2_async_notifier_operations rcar_drif_notify_ops = {
+ .bound = rcar_drif_notify_bound,
+ .unbind = rcar_drif_notify_unbind,
+ .complete = rcar_drif_notify_complete,
+};
+
+/* Read endpoint properties */
+static void rcar_drif_get_ep_properties(struct rcar_drif_sdr *sdr,
+ struct fwnode_handle *fwnode)
+{
+ u32 val;
+
+ /* Set the I2S defaults for SIRMDR1 */
+ sdr->mdr1 = RCAR_DRIF_SIRMDR1_SYNCMD_LR | RCAR_DRIF_SIRMDR1_MSB_FIRST |
+ RCAR_DRIF_SIRMDR1_DTDL_1 | RCAR_DRIF_SIRMDR1_SYNCDL_0;
+
+ /* Parse sync polarity from endpoint */
+ if (!fwnode_property_read_u32(fwnode, "sync-active", &val))
+ sdr->mdr1 |= val ? RCAR_DRIF_SIRMDR1_SYNCAC_POL_HIGH :
+ RCAR_DRIF_SIRMDR1_SYNCAC_POL_LOW;
+ else
+ sdr->mdr1 |= RCAR_DRIF_SIRMDR1_SYNCAC_POL_HIGH; /* default */
+
+ dev_dbg(sdr->dev, "mdr1 0x%08x\n", sdr->mdr1);
+}
+
+/* Parse sub-devs (tuner) to find a matching device */
+static int rcar_drif_parse_subdevs(struct rcar_drif_sdr *sdr)
+{
+ struct v4l2_async_notifier *notifier = &sdr->notifier;
+ struct fwnode_handle *fwnode, *ep;
+ struct v4l2_async_connection *asd;
+
+ v4l2_async_nf_init(&sdr->notifier, &sdr->v4l2_dev);
+
+ ep = fwnode_graph_get_next_endpoint(of_fwnode_handle(sdr->dev->of_node),
+ NULL);
+ if (!ep)
+ return 0;
+
+ /* Get the endpoint properties */
+ rcar_drif_get_ep_properties(sdr, ep);
+
+ fwnode = fwnode_graph_get_remote_port_parent(ep);
+ fwnode_handle_put(ep);
+ if (!fwnode) {
+ dev_warn(sdr->dev, "bad remote port parent\n");
+ return -EINVAL;
+ }
+
+ asd = v4l2_async_nf_add_fwnode(notifier, fwnode,
+ struct v4l2_async_connection);
+ fwnode_handle_put(fwnode);
+ if (IS_ERR(asd))
+ return PTR_ERR(asd);
+
+ return 0;
+}
+
+/* Check if the given device is the primary bond */
+static bool rcar_drif_primary_bond(struct platform_device *pdev)
+{
+ return of_property_read_bool(pdev->dev.of_node, "renesas,primary-bond");
+}
+
+/* Check if both devices of the bond are enabled */
+static struct device_node *rcar_drif_bond_enabled(struct platform_device *p)
+{
+ struct device_node *np;
+
+ np = of_parse_phandle(p->dev.of_node, "renesas,bonding", 0);
+ if (np && of_device_is_available(np))
+ return np;
+
+ return NULL;
+}
+
+/* Check if the bonded device is probed */
+static int rcar_drif_bond_available(struct rcar_drif_sdr *sdr,
+ struct device_node *np)
+{
+ struct platform_device *pdev;
+ struct rcar_drif *ch;
+ int ret = 0;
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ dev_err(sdr->dev, "failed to get bonded device from node\n");
+ return -ENODEV;
+ }
+
+ device_lock(&pdev->dev);
+ ch = platform_get_drvdata(pdev);
+ if (ch) {
+ /* Update sdr data in the bonded device */
+ ch->sdr = sdr;
+
+ /* Update sdr with bonded device data */
+ sdr->ch[ch->num] = ch;
+ sdr->hw_ch_mask |= BIT(ch->num);
+ } else {
+ /* Defer */
+ dev_info(sdr->dev, "defer probe\n");
+ ret = -EPROBE_DEFER;
+ }
+ device_unlock(&pdev->dev);
+
+ put_device(&pdev->dev);
+
+ return ret;
+}
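
To illustrate the bonding flow above with assumed node names: if drif0 and drif1 point at each other through the renesas,bonding phandle and drif0 carries renesas,primary-bond, then drif1's probe only records its channel data and returns early, while drif0's probe allocates the shared SDR instance, picks up drif1's channel through rcar_drif_bond_available() (or defers until drif1 has probed), and goes on to register a single V4L2 SDR device covering both channels.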
+
+/* V4L2 SDR device probe */
+static int rcar_drif_sdr_probe(struct rcar_drif_sdr *sdr)
+{
+ int ret;
+
+ /* Validate any supported format for enabled channels */
+ ret = rcar_drif_set_default_format(sdr);
+ if (ret) {
+ dev_err(sdr->dev, "failed to set default format\n");
+ return ret;
+ }
+
+ /* Set defaults */
+ sdr->hwbuf_size = RCAR_DRIF_DEFAULT_HWBUF_SIZE;
+
+ mutex_init(&sdr->v4l2_mutex);
+ mutex_init(&sdr->vb_queue_mutex);
+ spin_lock_init(&sdr->queued_bufs_lock);
+ spin_lock_init(&sdr->dma_lock);
+ INIT_LIST_HEAD(&sdr->queued_bufs);
+
+ /* Init videobuf2 queue structure */
+ sdr->vb_queue.type = V4L2_BUF_TYPE_SDR_CAPTURE;
+ sdr->vb_queue.io_modes = VB2_READ | VB2_MMAP | VB2_DMABUF;
+ sdr->vb_queue.drv_priv = sdr;
+ sdr->vb_queue.buf_struct_size = sizeof(struct rcar_drif_frame_buf);
+ sdr->vb_queue.ops = &rcar_drif_vb2_ops;
+ sdr->vb_queue.mem_ops = &vb2_vmalloc_memops;
+ sdr->vb_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+
+ /* Init videobuf2 queue */
+ ret = vb2_queue_init(&sdr->vb_queue);
+ if (ret) {
+ dev_err(sdr->dev, "failed: vb2_queue_init ret %d\n", ret);
+ return ret;
+ }
+
+ /* Register the v4l2_device */
+ ret = v4l2_device_register(sdr->dev, &sdr->v4l2_dev);
+ if (ret) {
+ dev_err(sdr->dev, "failed: v4l2_device_register ret %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Parse subdevs after v4l2_device_register because if the subdev
+ * is already probed, bound and complete will be called immediately
+ */
+ ret = rcar_drif_parse_subdevs(sdr);
+ if (ret)
+ goto error;
+
+ sdr->notifier.ops = &rcar_drif_notify_ops;
+
+ /* Register notifier */
+ ret = v4l2_async_nf_register(&sdr->notifier);
+ if (ret < 0) {
+ dev_err(sdr->dev, "failed: notifier register ret %d\n", ret);
+ goto cleanup;
+ }
+
+ return ret;
+
+cleanup:
+ v4l2_async_nf_cleanup(&sdr->notifier);
+error:
+ v4l2_device_unregister(&sdr->v4l2_dev);
+
+ return ret;
+}
+
+/* V4L2 SDR device remove */
+static void rcar_drif_sdr_remove(struct rcar_drif_sdr *sdr)
+{
+ v4l2_async_nf_unregister(&sdr->notifier);
+ v4l2_async_nf_cleanup(&sdr->notifier);
+ v4l2_device_unregister(&sdr->v4l2_dev);
+}
+
+/* DRIF channel probe */
+static int rcar_drif_probe(struct platform_device *pdev)
+{
+ struct rcar_drif_sdr *sdr;
+ struct device_node *np;
+ struct rcar_drif *ch;
+ struct resource *res;
+ int ret;
+
+ /* Reserve memory for enabled channel */
+ ch = devm_kzalloc(&pdev->dev, sizeof(*ch), GFP_KERNEL);
+ if (!ch)
+ return -ENOMEM;
+
+ ch->pdev = pdev;
+
+ /* Module clock */
+ ch->clk = devm_clk_get(&pdev->dev, "fck");
+ if (IS_ERR(ch->clk)) {
+ ret = PTR_ERR(ch->clk);
+ dev_err(&pdev->dev, "clk get failed (%d)\n", ret);
+ return ret;
+ }
+
+ /* Register map */
+ ch->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(ch->base))
+ return PTR_ERR(ch->base);
+
+ ch->start = res->start;
+ platform_set_drvdata(pdev, ch);
+
+ /* Check if both channels of the bond are enabled */
+ np = rcar_drif_bond_enabled(pdev);
+ if (np) {
+ /* Check if the current channel is acting as the primary bond */
+ if (!rcar_drif_primary_bond(pdev)) {
+ ch->num = 1; /* The primary bond is always channel 0 */
+ of_node_put(np);
+ return 0;
+ }
+ }
+
+ /* Reserve memory for SDR structure */
+ sdr = devm_kzalloc(&pdev->dev, sizeof(*sdr), GFP_KERNEL);
+ if (!sdr) {
+ of_node_put(np);
+ return -ENOMEM;
+ }
+ ch->sdr = sdr;
+ sdr->dev = &pdev->dev;
+
+ /* Establish links between SDR and channel(s) */
+ sdr->ch[ch->num] = ch;
+ sdr->hw_ch_mask = BIT(ch->num);
+ if (np) {
+ /* Check if bonded device is ready */
+ ret = rcar_drif_bond_available(sdr, np);
+ of_node_put(np);
+ if (ret)
+ return ret;
+ }
+ sdr->num_hw_ch = hweight_long(sdr->hw_ch_mask);
+
+ return rcar_drif_sdr_probe(sdr);
+}
+
+/* DRIF channel remove */
+static void rcar_drif_remove(struct platform_device *pdev)
+{
+ struct rcar_drif *ch = platform_get_drvdata(pdev);
+ struct rcar_drif_sdr *sdr = ch->sdr;
+
+ /* Channel 0 will be the SDR instance */
+ if (ch->num)
+ return;
+
+ /* SDR instance */
+ rcar_drif_sdr_remove(sdr);
+}
+
+/* FIXME: Implement suspend/resume support */
+static int __maybe_unused rcar_drif_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int __maybe_unused rcar_drif_resume(struct device *dev)
+{
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(rcar_drif_pm_ops, rcar_drif_suspend,
+ rcar_drif_resume);
+
+static const struct of_device_id rcar_drif_of_table[] = {
+ { .compatible = "renesas,rcar-gen3-drif" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, rcar_drif_of_table);
+
+#define RCAR_DRIF_DRV_NAME "rcar_drif"
+static struct platform_driver rcar_drif_driver = {
+ .driver = {
+ .name = RCAR_DRIF_DRV_NAME,
+ .of_match_table = rcar_drif_of_table,
+ .pm = &rcar_drif_pm_ops,
+ },
+ .probe = rcar_drif_probe,
+ .remove_new = rcar_drif_remove,
+};
+
+module_platform_driver(rcar_drif_driver);
+
+MODULE_DESCRIPTION("Renesas R-Car Gen3 DRIF driver");
+MODULE_ALIAS("platform:" RCAR_DRIF_DRV_NAME);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ramesh Shanmugasundaram <ramesh.shanmugasundaram@bp.renesas.com>");
diff --git a/drivers/media/platform/renesas/rcar_fdp1.c b/drivers/media/platform/renesas/rcar_fdp1.c
new file mode 100644
index 0000000000..a2565b269f
--- /dev/null
+++ b/drivers/media/platform/renesas/rcar_fdp1.c
@@ -0,0 +1,2460 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Renesas R-Car Fine Display Processor
+ *
+ * Video format converter and frame deinterlacer device.
+ *
+ * Author: Kieran Bingham, <kieran@bingham.xyz>
+ * Copyright (c) 2016 Renesas Electronics Corporation.
+ *
+ * This code was developed from, and inspired by, the vim2m, rcar_jpu,
+ * m2m-deinterlace, and vsp1 drivers.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <media/rcar-fcp.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+static unsigned int debug;
+module_param(debug, uint, 0644);
+MODULE_PARM_DESC(debug, "activate debug info");
+
+/* Minimum and maximum frame width/height */
+#define FDP1_MIN_W 80U
+#define FDP1_MIN_H 80U
+
+#define FDP1_MAX_W 3840U
+#define FDP1_MAX_H 2160U
+
+#define FDP1_MAX_PLANES 3U
+#define FDP1_MAX_STRIDE 8190U
+
+/* Flags that indicate a format can be used for capture/output */
+#define FDP1_CAPTURE BIT(0)
+#define FDP1_OUTPUT BIT(1)
+
+#define DRIVER_NAME "rcar_fdp1"
+
+/* Number of jobs to have available on the processing queue */
+#define FDP1_NUMBER_JOBS 8
+
+#define dprintk(fdp1, fmt, arg...) \
+ v4l2_dbg(1, debug, &fdp1->v4l2_dev, "%s: " fmt, __func__, ## arg)
+
+/*
+ * FDP1 registers and bits
+ */
+
+/* FDP1 start register - Imm */
+#define FD1_CTL_CMD 0x0000
+#define FD1_CTL_CMD_STRCMD BIT(0)
+
+/* Sync generator register - Imm */
+#define FD1_CTL_SGCMD 0x0004
+#define FD1_CTL_SGCMD_SGEN BIT(0)
+
+/* Register set end register - Imm */
+#define FD1_CTL_REGEND 0x0008
+#define FD1_CTL_REGEND_REGEND BIT(0)
+
+/* Channel activation register - Vupdt */
+#define FD1_CTL_CHACT 0x000c
+#define FD1_CTL_CHACT_SMW BIT(9)
+#define FD1_CTL_CHACT_WR BIT(8)
+#define FD1_CTL_CHACT_SMR BIT(3)
+#define FD1_CTL_CHACT_RD2 BIT(2)
+#define FD1_CTL_CHACT_RD1 BIT(1)
+#define FD1_CTL_CHACT_RD0 BIT(0)
+
+/* Operation Mode Register - Vupdt */
+#define FD1_CTL_OPMODE 0x0010
+#define FD1_CTL_OPMODE_PRG BIT(4)
+#define FD1_CTL_OPMODE_VIMD_INTERRUPT (0 << 0)
+#define FD1_CTL_OPMODE_VIMD_BESTEFFORT (1 << 0)
+#define FD1_CTL_OPMODE_VIMD_NOINTERRUPT (2 << 0)
+
+#define FD1_CTL_VPERIOD 0x0014
+#define FD1_CTL_CLKCTRL 0x0018
+#define FD1_CTL_CLKCTRL_CSTP_N BIT(0)
+
+/* Software reset register */
+#define FD1_CTL_SRESET 0x001c
+#define FD1_CTL_SRESET_SRST BIT(0)
+
+/* Control status register (V-update-status) */
+#define FD1_CTL_STATUS 0x0024
+#define FD1_CTL_STATUS_VINT_CNT_MASK GENMASK(31, 16)
+#define FD1_CTL_STATUS_VINT_CNT_SHIFT 16
+#define FD1_CTL_STATUS_SGREGSET BIT(10)
+#define FD1_CTL_STATUS_SGVERR BIT(9)
+#define FD1_CTL_STATUS_SGFREND BIT(8)
+#define FD1_CTL_STATUS_BSY BIT(0)
+
+#define FD1_CTL_VCYCLE_STAT 0x0028
+
+/* Interrupt enable register */
+#define FD1_CTL_IRQENB 0x0038
+/* Interrupt status register */
+#define FD1_CTL_IRQSTA 0x003c
+/* Interrupt control register */
+#define FD1_CTL_IRQFSET 0x0040
+
+/* Common IRQ Bit settings */
+#define FD1_CTL_IRQ_VERE BIT(16)
+#define FD1_CTL_IRQ_VINTE BIT(4)
+#define FD1_CTL_IRQ_FREE BIT(0)
+#define FD1_CTL_IRQ_MASK (FD1_CTL_IRQ_VERE | \
+ FD1_CTL_IRQ_VINTE | \
+ FD1_CTL_IRQ_FREE)
+
+/* RPF */
+#define FD1_RPF_SIZE 0x0060
+#define FD1_RPF_SIZE_MASK GENMASK(12, 0)
+#define FD1_RPF_SIZE_H_SHIFT 16
+#define FD1_RPF_SIZE_V_SHIFT 0
+
+#define FD1_RPF_FORMAT 0x0064
+#define FD1_RPF_FORMAT_CIPM BIT(16)
+#define FD1_RPF_FORMAT_RSPYCS BIT(13)
+#define FD1_RPF_FORMAT_RSPUVS BIT(12)
+#define FD1_RPF_FORMAT_CF BIT(8)
+
+#define FD1_RPF_PSTRIDE 0x0068
+#define FD1_RPF_PSTRIDE_Y_SHIFT 16
+#define FD1_RPF_PSTRIDE_C_SHIFT 0
+
+/* RPF0 Source Component Y Address register */
+#define FD1_RPF0_ADDR_Y 0x006c
+
+/* RPF1 Current Picture Registers */
+#define FD1_RPF1_ADDR_Y 0x0078
+#define FD1_RPF1_ADDR_C0 0x007c
+#define FD1_RPF1_ADDR_C1 0x0080
+
+/* RPF2 next picture register */
+#define FD1_RPF2_ADDR_Y 0x0084
+
+#define FD1_RPF_SMSK_ADDR 0x0090
+#define FD1_RPF_SWAP 0x0094
+
+/* WPF */
+#define FD1_WPF_FORMAT 0x00c0
+#define FD1_WPF_FORMAT_PDV_SHIFT 24
+#define FD1_WPF_FORMAT_FCNL BIT(20)
+#define FD1_WPF_FORMAT_WSPYCS BIT(15)
+#define FD1_WPF_FORMAT_WSPUVS BIT(14)
+#define FD1_WPF_FORMAT_WRTM_601_16 (0 << 9)
+#define FD1_WPF_FORMAT_WRTM_601_0 (1 << 9)
+#define FD1_WPF_FORMAT_WRTM_709_16 (2 << 9)
+#define FD1_WPF_FORMAT_CSC BIT(8)
+
+#define FD1_WPF_RNDCTL 0x00c4
+#define FD1_WPF_RNDCTL_CBRM BIT(28)
+#define FD1_WPF_RNDCTL_CLMD_NOCLIP (0 << 12)
+#define FD1_WPF_RNDCTL_CLMD_CLIP_16_235 (1 << 12)
+#define FD1_WPF_RNDCTL_CLMD_CLIP_1_254 (2 << 12)
+
+#define FD1_WPF_PSTRIDE 0x00c8
+#define FD1_WPF_PSTRIDE_Y_SHIFT 16
+#define FD1_WPF_PSTRIDE_C_SHIFT 0
+
+/* WPF Destination picture */
+#define FD1_WPF_ADDR_Y 0x00cc
+#define FD1_WPF_ADDR_C0 0x00d0
+#define FD1_WPF_ADDR_C1 0x00d4
+#define FD1_WPF_SWAP 0x00d8
+#define FD1_WPF_SWAP_OSWAP_SHIFT 0
+#define FD1_WPF_SWAP_SSWAP_SHIFT 4
+
+/* WPF/RPF Common */
+#define FD1_RWPF_SWAP_BYTE BIT(0)
+#define FD1_RWPF_SWAP_WORD BIT(1)
+#define FD1_RWPF_SWAP_LWRD BIT(2)
+#define FD1_RWPF_SWAP_LLWD BIT(3)
+
+/* IPC */
+#define FD1_IPC_MODE 0x0100
+#define FD1_IPC_MODE_DLI BIT(8)
+#define FD1_IPC_MODE_DIM_ADAPT2D3D (0 << 0)
+#define FD1_IPC_MODE_DIM_FIXED2D (1 << 0)
+#define FD1_IPC_MODE_DIM_FIXED3D (2 << 0)
+#define FD1_IPC_MODE_DIM_PREVFIELD (3 << 0)
+#define FD1_IPC_MODE_DIM_NEXTFIELD (4 << 0)
+
+#define FD1_IPC_SMSK_THRESH 0x0104
+#define FD1_IPC_SMSK_THRESH_CONST 0x00010002
+
+#define FD1_IPC_COMB_DET 0x0108
+#define FD1_IPC_COMB_DET_CONST 0x00200040
+
+#define FD1_IPC_MOTDEC 0x010c
+#define FD1_IPC_MOTDEC_CONST 0x00008020
+
+/* DLI registers */
+#define FD1_IPC_DLI_BLEND 0x0120
+#define FD1_IPC_DLI_BLEND_CONST 0x0080ff02
+
+#define FD1_IPC_DLI_HGAIN 0x0124
+#define FD1_IPC_DLI_HGAIN_CONST 0x001000ff
+
+#define FD1_IPC_DLI_SPRS 0x0128
+#define FD1_IPC_DLI_SPRS_CONST 0x009004ff
+
+#define FD1_IPC_DLI_ANGLE 0x012c
+#define FD1_IPC_DLI_ANGLE_CONST 0x0004080c
+
+#define FD1_IPC_DLI_ISOPIX0 0x0130
+#define FD1_IPC_DLI_ISOPIX0_CONST 0xff10ff10
+
+#define FD1_IPC_DLI_ISOPIX1 0x0134
+#define FD1_IPC_DLI_ISOPIX1_CONST 0x0000ff10
+
+/* Sensor registers */
+#define FD1_IPC_SENSOR_TH0 0x0140
+#define FD1_IPC_SENSOR_TH0_CONST 0x20208080
+
+#define FD1_IPC_SENSOR_TH1 0x0144
+#define FD1_IPC_SENSOR_TH1_CONST 0
+
+#define FD1_IPC_SENSOR_CTL0 0x0170
+#define FD1_IPC_SENSOR_CTL0_CONST 0x00002201
+
+#define FD1_IPC_SENSOR_CTL1 0x0174
+#define FD1_IPC_SENSOR_CTL1_CONST 0
+
+#define FD1_IPC_SENSOR_CTL2 0x0178
+#define FD1_IPC_SENSOR_CTL2_X_SHIFT 16
+#define FD1_IPC_SENSOR_CTL2_Y_SHIFT 0
+
+#define FD1_IPC_SENSOR_CTL3 0x017c
+#define FD1_IPC_SENSOR_CTL3_0_SHIFT 16
+#define FD1_IPC_SENSOR_CTL3_1_SHIFT 0
+
+/* Line memory pixel number register */
+#define FD1_IPC_LMEM 0x01e0
+#define FD1_IPC_LMEM_LINEAR 1024
+#define FD1_IPC_LMEM_TILE 960
+
+/* Internal Data (HW Version) */
+#define FD1_IP_INTDATA 0x0800
+/* R-Car Gen2 HW manual says zero, but actual value matches R-Car H3 ES1.x */
+#define FD1_IP_GEN2 0x02010101
+#define FD1_IP_M3W 0x02010202
+#define FD1_IP_H3 0x02010203
+#define FD1_IP_M3N 0x02010204
+#define FD1_IP_E3 0x02010205
+
+/* LUTs */
+#define FD1_LUT_DIF_ADJ 0x1000
+#define FD1_LUT_SAD_ADJ 0x1400
+#define FD1_LUT_BLD_GAIN 0x1800
+#define FD1_LUT_DIF_GAIN 0x1c00
+#define FD1_LUT_MDET 0x2000
+
+/**
+ * struct fdp1_fmt - The FDP1 internal format data
+ * @fourcc: the fourcc code, to match the V4L2 API
+ * @bpp: bits per pixel per plane
+ * @num_planes: number of planes
+ * @hsub: horizontal subsampling factor
+ * @vsub: vertical subsampling factor
+ * @fmt: 7-bit format code for the fdp1 hardware
+ * @swap_yc: the Y and C components are swapped (Y comes before C)
+ * @swap_uv: the U and V components are swapped (V comes before U)
+ * @swap: swap register control
+ * @types: types of queue this format is applicable to
+ */
+struct fdp1_fmt {
+ u32 fourcc;
+ u8 bpp[3];
+ u8 num_planes;
+ u8 hsub;
+ u8 vsub;
+ u8 fmt;
+ bool swap_yc;
+ bool swap_uv;
+ u8 swap;
+ u8 types;
+};
+
+static const struct fdp1_fmt fdp1_formats[] = {
+ /* RGB formats are only supported by the Write Pixel Formatter */
+
+ { V4L2_PIX_FMT_RGB332, { 8, 0, 0 }, 1, 1, 1, 0x00, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_XRGB444, { 16, 0, 0 }, 1, 1, 1, 0x01, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_XRGB555, { 16, 0, 0 }, 1, 1, 1, 0x04, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_RGB565, { 16, 0, 0 }, 1, 1, 1, 0x06, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_ABGR32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_XBGR32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_ARGB32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_XRGB32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_RGB24, { 24, 0, 0 }, 1, 1, 1, 0x15, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_BGR24, { 24, 0, 0 }, 1, 1, 1, 0x18, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_ARGB444, { 16, 0, 0 }, 1, 1, 1, 0x19, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD,
+ FDP1_CAPTURE },
+ { V4L2_PIX_FMT_ARGB555, { 16, 0, 0 }, 1, 1, 1, 0x1b, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD,
+ FDP1_CAPTURE },
+
+ /* YUV Formats are supported by Read and Write Pixel Formatters */
+
+ { V4L2_PIX_FMT_NV16M, { 8, 16, 0 }, 2, 2, 1, 0x41, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_NV61M, { 8, 16, 0 }, 2, 2, 1, 0x41, false, true,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_NV12M, { 8, 16, 0 }, 2, 2, 2, 0x42, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_NV21M, { 8, 16, 0 }, 2, 2, 2, 0x42, false, true,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_UYVY, { 16, 0, 0 }, 1, 2, 1, 0x47, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_VYUY, { 16, 0, 0 }, 1, 2, 1, 0x47, false, true,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_YUYV, { 16, 0, 0 }, 1, 2, 1, 0x47, true, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_YVYU, { 16, 0, 0 }, 1, 2, 1, 0x47, true, true,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_YUV444M, { 8, 8, 8 }, 3, 1, 1, 0x4a, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_YVU444M, { 8, 8, 8 }, 3, 1, 1, 0x4a, false, true,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_YUV422M, { 8, 8, 8 }, 3, 2, 1, 0x4b, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_YVU422M, { 8, 8, 8 }, 3, 2, 1, 0x4b, false, true,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_YUV420M, { 8, 8, 8 }, 3, 2, 2, 0x4c, false, false,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+ { V4L2_PIX_FMT_YVU420M, { 8, 8, 8 }, 3, 2, 2, 0x4c, false, true,
+ FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+ FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+ FDP1_CAPTURE | FDP1_OUTPUT },
+};
+
+static int fdp1_fmt_is_rgb(const struct fdp1_fmt *fmt)
+{
+ return fmt->fmt <= 0x1b; /* Last RGB code */
+}
+
+/*
+ * FDP1 Lookup tables range from 0...255 only
+ *
+ * Each table holds at most 256 entries, and shorter tables
+ * are padded out to 256 entries by duplicating the last value.
+ */
+static const u8 fdp1_diff_adj[] = {
+ 0x00, 0x24, 0x43, 0x5e, 0x76, 0x8c, 0x9e, 0xaf,
+ 0xbd, 0xc9, 0xd4, 0xdd, 0xe4, 0xea, 0xef, 0xf3,
+ 0xf6, 0xf9, 0xfb, 0xfc, 0xfd, 0xfe, 0xfe, 0xff,
+};
+
+static const u8 fdp1_sad_adj[] = {
+ 0x00, 0x24, 0x43, 0x5e, 0x76, 0x8c, 0x9e, 0xaf,
+ 0xbd, 0xc9, 0xd4, 0xdd, 0xe4, 0xea, 0xef, 0xf3,
+ 0xf6, 0xf9, 0xfb, 0xfc, 0xfd, 0xfe, 0xfe, 0xff,
+};
+
+static const u8 fdp1_bld_gain[] = {
+ 0x80,
+};
+
+static const u8 fdp1_dif_gain[] = {
+ 0x80,
+};
+
+static const u8 fdp1_mdet[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
+ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
+ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
+ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
+ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
+ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
+ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff
+};
+
+/* Per-queue, driver-specific private data */
+struct fdp1_q_data {
+ const struct fdp1_fmt *fmt;
+ struct v4l2_pix_format_mplane format;
+
+ unsigned int vsize;
+ unsigned int stride_y;
+ unsigned int stride_c;
+};
+
+static const struct fdp1_fmt *fdp1_find_format(u32 pixelformat)
+{
+ const struct fdp1_fmt *fmt;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(fdp1_formats); i++) {
+ fmt = &fdp1_formats[i];
+ if (fmt->fourcc == pixelformat)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+enum fdp1_deint_mode {
+ FDP1_PROGRESSIVE = 0, /* Must be zero when !deinterlacing */
+ FDP1_ADAPT2D3D,
+ FDP1_FIXED2D,
+ FDP1_FIXED3D,
+ FDP1_PREVFIELD,
+ FDP1_NEXTFIELD,
+};
+
+#define FDP1_DEINT_MODE_USES_NEXT(mode) \
+ (mode == FDP1_ADAPT2D3D || \
+ mode == FDP1_FIXED3D || \
+ mode == FDP1_NEXTFIELD)
+
+#define FDP1_DEINT_MODE_USES_PREV(mode) \
+ (mode == FDP1_ADAPT2D3D || \
+ mode == FDP1_FIXED3D || \
+ mode == FDP1_PREVFIELD)
+
+/*
+ * FDP1 operates on potentially 3 fields, which are tracked
+ * from the VB buffers using this context structure.
+ * Each entry is always a single field or a full frame, never two fields.
+ */
+struct fdp1_field_buffer {
+ struct vb2_v4l2_buffer *vb;
+ dma_addr_t addrs[3];
+
+ /* Should be NONE:TOP:BOTTOM only */
+ enum v4l2_field field;
+
+ /* Flag to indicate this is the last field in the vb */
+ bool last_field;
+
+ /* Buffer queue lists */
+ struct list_head list;
+};
+
+struct fdp1_buffer {
+ struct v4l2_m2m_buffer m2m_buf;
+ struct fdp1_field_buffer fields[2];
+ unsigned int num_fields;
+};
+
+static inline struct fdp1_buffer *to_fdp1_buffer(struct vb2_v4l2_buffer *vb)
+{
+ return container_of(vb, struct fdp1_buffer, m2m_buf.vb);
+}
+
+struct fdp1_job {
+ struct fdp1_field_buffer *previous;
+ struct fdp1_field_buffer *active;
+ struct fdp1_field_buffer *next;
+ struct fdp1_field_buffer *dst;
+
+ /* A job can only be on one list at a time */
+ struct list_head list;
+};
+
+struct fdp1_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device vfd;
+
+ struct mutex dev_mutex;
+ spinlock_t irqlock;
+ spinlock_t device_process_lock;
+
+ void __iomem *regs;
+ unsigned int irq;
+ struct device *dev;
+
+ /* Job Queues */
+ struct fdp1_job jobs[FDP1_NUMBER_JOBS];
+ struct list_head free_job_list;
+ struct list_head queued_job_list;
+ struct list_head hw_job_list;
+
+ unsigned int clk_rate;
+
+ struct rcar_fcp_device *fcp;
+ struct v4l2_m2m_dev *m2m_dev;
+};
+
+struct fdp1_ctx {
+ struct v4l2_fh fh;
+ struct fdp1_dev *fdp1;
+
+ struct v4l2_ctrl_handler hdl;
+ unsigned int sequence;
+
+ /* Processed buffers in this transaction */
+ u8 num_processed;
+
+ /* Transaction length (i.e. how many buffers per transaction) */
+ u32 translen;
+
+ /* Abort requested by m2m */
+ int aborting;
+
+ /* Deinterlace processing mode */
+ enum fdp1_deint_mode deint_mode;
+
+ /*
+ * Adaptive 2D/3D mode uses a shared mask
+ * This is allocated at streamon, if the ADAPT2D3D mode
+ * is requested
+ */
+ unsigned int smsk_size;
+ dma_addr_t smsk_addr[2];
+ void *smsk_cpu;
+
+ /*
+ * Capture pipeline, can specify an alpha value
+ * for supported formats. 0-255 only.
+ */
+ unsigned char alpha;
+
+ /* Source and destination queue data */
+ struct fdp1_q_data out_q; /* HW Source */
+ struct fdp1_q_data cap_q; /* HW Destination */
+
+ /*
+ * Field Queues
+ * Interlaced fields are used on 3 occasions, and tracked in this list.
+ *
+ * V4L2 Buffers are tracked inside the fdp1_buffer
+ * and released when the last 'field' completes
+ */
+ struct list_head fields_queue;
+ unsigned int buffers_queued;
+
+ /*
+ * For de-interlacing we need to track our previous buffer
+ * while preparing our job lists.
+ */
+ struct fdp1_field_buffer *previous;
+};
+
+static inline struct fdp1_ctx *fh_to_ctx(struct v4l2_fh *fh)
+{
+ return container_of(fh, struct fdp1_ctx, fh);
+}
+
+static struct fdp1_q_data *get_q_data(struct fdp1_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return &ctx->out_q;
+ else
+ return &ctx->cap_q;
+}
+
+/*
+ * list_remove_job: Take the first item off the specified job list
+ *
+ * Returns: pointer to a job, or NULL if the list is empty.
+ */
+static struct fdp1_job *list_remove_job(struct fdp1_dev *fdp1,
+ struct list_head *list)
+{
+ struct fdp1_job *job;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fdp1->irqlock, flags);
+ job = list_first_entry_or_null(list, struct fdp1_job, list);
+ if (job)
+ list_del(&job->list);
+ spin_unlock_irqrestore(&fdp1->irqlock, flags);
+
+ return job;
+}
+
+/*
+ * list_add_job: Add a job to the specified job list
+ *
+ * Returns: void - always succeeds
+ */
+static void list_add_job(struct fdp1_dev *fdp1,
+ struct list_head *list,
+ struct fdp1_job *job)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&fdp1->irqlock, flags);
+ list_add_tail(&job->list, list);
+ spin_unlock_irqrestore(&fdp1->irqlock, flags);
+}
+
+static struct fdp1_job *fdp1_job_alloc(struct fdp1_dev *fdp1)
+{
+ return list_remove_job(fdp1, &fdp1->free_job_list);
+}
+
+static void fdp1_job_free(struct fdp1_dev *fdp1, struct fdp1_job *job)
+{
+ /* Ensure that all residue from previous jobs is gone */
+ memset(job, 0, sizeof(struct fdp1_job));
+
+ list_add_job(fdp1, &fdp1->free_job_list, job);
+}
+
+static void queue_job(struct fdp1_dev *fdp1, struct fdp1_job *job)
+{
+ list_add_job(fdp1, &fdp1->queued_job_list, job);
+}
+
+static struct fdp1_job *get_queued_job(struct fdp1_dev *fdp1)
+{
+ return list_remove_job(fdp1, &fdp1->queued_job_list);
+}
+
+static void queue_hw_job(struct fdp1_dev *fdp1, struct fdp1_job *job)
+{
+ list_add_job(fdp1, &fdp1->hw_job_list, job);
+}
+
+static struct fdp1_job *get_hw_queued_job(struct fdp1_dev *fdp1)
+{
+ return list_remove_job(fdp1, &fdp1->hw_job_list);
+}
+
+/*
+ * Buffer lists handling
+ */
+static void fdp1_field_complete(struct fdp1_ctx *ctx,
+ struct fdp1_field_buffer *fbuf)
+{
+ /* job->previous may be on the first field */
+ if (!fbuf)
+ return;
+
+ if (fbuf->last_field)
+ v4l2_m2m_buf_done(fbuf->vb, VB2_BUF_STATE_DONE);
+}
+
+static void fdp1_queue_field(struct fdp1_ctx *ctx,
+ struct fdp1_field_buffer *fbuf)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->fdp1->irqlock, flags);
+ list_add_tail(&fbuf->list, &ctx->fields_queue);
+ spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags);
+
+ ctx->buffers_queued++;
+}
+
+static struct fdp1_field_buffer *fdp1_dequeue_field(struct fdp1_ctx *ctx)
+{
+ struct fdp1_field_buffer *fbuf;
+ unsigned long flags;
+
+ ctx->buffers_queued--;
+
+ spin_lock_irqsave(&ctx->fdp1->irqlock, flags);
+ fbuf = list_first_entry_or_null(&ctx->fields_queue,
+ struct fdp1_field_buffer, list);
+ if (fbuf)
+ list_del(&fbuf->list);
+ spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags);
+
+ return fbuf;
+}
+
+/*
+ * Return the next field in the queue - or NULL,
+ * without removing the item from the list
+ */
+static struct fdp1_field_buffer *fdp1_peek_queued_field(struct fdp1_ctx *ctx)
+{
+ struct fdp1_field_buffer *fbuf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->fdp1->irqlock, flags);
+ fbuf = list_first_entry_or_null(&ctx->fields_queue,
+ struct fdp1_field_buffer, list);
+ spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags);
+
+ return fbuf;
+}
+
+static u32 fdp1_read(struct fdp1_dev *fdp1, unsigned int reg)
+{
+ u32 value = ioread32(fdp1->regs + reg);
+
+ if (debug >= 2)
+ dprintk(fdp1, "Read 0x%08x from 0x%04x\n", value, reg);
+
+ return value;
+}
+
+static void fdp1_write(struct fdp1_dev *fdp1, u32 val, unsigned int reg)
+{
+ if (debug >= 2)
+ dprintk(fdp1, "Write 0x%08x to 0x%04x\n", val, reg);
+
+ iowrite32(val, fdp1->regs + reg);
+}
+
+/* IPC registers are to be programmed with constant values */
+static void fdp1_set_ipc_dli(struct fdp1_ctx *ctx)
+{
+ struct fdp1_dev *fdp1 = ctx->fdp1;
+
+ fdp1_write(fdp1, FD1_IPC_SMSK_THRESH_CONST, FD1_IPC_SMSK_THRESH);
+ fdp1_write(fdp1, FD1_IPC_COMB_DET_CONST, FD1_IPC_COMB_DET);
+ fdp1_write(fdp1, FD1_IPC_MOTDEC_CONST, FD1_IPC_MOTDEC);
+
+ fdp1_write(fdp1, FD1_IPC_DLI_BLEND_CONST, FD1_IPC_DLI_BLEND);
+ fdp1_write(fdp1, FD1_IPC_DLI_HGAIN_CONST, FD1_IPC_DLI_HGAIN);
+ fdp1_write(fdp1, FD1_IPC_DLI_SPRS_CONST, FD1_IPC_DLI_SPRS);
+ fdp1_write(fdp1, FD1_IPC_DLI_ANGLE_CONST, FD1_IPC_DLI_ANGLE);
+ fdp1_write(fdp1, FD1_IPC_DLI_ISOPIX0_CONST, FD1_IPC_DLI_ISOPIX0);
+ fdp1_write(fdp1, FD1_IPC_DLI_ISOPIX1_CONST, FD1_IPC_DLI_ISOPIX1);
+}
+
+
+static void fdp1_set_ipc_sensor(struct fdp1_ctx *ctx)
+{
+ struct fdp1_dev *fdp1 = ctx->fdp1;
+ struct fdp1_q_data *src_q_data = &ctx->out_q;
+ unsigned int x0, x1;
+ unsigned int hsize = src_q_data->format.width;
+ unsigned int vsize = src_q_data->format.height;
+
+ x0 = hsize / 3;
+ x1 = 2 * hsize / 3;
+
+ fdp1_write(fdp1, FD1_IPC_SENSOR_TH0_CONST, FD1_IPC_SENSOR_TH0);
+ fdp1_write(fdp1, FD1_IPC_SENSOR_TH1_CONST, FD1_IPC_SENSOR_TH1);
+ fdp1_write(fdp1, FD1_IPC_SENSOR_CTL0_CONST, FD1_IPC_SENSOR_CTL0);
+ fdp1_write(fdp1, FD1_IPC_SENSOR_CTL1_CONST, FD1_IPC_SENSOR_CTL1);
+
+ fdp1_write(fdp1, ((hsize - 1) << FD1_IPC_SENSOR_CTL2_X_SHIFT) |
+ ((vsize - 1) << FD1_IPC_SENSOR_CTL2_Y_SHIFT),
+ FD1_IPC_SENSOR_CTL2);
+
+ fdp1_write(fdp1, (x0 << FD1_IPC_SENSOR_CTL3_0_SHIFT) |
+ (x1 << FD1_IPC_SENSOR_CTL3_1_SHIFT),
+ FD1_IPC_SENSOR_CTL3);
+}
+
+/*
+ * fdp1_write_lut: Write a padded LUT to the hw
+ *
+ * FDP1 uses constant data for de-interlacing processing,
+ * with large tables. These hardware tables are all 256 bytes
+ * long, however they often contain repeated data at the end.
+ *
+ * The last byte of the table is written to all remaining entries.
+ */
+static void fdp1_write_lut(struct fdp1_dev *fdp1, const u8 *lut,
+ unsigned int len, unsigned int base)
+{
+ unsigned int i;
+ u8 pad;
+
+ /* Tables larger than the hw are clipped */
+ len = min(len, 256u);
+
+ for (i = 0; i < len; i++)
+ fdp1_write(fdp1, lut[i], base + (i*4));
+
+ /* Tables are padded with the last entry */
+ pad = lut[i-1];
+
+ for (; i < 256; i++)
+ fdp1_write(fdp1, pad, base + (i*4));
+}
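
For example, the 24-entry fdp1_diff_adj table below is written verbatim to FD1_LUT_DIF_ADJ + 0x00 through + 0x5c (one 32-bit register per entry, 4 bytes apart), and its final value 0xff is then replicated into the remaining 232 entries to fill the 256-entry hardware table.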
+
+static void fdp1_set_lut(struct fdp1_dev *fdp1)
+{
+ fdp1_write_lut(fdp1, fdp1_diff_adj, ARRAY_SIZE(fdp1_diff_adj),
+ FD1_LUT_DIF_ADJ);
+ fdp1_write_lut(fdp1, fdp1_sad_adj, ARRAY_SIZE(fdp1_sad_adj),
+ FD1_LUT_SAD_ADJ);
+ fdp1_write_lut(fdp1, fdp1_bld_gain, ARRAY_SIZE(fdp1_bld_gain),
+ FD1_LUT_BLD_GAIN);
+ fdp1_write_lut(fdp1, fdp1_dif_gain, ARRAY_SIZE(fdp1_dif_gain),
+ FD1_LUT_DIF_GAIN);
+ fdp1_write_lut(fdp1, fdp1_mdet, ARRAY_SIZE(fdp1_mdet),
+ FD1_LUT_MDET);
+}
+
+static void fdp1_configure_rpf(struct fdp1_ctx *ctx,
+ struct fdp1_job *job)
+{
+ struct fdp1_dev *fdp1 = ctx->fdp1;
+ u32 picture_size;
+ u32 pstride;
+ u32 format;
+ u32 smsk_addr;
+
+ struct fdp1_q_data *q_data = &ctx->out_q;
+
+ /* Picture size is common to Source and Destination frames */
+ picture_size = (q_data->format.width << FD1_RPF_SIZE_H_SHIFT)
+ | (q_data->vsize << FD1_RPF_SIZE_V_SHIFT);
+
+ /* Strides */
+ pstride = q_data->stride_y << FD1_RPF_PSTRIDE_Y_SHIFT;
+ if (q_data->format.num_planes > 1)
+ pstride |= q_data->stride_c << FD1_RPF_PSTRIDE_C_SHIFT;
+
+ /* Format control */
+ format = q_data->fmt->fmt;
+ if (q_data->fmt->swap_yc)
+ format |= FD1_RPF_FORMAT_RSPYCS;
+
+ if (q_data->fmt->swap_uv)
+ format |= FD1_RPF_FORMAT_RSPUVS;
+
+ if (job->active->field == V4L2_FIELD_BOTTOM) {
+ format |= FD1_RPF_FORMAT_CF; /* Set for Bottom field */
+ smsk_addr = ctx->smsk_addr[0];
+ } else {
+ smsk_addr = ctx->smsk_addr[1];
+ }
+
+ /* Deint mode is non-zero when deinterlacing */
+ if (ctx->deint_mode)
+ format |= FD1_RPF_FORMAT_CIPM;
+
+ fdp1_write(fdp1, format, FD1_RPF_FORMAT);
+ fdp1_write(fdp1, q_data->fmt->swap, FD1_RPF_SWAP);
+ fdp1_write(fdp1, picture_size, FD1_RPF_SIZE);
+ fdp1_write(fdp1, pstride, FD1_RPF_PSTRIDE);
+ fdp1_write(fdp1, smsk_addr, FD1_RPF_SMSK_ADDR);
+
+ /* Previous Field Channel (CH0) */
+ if (job->previous)
+ fdp1_write(fdp1, job->previous->addrs[0], FD1_RPF0_ADDR_Y);
+
+ /* Current Field Channel (CH1) */
+ fdp1_write(fdp1, job->active->addrs[0], FD1_RPF1_ADDR_Y);
+ fdp1_write(fdp1, job->active->addrs[1], FD1_RPF1_ADDR_C0);
+ fdp1_write(fdp1, job->active->addrs[2], FD1_RPF1_ADDR_C1);
+
+ /* Next Field Channel (CH2) */
+ if (job->next)
+ fdp1_write(fdp1, job->next->addrs[0], FD1_RPF2_ADDR_Y);
+}
+
+static void fdp1_configure_wpf(struct fdp1_ctx *ctx,
+ struct fdp1_job *job)
+{
+ struct fdp1_dev *fdp1 = ctx->fdp1;
+ struct fdp1_q_data *src_q_data = &ctx->out_q;
+ struct fdp1_q_data *q_data = &ctx->cap_q;
+ u32 pstride;
+ u32 format;
+ u32 swap;
+ u32 rndctl;
+
+ pstride = q_data->format.plane_fmt[0].bytesperline
+ << FD1_WPF_PSTRIDE_Y_SHIFT;
+
+ if (q_data->format.num_planes > 1)
+ pstride |= q_data->format.plane_fmt[1].bytesperline
+ << FD1_WPF_PSTRIDE_C_SHIFT;
+
+ format = q_data->fmt->fmt; /* Output Format Code */
+
+ if (q_data->fmt->swap_yc)
+ format |= FD1_WPF_FORMAT_WSPYCS;
+
+ if (q_data->fmt->swap_uv)
+ format |= FD1_WPF_FORMAT_WSPUVS;
+
+ if (fdp1_fmt_is_rgb(q_data->fmt)) {
+ /* Enable Colour Space conversion */
+ format |= FD1_WPF_FORMAT_CSC;
+
+ /* Set WRTM */
+ if (src_q_data->format.ycbcr_enc == V4L2_YCBCR_ENC_709)
+ format |= FD1_WPF_FORMAT_WRTM_709_16;
+ else if (src_q_data->format.quantization ==
+ V4L2_QUANTIZATION_FULL_RANGE)
+ format |= FD1_WPF_FORMAT_WRTM_601_0;
+ else
+ format |= FD1_WPF_FORMAT_WRTM_601_16;
+ }
+
+ /* Set an alpha value into the Pad Value */
+ format |= ctx->alpha << FD1_WPF_FORMAT_PDV_SHIFT;
+
+ /* Determine picture rounding and clipping */
+ rndctl = FD1_WPF_RNDCTL_CBRM; /* Rounding Off */
+ rndctl |= FD1_WPF_RNDCTL_CLMD_NOCLIP;
+
+ /* WPF Swap needs both ISWAP and OSWAP setting */
+ swap = q_data->fmt->swap << FD1_WPF_SWAP_OSWAP_SHIFT;
+ swap |= src_q_data->fmt->swap << FD1_WPF_SWAP_SSWAP_SHIFT;
+
+ fdp1_write(fdp1, format, FD1_WPF_FORMAT);
+ fdp1_write(fdp1, rndctl, FD1_WPF_RNDCTL);
+ fdp1_write(fdp1, swap, FD1_WPF_SWAP);
+ fdp1_write(fdp1, pstride, FD1_WPF_PSTRIDE);
+
+ fdp1_write(fdp1, job->dst->addrs[0], FD1_WPF_ADDR_Y);
+ fdp1_write(fdp1, job->dst->addrs[1], FD1_WPF_ADDR_C0);
+ fdp1_write(fdp1, job->dst->addrs[2], FD1_WPF_ADDR_C1);
+}
+
+static void fdp1_configure_deint_mode(struct fdp1_ctx *ctx,
+ struct fdp1_job *job)
+{
+ struct fdp1_dev *fdp1 = ctx->fdp1;
+ u32 opmode = FD1_CTL_OPMODE_VIMD_NOINTERRUPT;
+ u32 ipcmode = FD1_IPC_MODE_DLI; /* Always set */
+ u32 channels = FD1_CTL_CHACT_WR | FD1_CTL_CHACT_RD1; /* Always on */
+
+ /* De-interlacing Mode */
+ switch (ctx->deint_mode) {
+ default:
+ case FDP1_PROGRESSIVE:
+ dprintk(fdp1, "Progressive Mode\n");
+ opmode |= FD1_CTL_OPMODE_PRG;
+ ipcmode |= FD1_IPC_MODE_DIM_FIXED2D;
+ break;
+ case FDP1_ADAPT2D3D:
+ dprintk(fdp1, "Adapt2D3D Mode\n");
+ if (ctx->sequence == 0 || ctx->aborting)
+ ipcmode |= FD1_IPC_MODE_DIM_FIXED2D;
+ else
+ ipcmode |= FD1_IPC_MODE_DIM_ADAPT2D3D;
+
+ if (ctx->sequence > 1) {
+ channels |= FD1_CTL_CHACT_SMW;
+ channels |= FD1_CTL_CHACT_RD0 | FD1_CTL_CHACT_RD2;
+ }
+
+ if (ctx->sequence > 2)
+ channels |= FD1_CTL_CHACT_SMR;
+
+ break;
+ case FDP1_FIXED3D:
+ dprintk(fdp1, "Fixed 3D Mode\n");
+ ipcmode |= FD1_IPC_MODE_DIM_FIXED3D;
+ /* Except for first and last frame, enable all channels */
+ if (!(ctx->sequence == 0 || ctx->aborting))
+ channels |= FD1_CTL_CHACT_RD0 | FD1_CTL_CHACT_RD2;
+ break;
+ case FDP1_FIXED2D:
+ dprintk(fdp1, "Fixed 2D Mode\n");
+ ipcmode |= FD1_IPC_MODE_DIM_FIXED2D;
+ /* No extra channels enabled */
+ break;
+ case FDP1_PREVFIELD:
+ dprintk(fdp1, "Previous Field Mode\n");
+ ipcmode |= FD1_IPC_MODE_DIM_PREVFIELD;
+ channels |= FD1_CTL_CHACT_RD0; /* Previous */
+ break;
+ case FDP1_NEXTFIELD:
+ dprintk(fdp1, "Next Field Mode\n");
+ ipcmode |= FD1_IPC_MODE_DIM_NEXTFIELD;
+ channels |= FD1_CTL_CHACT_RD2; /* Next */
+ break;
+ }
+
+ fdp1_write(fdp1, channels, FD1_CTL_CHACT);
+ fdp1_write(fdp1, opmode, FD1_CTL_OPMODE);
+ fdp1_write(fdp1, ipcmode, FD1_IPC_MODE);
+}
+
+/*
+ * fdp1_device_process() - Run the hardware
+ *
+ * Configure and start the hardware to generate a single frame
+ * of output given our input parameters.
+ */
+static int fdp1_device_process(struct fdp1_ctx *ctx)
+{
+ struct fdp1_dev *fdp1 = ctx->fdp1;
+ struct fdp1_job *job;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fdp1->device_process_lock, flags);
+
+ /* Get a job to process */
+ job = get_queued_job(fdp1);
+ if (!job) {
+ /*
+ * VINT can call us to see if we can queue another job.
+ * If we have no work to do, we simply return.
+ */
+ spin_unlock_irqrestore(&fdp1->device_process_lock, flags);
+ return 0;
+ }
+
+ /* First Frame only? ... */
+ fdp1_write(fdp1, FD1_CTL_CLKCTRL_CSTP_N, FD1_CTL_CLKCTRL);
+
+ /* Set the mode, and configuration */
+ fdp1_configure_deint_mode(ctx, job);
+
+ /* DLI Static Configuration */
+ fdp1_set_ipc_dli(ctx);
+
+ /* Sensor Configuration */
+ fdp1_set_ipc_sensor(ctx);
+
+ /* Setup the source picture */
+ fdp1_configure_rpf(ctx, job);
+
+ /* Setup the destination picture */
+ fdp1_configure_wpf(ctx, job);
+
+ /* Line Memory Pixel Number Register for linear access */
+ fdp1_write(fdp1, FD1_IPC_LMEM_LINEAR, FD1_IPC_LMEM);
+
+ /* Enable Interrupts */
+ fdp1_write(fdp1, FD1_CTL_IRQ_MASK, FD1_CTL_IRQENB);
+
+ /* Finally, the Immediate Registers */
+
+ /* This job is now in the HW queue */
+ queue_hw_job(fdp1, job);
+
+ /* Start the command */
+ fdp1_write(fdp1, FD1_CTL_CMD_STRCMD, FD1_CTL_CMD);
+
+ /* Registers will update to HW at next VINT */
+ fdp1_write(fdp1, FD1_CTL_REGEND_REGEND, FD1_CTL_REGEND);
+
+ /* Enable VINT Generator */
+ fdp1_write(fdp1, FD1_CTL_SGCMD_SGEN, FD1_CTL_SGCMD);
+
+ spin_unlock_irqrestore(&fdp1->device_process_lock, flags);
+
+ return 0;
+}
+
+/*
+ * mem2mem callbacks
+ */
+
+/*
+ * job_ready() - check whether an instance is ready to be scheduled to run
+ */
+static int fdp1_m2m_job_ready(void *priv)
+{
+ struct fdp1_ctx *ctx = priv;
+ struct fdp1_q_data *src_q_data = &ctx->out_q;
+ int srcbufs = 1;
+ int dstbufs = 1;
+
+ dprintk(ctx->fdp1, "+ Src: %d : Dst: %d\n",
+ v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx),
+ v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx));
+
+ /* One output buffer is required for each field */
+ if (V4L2_FIELD_HAS_BOTH(src_q_data->format.field))
+ dstbufs = 2;
+
+ if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) < srcbufs
+ || v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) < dstbufs) {
+ dprintk(ctx->fdp1, "Not enough buffers available\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+static void fdp1_m2m_job_abort(void *priv)
+{
+ struct fdp1_ctx *ctx = priv;
+
+ dprintk(ctx->fdp1, "+\n");
+
+ /* Will cancel the transaction in the next interrupt handler */
+ ctx->aborting = 1;
+
+ /* Immediate abort sequence */
+ fdp1_write(ctx->fdp1, 0, FD1_CTL_SGCMD);
+ fdp1_write(ctx->fdp1, FD1_CTL_SRESET_SRST, FD1_CTL_SRESET);
+}
+
+/*
+ * fdp1_prepare_job: Prepare and queue a new job for a single action of work
+ *
+ * Prepare the next field, (or frame in progressive) and an output
+ * buffer for the hardware to perform a single operation.
+ */
+static struct fdp1_job *fdp1_prepare_job(struct fdp1_ctx *ctx)
+{
+ struct vb2_v4l2_buffer *vbuf;
+ struct fdp1_buffer *fbuf;
+ struct fdp1_dev *fdp1 = ctx->fdp1;
+ struct fdp1_job *job;
+ unsigned int buffers_required = 1;
+
+ dprintk(fdp1, "+\n");
+
+ if (FDP1_DEINT_MODE_USES_NEXT(ctx->deint_mode))
+ buffers_required = 2;
+
+ if (ctx->buffers_queued < buffers_required)
+ return NULL;
+
+ job = fdp1_job_alloc(fdp1);
+ if (!job) {
+ dprintk(fdp1, "No free jobs currently available\n");
+ return NULL;
+ }
+
+ job->active = fdp1_dequeue_field(ctx);
+ if (!job->active) {
+ /* Buffer check should prevent this ever happening */
+ dprintk(fdp1, "No input buffers currently available\n");
+
+ fdp1_job_free(fdp1, job);
+ return NULL;
+ }
+
+ dprintk(fdp1, "+ Buffer en-route...\n");
+
+ /*
+ * Source buffers have been prepared on our buffer_queue.
+ * Prepare our output buffer.
+ */
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ fbuf = to_fdp1_buffer(vbuf);
+ job->dst = &fbuf->fields[0];
+
+ job->active->vb->sequence = ctx->sequence;
+ job->dst->vb->sequence = ctx->sequence;
+ ctx->sequence++;
+
+ if (FDP1_DEINT_MODE_USES_PREV(ctx->deint_mode)) {
+ job->previous = ctx->previous;
+
+ /* Active buffer becomes the next job's previous buffer */
+ ctx->previous = job->active;
+ }
+
+ if (FDP1_DEINT_MODE_USES_NEXT(ctx->deint_mode)) {
+ /* Must be called after 'active' is dequeued */
+ job->next = fdp1_peek_queued_field(ctx);
+ }
+
+ /* Transfer timestamps and flags from src->dst */
+
+ job->dst->vb->vb2_buf.timestamp = job->active->vb->vb2_buf.timestamp;
+
+ job->dst->vb->flags = job->active->vb->flags &
+ V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+
+ /*
+ * Ideally, the frame-end function will just 'check' to see
+ * if there are more jobs instead.
+ */
+ ctx->translen++;
+
+ /* Finally, Put this job on the processing queue */
+ queue_job(fdp1, job);
+
+ dprintk(fdp1, "Job Queued translen = %d\n", ctx->translen);
+
+ return job;
+}
+
+/* fdp1_m2m_device_run() - prepares and starts the device for an M2M task
+ *
+ * A single input buffer is taken and serialised into our fdp1_buffer
+ * queue. The queue is then processed to create as many jobs as possible
+ * from our available input.
+ */
+static void fdp1_m2m_device_run(void *priv)
+{
+ struct fdp1_ctx *ctx = priv;
+ struct fdp1_dev *fdp1 = ctx->fdp1;
+ struct vb2_v4l2_buffer *src_vb;
+ struct fdp1_buffer *buf;
+ unsigned int i;
+
+ dprintk(fdp1, "+\n");
+
+ ctx->translen = 0;
+
+ /* Get our incoming buffer of either one or two fields, or one frame */
+ src_vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ buf = to_fdp1_buffer(src_vb);
+
+ for (i = 0; i < buf->num_fields; i++) {
+ struct fdp1_field_buffer *fbuf = &buf->fields[i];
+
+ fdp1_queue_field(ctx, fbuf);
+ dprintk(fdp1, "Queued Buffer [%d] last_field:%d\n",
+ i, fbuf->last_field);
+ }
+
+ /* Queue as many jobs as our data provides for */
+ while (fdp1_prepare_job(ctx))
+ ;
+
+ if (ctx->translen == 0) {
+ dprintk(fdp1, "No jobs were processed. M2M action complete\n");
+ v4l2_m2m_job_finish(fdp1->m2m_dev, ctx->fh.m2m_ctx);
+ return;
+ }
+
+ /* Kick the job processing action */
+ fdp1_device_process(ctx);
+}
+
+/*
+ * device_frame_end:
+ *
+ * Handles the M2M level after a buffer completion event.
+ */
+static void device_frame_end(struct fdp1_dev *fdp1,
+ enum vb2_buffer_state state)
+{
+ struct fdp1_ctx *ctx;
+ unsigned long flags;
+ struct fdp1_job *job = get_hw_queued_job(fdp1);
+
+ dprintk(fdp1, "+\n");
+
+ ctx = v4l2_m2m_get_curr_priv(fdp1->m2m_dev);
+
+ if (ctx == NULL) {
+ v4l2_err(&fdp1->v4l2_dev,
+ "Instance released before the end of transaction\n");
+ return;
+ }
+
+ ctx->num_processed++;
+
+ /*
+ * fdp1_field_complete will call buf_done only when the last vb2_buffer
+ * reference is complete
+ */
+ if (FDP1_DEINT_MODE_USES_PREV(ctx->deint_mode))
+ fdp1_field_complete(ctx, job->previous);
+ else
+ fdp1_field_complete(ctx, job->active);
+
+ spin_lock_irqsave(&fdp1->irqlock, flags);
+ v4l2_m2m_buf_done(job->dst->vb, state);
+ job->dst = NULL;
+ spin_unlock_irqrestore(&fdp1->irqlock, flags);
+
+ /* Move this job back to the free job list */
+ fdp1_job_free(fdp1, job);
+
+ dprintk(fdp1, "curr_ctx->num_processed %d curr_ctx->translen %d\n",
+ ctx->num_processed, ctx->translen);
+
+ if (ctx->num_processed == ctx->translen ||
+ ctx->aborting) {
+ dprintk(ctx->fdp1, "Finishing transaction\n");
+ ctx->num_processed = 0;
+ v4l2_m2m_job_finish(fdp1->m2m_dev, ctx->fh.m2m_ctx);
+ } else {
+ /*
+ * For pipelined performance support, this would
+ * be called from a VINT handler
+ */
+ fdp1_device_process(ctx);
+ }
+}
+
+/*
+ * video ioctls
+ */
+static int fdp1_vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, DRIVER_NAME, sizeof(cap->driver));
+ strscpy(cap->card, DRIVER_NAME, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", DRIVER_NAME);
+ return 0;
+}
+
+static int fdp1_enum_fmt(struct v4l2_fmtdesc *f, u32 type)
+{
+ unsigned int i, num;
+
+ num = 0;
+
+ for (i = 0; i < ARRAY_SIZE(fdp1_formats); ++i) {
+ if (fdp1_formats[i].types & type) {
+ if (num == f->index)
+ break;
+ ++num;
+ }
+ }
+
+ /* Format not found */
+ if (i >= ARRAY_SIZE(fdp1_formats))
+ return -EINVAL;
+
+ /* Format found */
+ f->pixelformat = fdp1_formats[i].fourcc;
+
+ return 0;
+}
+
+static int fdp1_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return fdp1_enum_fmt(f, FDP1_CAPTURE);
+}
+
+static int fdp1_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return fdp1_enum_fmt(f, FDP1_OUTPUT);
+}
+
+static int fdp1_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct fdp1_q_data *q_data;
+ struct fdp1_ctx *ctx = fh_to_ctx(priv);
+
+ if (!v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type))
+ return -EINVAL;
+
+ q_data = get_q_data(ctx, f->type);
+ f->fmt.pix_mp = q_data->format;
+
+ return 0;
+}
+
+static void fdp1_compute_stride(struct v4l2_pix_format_mplane *pix,
+ const struct fdp1_fmt *fmt)
+{
+ unsigned int i;
+
+ /* Compute and clamp the stride and image size. */
+ for (i = 0; i < min_t(unsigned int, fmt->num_planes, 2U); ++i) {
+ unsigned int hsub = i > 0 ? fmt->hsub : 1;
+ unsigned int vsub = i > 0 ? fmt->vsub : 1;
+ /* From VSP : TODO: Confirm alignment limits for FDP1 */
+ unsigned int align = 128;
+ unsigned int bpl;
+
+ bpl = clamp_t(unsigned int, pix->plane_fmt[i].bytesperline,
+ pix->width / hsub * fmt->bpp[i] / 8,
+ round_down(FDP1_MAX_STRIDE, align));
+
+ pix->plane_fmt[i].bytesperline = round_up(bpl, align);
+ pix->plane_fmt[i].sizeimage = pix->plane_fmt[i].bytesperline
+ * pix->height / vsub;
+
+ }
+
+ if (fmt->num_planes == 3) {
+ /* The two chroma planes must have the same stride. */
+ pix->plane_fmt[2].bytesperline = pix->plane_fmt[1].bytesperline;
+ pix->plane_fmt[2].sizeimage = pix->plane_fmt[1].sizeimage;
+
+ }
+}
+
+static void fdp1_try_fmt_output(struct fdp1_ctx *ctx,
+ const struct fdp1_fmt **fmtinfo,
+ struct v4l2_pix_format_mplane *pix)
+{
+ const struct fdp1_fmt *fmt;
+ unsigned int width;
+ unsigned int height;
+
+ /* Validate the pixel format to ensure the output queue supports it. */
+ fmt = fdp1_find_format(pix->pixelformat);
+ if (!fmt || !(fmt->types & FDP1_OUTPUT))
+ fmt = fdp1_find_format(V4L2_PIX_FMT_YUYV);
+
+ if (fmtinfo)
+ *fmtinfo = fmt;
+
+ pix->pixelformat = fmt->fourcc;
+ pix->num_planes = fmt->num_planes;
+
+ /*
+ * Progressive video and all interlaced field orders are acceptable.
+ * Default to V4L2_FIELD_INTERLACED.
+ */
+ if (pix->field != V4L2_FIELD_NONE &&
+ pix->field != V4L2_FIELD_ALTERNATE &&
+ !V4L2_FIELD_HAS_BOTH(pix->field))
+ pix->field = V4L2_FIELD_INTERLACED;
+
+ /*
+ * The deinterlacer doesn't care about the colorspace; accept all values
+ * and default to V4L2_COLORSPACE_SMPTE170M. The YUV to RGB conversion
+ * at the output of the deinterlacer supports a subset of encodings and
+ * quantization methods and will only be available when the colorspace
+ * allows it.
+ */
+ if (pix->colorspace == V4L2_COLORSPACE_DEFAULT)
+ pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
+
+ /*
+ * Align the width and height for YUV 4:2:2 and 4:2:0 formats and clamp
+ * them to the supported frame size range. The height boundaries are
+ * related to the full frame; divide them by two when the format passes
+ * fields in separate buffers.
+ */
+ width = round_down(pix->width, fmt->hsub);
+ pix->width = clamp(width, FDP1_MIN_W, FDP1_MAX_W);
+
+ height = round_down(pix->height, fmt->vsub);
+ if (pix->field == V4L2_FIELD_ALTERNATE)
+ pix->height = clamp(height, FDP1_MIN_H / 2, FDP1_MAX_H / 2);
+ else
+ pix->height = clamp(height, FDP1_MIN_H, FDP1_MAX_H);
+
+ fdp1_compute_stride(pix, fmt);
+}
+
+static void fdp1_try_fmt_capture(struct fdp1_ctx *ctx,
+ const struct fdp1_fmt **fmtinfo,
+ struct v4l2_pix_format_mplane *pix)
+{
+ struct fdp1_q_data *src_data = &ctx->out_q;
+ enum v4l2_colorspace colorspace;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_quantization quantization;
+ const struct fdp1_fmt *fmt;
+ bool allow_rgb;
+
+ /*
+ * Validate the pixel format. We can only accept RGB output formats if
+ * the input encoding and quantization are compatible with the format
+ * conversions supported by the hardware. The supported combinations are
+ *
+ * V4L2_YCBCR_ENC_601 + V4L2_QUANTIZATION_LIM_RANGE
+ * V4L2_YCBCR_ENC_601 + V4L2_QUANTIZATION_FULL_RANGE
+ * V4L2_YCBCR_ENC_709 + V4L2_QUANTIZATION_LIM_RANGE
+ */
+ colorspace = src_data->format.colorspace;
+
+ ycbcr_enc = src_data->format.ycbcr_enc;
+ if (ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT)
+ ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(colorspace);
+
+ quantization = src_data->format.quantization;
+ if (quantization == V4L2_QUANTIZATION_DEFAULT)
+ quantization = V4L2_MAP_QUANTIZATION_DEFAULT(false, colorspace,
+ ycbcr_enc);
+
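+ /*
+ * BT.601 is accepted with either quantization; BT.709 only with
+ * limited range, matching the combinations listed above.
+ */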
+ allow_rgb = ycbcr_enc == V4L2_YCBCR_ENC_601 ||
+ (ycbcr_enc == V4L2_YCBCR_ENC_709 &&
+ quantization == V4L2_QUANTIZATION_LIM_RANGE);
+
+ fmt = fdp1_find_format(pix->pixelformat);
+ if (!fmt || (!allow_rgb && fdp1_fmt_is_rgb(fmt)))
+ fmt = fdp1_find_format(V4L2_PIX_FMT_YUYV);
+
+ if (fmtinfo)
+ *fmtinfo = fmt;
+
+ pix->pixelformat = fmt->fourcc;
+ pix->num_planes = fmt->num_planes;
+ pix->field = V4L2_FIELD_NONE;
+
+ /*
+ * The colorspace on the capture queue is copied from the output queue
+ * as the hardware can't change the colorspace. It can convert YCbCr to
+ * RGB though, in which case the encoding and quantization are set to
+ * default values as anything else wouldn't make sense.
+ */
+ pix->colorspace = src_data->format.colorspace;
+ pix->xfer_func = src_data->format.xfer_func;
+
+ if (fdp1_fmt_is_rgb(fmt)) {
+ pix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ pix->quantization = V4L2_QUANTIZATION_DEFAULT;
+ } else {
+ pix->ycbcr_enc = src_data->format.ycbcr_enc;
+ pix->quantization = src_data->format.quantization;
+ }
+
+ /*
+ * The frame width is identical to the output queue, and the height is
+ * either doubled or identical depending on whether the output queue
+ * field order contains one or two fields per frame.
+ */
+ pix->width = src_data->format.width;
+ if (src_data->format.field == V4L2_FIELD_ALTERNATE)
+ pix->height = 2 * src_data->format.height;
+ else
+ pix->height = src_data->format.height;
+
+ fdp1_compute_stride(pix, fmt);
+}
+
+static int fdp1_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct fdp1_ctx *ctx = fh_to_ctx(priv);
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ fdp1_try_fmt_output(ctx, NULL, &f->fmt.pix_mp);
+ else
+ fdp1_try_fmt_capture(ctx, NULL, &f->fmt.pix_mp);
+
+ dprintk(ctx->fdp1, "Try %s format: %4.4s (0x%08x) %ux%u field %u\n",
+ V4L2_TYPE_IS_OUTPUT(f->type) ? "output" : "capture",
+ (char *)&f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.pixelformat,
+ f->fmt.pix_mp.width, f->fmt.pix_mp.height, f->fmt.pix_mp.field);
+
+ return 0;
+}
+
+static void fdp1_set_format(struct fdp1_ctx *ctx,
+ struct v4l2_pix_format_mplane *pix,
+ enum v4l2_buf_type type)
+{
+ struct fdp1_q_data *q_data = get_q_data(ctx, type);
+ const struct fdp1_fmt *fmtinfo;
+
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ fdp1_try_fmt_output(ctx, &fmtinfo, pix);
+ else
+ fdp1_try_fmt_capture(ctx, &fmtinfo, pix);
+
+ q_data->fmt = fmtinfo;
+ q_data->format = *pix;
+
+ q_data->vsize = pix->height;
+ if (pix->field != V4L2_FIELD_NONE)
+ q_data->vsize /= 2;
+
+ q_data->stride_y = pix->plane_fmt[0].bytesperline;
+ q_data->stride_c = pix->plane_fmt[1].bytesperline;
+
+ /* Adjust strides for interleaved buffers */
+ if (pix->field == V4L2_FIELD_INTERLACED ||
+ pix->field == V4L2_FIELD_INTERLACED_TB ||
+ pix->field == V4L2_FIELD_INTERLACED_BT) {
+ q_data->stride_y *= 2;
+ q_data->stride_c *= 2;
+ }
+
+ /* Propagate the format from the output node to the capture node. */
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ struct fdp1_q_data *dst_data = &ctx->cap_q;
+
+ /*
+ * Copy the format, clear the per-plane bytes per line and image
+ * size, override the field and double the height if needed.
+ */
+ dst_data->format = q_data->format;
+ memset(dst_data->format.plane_fmt, 0,
+ sizeof(dst_data->format.plane_fmt));
+
+ dst_data->format.field = V4L2_FIELD_NONE;
+ if (pix->field == V4L2_FIELD_ALTERNATE)
+ dst_data->format.height *= 2;
+
+ fdp1_try_fmt_capture(ctx, &dst_data->fmt, &dst_data->format);
+
+ dst_data->vsize = dst_data->format.height;
+ dst_data->stride_y = dst_data->format.plane_fmt[0].bytesperline;
+ dst_data->stride_c = dst_data->format.plane_fmt[1].bytesperline;
+ }
+}
+
+static int fdp1_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct fdp1_ctx *ctx = fh_to_ctx(priv);
+ struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
+ struct vb2_queue *vq = v4l2_m2m_get_vq(m2m_ctx, f->type);
+
+ if (vb2_is_busy(vq)) {
+ v4l2_err(&ctx->fdp1->v4l2_dev, "%s queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ fdp1_set_format(ctx, &f->fmt.pix_mp, f->type);
+
+ dprintk(ctx->fdp1, "Set %s format: %4.4s (0x%08x) %ux%u field %u\n",
+ V4L2_TYPE_IS_OUTPUT(f->type) ? "output" : "capture",
+ (char *)&f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.pixelformat,
+ f->fmt.pix_mp.width, f->fmt.pix_mp.height, f->fmt.pix_mp.field);
+
+ return 0;
+}
+
+static int fdp1_g_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct fdp1_ctx *ctx =
+ container_of(ctrl->handler, struct fdp1_ctx, hdl);
+ struct fdp1_q_data *src_q_data = &ctx->out_q;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
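+ /*
+ * A source buffer carrying both fields produces two deinterlaced
+ * frames, so userspace needs at least two capture buffers queued.
+ */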
+ if (V4L2_FIELD_HAS_BOTH(src_q_data->format.field))
+ ctrl->val = 2;
+ else
+ ctrl->val = 1;
+ return 0;
+ }
+
+ return 1;
+}
+
+static int fdp1_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct fdp1_ctx *ctx =
+ container_of(ctrl->handler, struct fdp1_ctx, hdl);
+
+ switch (ctrl->id) {
+ case V4L2_CID_ALPHA_COMPONENT:
+ ctx->alpha = ctrl->val;
+ break;
+
+ case V4L2_CID_DEINTERLACING_MODE:
+ ctx->deint_mode = ctrl->val;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops fdp1_ctrl_ops = {
+ .s_ctrl = fdp1_s_ctrl,
+ .g_volatile_ctrl = fdp1_g_ctrl,
+};
+
+static const char * const fdp1_ctrl_deint_menu[] = {
+ "Progressive",
+ "Adaptive 2D/3D",
+ "Fixed 2D",
+ "Fixed 3D",
+ "Previous field",
+ "Next field",
+ NULL
+};
+
+static const struct v4l2_ioctl_ops fdp1_ioctl_ops = {
+ .vidioc_querycap = fdp1_vidioc_querycap,
+
+ .vidioc_enum_fmt_vid_cap = fdp1_enum_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_out = fdp1_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_cap_mplane = fdp1_g_fmt,
+ .vidioc_g_fmt_vid_out_mplane = fdp1_g_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = fdp1_try_fmt,
+ .vidioc_try_fmt_vid_out_mplane = fdp1_try_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = fdp1_s_fmt,
+ .vidioc_s_fmt_vid_out_mplane = fdp1_s_fmt,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/*
+ * Queue operations
+ */
+
+static int fdp1_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[],
+ struct device *alloc_ctxs[])
+{
+ struct fdp1_ctx *ctx = vb2_get_drv_priv(vq);
+ struct fdp1_q_data *q_data;
+ unsigned int i;
+
+ q_data = get_q_data(ctx, vq->type);
+
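+ /* If a plane count was requested (CREATE_BUFS), only validate it. */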
+ if (*nplanes) {
+ if (*nplanes > FDP1_MAX_PLANES)
+ return -EINVAL;
+
+ return 0;
+ }
+
+ *nplanes = q_data->format.num_planes;
+
+ for (i = 0; i < *nplanes; i++)
+ sizes[i] = q_data->format.plane_fmt[i].sizeimage;
+
+ return 0;
+}
+
+static void fdp1_buf_prepare_field(struct fdp1_q_data *q_data,
+ struct vb2_v4l2_buffer *vbuf,
+ unsigned int field_num)
+{
+ struct fdp1_buffer *buf = to_fdp1_buffer(vbuf);
+ struct fdp1_field_buffer *fbuf = &buf->fields[field_num];
+ unsigned int num_fields;
+ unsigned int i;
+
+ num_fields = V4L2_FIELD_HAS_BOTH(vbuf->field) ? 2 : 1;
+
+ fbuf->vb = vbuf;
+ fbuf->last_field = (field_num + 1) == num_fields;
+
+ for (i = 0; i < vbuf->vb2_buf.num_planes; ++i)
+ fbuf->addrs[i] = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, i);
+
+ switch (vbuf->field) {
+ case V4L2_FIELD_INTERLACED:
+ /*
+ * Interlaced means bottom-top for 60Hz TV standards (NTSC) and
+ * top-bottom for 50Hz. As TV standards are not applicable to
+ * the mem-to-mem API, use the height as a heuristic.
+ */
+ fbuf->field = (q_data->format.height < 576) == field_num
+ ? V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM;
+ break;
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_SEQ_TB:
+ fbuf->field = field_num ? V4L2_FIELD_BOTTOM : V4L2_FIELD_TOP;
+ break;
+ case V4L2_FIELD_INTERLACED_BT:
+ case V4L2_FIELD_SEQ_BT:
+ fbuf->field = field_num ? V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM;
+ break;
+ default:
+ fbuf->field = vbuf->field;
+ break;
+ }
+
+ /* The first field uses the buffer base addresses; nothing more to do. */
+ if (!field_num)
+ return;
+
+ /* Adjust buffer addresses for second field */
+ switch (vbuf->field) {
+ case V4L2_FIELD_INTERLACED:
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ for (i = 0; i < vbuf->vb2_buf.num_planes; i++)
+ fbuf->addrs[i] +=
+ (i == 0 ? q_data->stride_y : q_data->stride_c);
+ break;
+ case V4L2_FIELD_SEQ_TB:
+ case V4L2_FIELD_SEQ_BT:
+ for (i = 0; i < vbuf->vb2_buf.num_planes; i++)
+ fbuf->addrs[i] += q_data->vsize *
+ (i == 0 ? q_data->stride_y : q_data->stride_c);
+ break;
+ }
+}
+
+static int fdp1_buf_prepare(struct vb2_buffer *vb)
+{
+ struct fdp1_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct fdp1_q_data *q_data = get_q_data(ctx, vb->vb2_queue->type);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct fdp1_buffer *buf = to_fdp1_buffer(vbuf);
+ unsigned int i;
+
+ if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+ bool field_valid = true;
+
+ /* Validate the buffer field. */
+ switch (q_data->format.field) {
+ case V4L2_FIELD_NONE:
+ if (vbuf->field != V4L2_FIELD_NONE)
+ field_valid = false;
+ break;
+
+ case V4L2_FIELD_ALTERNATE:
+ if (vbuf->field != V4L2_FIELD_TOP &&
+ vbuf->field != V4L2_FIELD_BOTTOM)
+ field_valid = false;
+ break;
+
+ case V4L2_FIELD_INTERLACED:
+ case V4L2_FIELD_SEQ_TB:
+ case V4L2_FIELD_SEQ_BT:
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ if (vbuf->field != q_data->format.field)
+ field_valid = false;
+ break;
+ }
+
+ if (!field_valid) {
+ dprintk(ctx->fdp1,
+ "buffer field %u invalid for format field %u\n",
+ vbuf->field, q_data->format.field);
+ return -EINVAL;
+ }
+ } else {
+ vbuf->field = V4L2_FIELD_NONE;
+ }
+
+ /* Validate the planes sizes. */
+ for (i = 0; i < q_data->format.num_planes; i++) {
+ unsigned long size = q_data->format.plane_fmt[i].sizeimage;
+
+ if (vb2_plane_size(vb, i) < size) {
+ dprintk(ctx->fdp1,
+ "data will not fit into plane [%u/%u] (%lu < %lu)\n",
+ i, q_data->format.num_planes,
+ vb2_plane_size(vb, i), size);
+ return -EINVAL;
+ }
+
+ /* We have known size formats all around */
+ vb2_set_plane_payload(vb, i, size);
+ }
+
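+ /* Prepare one field descriptor per field carried by this buffer. */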
+ buf->num_fields = V4L2_FIELD_HAS_BOTH(vbuf->field) ? 2 : 1;
+ for (i = 0; i < buf->num_fields; ++i)
+ fdp1_buf_prepare_field(q_data, vbuf, i);
+
+ return 0;
+}
+
+static void fdp1_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct fdp1_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static int fdp1_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct fdp1_ctx *ctx = vb2_get_drv_priv(q);
+ struct fdp1_q_data *q_data = get_q_data(ctx, q->type);
+
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ /*
+ * Force our deint_mode when the input is progressive, ignoring any
+ * setting from the user; otherwise, lock in the requested
+ * de-interlace mode.
+ */
+ if (q_data->format.field == V4L2_FIELD_NONE)
+ ctx->deint_mode = FDP1_PROGRESSIVE;
+
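+ /*
+ * Adaptive 2D/3D mode needs an SMSK work buffer; allocate it and
+ * split it into two halves (smsk_addr[0] and smsk_addr[1]).
+ */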
+ if (ctx->deint_mode == FDP1_ADAPT2D3D) {
+ u32 stride;
+ dma_addr_t smsk_base;
+ const u32 bpp = 2; /* bytes per pixel */
+
+ stride = round_up(q_data->format.width, 8);
+
+ ctx->smsk_size = bpp * stride * q_data->vsize;
+
+ ctx->smsk_cpu = dma_alloc_coherent(ctx->fdp1->dev,
+ ctx->smsk_size, &smsk_base, GFP_KERNEL);
+
+ if (ctx->smsk_cpu == NULL) {
+ dprintk(ctx->fdp1, "Failed to alloc smsk\n");
+ return -ENOMEM;
+ }
+
+ ctx->smsk_addr[0] = smsk_base;
+ ctx->smsk_addr[1] = smsk_base + (ctx->smsk_size/2);
+ }
+ }
+
+ return 0;
+}
+
+static void fdp1_stop_streaming(struct vb2_queue *q)
+{
+ struct fdp1_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_v4l2_buffer *vbuf;
+ unsigned long flags;
+
+ while (1) {
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ if (vbuf == NULL)
+ break;
+ spin_lock_irqsave(&ctx->fdp1->irqlock, flags);
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags);
+ }
+
+ /* Empty Output queues */
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ /* Empty our internal queues */
+ struct fdp1_field_buffer *fbuf;
+
+ /* Free any queued buffers */
+ fbuf = fdp1_dequeue_field(ctx);
+ while (fbuf != NULL) {
+ fdp1_field_complete(ctx, fbuf);
+ fbuf = fdp1_dequeue_field(ctx);
+ }
+
+ /* Free smsk_data */
+ if (ctx->smsk_cpu) {
+ dma_free_coherent(ctx->fdp1->dev, ctx->smsk_size,
+ ctx->smsk_cpu, ctx->smsk_addr[0]);
+ ctx->smsk_addr[0] = ctx->smsk_addr[1] = 0;
+ ctx->smsk_cpu = NULL;
+ }
+
+ WARN(!list_empty(&ctx->fields_queue),
+ "Buffer queue not empty");
+ } else {
+ /* Empty Capture queues (Jobs) */
+ struct fdp1_job *job;
+
+ job = get_queued_job(ctx->fdp1);
+ while (job) {
+ if (FDP1_DEINT_MODE_USES_PREV(ctx->deint_mode))
+ fdp1_field_complete(ctx, job->previous);
+ else
+ fdp1_field_complete(ctx, job->active);
+
+ v4l2_m2m_buf_done(job->dst->vb, VB2_BUF_STATE_ERROR);
+ job->dst = NULL;
+
+ job = get_queued_job(ctx->fdp1);
+ }
+
+ /* Free any held buffer in the ctx */
+ fdp1_field_complete(ctx, ctx->previous);
+
+ WARN(!list_empty(&ctx->fdp1->queued_job_list),
+ "Queued Job List not empty");
+
+ WARN(!list_empty(&ctx->fdp1->hw_job_list),
+ "HW Job list not empty");
+ }
+}
+
+static const struct vb2_ops fdp1_qops = {
+ .queue_setup = fdp1_queue_setup,
+ .buf_prepare = fdp1_buf_prepare,
+ .buf_queue = fdp1_buf_queue,
+ .start_streaming = fdp1_start_streaming,
+ .stop_streaming = fdp1_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct fdp1_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct fdp1_buffer);
+ src_vq->ops = &fdp1_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->fdp1->dev_mutex;
+ src_vq->dev = ctx->fdp1->dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct fdp1_buffer);
+ dst_vq->ops = &fdp1_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->fdp1->dev_mutex;
+ dst_vq->dev = ctx->fdp1->dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+/*
+ * File operations
+ */
+static int fdp1_open(struct file *file)
+{
+ struct fdp1_dev *fdp1 = video_drvdata(file);
+ struct v4l2_pix_format_mplane format;
+ struct fdp1_ctx *ctx = NULL;
+ struct v4l2_ctrl *ctrl;
+ int ret = 0;
+
+ if (mutex_lock_interruptible(&fdp1->dev_mutex))
+ return -ERESTARTSYS;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ ctx->fdp1 = fdp1;
+
+ /* Initialise Queues */
+ INIT_LIST_HEAD(&ctx->fields_queue);
+
+ ctx->translen = 1;
+ ctx->sequence = 0;
+
+ /* Initialise controls */
+
+ v4l2_ctrl_handler_init(&ctx->hdl, 3);
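+ /*
+ * BIT(0) hides the "Progressive" menu entry from userspace; that
+ * mode is applied automatically in start_streaming for progressive
+ * input.
+ */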
+ v4l2_ctrl_new_std_menu_items(&ctx->hdl, &fdp1_ctrl_ops,
+ V4L2_CID_DEINTERLACING_MODE,
+ FDP1_NEXTFIELD, BIT(0), FDP1_FIXED3D,
+ fdp1_ctrl_deint_menu);
+
+ ctrl = v4l2_ctrl_new_std(&ctx->hdl, &fdp1_ctrl_ops,
+ V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 1, 2, 1, 1);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+ v4l2_ctrl_new_std(&ctx->hdl, &fdp1_ctrl_ops,
+ V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 255);
+
+ if (ctx->hdl.error) {
+ ret = ctx->hdl.error;
+ goto error_ctx;
+ }
+
+ ctx->fh.ctrl_handler = &ctx->hdl;
+ v4l2_ctrl_handler_setup(&ctx->hdl);
+
+ /* Configure default parameters. */
+ memset(&format, 0, sizeof(format));
+ fdp1_set_format(ctx, &format, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(fdp1->m2m_dev, ctx, &queue_init);
+
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ goto error_ctx;
+ }
+
+ /* Perform any power management required */
+ ret = pm_runtime_resume_and_get(fdp1->dev);
+ if (ret < 0)
+ goto error_pm;
+
+ v4l2_fh_add(&ctx->fh);
+
+ dprintk(fdp1, "Created instance: %p, m2m_ctx: %p\n",
+ ctx, ctx->fh.m2m_ctx);
+
+ mutex_unlock(&fdp1->dev_mutex);
+ return 0;
+
+error_pm:
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+error_ctx:
+ v4l2_ctrl_handler_free(&ctx->hdl);
+ kfree(ctx);
+done:
+ mutex_unlock(&fdp1->dev_mutex);
+ return ret;
+}
+
+static int fdp1_release(struct file *file)
+{
+ struct fdp1_dev *fdp1 = video_drvdata(file);
+ struct fdp1_ctx *ctx = fh_to_ctx(file->private_data);
+
+ dprintk(fdp1, "Releasing instance %p\n", ctx);
+
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_ctrl_handler_free(&ctx->hdl);
+ mutex_lock(&fdp1->dev_mutex);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ mutex_unlock(&fdp1->dev_mutex);
+ kfree(ctx);
+
+ pm_runtime_put(fdp1->dev);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations fdp1_fops = {
+ .owner = THIS_MODULE,
+ .open = fdp1_open,
+ .release = fdp1_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static const struct video_device fdp1_videodev = {
+ .name = DRIVER_NAME,
+ .vfl_dir = VFL_DIR_M2M,
+ .fops = &fdp1_fops,
+ .device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING,
+ .ioctl_ops = &fdp1_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release_empty,
+};
+
+static const struct v4l2_m2m_ops m2m_ops = {
+ .device_run = fdp1_m2m_device_run,
+ .job_ready = fdp1_m2m_job_ready,
+ .job_abort = fdp1_m2m_job_abort,
+};
+
+static irqreturn_t fdp1_irq_handler(int irq, void *dev_id)
+{
+ struct fdp1_dev *fdp1 = dev_id;
+ u32 int_status;
+ u32 ctl_status;
+ u32 vint_cnt;
+ u32 cycles;
+
+ int_status = fdp1_read(fdp1, FD1_CTL_IRQSTA);
+ cycles = fdp1_read(fdp1, FD1_CTL_VCYCLE_STAT);
+ ctl_status = fdp1_read(fdp1, FD1_CTL_STATUS);
+ vint_cnt = (ctl_status & FD1_CTL_STATUS_VINT_CNT_MASK) >>
+ FD1_CTL_STATUS_VINT_CNT_SHIFT;
+
+ /* Clear interrupts */
+ fdp1_write(fdp1, ~(int_status) & FD1_CTL_IRQ_MASK, FD1_CTL_IRQSTA);
+
+ if (debug >= 2) {
+ dprintk(fdp1, "IRQ: 0x%x %s%s%s\n", int_status,
+ int_status & FD1_CTL_IRQ_VERE ? "[Error]" : "[!E]",
+ int_status & FD1_CTL_IRQ_VINTE ? "[VSync]" : "[!V]",
+ int_status & FD1_CTL_IRQ_FREE ? "[FrameEnd]" : "[!F]");
+
+ dprintk(fdp1, "CycleStatus = %d (%dms)\n",
+ cycles, cycles/(fdp1->clk_rate/1000));
+
+ dprintk(fdp1,
+ "Control Status = 0x%08x : VINT_CNT = %d %s:%s:%s:%s\n",
+ ctl_status, vint_cnt,
+ ctl_status & FD1_CTL_STATUS_SGREGSET ? "RegSet" : "",
+ ctl_status & FD1_CTL_STATUS_SGVERR ? "Vsync Error" : "",
+ ctl_status & FD1_CTL_STATUS_SGFREND ? "FrameEnd" : "",
+ ctl_status & FD1_CTL_STATUS_BSY ? "Busy" : "");
+ dprintk(fdp1, "***********************************\n");
+ }
+
+ /* Spurious interrupt */
+ if (!(FD1_CTL_IRQ_MASK & int_status))
+ return IRQ_NONE;
+
+ /* Work completed, release the frame */
+ if (FD1_CTL_IRQ_VERE & int_status)
+ device_frame_end(fdp1, VB2_BUF_STATE_ERROR);
+ else if (FD1_CTL_IRQ_FREE & int_status)
+ device_frame_end(fdp1, VB2_BUF_STATE_DONE);
+
+ return IRQ_HANDLED;
+}
+
+static int fdp1_probe(struct platform_device *pdev)
+{
+ struct fdp1_dev *fdp1;
+ struct video_device *vfd;
+ struct device_node *fcp_node;
+ struct clk *clk;
+ unsigned int i;
+
+ int ret;
+ int hw_version;
+
+ fdp1 = devm_kzalloc(&pdev->dev, sizeof(*fdp1), GFP_KERNEL);
+ if (!fdp1)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&fdp1->free_job_list);
+ INIT_LIST_HEAD(&fdp1->queued_job_list);
+ INIT_LIST_HEAD(&fdp1->hw_job_list);
+
+ /* Initialise the jobs on the free list */
+ for (i = 0; i < ARRAY_SIZE(fdp1->jobs); i++)
+ list_add(&fdp1->jobs[i].list, &fdp1->free_job_list);
+
+ mutex_init(&fdp1->dev_mutex);
+
+ spin_lock_init(&fdp1->irqlock);
+ spin_lock_init(&fdp1->device_process_lock);
+ fdp1->dev = &pdev->dev;
+ platform_set_drvdata(pdev, fdp1);
+
+ /* Memory-mapped registers */
+ fdp1->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(fdp1->regs))
+ return PTR_ERR(fdp1->regs);
+
+ /* Interrupt service routine registration */
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return ret;
+ fdp1->irq = ret;
+
+ ret = devm_request_irq(&pdev->dev, fdp1->irq, fdp1_irq_handler, 0,
+ dev_name(&pdev->dev), fdp1);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot claim IRQ %d\n", fdp1->irq);
+ return ret;
+ }
+
+ /* FCP */
+ fcp_node = of_parse_phandle(pdev->dev.of_node, "renesas,fcp", 0);
+ if (fcp_node) {
+ fdp1->fcp = rcar_fcp_get(fcp_node);
+ of_node_put(fcp_node);
+ if (IS_ERR(fdp1->fcp)) {
+ dev_dbg(&pdev->dev, "FCP not found (%ld)\n",
+ PTR_ERR(fdp1->fcp));
+ return PTR_ERR(fdp1->fcp);
+ }
+ }
+
+ /* Determine our clock rate */
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto put_dev;
+ }
+
+ fdp1->clk_rate = clk_get_rate(clk);
+ clk_put(clk);
+
+ /* V4L2 device registration */
+ ret = v4l2_device_register(&pdev->dev, &fdp1->v4l2_dev);
+ if (ret) {
+ v4l2_err(&fdp1->v4l2_dev, "Failed to register video device\n");
+ goto put_dev;
+ }
+
+ /* M2M registration */
+ fdp1->m2m_dev = v4l2_m2m_init(&m2m_ops);
+ if (IS_ERR(fdp1->m2m_dev)) {
+ v4l2_err(&fdp1->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(fdp1->m2m_dev);
+ goto unreg_dev;
+ }
+
+ /* Video registration */
+ fdp1->vfd = fdp1_videodev;
+ vfd = &fdp1->vfd;
+ vfd->lock = &fdp1->dev_mutex;
+ vfd->v4l2_dev = &fdp1->v4l2_dev;
+ video_set_drvdata(vfd, fdp1);
+ strscpy(vfd->name, fdp1_videodev.name, sizeof(vfd->name));
+
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0);
+ if (ret) {
+ v4l2_err(&fdp1->v4l2_dev, "Failed to register video device\n");
+ goto release_m2m;
+ }
+
+ v4l2_info(&fdp1->v4l2_dev, "Device registered as /dev/video%d\n",
+ vfd->num);
+
+ /* Power up the cells to read HW */
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_resume_and_get(fdp1->dev);
+ if (ret < 0)
+ goto disable_pm;
+
+ hw_version = fdp1_read(fdp1, FD1_IP_INTDATA);
+ switch (hw_version) {
+ case FD1_IP_GEN2:
+ dprintk(fdp1, "FDP1 Version R-Car Gen2\n");
+ break;
+ case FD1_IP_M3W:
+ dprintk(fdp1, "FDP1 Version R-Car M3-W\n");
+ break;
+ case FD1_IP_H3:
+ dprintk(fdp1, "FDP1 Version R-Car H3\n");
+ break;
+ case FD1_IP_M3N:
+ dprintk(fdp1, "FDP1 Version R-Car M3-N\n");
+ break;
+ case FD1_IP_E3:
+ dprintk(fdp1, "FDP1 Version R-Car E3\n");
+ break;
+ default:
+ dev_err(fdp1->dev, "FDP1 Unidentifiable (0x%08x)\n",
+ hw_version);
+ }
+
+ /* Allow the hw to sleep until an open call puts it to use */
+ pm_runtime_put(fdp1->dev);
+
+ return 0;
+
+disable_pm:
+ pm_runtime_disable(fdp1->dev);
+
+release_m2m:
+ v4l2_m2m_release(fdp1->m2m_dev);
+
+unreg_dev:
+ v4l2_device_unregister(&fdp1->v4l2_dev);
+
+put_dev:
+ rcar_fcp_put(fdp1->fcp);
+ return ret;
+}
+
+static void fdp1_remove(struct platform_device *pdev)
+{
+ struct fdp1_dev *fdp1 = platform_get_drvdata(pdev);
+
+ v4l2_m2m_release(fdp1->m2m_dev);
+ video_unregister_device(&fdp1->vfd);
+ v4l2_device_unregister(&fdp1->v4l2_dev);
+ pm_runtime_disable(&pdev->dev);
+ rcar_fcp_put(fdp1->fcp);
+}
+
+static int __maybe_unused fdp1_pm_runtime_suspend(struct device *dev)
+{
+ struct fdp1_dev *fdp1 = dev_get_drvdata(dev);
+
+ rcar_fcp_disable(fdp1->fcp);
+
+ return 0;
+}
+
+static int __maybe_unused fdp1_pm_runtime_resume(struct device *dev)
+{
+ struct fdp1_dev *fdp1 = dev_get_drvdata(dev);
+
+ /* Program in the static LUTs */
+ fdp1_set_lut(fdp1);
+
+ return rcar_fcp_enable(fdp1->fcp);
+}
+
+static const struct dev_pm_ops fdp1_pm_ops = {
+ SET_RUNTIME_PM_OPS(fdp1_pm_runtime_suspend,
+ fdp1_pm_runtime_resume,
+ NULL)
+};
+
+static const struct of_device_id fdp1_dt_ids[] = {
+ { .compatible = "renesas,fdp1" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, fdp1_dt_ids);
+
+static struct platform_driver fdp1_pdrv = {
+ .probe = fdp1_probe,
+ .remove_new = fdp1_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = fdp1_dt_ids,
+ .pm = &fdp1_pm_ops,
+ },
+};
+
+module_platform_driver(fdp1_pdrv);
+
+MODULE_DESCRIPTION("Renesas R-Car Fine Display Processor Driver");
+MODULE_AUTHOR("Kieran Bingham <kieran@bingham.xyz>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/media/platform/renesas/rcar_jpu.c b/drivers/media/platform/renesas/rcar_jpu.c
new file mode 100644
index 0000000000..fff349e450
--- /dev/null
+++ b/drivers/media/platform/renesas/rcar_jpu.c
@@ -0,0 +1,1752 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: Mikhail Ulyanov
+ * Copyright (C) 2014-2015 Cogent Embedded, Inc. <source@cogentembedded.com>
+ * Copyright (C) 2014-2015 Renesas Electronics Corporation
+ *
+ * This is based on the drivers/media/platform/samsung/s5p-jpeg driver by
+ * Andrzej Pietrasiewicz and Jacek Anaszewski.
+ * Some portions of code inspired by VSP1 driver by Laurent Pinchart.
+ *
+ * TODO in order of priority:
+ * 1) Rotation
+ * 2) Cropping
+ * 3) V4L2_CID_JPEG_ACTIVE_MARKER
+ */
+
+#include <asm/unaligned.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/videodev2.h>
+#include <media/jpeg.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+
+
+#define DRV_NAME "rcar_jpu"
+
+/*
+ * Align the JPEG header end to a cache line to make sure we will not have any
+ * cache issues, in addition to the requirement in 33.3.27 of R01UH0501EJ0100
+ * Rev.1.00.
+ */
+#define JPU_JPEG_HDR_SIZE (ALIGN(0x258, L1_CACHE_BYTES))
+#define JPU_JPEG_MAX_BYTES_PER_PIXEL 2 /* 16 bit precision format */
+#define JPU_JPEG_MIN_SIZE 25 /* SOI + SOF + EOI */
+#define JPU_JPEG_QTBL_SIZE 0x40
+#define JPU_JPEG_HDCTBL_SIZE 0x1c
+#define JPU_JPEG_HACTBL_SIZE 0xb2
+#define JPU_JPEG_HEIGHT_OFFSET 0x91
+#define JPU_JPEG_WIDTH_OFFSET 0x93
+#define JPU_JPEG_SUBS_OFFSET 0x97
+#define JPU_JPEG_QTBL_LUM_OFFSET 0x07
+#define JPU_JPEG_QTBL_CHR_OFFSET 0x4c
+#define JPU_JPEG_HDCTBL_LUM_OFFSET 0xa4
+#define JPU_JPEG_HACTBL_LUM_OFFSET 0xc5
+#define JPU_JPEG_HDCTBL_CHR_OFFSET 0x17c
+#define JPU_JPEG_HACTBL_CHR_OFFSET 0x19d
+#define JPU_JPEG_PADDING_OFFSET 0x24f
+#define JPU_JPEG_LUM 0x00
+#define JPU_JPEG_CHR 0x01
+#define JPU_JPEG_DC 0x00
+#define JPU_JPEG_AC 0x10
+
+#define JPU_JPEG_422 0x21
+#define JPU_JPEG_420 0x22
+
+#define JPU_JPEG_DEFAULT_422_PIX_FMT V4L2_PIX_FMT_NV16M
+#define JPU_JPEG_DEFAULT_420_PIX_FMT V4L2_PIX_FMT_NV12M
+
+#define JPU_RESET_TIMEOUT 100 /* ms */
+#define JPU_JOB_TIMEOUT 300 /* ms */
+#define JPU_MAX_QUALITY 4
+#define JPU_WIDTH_MIN 16
+#define JPU_HEIGHT_MIN 16
+#define JPU_WIDTH_MAX 4096
+#define JPU_HEIGHT_MAX 4096
+#define JPU_MEMALIGN 8
+
+/* Flags that indicate a format can be used for capture/output */
+#define JPU_FMT_TYPE_OUTPUT 0
+#define JPU_FMT_TYPE_CAPTURE 1
+#define JPU_ENC_CAPTURE (1 << 0)
+#define JPU_ENC_OUTPUT (1 << 1)
+#define JPU_DEC_CAPTURE (1 << 2)
+#define JPU_DEC_OUTPUT (1 << 3)
+
+/*
+ * JPEG registers and bits
+ */
+
+/* JPEG code mode register */
+#define JCMOD 0x00
+#define JCMOD_PCTR (1 << 7)
+#define JCMOD_MSKIP_ENABLE (1 << 5)
+#define JCMOD_DSP_ENC (0 << 3)
+#define JCMOD_DSP_DEC (1 << 3)
+#define JCMOD_REDU (7 << 0)
+#define JCMOD_REDU_422 (1 << 0)
+#define JCMOD_REDU_420 (2 << 0)
+
+/* JPEG code command register */
+#define JCCMD 0x04
+#define JCCMD_SRST (1 << 12)
+#define JCCMD_JEND (1 << 2)
+#define JCCMD_JSRT (1 << 0)
+
+/* JPEG code quantization table number register */
+#define JCQTN 0x0c
+#define JCQTN_SHIFT(t) (((t) - 1) << 1)
+
+/* JPEG code Huffman table number register */
+#define JCHTN 0x10
+#define JCHTN_AC_SHIFT(t) (((t) << 1) - 1)
+#define JCHTN_DC_SHIFT(t) (((t) - 1) << 1)
+
+#define JCVSZU 0x1c /* JPEG code vertical size upper register */
+#define JCVSZD 0x20 /* JPEG code vertical size lower register */
+#define JCHSZU 0x24 /* JPEG code horizontal size upper register */
+#define JCHSZD 0x28 /* JPEG code horizontal size lower register */
+#define JCSZ_MASK 0xff /* JPEG code h/v size register contains only 1 byte */
+
+#define JCDTCU 0x2c /* JPEG code data count upper register */
+#define JCDTCM 0x30 /* JPEG code data count middle register */
+#define JCDTCD 0x34 /* JPEG code data count lower register */
+
+/* JPEG interrupt enable register */
+#define JINTE 0x38
+#define JINTE_ERR (7 << 5) /* INT5 + INT6 + INT7 */
+#define JINTE_TRANSF_COMPL (1 << 10)
+
+/* JPEG interrupt status register */
+#define JINTS 0x3c
+#define JINTS_MASK 0x7c68
+#define JINTS_ERR (1 << 5)
+#define JINTS_PROCESS_COMPL (1 << 6)
+#define JINTS_TRANSF_COMPL (1 << 10)
+
+#define JCDERR 0x40 /* JPEG code decode error register */
+#define JCDERR_MASK 0xf /* JPEG code decode error register mask */
+
+/* JPEG interface encoding */
+#define JIFECNT 0x70
+#define JIFECNT_INFT_422 0
+#define JIFECNT_INFT_420 1
+#define JIFECNT_SWAP_WB (3 << 4) /* to JPU */
+
+#define JIFESYA1 0x74 /* encode source Y address register 1 */
+#define JIFESCA1 0x78 /* encode source C address register 1 */
+#define JIFESYA2 0x7c /* encode source Y address register 2 */
+#define JIFESCA2 0x80 /* encode source C address register 2 */
+#define JIFESMW 0x84 /* encode source memory width register */
+#define JIFESVSZ 0x88 /* encode source vertical size register */
+#define JIFESHSZ 0x8c /* encode source horizontal size register */
+#define JIFEDA1 0x90 /* encode destination address register 1 */
+#define JIFEDA2 0x94 /* encode destination address register 2 */
+
+/* JPEG decoding control register */
+#define JIFDCNT 0xa0
+#define JIFDCNT_SWAP_WB (3 << 1) /* from JPU */
+
+#define JIFDSA1 0xa4 /* decode source address register 1 */
+#define JIFDDMW 0xb0 /* decode destination memory width register */
+#define JIFDDVSZ 0xb4 /* decode destination vert. size register */
+#define JIFDDHSZ 0xb8 /* decode destination horiz. size register */
+#define JIFDDYA1 0xbc /* decode destination Y address register 1 */
+#define JIFDDCA1 0xc0 /* decode destination C address register 1 */
+
+#define JCQTBL(n) (0x10000 + (n) * 0x40) /* quantization tables regs */
+#define JCHTBD(n) (0x10100 + (n) * 0x100) /* Huffman table DC regs */
+#define JCHTBA(n) (0x10120 + (n) * 0x100) /* Huffman table AC regs */
+
+/**
+ * struct jpu - JPEG IP abstraction
+ * @mutex: the mutex protecting this structure
+ * @lock: spinlock protecting the device contexts
+ * @v4l2_dev: v4l2 device for mem2mem mode
+ * @vfd_encoder: video device node for encoder mem2mem mode
+ * @vfd_decoder: video device node for decoder mem2mem mode
+ * @m2m_dev: v4l2 mem2mem device data
+ * @curr: pointer to current context
+ * @regs: JPEG IP registers mapping
+ * @irq: JPEG IP irq
+ * @clk: JPEG IP clock
+ * @dev: JPEG IP struct device
+ * @ref_count: reference counter
+ */
+struct jpu {
+ struct mutex mutex;
+ spinlock_t lock;
+ struct v4l2_device v4l2_dev;
+ struct video_device vfd_encoder;
+ struct video_device vfd_decoder;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct jpu_ctx *curr;
+
+ void __iomem *regs;
+ unsigned int irq;
+ struct clk *clk;
+ struct device *dev;
+ int ref_count;
+};
+
+/**
+ * struct jpu_buffer - driver's specific video buffer
+ * @buf: m2m buffer
+ * @compr_quality: destination image quality in compression mode
+ * @subsampling: source image subsampling in decompression mode
+ */
+struct jpu_buffer {
+ struct v4l2_m2m_buffer buf;
+ unsigned short compr_quality;
+ unsigned char subsampling;
+};
+
+/**
+ * struct jpu_fmt - driver's internal format data
+ * @fourcc: the fourcc code, 0 if not applicable
+ * @colorspace: the colorspace specifier
+ * @bpp: number of bits per pixel per plane
+ * @h_align: horizontal alignment order (align to 2^h_align)
+ * @v_align: vertical alignment order (align to 2^v_align)
+ * @subsampling: (horizontal:4 | vertical:4) subsampling factor
+ * @num_planes: number of planes
+ * @types: types of queue this format is applicable to
+ */
+struct jpu_fmt {
+ u32 fourcc;
+ u32 colorspace;
+ u8 bpp[2];
+ u8 h_align;
+ u8 v_align;
+ u8 subsampling;
+ u8 num_planes;
+ u16 types;
+};
+
+/**
+ * struct jpu_q_data - parameters of one queue
+ * @fmtinfo: driver-specific format of this queue
+ * @format: multiplanar format of this queue
+ * @sequence: sequence number
+ */
+struct jpu_q_data {
+ struct jpu_fmt *fmtinfo;
+ struct v4l2_pix_format_mplane format;
+ unsigned int sequence;
+};
+
+/**
+ * struct jpu_ctx - the device context data
+ * @jpu: JPEG IP device for this context
+ * @encoder: compression (encode) operation or decompression (decode)
+ * @compr_quality: destination image quality in compression (encode) mode
+ * @out_q: source (output) queue information
+ * @cap_q: destination (capture) queue information
+ * @fh: file handler
+ * @ctrl_handler: controls handler
+ */
+struct jpu_ctx {
+ struct jpu *jpu;
+ bool encoder;
+ unsigned short compr_quality;
+ struct jpu_q_data out_q;
+ struct jpu_q_data cap_q;
+ struct v4l2_fh fh;
+ struct v4l2_ctrl_handler ctrl_handler;
+};
+
+/**
+ * struct jpeg_buffer - description of memory containing input JPEG data
+ * @end: end position in the buffer
+ * @curr: current position in the buffer
+ */
+struct jpeg_buffer {
+ void *end;
+ void *curr;
+};
+
+static struct jpu_fmt jpu_formats[] = {
+ { V4L2_PIX_FMT_JPEG, V4L2_COLORSPACE_JPEG,
+ {0, 0}, 0, 0, 0, 1, JPU_ENC_CAPTURE | JPU_DEC_OUTPUT },
+ { V4L2_PIX_FMT_NV16M, V4L2_COLORSPACE_SRGB,
+ {8, 8}, 2, 2, JPU_JPEG_422, 2, JPU_ENC_OUTPUT | JPU_DEC_CAPTURE },
+ { V4L2_PIX_FMT_NV12M, V4L2_COLORSPACE_SRGB,
+ {8, 4}, 2, 2, JPU_JPEG_420, 2, JPU_ENC_OUTPUT | JPU_DEC_CAPTURE },
+ { V4L2_PIX_FMT_NV16, V4L2_COLORSPACE_SRGB,
+ {16, 0}, 2, 2, JPU_JPEG_422, 1, JPU_ENC_OUTPUT | JPU_DEC_CAPTURE },
+ { V4L2_PIX_FMT_NV12, V4L2_COLORSPACE_SRGB,
+ {12, 0}, 2, 2, JPU_JPEG_420, 1, JPU_ENC_OUTPUT | JPU_DEC_CAPTURE },
+};
+
+static const u8 zigzag[] = {
+ 0x03, 0x02, 0x0b, 0x13, 0x0a, 0x01, 0x00, 0x09,
+ 0x12, 0x1b, 0x23, 0x1a, 0x11, 0x08, 0x07, 0x06,
+ 0x0f, 0x10, 0x19, 0x22, 0x2b, 0x33, 0x2a, 0x21,
+ 0x18, 0x17, 0x0e, 0x05, 0x04, 0x0d, 0x16, 0x1f,
+ 0x20, 0x29, 0x32, 0x3b, 0x3a, 0x31, 0x28, 0x27,
+ 0x1e, 0x15, 0x0e, 0x14, 0x10, 0x26, 0x2f, 0x30,
+ 0x39, 0x38, 0x37, 0x2e, 0x25, 0x1c, 0x24, 0x2b,
+ 0x36, 0x3f, 0x3e, 0x35, 0x2c, 0x34, 0x3d, 0x3c
+};
+
+#define QTBL_SIZE (ALIGN(JPU_JPEG_QTBL_SIZE, \
+ sizeof(unsigned int)) / sizeof(unsigned int))
+#define HDCTBL_SIZE (ALIGN(JPU_JPEG_HDCTBL_SIZE, \
+ sizeof(unsigned int)) / sizeof(unsigned int))
+#define HACTBL_SIZE (ALIGN(JPU_JPEG_HACTBL_SIZE, \
+ sizeof(unsigned int)) / sizeof(unsigned int))
+/*
+ * Start of image; Quantization tables
+ * SOF0 (17 bytes payload) is Baseline DCT - Sample precision, height, width,
+ * Number of image components, (Ci:8 - Hi:4 - Vi:4 - Tq:8) * 3 - Y,Cb,Cr;
+ * Huffman tables; Padding with 0xff (33.3.27 R01UH0501EJ0100 Rev.1.00)
+ */
+#define JPU_JPEG_HDR_BLOB { \
+ 0xff, JPEG_MARKER_SOI, 0xff, JPEG_MARKER_DQT, 0x00, \
+ JPU_JPEG_QTBL_SIZE + 0x3, JPU_JPEG_LUM, \
+ [JPU_JPEG_QTBL_LUM_OFFSET ... \
+ JPU_JPEG_QTBL_LUM_OFFSET + JPU_JPEG_QTBL_SIZE - 1] = 0x00, \
+ 0xff, JPEG_MARKER_DQT, 0x00, JPU_JPEG_QTBL_SIZE + 0x3, JPU_JPEG_CHR, \
+ [JPU_JPEG_QTBL_CHR_OFFSET ... JPU_JPEG_QTBL_CHR_OFFSET + \
+ JPU_JPEG_QTBL_SIZE - 1] = 0x00, \
+ 0xff, JPEG_MARKER_SOF0, 0x00, 0x11, 0x08, \
+ [JPU_JPEG_HEIGHT_OFFSET ... JPU_JPEG_HEIGHT_OFFSET + 1] = 0x00, \
+ [JPU_JPEG_WIDTH_OFFSET ... JPU_JPEG_WIDTH_OFFSET + 1] = 0x00, \
+ 0x03, 0x01, [JPU_JPEG_SUBS_OFFSET] = 0x00, JPU_JPEG_LUM, \
+ 0x02, 0x11, JPU_JPEG_CHR, 0x03, 0x11, JPU_JPEG_CHR, \
+ 0xff, JPEG_MARKER_DHT, 0x00, JPU_JPEG_HDCTBL_SIZE + 0x3, \
+ JPU_JPEG_LUM | JPU_JPEG_DC, \
+ [JPU_JPEG_HDCTBL_LUM_OFFSET ... \
+ JPU_JPEG_HDCTBL_LUM_OFFSET + JPU_JPEG_HDCTBL_SIZE - 1] = 0x00, \
+ 0xff, JPEG_MARKER_DHT, 0x00, JPU_JPEG_HACTBL_SIZE + 0x3, \
+ JPU_JPEG_LUM | JPU_JPEG_AC, \
+ [JPU_JPEG_HACTBL_LUM_OFFSET ... \
+ JPU_JPEG_HACTBL_LUM_OFFSET + JPU_JPEG_HACTBL_SIZE - 1] = 0x00, \
+ 0xff, JPEG_MARKER_DHT, 0x00, JPU_JPEG_HDCTBL_SIZE + 0x3, \
+ JPU_JPEG_CHR | JPU_JPEG_DC, \
+ [JPU_JPEG_HDCTBL_CHR_OFFSET ... \
+ JPU_JPEG_HDCTBL_CHR_OFFSET + JPU_JPEG_HDCTBL_SIZE - 1] = 0x00, \
+ 0xff, JPEG_MARKER_DHT, 0x00, JPU_JPEG_HACTBL_SIZE + 0x3, \
+ JPU_JPEG_CHR | JPU_JPEG_AC, \
+ [JPU_JPEG_HACTBL_CHR_OFFSET ... \
+ JPU_JPEG_HACTBL_CHR_OFFSET + JPU_JPEG_HACTBL_SIZE - 1] = 0x00, \
+ [JPU_JPEG_PADDING_OFFSET ... JPU_JPEG_HDR_SIZE - 1] = 0xff \
+}
+
+static unsigned char jpeg_hdrs[JPU_MAX_QUALITY][JPU_JPEG_HDR_SIZE] = {
+ [0 ... JPU_MAX_QUALITY - 1] = JPU_JPEG_HDR_BLOB
+};
+
+static const unsigned int qtbl_lum[JPU_MAX_QUALITY][QTBL_SIZE] = {
+ {
+ 0x14101927, 0x322e3e44, 0x10121726, 0x26354144,
+ 0x19171f26, 0x35414444, 0x27262635, 0x41444444,
+ 0x32263541, 0x44444444, 0x2e354144, 0x44444444,
+ 0x3e414444, 0x44444444, 0x44444444, 0x44444444
+ },
+ {
+ 0x100b0b10, 0x171b1f1e, 0x0b0c0c0f, 0x1417171e,
+ 0x0b0c0d10, 0x171a232f, 0x100f1017, 0x1a252f40,
+ 0x1714171a, 0x27334040, 0x1b171a25, 0x33404040,
+ 0x1f17232f, 0x40404040, 0x1e1e2f40, 0x40404040
+ },
+ {
+ 0x0c08080c, 0x11151817, 0x0809090b, 0x0f131217,
+ 0x08090a0c, 0x13141b24, 0x0c0b0c15, 0x141c2435,
+ 0x110f1314, 0x1e27333b, 0x1513141c, 0x27333b3b,
+ 0x18121b24, 0x333b3b3b, 0x17172435, 0x3b3b3b3b
+ },
+ {
+ 0x08060608, 0x0c0e1011, 0x06060608, 0x0a0d0c0f,
+ 0x06060708, 0x0d0e1218, 0x0808080e, 0x0d131823,
+ 0x0c0a0d0d, 0x141a2227, 0x0e0d0e13, 0x1a222727,
+ 0x100c1318, 0x22272727, 0x110f1823, 0x27272727
+ }
+};
+
+static const unsigned int qtbl_chr[JPU_MAX_QUALITY][QTBL_SIZE] = {
+ {
+ 0x15192026, 0x36444444, 0x191c1826, 0x36444444,
+ 0x2018202b, 0x42444444, 0x26262b35, 0x44444444,
+ 0x36424444, 0x44444444, 0x44444444, 0x44444444,
+ 0x44444444, 0x44444444, 0x44444444, 0x44444444
+ },
+ {
+ 0x110f1115, 0x141a2630, 0x0f131211, 0x141a232b,
+ 0x11121416, 0x1a1e2e35, 0x1511161c, 0x1e273540,
+ 0x14141a1e, 0x27304040, 0x1a1a1e27, 0x303f4040,
+ 0x26232e35, 0x40404040, 0x302b3540, 0x40404040
+ },
+ {
+ 0x0d0b0d10, 0x14141d25, 0x0b0e0e0e, 0x10141a20,
+ 0x0d0e0f11, 0x14172328, 0x100e1115, 0x171e2832,
+ 0x14101417, 0x1e25323b, 0x1414171e, 0x25303b3b,
+ 0x1d1a2328, 0x323b3b3b, 0x25202832, 0x3b3b3b3b
+ },
+ {
+ 0x0908090b, 0x0e111318, 0x080a090b, 0x0e0d1116,
+ 0x09090d0e, 0x0d0f171a, 0x0b0b0e0e, 0x0f141a21,
+ 0x0e0e0d0f, 0x14182127, 0x110d0f14, 0x18202727,
+ 0x1311171a, 0x21272727, 0x18161a21, 0x27272727
+ }
+};
+
+static const unsigned int hdctbl_lum[HDCTBL_SIZE] = {
+ 0x00010501, 0x01010101, 0x01000000, 0x00000000,
+ 0x00010203, 0x04050607, 0x08090a0b
+};
+
+static const unsigned int hdctbl_chr[HDCTBL_SIZE] = {
+ 0x00010501, 0x01010101, 0x01000000, 0x00000000,
+ 0x00010203, 0x04050607, 0x08090a0b
+};
+
+static const unsigned int hactbl_lum[HACTBL_SIZE] = {
+ 0x00020103, 0x03020403, 0x05050404, 0x0000017d, 0x01020300, 0x04110512,
+ 0x21314106, 0x13516107, 0x22711432, 0x8191a108, 0x2342b1c1, 0x1552d1f0,
+ 0x24336272, 0x82090a16, 0x1718191a, 0x25262728, 0x292a3435, 0x36373839,
+ 0x3a434445, 0x46474849, 0x4a535455, 0x56575859, 0x5a636465, 0x66676869,
+ 0x6a737475, 0x76777879, 0x7a838485, 0x86878889, 0x8a929394, 0x95969798,
+ 0x999aa2a3, 0xa4a5a6a7, 0xa8a9aab2, 0xb3b4b5b6, 0xb7b8b9ba, 0xc2c3c4c5,
+ 0xc6c7c8c9, 0xcad2d3d4, 0xd5d6d7d8, 0xd9dae1e2, 0xe3e4e5e6, 0xe7e8e9ea,
+ 0xf1f2f3f4, 0xf5f6f7f8, 0xf9fa0000
+};
+
+static const unsigned int hactbl_chr[HACTBL_SIZE] = {
+ 0x00020103, 0x03020403, 0x05050404, 0x0000017d, 0x01020300, 0x04110512,
+ 0x21314106, 0x13516107, 0x22711432, 0x8191a108, 0x2342b1c1, 0x1552d1f0,
+ 0x24336272, 0x82090a16, 0x1718191a, 0x25262728, 0x292a3435, 0x36373839,
+ 0x3a434445, 0x46474849, 0x4a535455, 0x56575859, 0x5a636465, 0x66676869,
+ 0x6a737475, 0x76777879, 0x7a838485, 0x86878889, 0x8a929394, 0x95969798,
+ 0x999aa2a3, 0xa4a5a6a7, 0xa8a9aab2, 0xb3b4b5b6, 0xb7b8b9ba, 0xc2c3c4c5,
+ 0xc6c7c8c9, 0xcad2d3d4, 0xd5d6d7d8, 0xd9dae1e2, 0xe3e4e5e6, 0xe7e8e9ea,
+ 0xf1f2f3f4, 0xf5f6f7f8, 0xf9fa0000
+};
+
+static const char *error_to_text[16] = {
+ "Normal",
+ "SOI not detected",
+ "SOF1 to SOFF detected",
+ "Subsampling not detected",
+ "SOF accuracy error",
+ "DQT accuracy error",
+ "Component error 1",
+ "Component error 2",
+ "SOF0, DQT, and DHT not detected when SOS detected",
+ "SOS not detected",
+ "EOI not detected",
+ "Restart interval data number error detected",
+ "Image size error",
+ "Last MCU data number error",
+ "Block data number error",
+ "Unknown"
+};
+
+static struct jpu_buffer *vb2_to_jpu_buffer(struct vb2_v4l2_buffer *vb)
+{
+ struct v4l2_m2m_buffer *b =
+ container_of(vb, struct v4l2_m2m_buffer, vb);
+
+ return container_of(b, struct jpu_buffer, buf);
+}
+
+static u32 jpu_read(struct jpu *jpu, unsigned int reg)
+{
+ return ioread32(jpu->regs + reg);
+}
+
+static void jpu_write(struct jpu *jpu, u32 val, unsigned int reg)
+{
+ iowrite32(val, jpu->regs + reg);
+}
+
+static struct jpu_ctx *ctrl_to_ctx(struct v4l2_ctrl *c)
+{
+ return container_of(c->handler, struct jpu_ctx, ctrl_handler);
+}
+
+static struct jpu_ctx *fh_to_ctx(struct v4l2_fh *fh)
+{
+ return container_of(fh, struct jpu_ctx, fh);
+}
+
+static void jpu_set_tbl(struct jpu *jpu, u32 reg, const unsigned int *tbl,
+ unsigned int len)
+{
+ unsigned int i;
+
+ for (i = 0; i < len; i++)
+ jpu_write(jpu, tbl[i], reg + (i << 2));
+}
+
+static void jpu_set_qtbl(struct jpu *jpu, unsigned short quality)
+{
+ jpu_set_tbl(jpu, JCQTBL(0), qtbl_lum[quality], QTBL_SIZE);
+ jpu_set_tbl(jpu, JCQTBL(1), qtbl_chr[quality], QTBL_SIZE);
+}
+
+static void jpu_set_htbl(struct jpu *jpu)
+{
+ jpu_set_tbl(jpu, JCHTBD(0), hdctbl_lum, HDCTBL_SIZE);
+ jpu_set_tbl(jpu, JCHTBA(0), hactbl_lum, HACTBL_SIZE);
+ jpu_set_tbl(jpu, JCHTBD(1), hdctbl_chr, HDCTBL_SIZE);
+ jpu_set_tbl(jpu, JCHTBA(1), hactbl_chr, HACTBL_SIZE);
+}
+
+static int jpu_wait_reset(struct jpu *jpu)
+{
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(JPU_RESET_TIMEOUT);
+
+ while (jpu_read(jpu, JCCMD) & JCCMD_SRST) {
+ if (time_after(jiffies, timeout)) {
+ dev_err(jpu->dev, "timed out in reset\n");
+ return -ETIMEDOUT;
+ }
+ schedule();
+ }
+
+ return 0;
+}
+
+static int jpu_reset(struct jpu *jpu)
+{
+ jpu_write(jpu, JCCMD_SRST, JCCMD);
+ return jpu_wait_reset(jpu);
+}
+
+/*
+ * ============================================================================
+ * video ioctl operations
+ * ============================================================================
+ */
+static void put_qtbl(u8 *p, const u8 *qtbl)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(zigzag); i++)
+ p[i] = *(qtbl + zigzag[i]);
+}
+
+static void put_htbl(u8 *p, const u8 *htbl, unsigned int len)
+{
+ unsigned int i, j;
+
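+ /*
+ * The Huffman tables are stored as 32-bit words for register writes;
+ * reverse the bytes within each word to match the JPEG stream order.
+ */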
+ for (i = 0; i < len; i += 4)
+ for (j = 0; j < 4 && (i + j) < len; ++j)
+ p[i + j] = htbl[i + 3 - j];
+}
+
+static void jpu_generate_hdr(unsigned short quality, unsigned char *p)
+{
+ put_qtbl(p + JPU_JPEG_QTBL_LUM_OFFSET, (const u8 *)qtbl_lum[quality]);
+ put_qtbl(p + JPU_JPEG_QTBL_CHR_OFFSET, (const u8 *)qtbl_chr[quality]);
+
+ put_htbl(p + JPU_JPEG_HDCTBL_LUM_OFFSET, (const u8 *)hdctbl_lum,
+ JPU_JPEG_HDCTBL_SIZE);
+ put_htbl(p + JPU_JPEG_HACTBL_LUM_OFFSET, (const u8 *)hactbl_lum,
+ JPU_JPEG_HACTBL_SIZE);
+
+ put_htbl(p + JPU_JPEG_HDCTBL_CHR_OFFSET, (const u8 *)hdctbl_chr,
+ JPU_JPEG_HDCTBL_SIZE);
+ put_htbl(p + JPU_JPEG_HACTBL_CHR_OFFSET, (const u8 *)hactbl_chr,
+ JPU_JPEG_HACTBL_SIZE);
+}
+
+static int get_byte(struct jpeg_buffer *buf)
+{
+ if (buf->curr >= buf->end)
+ return -1;
+
+ return *(u8 *)buf->curr++;
+}
+
+static int get_word_be(struct jpeg_buffer *buf, unsigned int *word)
+{
+ if (buf->end - buf->curr < 2)
+ return -1;
+
+ *word = get_unaligned_be16(buf->curr);
+ buf->curr += 2;
+
+ return 0;
+}
+
+static void skip(struct jpeg_buffer *buf, unsigned long len)
+{
+ buf->curr += min((unsigned long)(buf->end - buf->curr), len);
+}
+
+static u8 jpu_parse_hdr(void *buffer, unsigned long size, unsigned int *width,
+ unsigned int *height)
+{
+ struct jpeg_buffer jpeg_buffer;
+ unsigned int word;
+ bool soi = false;
+
+ jpeg_buffer.end = buffer + size;
+ jpeg_buffer.curr = buffer;
+
+ /*
+ * Basic size and EOI check - we don't want to let the JPU cross
+ * buffer bounds in any case, and we rely on it stopping at EOI.
+ */
+ if (size < JPU_JPEG_MIN_SIZE ||
+ *(u8 *)(buffer + size - 1) != JPEG_MARKER_EOI)
+ return 0;
+
+ for (;;) {
+ int c;
+
+ /* skip preceding filler bytes */
+ do
+ c = get_byte(&jpeg_buffer);
+ while (c == 0xff || c == 0);
+
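+ /* The first marker must be SOI, and SOI must not appear twice. */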
+ if (!soi && c == JPEG_MARKER_SOI) {
+ soi = true;
+ continue;
+ } else if (soi != (c != JPEG_MARKER_SOI))
+ return 0;
+
+ switch (c) {
+ case JPEG_MARKER_SOF0: /* SOF0: baseline JPEG */
+ skip(&jpeg_buffer, 3); /* segment length and bpp */
+ if (get_word_be(&jpeg_buffer, height) ||
+ get_word_be(&jpeg_buffer, width) ||
+ get_byte(&jpeg_buffer) != 3) /* YCbCr only */
+ return 0;
+
+ skip(&jpeg_buffer, 1);
+ return get_byte(&jpeg_buffer);
+ case JPEG_MARKER_DHT:
+ case JPEG_MARKER_DQT:
+ case JPEG_MARKER_COM:
+ case JPEG_MARKER_DRI:
+ case JPEG_MARKER_APP0 ... JPEG_MARKER_APP0 + 0x0f:
+ if (get_word_be(&jpeg_buffer, &word))
+ return 0;
+ skip(&jpeg_buffer, (long)word - 2);
+ break;
+ case 0:
+ break;
+ default:
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+static int jpu_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct jpu_ctx *ctx = fh_to_ctx(priv);
+
+ if (ctx->encoder)
+ strscpy(cap->card, DRV_NAME " encoder", sizeof(cap->card));
+ else
+ strscpy(cap->card, DRV_NAME " decoder", sizeof(cap->card));
+
+ strscpy(cap->driver, DRV_NAME, sizeof(cap->driver));
+ memset(cap->reserved, 0, sizeof(cap->reserved));
+
+ return 0;
+}
+
+static struct jpu_fmt *jpu_find_format(bool encoder, u32 pixelformat,
+ unsigned int fmt_type)
+{
+ unsigned int i, fmt_flag;
+
+ if (encoder)
+ fmt_flag = fmt_type == JPU_FMT_TYPE_OUTPUT ? JPU_ENC_OUTPUT :
+ JPU_ENC_CAPTURE;
+ else
+ fmt_flag = fmt_type == JPU_FMT_TYPE_OUTPUT ? JPU_DEC_OUTPUT :
+ JPU_DEC_CAPTURE;
+
+ for (i = 0; i < ARRAY_SIZE(jpu_formats); i++) {
+ struct jpu_fmt *fmt = &jpu_formats[i];
+
+ if (fmt->fourcc == pixelformat && fmt->types & fmt_flag)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+static int jpu_enum_fmt(struct v4l2_fmtdesc *f, u32 type)
+{
+ unsigned int i, num = 0;
+
+ for (i = 0; i < ARRAY_SIZE(jpu_formats); ++i) {
+ if (jpu_formats[i].types & type) {
+ if (num == f->index)
+ break;
+ ++num;
+ }
+ }
+
+ if (i >= ARRAY_SIZE(jpu_formats))
+ return -EINVAL;
+
+ f->pixelformat = jpu_formats[i].fourcc;
+
+ return 0;
+}
+
+static int jpu_enum_fmt_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct jpu_ctx *ctx = fh_to_ctx(priv);
+
+ return jpu_enum_fmt(f, ctx->encoder ? JPU_ENC_CAPTURE :
+ JPU_DEC_CAPTURE);
+}
+
+static int jpu_enum_fmt_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct jpu_ctx *ctx = fh_to_ctx(priv);
+
+ return jpu_enum_fmt(f, ctx->encoder ? JPU_ENC_OUTPUT : JPU_DEC_OUTPUT);
+}
+
+static struct jpu_q_data *jpu_get_q_data(struct jpu_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return &ctx->out_q;
+ else
+ return &ctx->cap_q;
+}
+
+static void jpu_bound_align_image(u32 *w, unsigned int w_min,
+ unsigned int w_max, unsigned int w_align,
+ u32 *h, unsigned int h_min,
+ unsigned int h_max, unsigned int h_align)
+{
+ unsigned int width, height, w_step, h_step;
+
+ width = *w;
+ height = *h;
+
+ w_step = 1U << w_align;
+ h_step = 1U << h_align;
+ v4l_bound_align_image(w, w_min, w_max, w_align, h, h_min, h_max,
+ h_align, 3);
+
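+ /* Alignment rounds down; round back up when that stays within range. */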
+ if (*w < width && *w + w_step < w_max)
+ *w += w_step;
+ if (*h < height && *h + h_step < h_max)
+ *h += h_step;
+}
+
+static int __jpu_try_fmt(struct jpu_ctx *ctx, struct jpu_fmt **fmtinfo,
+ struct v4l2_pix_format_mplane *pix,
+ enum v4l2_buf_type type)
+{
+ struct jpu_fmt *fmt;
+ unsigned int f_type, w, h;
+
+ f_type = V4L2_TYPE_IS_OUTPUT(type) ? JPU_FMT_TYPE_OUTPUT :
+ JPU_FMT_TYPE_CAPTURE;
+
+ fmt = jpu_find_format(ctx->encoder, pix->pixelformat, f_type);
+ if (!fmt) {
+ unsigned int pixelformat;
+
+ dev_dbg(ctx->jpu->dev, "unknown format; set default format\n");
+ if (ctx->encoder)
+ pixelformat = f_type == JPU_FMT_TYPE_OUTPUT ?
+ V4L2_PIX_FMT_NV16M : V4L2_PIX_FMT_JPEG;
+ else
+ pixelformat = f_type == JPU_FMT_TYPE_CAPTURE ?
+ V4L2_PIX_FMT_NV16M : V4L2_PIX_FMT_JPEG;
+ fmt = jpu_find_format(ctx->encoder, pixelformat, f_type);
+ }
+
+ pix->pixelformat = fmt->fourcc;
+ pix->colorspace = fmt->colorspace;
+ pix->field = V4L2_FIELD_NONE;
+ pix->num_planes = fmt->num_planes;
+
+ jpu_bound_align_image(&pix->width, JPU_WIDTH_MIN, JPU_WIDTH_MAX,
+ fmt->h_align, &pix->height, JPU_HEIGHT_MIN,
+ JPU_HEIGHT_MAX, fmt->v_align);
+
+ w = pix->width;
+ h = pix->height;
+
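+ /*
+ * For JPEG, size the single plane for the worst case: the header
+ * plus up to two bytes per pixel. For raw formats, clamp and align
+ * the line stride instead.
+ */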
+ if (fmt->fourcc == V4L2_PIX_FMT_JPEG) {
+ /* ignore userspace's sizeimage for encoding */
+ if (pix->plane_fmt[0].sizeimage <= 0 || ctx->encoder)
+ pix->plane_fmt[0].sizeimage = JPU_JPEG_HDR_SIZE +
+ (JPU_JPEG_MAX_BYTES_PER_PIXEL * w * h);
+ pix->plane_fmt[0].bytesperline = 0;
+ } else {
+ unsigned int i, bpl = 0;
+
+ for (i = 0; i < pix->num_planes; ++i)
+ bpl = max(bpl, pix->plane_fmt[i].bytesperline);
+
+ bpl = clamp_t(unsigned int, bpl, w, JPU_WIDTH_MAX);
+ bpl = round_up(bpl, JPU_MEMALIGN);
+
+ for (i = 0; i < pix->num_planes; ++i) {
+ pix->plane_fmt[i].bytesperline = bpl;
+ pix->plane_fmt[i].sizeimage = bpl * h * fmt->bpp[i] / 8;
+ }
+ }
+
+ if (fmtinfo)
+ *fmtinfo = fmt;
+
+ return 0;
+}
+
+static int jpu_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct jpu_ctx *ctx = fh_to_ctx(priv);
+
+ if (!v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type))
+ return -EINVAL;
+
+ return __jpu_try_fmt(ctx, NULL, &f->fmt.pix_mp, f->type);
+}
+
+static int jpu_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct vb2_queue *vq;
+ struct jpu_ctx *ctx = fh_to_ctx(priv);
+ struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
+ struct jpu_fmt *fmtinfo;
+ struct jpu_q_data *q_data;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ if (vb2_is_busy(vq)) {
+ v4l2_err(&ctx->jpu->v4l2_dev, "%s queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ ret = __jpu_try_fmt(ctx, &fmtinfo, &f->fmt.pix_mp, f->type);
+ if (ret < 0)
+ return ret;
+
+ q_data = jpu_get_q_data(ctx, f->type);
+
+ q_data->format = f->fmt.pix_mp;
+ q_data->fmtinfo = fmtinfo;
+
+ return 0;
+}
+
+static int jpu_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct jpu_q_data *q_data;
+ struct jpu_ctx *ctx = fh_to_ctx(priv);
+
+ if (!v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type))
+ return -EINVAL;
+
+ q_data = jpu_get_q_data(ctx, f->type);
+ f->fmt.pix_mp = q_data->format;
+
+ return 0;
+}
+
+/*
+ * V4L2 controls
+ */
+static int jpu_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct jpu_ctx *ctx = ctrl_to_ctx(ctrl);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->jpu->lock, flags);
+ if (ctrl->id == V4L2_CID_JPEG_COMPRESSION_QUALITY)
+ ctx->compr_quality = ctrl->val;
+ spin_unlock_irqrestore(&ctx->jpu->lock, flags);
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops jpu_ctrl_ops = {
+ .s_ctrl = jpu_s_ctrl,
+};
+
+static int jpu_streamon(struct file *file, void *priv, enum v4l2_buf_type type)
+{
+ struct jpu_ctx *ctx = fh_to_ctx(priv);
+ struct jpu_q_data *src_q_data, *dst_q_data, *orig, adj, *ref;
+ enum v4l2_buf_type adj_type;
+
+ src_q_data = jpu_get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ dst_q_data = jpu_get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+
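+ /*
+ * Both queues must agree on the frame size: copy the reference
+ * queue dimensions into a copy of the raw-side format, run it
+ * through try_fmt, and fail if the result differs.
+ */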
+ if (ctx->encoder) {
+ adj = *src_q_data;
+ orig = src_q_data;
+ ref = dst_q_data;
+ adj_type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ } else {
+ adj = *dst_q_data;
+ orig = dst_q_data;
+ ref = src_q_data;
+ adj_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ }
+
+ adj.format.width = ref->format.width;
+ adj.format.height = ref->format.height;
+
+ __jpu_try_fmt(ctx, NULL, &adj.format, adj_type);
+
+ if (adj.format.width != orig->format.width ||
+ adj.format.height != orig->format.height) {
+ dev_err(ctx->jpu->dev, "src and dst formats do not match.\n");
+ /* maybe we can return -EPIPE here? */
+ return -EINVAL;
+ }
+
+ return v4l2_m2m_streamon(file, ctx->fh.m2m_ctx, type);
+}
+
+static const struct v4l2_ioctl_ops jpu_ioctl_ops = {
+ .vidioc_querycap = jpu_querycap,
+
+ .vidioc_enum_fmt_vid_cap = jpu_enum_fmt_cap,
+ .vidioc_enum_fmt_vid_out = jpu_enum_fmt_out,
+ .vidioc_g_fmt_vid_cap_mplane = jpu_g_fmt,
+ .vidioc_g_fmt_vid_out_mplane = jpu_g_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = jpu_try_fmt,
+ .vidioc_try_fmt_vid_out_mplane = jpu_try_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = jpu_s_fmt,
+ .vidioc_s_fmt_vid_out_mplane = jpu_s_fmt,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_streamon = jpu_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe
+};
+
+static int jpu_controls_create(struct jpu_ctx *ctx)
+{
+ struct v4l2_ctrl *ctrl;
+ int ret;
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, 1);
+
+ ctrl = v4l2_ctrl_new_std(&ctx->ctrl_handler, &jpu_ctrl_ops,
+ V4L2_CID_JPEG_COMPRESSION_QUALITY,
+ 0, JPU_MAX_QUALITY - 1, 1, 0);
+
+ if (ctx->ctrl_handler.error) {
+ ret = ctx->ctrl_handler.error;
+ goto error_free;
+ }
+
+ if (!ctx->encoder)
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE |
+ V4L2_CTRL_FLAG_READ_ONLY;
+
+ ret = v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+ if (ret < 0)
+ goto error_free;
+
+ return 0;
+
+error_free:
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ return ret;
+}
+
+/*
+ * ============================================================================
+ * Queue operations
+ * ============================================================================
+ */
+static int jpu_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct jpu_ctx *ctx = vb2_get_drv_priv(vq);
+ struct jpu_q_data *q_data;
+ unsigned int i;
+
+ q_data = jpu_get_q_data(ctx, vq->type);
+
+ if (*nplanes) {
+ if (*nplanes != q_data->format.num_planes)
+ return -EINVAL;
+
+ for (i = 0; i < *nplanes; i++) {
+ unsigned int q_size = q_data->format.plane_fmt[i].sizeimage;
+
+ if (sizes[i] < q_size)
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ *nplanes = q_data->format.num_planes;
+
+ for (i = 0; i < *nplanes; i++)
+ sizes[i] = q_data->format.plane_fmt[i].sizeimage;
+
+ return 0;
+}
+
+static int jpu_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct jpu_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct jpu_q_data *q_data;
+ unsigned int i;
+
+ q_data = jpu_get_q_data(ctx, vb->vb2_queue->type);
+
+ if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+ if (vbuf->field == V4L2_FIELD_ANY)
+ vbuf->field = V4L2_FIELD_NONE;
+ if (vbuf->field != V4L2_FIELD_NONE) {
+ dev_err(ctx->jpu->dev, "%s field isn't supported\n",
+ __func__);
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < q_data->format.num_planes; i++) {
+ unsigned long size = q_data->format.plane_fmt[i].sizeimage;
+
+ if (vb2_plane_size(vb, i) < size) {
+ dev_err(ctx->jpu->dev,
+ "%s: data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, i), size);
+ return -EINVAL;
+ }
+
+ /* decoder capture queue */
+ if (!ctx->encoder && V4L2_TYPE_IS_CAPTURE(vb->vb2_queue->type))
+ vb2_set_plane_payload(vb, i, size);
+ }
+
+ return 0;
+}
+
+static void jpu_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct jpu_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ if (!ctx->encoder && V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+ struct jpu_buffer *jpu_buf = vb2_to_jpu_buffer(vbuf);
+ struct jpu_q_data *q_data, adjust;
+ void *buffer = vb2_plane_vaddr(vb, 0);
+ unsigned long buf_size = vb2_get_plane_payload(vb, 0);
+ unsigned int width, height;
+
+ u8 subsampling = jpu_parse_hdr(buffer, buf_size, &width,
+ &height);
+
+ /* check if JPEG data basic parsing was successful */
+ if (subsampling != JPU_JPEG_422 && subsampling != JPU_JPEG_420)
+ goto format_error;
+
+ q_data = &ctx->out_q;
+
+ adjust = *q_data;
+ adjust.format.width = width;
+ adjust.format.height = height;
+
+ __jpu_try_fmt(ctx, &adjust.fmtinfo, &adjust.format,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+ if (adjust.format.width != q_data->format.width ||
+ adjust.format.height != q_data->format.height)
+ goto format_error;
+
+ /*
+ * keep subsampling in buffer to check it
+ * for compatibility in device_run
+ */
+ jpu_buf->subsampling = subsampling;
+ }
+
+ if (ctx->fh.m2m_ctx)
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+
+ return;
+
+format_error:
+ dev_err(ctx->jpu->dev, "incompatible or corrupted JPEG data\n");
+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+}
+
+static void jpu_buf_finish(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct jpu_buffer *jpu_buf = vb2_to_jpu_buffer(vbuf);
+ struct jpu_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct jpu_q_data *q_data = &ctx->out_q;
+ enum v4l2_buf_type type = vb->vb2_queue->type;
+ u8 *buffer;
+
+ if (vb->state == VB2_BUF_STATE_DONE)
+ vbuf->sequence = jpu_get_q_data(ctx, type)->sequence++;
+
+ if (!ctx->encoder || vb->state != VB2_BUF_STATE_DONE ||
+ V4L2_TYPE_IS_OUTPUT(type))
+ return;
+
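+ /*
+ * Encoder capture buffer: prepend the pre-generated JPEG header and
+ * patch its image size and subsampling fields for this frame.
+ */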
+ buffer = vb2_plane_vaddr(vb, 0);
+
+ memcpy(buffer, jpeg_hdrs[jpu_buf->compr_quality], JPU_JPEG_HDR_SIZE);
+ *(__be16 *)(buffer + JPU_JPEG_HEIGHT_OFFSET) =
+ cpu_to_be16(q_data->format.height);
+ *(__be16 *)(buffer + JPU_JPEG_WIDTH_OFFSET) =
+ cpu_to_be16(q_data->format.width);
+ *(buffer + JPU_JPEG_SUBS_OFFSET) = q_data->fmtinfo->subsampling;
+}
+
+static int jpu_start_streaming(struct vb2_queue *vq, unsigned count)
+{
+ struct jpu_ctx *ctx = vb2_get_drv_priv(vq);
+ struct jpu_q_data *q_data = jpu_get_q_data(ctx, vq->type);
+
+ q_data->sequence = 0;
+ return 0;
+}
+
+static void jpu_stop_streaming(struct vb2_queue *vq)
+{
+ struct jpu_ctx *ctx = vb2_get_drv_priv(vq);
+ struct vb2_v4l2_buffer *vb;
+ unsigned long flags;
+
+ for (;;) {
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ if (vb == NULL)
+ return;
+ spin_lock_irqsave(&ctx->jpu->lock, flags);
+ v4l2_m2m_buf_done(vb, VB2_BUF_STATE_ERROR);
+ spin_unlock_irqrestore(&ctx->jpu->lock, flags);
+ }
+}
+
+static const struct vb2_ops jpu_qops = {
+ .queue_setup = jpu_queue_setup,
+ .buf_prepare = jpu_buf_prepare,
+ .buf_queue = jpu_buf_queue,
+ .buf_finish = jpu_buf_finish,
+ .start_streaming = jpu_start_streaming,
+ .stop_streaming = jpu_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int jpu_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct jpu_ctx *ctx = priv;
+ int ret;
+
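+ /*
+ * The source queue carries OUTPUT (to-be-processed) buffers and the
+ * destination queue carries CAPTURE (result) buffers; both share the
+ * device mutex so queue operations are serialized per device.
+ */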
+ memset(src_vq, 0, sizeof(*src_vq));
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct jpu_buffer);
+ src_vq->ops = &jpu_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->jpu->mutex;
+ src_vq->dev = ctx->jpu->v4l2_dev.dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ memset(dst_vq, 0, sizeof(*dst_vq));
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct jpu_buffer);
+ dst_vq->ops = &jpu_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->jpu->mutex;
+ dst_vq->dev = ctx->jpu->v4l2_dev.dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+/*
+ * ============================================================================
+ * Device file operations
+ * ============================================================================
+ */
+static int jpu_open(struct file *file)
+{
+ struct jpu *jpu = video_drvdata(file);
+ struct video_device *vfd = video_devdata(file);
+ struct jpu_ctx *ctx;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ v4l2_fh_init(&ctx->fh, vfd);
+ ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ ctx->jpu = jpu;
+ ctx->encoder = vfd == &jpu->vfd_encoder;
+
+ __jpu_try_fmt(ctx, &ctx->out_q.fmtinfo, &ctx->out_q.format,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ __jpu_try_fmt(ctx, &ctx->cap_q.fmtinfo, &ctx->cap_q.format,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(jpu->m2m_dev, ctx, jpu_queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ goto v4l_prepare_rollback;
+ }
+
+ ret = jpu_controls_create(ctx);
+ if (ret < 0)
+ goto v4l_prepare_rollback;
+
+ if (mutex_lock_interruptible(&jpu->mutex)) {
+ ret = -ERESTARTSYS;
+ goto v4l_prepare_rollback;
+ }
+
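+ /*
+ * The encoder and decoder nodes share the same hardware: enable the
+ * clock and reset the module only on the first open of either node.
+ */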
+ if (jpu->ref_count == 0) {
+ ret = clk_prepare_enable(jpu->clk);
+ if (ret < 0)
+ goto device_prepare_rollback;
+ /* ...issue software reset */
+ ret = jpu_reset(jpu);
+ if (ret)
+ goto jpu_reset_rollback;
+ }
+
+ jpu->ref_count++;
+
+ mutex_unlock(&jpu->mutex);
+ return 0;
+
+jpu_reset_rollback:
+ clk_disable_unprepare(jpu->clk);
+device_prepare_rollback:
+ mutex_unlock(&jpu->mutex);
+v4l_prepare_rollback:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+ return ret;
+}
+
+static int jpu_release(struct file *file)
+{
+ struct jpu *jpu = video_drvdata(file);
+ struct jpu_ctx *ctx = fh_to_ctx(file->private_data);
+
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+
+ mutex_lock(&jpu->mutex);
+ if (--jpu->ref_count == 0)
+ clk_disable_unprepare(jpu->clk);
+ mutex_unlock(&jpu->mutex);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations jpu_fops = {
+ .owner = THIS_MODULE,
+ .open = jpu_open,
+ .release = jpu_release,
+ .unlocked_ioctl = video_ioctl2,
+ .poll = v4l2_m2m_fop_poll,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+/*
+ * ============================================================================
+ * mem2mem callbacks
+ * ============================================================================
+ */
+static void jpu_cleanup(struct jpu_ctx *ctx, bool reset)
+{
+ /* remove current buffers and finish job */
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->jpu->lock, flags);
+
+ src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
+
+ /* ...and reset the JPU so the next run starts from a clean state */
+ if (reset)
+ jpu_write(ctx->jpu, JCCMD_SRST, JCCMD);
+
+ spin_unlock_irqrestore(&ctx->jpu->lock, flags);
+
+ v4l2_m2m_job_finish(ctx->jpu->m2m_dev, ctx->fh.m2m_ctx);
+}
+
+static void jpu_device_run(void *priv)
+{
+ struct jpu_ctx *ctx = priv;
+ struct jpu *jpu = ctx->jpu;
+ struct jpu_buffer *jpu_buf;
+ struct jpu_q_data *q_data;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ unsigned int w, h, bpl;
+ unsigned char num_planes, subsampling;
+ unsigned long flags;
+
+ /* ...wait until module reset completes; we have mutex locked here */
+ if (jpu_wait_reset(jpu)) {
+ jpu_cleanup(ctx, true);
+ return;
+ }
+
+ spin_lock_irqsave(&ctx->jpu->lock, flags);
+
+ jpu->curr = ctx;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ if (ctx->encoder) {
+ jpu_buf = vb2_to_jpu_buffer(dst_buf);
+ q_data = &ctx->out_q;
+ } else {
+ jpu_buf = vb2_to_jpu_buffer(src_buf);
+ q_data = &ctx->cap_q;
+ }
+
+ w = q_data->format.width;
+ h = q_data->format.height;
+ bpl = q_data->format.plane_fmt[0].bytesperline;
+ num_planes = q_data->fmtinfo->num_planes;
+ subsampling = q_data->fmtinfo->subsampling;
+
+ if (ctx->encoder) {
+ unsigned long src_1_addr, src_2_addr, dst_addr;
+ unsigned int redu, inft;
+
+ dst_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+ src_1_addr =
+ vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
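+ /*
+ * For single-plane source formats the chroma samples are expected to
+ * follow the w * h bytes of luma in the same contiguous buffer.
+ */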
+ if (num_planes > 1)
+ src_2_addr = vb2_dma_contig_plane_dma_addr(
+ &src_buf->vb2_buf, 1);
+ else
+ src_2_addr = src_1_addr + w * h;
+
+ jpu_buf->compr_quality = ctx->compr_quality;
+
+ if (subsampling == JPU_JPEG_420) {
+ redu = JCMOD_REDU_420;
+ inft = JIFECNT_INFT_420;
+ } else {
+ redu = JCMOD_REDU_422;
+ inft = JIFECNT_INFT_422;
+ }
+
+ /* only the no-marker mode works for encoding */
+ jpu_write(jpu, JCMOD_DSP_ENC | JCMOD_PCTR | redu |
+ JCMOD_MSKIP_ENABLE, JCMOD);
+
+ jpu_write(jpu, JIFECNT_SWAP_WB | inft, JIFECNT);
+ jpu_write(jpu, JIFDCNT_SWAP_WB, JIFDCNT);
+ jpu_write(jpu, JINTE_TRANSF_COMPL, JINTE);
+
+ /* Y and C components source addresses */
+ jpu_write(jpu, src_1_addr, JIFESYA1);
+ jpu_write(jpu, src_2_addr, JIFESCA1);
+
+ /* memory width */
+ jpu_write(jpu, bpl, JIFESMW);
+
+ jpu_write(jpu, (w >> 8) & JCSZ_MASK, JCHSZU);
+ jpu_write(jpu, w & JCSZ_MASK, JCHSZD);
+
+ jpu_write(jpu, (h >> 8) & JCSZ_MASK, JCVSZU);
+ jpu_write(jpu, h & JCSZ_MASK, JCVSZD);
+
+ jpu_write(jpu, w, JIFESHSZ);
+ jpu_write(jpu, h, JIFESVSZ);
+
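+ /*
+ * Leave room for the JPEG header: it is copied in front of the scan
+ * data by jpu_buf_finish() once encoding completes.
+ */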
+ jpu_write(jpu, dst_addr + JPU_JPEG_HDR_SIZE, JIFEDA1);
+
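+ /*
+ * Component 1 (Y) uses quantization and Huffman tables 0; components
+ * 2 and 3 (Cb, Cr) use tables 1.
+ */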
+ jpu_write(jpu, 0 << JCQTN_SHIFT(1) | 1 << JCQTN_SHIFT(2) |
+ 1 << JCQTN_SHIFT(3), JCQTN);
+
+ jpu_write(jpu, 0 << JCHTN_AC_SHIFT(1) | 0 << JCHTN_DC_SHIFT(1) |
+ 1 << JCHTN_AC_SHIFT(2) | 1 << JCHTN_DC_SHIFT(2) |
+ 1 << JCHTN_AC_SHIFT(3) | 1 << JCHTN_DC_SHIFT(3),
+ JCHTN);
+
+ jpu_set_qtbl(jpu, ctx->compr_quality);
+ jpu_set_htbl(jpu);
+ } else {
+ unsigned long src_addr, dst_1_addr, dst_2_addr;
+
+ if (jpu_buf->subsampling != subsampling) {
+ dev_err(ctx->jpu->dev,
+ "src and dst formats do not match.\n");
+ spin_unlock_irqrestore(&ctx->jpu->lock, flags);
+ jpu_cleanup(ctx, false);
+ return;
+ }
+
+ src_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+ dst_1_addr =
+ vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+ if (q_data->fmtinfo->num_planes > 1)
+ dst_2_addr = vb2_dma_contig_plane_dma_addr(
+ &dst_buf->vb2_buf, 1);
+ else
+ dst_2_addr = dst_1_addr + w * h;
+
+ /* ...set up decoder operation */
+ jpu_write(jpu, JCMOD_DSP_DEC | JCMOD_PCTR, JCMOD);
+ jpu_write(jpu, JIFECNT_SWAP_WB, JIFECNT);
+ jpu_write(jpu, JIFDCNT_SWAP_WB, JIFDCNT);
+
+ /* ...enable interrupts on transfer completion and decoding error */
+ jpu_write(jpu, JINTE_TRANSF_COMPL | JINTE_ERR, JINTE);
+
+ /* ...set source/destination addresses of encoded data */
+ jpu_write(jpu, src_addr, JIFDSA1);
+ jpu_write(jpu, dst_1_addr, JIFDDYA1);
+ jpu_write(jpu, dst_2_addr, JIFDDCA1);
+
+ jpu_write(jpu, bpl, JIFDDMW);
+ }
+
+ /* ...start encoder/decoder operation */
+ jpu_write(jpu, JCCMD_JSRT, JCCMD);
+
+ spin_unlock_irqrestore(&ctx->jpu->lock, flags);
+}
+
+static const struct v4l2_m2m_ops jpu_m2m_ops = {
+ .device_run = jpu_device_run,
+};
+
+/*
+ * ============================================================================
+ * IRQ handler
+ * ============================================================================
+ */
+static irqreturn_t jpu_irq_handler(int irq, void *dev_id)
+{
+ struct jpu *jpu = dev_id;
+ struct jpu_ctx *curr_ctx;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ unsigned int int_status;
+
+ int_status = jpu_read(jpu, JINTS);
+
+ /* ...spurious interrupt */
+ if (!((JINTS_TRANSF_COMPL | JINTS_PROCESS_COMPL | JINTS_ERR) &
+ int_status))
+ return IRQ_NONE;
+
+ /* ...clear interrupts */
+ jpu_write(jpu, ~(int_status & JINTS_MASK), JINTS);
+ if (int_status & (JINTS_ERR | JINTS_PROCESS_COMPL))
+ jpu_write(jpu, JCCMD_JEND, JCCMD);
+
+ spin_lock(&jpu->lock);
+
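+ /*
+ * Processing completed but the data transfer has not: wait for the
+ * transfer-completion interrupt before finishing the buffers.
+ */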
+ if ((int_status & JINTS_PROCESS_COMPL) &&
+ !(int_status & JINTS_TRANSF_COMPL))
+ goto handled;
+
+ curr_ctx = v4l2_m2m_get_curr_priv(jpu->m2m_dev);
+ if (!curr_ctx) {
+ /* ...instance is not running */
+ dev_err(jpu->dev, "no active context for m2m\n");
+ goto handled;
+ }
+
+ src_buf = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(curr_ctx->fh.m2m_ctx);
+
+ if (int_status & JINTS_TRANSF_COMPL) {
+ if (curr_ctx->encoder) {
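+ /*
+ * The encoded data byte count is reported as a 24-bit value
+ * split across JCDTCU/JCDTCM/JCDTCD (upper/middle/lower 8 bits).
+ */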
+ unsigned long payload_size = jpu_read(jpu, JCDTCU) << 16
+ | jpu_read(jpu, JCDTCM) << 8
+ | jpu_read(jpu, JCDTCD);
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
+ payload_size + JPU_JPEG_HDR_SIZE);
+ }
+
+ dst_buf->field = src_buf->field;
+ dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp;
+ if (src_buf->flags & V4L2_BUF_FLAG_TIMECODE)
+ dst_buf->timecode = src_buf->timecode;
+ dst_buf->flags = src_buf->flags &
+ (V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_KEYFRAME |
+ V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME |
+ V4L2_BUF_FLAG_TSTAMP_SRC_MASK);
+
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
+ } else if (int_status & JINTS_ERR) {
+ unsigned char error = jpu_read(jpu, JCDERR) & JCDERR_MASK;
+
+ dev_dbg(jpu->dev, "processing error: %#X: %s\n", error,
+ error_to_text[error]);
+
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
+ }
+
+ jpu->curr = NULL;
+
+ /* ...reset JPU after completion */
+ jpu_write(jpu, JCCMD_SRST, JCCMD);
+ spin_unlock(&jpu->lock);
+
+ v4l2_m2m_job_finish(jpu->m2m_dev, curr_ctx->fh.m2m_ctx);
+
+ return IRQ_HANDLED;
+
+handled:
+ spin_unlock(&jpu->lock);
+ return IRQ_HANDLED;
+}
+
+/*
+ * ============================================================================
+ * Driver basic infrastructure
+ * ============================================================================
+ */
+static const struct of_device_id jpu_dt_ids[] = {
+ { .compatible = "renesas,jpu-r8a7790" }, /* H2 */
+ { .compatible = "renesas,jpu-r8a7791" }, /* M2-W */
+ { .compatible = "renesas,jpu-r8a7792" }, /* V2H */
+ { .compatible = "renesas,jpu-r8a7793" }, /* M2-N */
+ { .compatible = "renesas,rcar-gen2-jpu" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, jpu_dt_ids);
+
+static int jpu_probe(struct platform_device *pdev)
+{
+ struct jpu *jpu;
+ int ret;
+ unsigned int i;
+
+ jpu = devm_kzalloc(&pdev->dev, sizeof(*jpu), GFP_KERNEL);
+ if (!jpu)
+ return -ENOMEM;
+
+ mutex_init(&jpu->mutex);
+ spin_lock_init(&jpu->lock);
+ jpu->dev = &pdev->dev;
+
+ /* memory-mapped registers */
+ jpu->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(jpu->regs))
+ return PTR_ERR(jpu->regs);
+
+ /* interrupt service routine registration */
+ jpu->irq = ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_request_irq(&pdev->dev, jpu->irq, jpu_irq_handler, 0,
+ dev_name(&pdev->dev), jpu);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot claim IRQ %d\n", jpu->irq);
+ return ret;
+ }
+
+ /* clocks */
+ jpu->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(jpu->clk)) {
+ dev_err(&pdev->dev, "cannot get clock\n");
+ return PTR_ERR(jpu->clk);
+ }
+
+ /* v4l2 device */
+ ret = v4l2_device_register(&pdev->dev, &jpu->v4l2_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register v4l2 device\n");
+ return ret;
+ }
+
+ /* mem2mem device */
+ jpu->m2m_dev = v4l2_m2m_init(&jpu_m2m_ops);
+ if (IS_ERR(jpu->m2m_dev)) {
+ v4l2_err(&jpu->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(jpu->m2m_dev);
+ goto device_register_rollback;
+ }
+
+ /* pre-generate a JPEG header (quantization and Huffman tables) per quality */
+ for (i = 0; i < JPU_MAX_QUALITY; i++)
+ jpu_generate_hdr(i, (unsigned char *)jpeg_hdrs[i]);
+
+ strscpy(jpu->vfd_encoder.name, DRV_NAME, sizeof(jpu->vfd_encoder.name));
+ jpu->vfd_encoder.fops = &jpu_fops;
+ jpu->vfd_encoder.ioctl_ops = &jpu_ioctl_ops;
+ jpu->vfd_encoder.minor = -1;
+ jpu->vfd_encoder.release = video_device_release_empty;
+ jpu->vfd_encoder.lock = &jpu->mutex;
+ jpu->vfd_encoder.v4l2_dev = &jpu->v4l2_dev;
+ jpu->vfd_encoder.vfl_dir = VFL_DIR_M2M;
+ jpu->vfd_encoder.device_caps = V4L2_CAP_STREAMING |
+ V4L2_CAP_VIDEO_M2M_MPLANE;
+
+ ret = video_register_device(&jpu->vfd_encoder, VFL_TYPE_VIDEO, -1);
+ if (ret) {
+ v4l2_err(&jpu->v4l2_dev, "Failed to register video device\n");
+ goto m2m_init_rollback;
+ }
+
+ video_set_drvdata(&jpu->vfd_encoder, jpu);
+
+ strscpy(jpu->vfd_decoder.name, DRV_NAME, sizeof(jpu->vfd_decoder.name));
+ jpu->vfd_decoder.fops = &jpu_fops;
+ jpu->vfd_decoder.ioctl_ops = &jpu_ioctl_ops;
+ jpu->vfd_decoder.minor = -1;
+ jpu->vfd_decoder.release = video_device_release_empty;
+ jpu->vfd_decoder.lock = &jpu->mutex;
+ jpu->vfd_decoder.v4l2_dev = &jpu->v4l2_dev;
+ jpu->vfd_decoder.vfl_dir = VFL_DIR_M2M;
+ jpu->vfd_decoder.device_caps = V4L2_CAP_STREAMING |
+ V4L2_CAP_VIDEO_M2M_MPLANE;
+
+ ret = video_register_device(&jpu->vfd_decoder, VFL_TYPE_VIDEO, -1);
+ if (ret) {
+ v4l2_err(&jpu->v4l2_dev, "Failed to register video device\n");
+ goto enc_vdev_register_rollback;
+ }
+
+ video_set_drvdata(&jpu->vfd_decoder, jpu);
+ platform_set_drvdata(pdev, jpu);
+
+ v4l2_info(&jpu->v4l2_dev, "encoder device registered as /dev/video%d\n",
+ jpu->vfd_encoder.num);
+ v4l2_info(&jpu->v4l2_dev, "decoder device registered as /dev/video%d\n",
+ jpu->vfd_decoder.num);
+
+ return 0;
+
+enc_vdev_register_rollback:
+ video_unregister_device(&jpu->vfd_encoder);
+
+m2m_init_rollback:
+ v4l2_m2m_release(jpu->m2m_dev);
+
+device_register_rollback:
+ v4l2_device_unregister(&jpu->v4l2_dev);
+
+ return ret;
+}
+
+static void jpu_remove(struct platform_device *pdev)
+{
+ struct jpu *jpu = platform_get_drvdata(pdev);
+
+ video_unregister_device(&jpu->vfd_decoder);
+ video_unregister_device(&jpu->vfd_encoder);
+ v4l2_m2m_release(jpu->m2m_dev);
+ v4l2_device_unregister(&jpu->v4l2_dev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int jpu_suspend(struct device *dev)
+{
+ struct jpu *jpu = dev_get_drvdata(dev);
+
+ if (jpu->ref_count == 0)
+ return 0;
+
+ clk_disable_unprepare(jpu->clk);
+
+ return 0;
+}
+
+static int jpu_resume(struct device *dev)
+{
+ struct jpu *jpu = dev_get_drvdata(dev);
+
+ if (jpu->ref_count == 0)
+ return 0;
+
+ clk_prepare_enable(jpu->clk);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops jpu_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(jpu_suspend, jpu_resume)
+};
+
+static struct platform_driver jpu_driver = {
+ .probe = jpu_probe,
+ .remove_new = jpu_remove,
+ .driver = {
+ .of_match_table = jpu_dt_ids,
+ .name = DRV_NAME,
+ .pm = &jpu_pm_ops,
+ },
+};
+
+module_platform_driver(jpu_driver);
+
+MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_AUTHOR("Mikhail Ulianov <mikhail.ulyanov@cogentembedded.com>");
+MODULE_DESCRIPTION("Renesas R-Car JPEG processing unit driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/renesas/renesas-ceu.c b/drivers/media/platform/renesas/renesas-ceu.c
new file mode 100644
index 0000000000..ec631c6e2a
--- /dev/null
+++ b/drivers/media/platform/renesas/renesas-ceu.c
@@ -0,0 +1,1737 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * V4L2 Driver for Renesas Capture Engine Unit (CEU) interface
+ * Copyright (C) 2017-2018 Jacopo Mondi <jacopo+renesas@jmondi.org>
+ *
+ * Based on soc-camera driver "soc_camera/sh_mobile_ceu_camera.c"
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * Based on V4L2 Driver for PXA camera host - "pxa_camera.c",
+ * Copyright (C) 2006, Sascha Hauer, Pengutronix
+ * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-image-sizes.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mediabus.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include <media/drv-intf/renesas-ceu.h>
+
+#define DRIVER_NAME "renesas-ceu"
+
+/* CEU registers offsets and masks. */
+#define CEU_CAPSR 0x00 /* Capture start register */
+#define CEU_CAPCR 0x04 /* Capture control register */
+#define CEU_CAMCR 0x08 /* Capture interface control register */
+#define CEU_CAMOR 0x10 /* Capture interface offset register */
+#define CEU_CAPWR 0x14 /* Capture interface width register */
+#define CEU_CAIFR 0x18 /* Capture interface input format register */
+#define CEU_CRCNTR 0x28 /* CEU register control register */
+#define CEU_CRCMPR 0x2c /* CEU register forcible control register */
+#define CEU_CFLCR 0x30 /* Capture filter control register */
+#define CEU_CFSZR 0x34 /* Capture filter size clip register */
+#define CEU_CDWDR 0x38 /* Capture destination width register */
+#define CEU_CDAYR 0x3c /* Capture data address Y register */
+#define CEU_CDACR 0x40 /* Capture data address C register */
+#define CEU_CFWCR 0x5c /* Firewall operation control register */
+#define CEU_CDOCR 0x64 /* Capture data output control register */
+#define CEU_CEIER 0x70 /* Capture event interrupt enable register */
+#define CEU_CETCR 0x74 /* Capture event flag clear register */
+#define CEU_CSTSR 0x7c /* Capture status register */
+#define CEU_CSRTR 0x80 /* Capture software reset register */
+
+/* Data synchronous fetch mode. */
+#define CEU_CAMCR_JPEG BIT(4)
+
+/* Input components ordering: CEU_CAMCR.DTARY field. */
+#define CEU_CAMCR_DTARY_8_UYVY (0x00 << 8)
+#define CEU_CAMCR_DTARY_8_VYUY (0x01 << 8)
+#define CEU_CAMCR_DTARY_8_YUYV (0x02 << 8)
+#define CEU_CAMCR_DTARY_8_YVYU (0x03 << 8)
+/* TODO: input components ordering for 16 bits input. */
+
+/* Bus transfer MTU. */
+#define CEU_CAPCR_BUS_WIDTH256 (0x3 << 20)
+
+/* Bus width configuration. */
+#define CEU_CAMCR_DTIF_16BITS BIT(12)
+
+/* No downsampling to planar YUV420 in image fetch mode. */
+#define CEU_CDOCR_NO_DOWSAMPLE BIT(4)
+
+/* Swap all input data in 8-bit, 16-bit and 32-bit units (Figure 46.45). */
+#define CEU_CDOCR_SWAP_ENDIANNESS (7)
+
+/* Capture reset and enable bits. */
+#define CEU_CAPSR_CPKIL BIT(16)
+#define CEU_CAPSR_CE BIT(0)
+
+/* CEU operating flag bit. */
+#define CEU_CAPCR_CTNCP BIT(16)
+#define CEU_CSTRST_CPTON BIT(0)
+
+/* Platform specific IRQ source flags. */
+#define CEU_CETCR_ALL_IRQS_RZ 0x397f313
+#define CEU_CETCR_ALL_IRQS_SH4 0x3d7f313
+
+/* Prohibited register access interrupt bit. */
+#define CEU_CETCR_IGRW BIT(4)
+/* One-frame capture end interrupt. */
+#define CEU_CEIER_CPE BIT(0)
+/* VBP error. */
+#define CEU_CEIER_VBP BIT(20)
+#define CEU_CEIER_MASK (CEU_CEIER_CPE | CEU_CEIER_VBP)
+
+#define CEU_MAX_WIDTH 2560
+#define CEU_MAX_HEIGHT 1920
+#define CEU_MAX_BPL 8188
+#define CEU_W_MAX(w) ((w) < CEU_MAX_WIDTH ? (w) : CEU_MAX_WIDTH)
+#define CEU_H_MAX(h) ((h) < CEU_MAX_HEIGHT ? (h) : CEU_MAX_HEIGHT)
+
+/*
+ * ceu_mbus_fmt - describe an 8-bit YUYV format the sensor can produce
+ *
+ * @mbus_code: bus format code
+ * @fmt_order: CEU_CAMCR.DTARY ordering of input components (Y, Cb, Cr)
+ * @fmt_order_swap: swapped CEU_CAMCR.DTARY ordering of input components
+ * (Y, Cr, Cb)
+ * @swapped: does Cr appear before Cb?
+ * @bps: number of bits sent over the bus for each sample
+ * @bpp: number of bits per pixel
+ */
+struct ceu_mbus_fmt {
+ u32 mbus_code;
+ u32 fmt_order;
+ u32 fmt_order_swap;
+ bool swapped;
+ u8 bps;
+ u8 bpp;
+};
+
+/*
+ * ceu_buffer - Link vb2 buffer to the list of available buffers.
+ */
+struct ceu_buffer {
+ struct vb2_v4l2_buffer vb;
+ struct list_head queue;
+};
+
+static inline struct ceu_buffer *vb2_to_ceu(struct vb2_v4l2_buffer *vbuf)
+{
+ return container_of(vbuf, struct ceu_buffer, vb);
+}
+
+/*
+ * ceu_subdev - Wraps v4l2 sub-device and provides async subdevice.
+ */
+struct ceu_subdev {
+ struct v4l2_async_connection asd;
+ struct v4l2_subdev *v4l2_sd;
+
+ /* per-subdevice mbus configuration options */
+ unsigned int mbus_flags;
+ struct ceu_mbus_fmt mbus_fmt;
+};
+
+static struct ceu_subdev *to_ceu_subdev(struct v4l2_async_connection *asd)
+{
+ return container_of(asd, struct ceu_subdev, asd);
+}
+
+/*
+ * ceu_device - CEU device instance
+ */
+struct ceu_device {
+ struct device *dev;
+ struct video_device vdev;
+ struct v4l2_device v4l2_dev;
+
+ /* subdevices descriptors */
+ struct ceu_subdev **subdevs;
+ /* the subdevice currently in use */
+ struct ceu_subdev *sd;
+ unsigned int sd_index;
+ unsigned int num_sd;
+
+ /* platform specific mask with all IRQ sources flagged */
+ u32 irq_mask;
+
+ /* currently configured field and pixel format */
+ enum v4l2_field field;
+ struct v4l2_pix_format_mplane v4l2_pix;
+
+ /* async subdev notification helpers */
+ struct v4l2_async_notifier notifier;
+
+ /* vb2 queue, capture buffer list and active buffer pointer */
+ struct vb2_queue vb2_vq;
+ struct list_head capture;
+ struct vb2_v4l2_buffer *active;
+ unsigned int sequence;
+
+ /* mlock - lock access to interface reset and vb2 queue */
+ struct mutex mlock;
+
+ /* lock - lock access to capture buffer queue and active buffer */
+ spinlock_t lock;
+
+ /* base - CEU memory base address */
+ void __iomem *base;
+};
+
+static inline struct ceu_device *v4l2_to_ceu(struct v4l2_device *v4l2_dev)
+{
+ return container_of(v4l2_dev, struct ceu_device, v4l2_dev);
+}
+
+/* --- CEU memory output formats --- */
+
+/*
+ * ceu_fmt - describe a memory output format supported by CEU interface.
+ *
+ * @fourcc: memory layout fourcc format code
+ * @bpp: number of bits for each pixel stored in memory
+ */
+struct ceu_fmt {
+ u32 fourcc;
+ u32 bpp;
+};
+
+/*
+ * ceu_format_list - List of supported memory output formats
+ *
+ * If sensor provides any YUYV bus format, all the following planar memory
+ * formats are available thanks to CEU re-ordering and sub-sampling
+ * capabilities.
+ */
+static const struct ceu_fmt ceu_fmt_list[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV61,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .bpp = 12,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV21,
+ .bpp = 12,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .bpp = 16,
+ },
+};
+
+static const struct ceu_fmt *get_ceu_fmt_from_fourcc(unsigned int fourcc)
+{
+ const struct ceu_fmt *fmt = &ceu_fmt_list[0];
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ceu_fmt_list); i++, fmt++)
+ if (fmt->fourcc == fourcc)
+ return fmt;
+
+ return NULL;
+}
+
+static bool ceu_fmt_mplane(struct v4l2_pix_format_mplane *pix)
+{
+ switch (pix->pixelformat) {
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_VYUY:
+ return false;
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* --- CEU HW operations --- */
+
+static void ceu_write(struct ceu_device *priv, unsigned int reg_offs, u32 data)
+{
+ iowrite32(data, priv->base + reg_offs);
+}
+
+static u32 ceu_read(struct ceu_device *priv, unsigned int reg_offs)
+{
+ return ioread32(priv->base + reg_offs);
+}
+
+/*
+ * ceu_soft_reset() - Software reset the CEU interface.
+ * @ceudev: CEU device.
+ *
+ * Returns 0 for success, -EIO for error.
+ */
+static int ceu_soft_reset(struct ceu_device *ceudev)
+{
+ unsigned int i;
+
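+ /* Abort any ongoing capture, then wait for the unit to become idle. */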
+ ceu_write(ceudev, CEU_CAPSR, CEU_CAPSR_CPKIL);
+
+ for (i = 0; i < 100; i++) {
+ if (!(ceu_read(ceudev, CEU_CSTSR) & CEU_CSTRST_CPTON))
+ break;
+ udelay(1);
+ }
+
+ if (i == 100) {
+ dev_err(ceudev->dev, "soft reset time out\n");
+ return -EIO;
+ }
+
+ for (i = 0; i < 100; i++) {
+ if (!(ceu_read(ceudev, CEU_CAPSR) & CEU_CAPSR_CPKIL))
+ return 0;
+ udelay(1);
+ }
+
+ /* If we get here, CEU has not reset properly. */
+ return -EIO;
+}
+
+/* --- CEU Capture Operations --- */
+
+/*
+ * ceu_hw_config() - Configure CEU interface registers.
+ */
+static int ceu_hw_config(struct ceu_device *ceudev)
+{
+ u32 camcr, cdocr, cfzsr, cdwdr, capwr;
+ struct v4l2_pix_format_mplane *pix = &ceudev->v4l2_pix;
+ struct ceu_subdev *ceu_sd = ceudev->sd;
+ struct ceu_mbus_fmt *mbus_fmt = &ceu_sd->mbus_fmt;
+ unsigned int mbus_flags = ceu_sd->mbus_flags;
+
+ /* Start configuring CEU registers */
+ ceu_write(ceudev, CEU_CAIFR, 0);
+ ceu_write(ceudev, CEU_CFWCR, 0);
+ ceu_write(ceudev, CEU_CRCNTR, 0);
+ ceu_write(ceudev, CEU_CRCMPR, 0);
+
+ /* Set the frame capture period for both image capture and data sync. */
+ capwr = (pix->height << 16) | pix->width * mbus_fmt->bpp / 8;
+
+ /*
+ * Swap input data endianness by default.
+ * In data fetch mode bytes are received in chunks of 8 bytes.
+ * D0, D1, D2, D3, D4, D5, D6, D7 (D0 received first)
+ * The data is however by default written to memory in reverse order:
+ * D7, D6, D5, D4, D3, D2, D1, D0 (D7 written to lowest byte)
+ *
+ * Use CEU_CDOCR[2:0] to swap data ordering.
+ */
+ cdocr = CEU_CDOCR_SWAP_ENDIANNESS;
+
+ /*
+ * Configure CAMCR and CDOCR:
+ * match input components ordering with memory output format and
+ * handle downsampling to YUV420.
+ *
+ * If the memory output planar format is 'swapped' (Cr before Cb) and
+ * input format is not, use the swapped version of CAMCR.DTARY.
+ *
+ * If the memory output planar format is not 'swapped' (Cb before Cr)
+ * and input format is, use the swapped version of CAMCR.DTARY.
+ *
+ * The CEU downsamples to planar YUV420 by default (CDOCR[4] = 0).
+ * If the output is planar YUV422, set CDOCR[4] = 1.
+ *
+ * No downsample for data fetch sync mode.
+ */
+ switch (pix->pixelformat) {
+ /* Data fetch sync mode */
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
+ camcr = CEU_CAMCR_JPEG;
+ cdocr |= CEU_CDOCR_NO_DOWSAMPLE;
+ cfzsr = (pix->height << 16) | pix->width;
+ cdwdr = pix->plane_fmt[0].bytesperline;
+ break;
+
+ /* Non-swapped planar image capture mode. */
+ case V4L2_PIX_FMT_NV16:
+ cdocr |= CEU_CDOCR_NO_DOWSAMPLE;
+ fallthrough;
+ case V4L2_PIX_FMT_NV12:
+ if (mbus_fmt->swapped)
+ camcr = mbus_fmt->fmt_order_swap;
+ else
+ camcr = mbus_fmt->fmt_order;
+
+ cfzsr = (pix->height << 16) | pix->width;
+ cdwdr = pix->width;
+ break;
+
+ /* Swapped planar image capture mode. */
+ case V4L2_PIX_FMT_NV61:
+ cdocr |= CEU_CDOCR_NO_DOWSAMPLE;
+ fallthrough;
+ case V4L2_PIX_FMT_NV21:
+ if (mbus_fmt->swapped)
+ camcr = mbus_fmt->fmt_order;
+ else
+ camcr = mbus_fmt->fmt_order_swap;
+
+ cfzsr = (pix->height << 16) | pix->width;
+ cdwdr = pix->width;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ camcr |= mbus_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW ? 1 << 1 : 0;
+ camcr |= mbus_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW ? 1 << 0 : 0;
+
+ /* TODO: handle 16 bit bus width with DTIF bit in CAMCR */
+ ceu_write(ceudev, CEU_CAMCR, camcr);
+ ceu_write(ceudev, CEU_CDOCR, cdocr);
+ ceu_write(ceudev, CEU_CAPCR, CEU_CAPCR_BUS_WIDTH256);
+
+ /*
+ * TODO: make CAMOR offsets configurable.
+ * CAMOR wants to know the number of blanks between a VS/HS signal
+ * and valid data. This value should actually come from the sensor...
+ */
+ ceu_write(ceudev, CEU_CAMOR, 0);
+
+ /* TODO: 16 bit bus width require re-calculation of cdwdr and cfzsr */
+ ceu_write(ceudev, CEU_CAPWR, capwr);
+ ceu_write(ceudev, CEU_CFSZR, cfzsr);
+ ceu_write(ceudev, CEU_CDWDR, cdwdr);
+
+ return 0;
+}
+
+/*
+ * ceu_capture() - Trigger start of a capture sequence.
+ *
+ * Program the CEU DMA registers with addresses where to transfer image data.
+ */
+static int ceu_capture(struct ceu_device *ceudev)
+{
+ struct v4l2_pix_format_mplane *pix = &ceudev->v4l2_pix;
+ dma_addr_t phys_addr_top;
+
+ phys_addr_top =
+ vb2_dma_contig_plane_dma_addr(&ceudev->active->vb2_buf, 0);
+ ceu_write(ceudev, CEU_CDAYR, phys_addr_top);
+
+ /* Ignore CbCr plane for non multi-planar image formats. */
+ if (ceu_fmt_mplane(pix)) {
+ phys_addr_top =
+ vb2_dma_contig_plane_dma_addr(&ceudev->active->vb2_buf,
+ 1);
+ ceu_write(ceudev, CEU_CDACR, phys_addr_top);
+ }
+
+ /*
+ * Trigger new capture start: once for each frame, as we work in
+ * one-frame capture mode.
+ */
+ ceu_write(ceudev, CEU_CAPSR, CEU_CAPSR_CE);
+
+ return 0;
+}
+
+static irqreturn_t ceu_irq(int irq, void *data)
+{
+ struct ceu_device *ceudev = data;
+ struct vb2_v4l2_buffer *vbuf;
+ struct ceu_buffer *buf;
+ u32 status;
+
+ /* Clean interrupt status. */
+ status = ceu_read(ceudev, CEU_CETCR);
+ ceu_write(ceudev, CEU_CETCR, ~ceudev->irq_mask);
+
+ /* Unexpected interrupt. */
+ if (!(status & CEU_CEIER_MASK))
+ return IRQ_NONE;
+
+ spin_lock(&ceudev->lock);
+
+ /* Stale interrupt from a released buffer, ignore it. */
+ vbuf = ceudev->active;
+ if (!vbuf) {
+ spin_unlock(&ceudev->lock);
+ return IRQ_HANDLED;
+ }
+
+ /*
+ * When a VBP interrupt occurs, no capture end interrupt will occur
+ * and the image of that frame is not captured correctly.
+ */
+ if (status & CEU_CEIER_VBP) {
+ dev_err(ceudev->dev, "VBP interrupt: abort capture\n");
+ goto error_irq_out;
+ }
+
+ /* Prepare to return the 'previous' buffer. */
+ vbuf->vb2_buf.timestamp = ktime_get_ns();
+ vbuf->sequence = ceudev->sequence++;
+ vbuf->field = ceudev->field;
+
+ /* Prepare a new 'active' buffer and trigger a new capture. */
+ if (!list_empty(&ceudev->capture)) {
+ buf = list_first_entry(&ceudev->capture, struct ceu_buffer,
+ queue);
+ list_del(&buf->queue);
+ ceudev->active = &buf->vb;
+
+ ceu_capture(ceudev);
+ }
+
+ /* Return the 'previous' buffer. */
+ vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
+
+ spin_unlock(&ceudev->lock);
+
+ return IRQ_HANDLED;
+
+error_irq_out:
+ /* Return the 'previous' buffer and all queued ones. */
+ vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_ERROR);
+
+ list_for_each_entry(buf, &ceudev->capture, queue)
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+
+ spin_unlock(&ceudev->lock);
+
+ return IRQ_HANDLED;
+}
+
+/* --- CEU Videobuf2 operations --- */
+
+static void ceu_update_plane_sizes(struct v4l2_plane_pix_format *plane,
+ unsigned int bpl, unsigned int szimage)
+{
+ memset(plane, 0, sizeof(*plane));
+
+ plane->sizeimage = szimage;
+ if (plane->bytesperline < bpl || plane->bytesperline > CEU_MAX_BPL)
+ plane->bytesperline = bpl;
+}
+
+/*
+ * ceu_calc_plane_sizes() - Fill per-plane 'struct v4l2_plane_pix_format'
+ * information according to the currently configured
+ * pixel format.
+ * @ceudev: CEU device.
+ * @ceu_fmt: Active image format.
+ * @pix: Pixel format information (line width and image sizes are stored here)
+ */
+static void ceu_calc_plane_sizes(struct ceu_device *ceudev,
+ const struct ceu_fmt *ceu_fmt,
+ struct v4l2_pix_format_mplane *pix)
+{
+ unsigned int bpl, szimage;
+
+ switch (pix->pixelformat) {
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_VYUY:
+ pix->num_planes = 1;
+ bpl = pix->width * ceu_fmt->bpp / 8;
+ szimage = pix->height * bpl;
+ ceu_update_plane_sizes(&pix->plane_fmt[0], bpl, szimage);
+ break;
+
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ pix->num_planes = 2;
+ bpl = pix->width;
+ szimage = pix->height * pix->width;
+ ceu_update_plane_sizes(&pix->plane_fmt[0], bpl, szimage);
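+ /* 4:2:0: the interleaved CbCr plane holds half the luma plane size. */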
+ ceu_update_plane_sizes(&pix->plane_fmt[1], bpl, szimage / 2);
+ break;
+
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ default:
+ pix->num_planes = 2;
+ bpl = pix->width;
+ szimage = pix->height * pix->width;
+ ceu_update_plane_sizes(&pix->plane_fmt[0], bpl, szimage);
+ ceu_update_plane_sizes(&pix->plane_fmt[1], bpl, szimage);
+ break;
+ }
+}
+
+/*
+ * ceu_vb2_setup() - Check whether the driver can accept the requested
+ * number of buffers and fill in the plane sizes for the
+ * current frame format, if required.
+ */
+static int ceu_vb2_setup(struct vb2_queue *vq, unsigned int *count,
+ unsigned int *num_planes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct ceu_device *ceudev = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format_mplane *pix = &ceudev->v4l2_pix;
+ unsigned int i;
+
+ /* num_planes is set: just check plane sizes. */
+ if (*num_planes) {
+ for (i = 0; i < pix->num_planes; i++)
+ if (sizes[i] < pix->plane_fmt[i].sizeimage)
+ return -EINVAL;
+
+ return 0;
+ }
+
+ /* num_planes not set: called from REQBUFS, just set plane sizes. */
+ *num_planes = pix->num_planes;
+ for (i = 0; i < pix->num_planes; i++)
+ sizes[i] = pix->plane_fmt[i].sizeimage;
+
+ return 0;
+}
+
+static void ceu_vb2_queue(struct vb2_buffer *vb)
+{
+ struct ceu_device *ceudev = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct ceu_buffer *buf = vb2_to_ceu(vbuf);
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&ceudev->lock, irqflags);
+ list_add_tail(&buf->queue, &ceudev->capture);
+ spin_unlock_irqrestore(&ceudev->lock, irqflags);
+}
+
+static int ceu_vb2_prepare(struct vb2_buffer *vb)
+{
+ struct ceu_device *ceudev = vb2_get_drv_priv(vb->vb2_queue);
+ struct v4l2_pix_format_mplane *pix = &ceudev->v4l2_pix;
+ unsigned int i;
+
+ for (i = 0; i < pix->num_planes; i++) {
+ if (vb2_plane_size(vb, i) < pix->plane_fmt[i].sizeimage) {
+ dev_err(ceudev->dev,
+ "Plane size too small (%lu < %u)\n",
+ vb2_plane_size(vb, i),
+ pix->plane_fmt[i].sizeimage);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, i, pix->plane_fmt[i].sizeimage);
+ }
+
+ return 0;
+}
+
+static int ceu_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct ceu_device *ceudev = vb2_get_drv_priv(vq);
+ struct v4l2_subdev *v4l2_sd = ceudev->sd->v4l2_sd;
+ struct ceu_buffer *buf;
+ unsigned long irqflags;
+ int ret;
+
+ /* Program the CEU interface according to the CEU image format. */
+ ret = ceu_hw_config(ceudev);
+ if (ret)
+ goto error_return_bufs;
+
+ ret = v4l2_subdev_call(v4l2_sd, video, s_stream, 1);
+ if (ret && ret != -ENOIOCTLCMD) {
+ dev_dbg(ceudev->dev,
+ "Subdevice failed to start streaming: %d\n", ret);
+ goto error_return_bufs;
+ }
+
+ spin_lock_irqsave(&ceudev->lock, irqflags);
+ ceudev->sequence = 0;
+
+ /* Grab the first available buffer and trigger the first capture. */
+ buf = list_first_entry(&ceudev->capture, struct ceu_buffer,
+ queue);
+
+ list_del(&buf->queue);
+ ceudev->active = &buf->vb;
+
+ /* Clean and program interrupts for first capture. */
+ ceu_write(ceudev, CEU_CETCR, ~ceudev->irq_mask);
+ ceu_write(ceudev, CEU_CEIER, CEU_CEIER_MASK);
+
+ ceu_capture(ceudev);
+
+ spin_unlock_irqrestore(&ceudev->lock, irqflags);
+
+ return 0;
+
+error_return_bufs:
+ spin_lock_irqsave(&ceudev->lock, irqflags);
+ list_for_each_entry(buf, &ceudev->capture, queue)
+ vb2_buffer_done(&buf->vb.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
+ INIT_LIST_HEAD(&ceudev->capture);
+ ceudev->active = NULL;
+ spin_unlock_irqrestore(&ceudev->lock, irqflags);
+
+ return ret;
+}
+
+static void ceu_stop_streaming(struct vb2_queue *vq)
+{
+ struct ceu_device *ceudev = vb2_get_drv_priv(vq);
+ struct v4l2_subdev *v4l2_sd = ceudev->sd->v4l2_sd;
+ struct ceu_buffer *buf;
+ unsigned long irqflags;
+
+ /* Clean and disable interrupt sources. */
+ ceu_write(ceudev, CEU_CETCR,
+ ceu_read(ceudev, CEU_CETCR) & ceudev->irq_mask);
+ ceu_write(ceudev, CEU_CEIER, CEU_CEIER_MASK);
+
+ v4l2_subdev_call(v4l2_sd, video, s_stream, 0);
+
+ spin_lock_irqsave(&ceudev->lock, irqflags);
+ if (ceudev->active) {
+ vb2_buffer_done(&ceudev->active->vb2_buf,
+ VB2_BUF_STATE_ERROR);
+ ceudev->active = NULL;
+ }
+
+ /* Release all queued buffers. */
+ list_for_each_entry(buf, &ceudev->capture, queue)
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ INIT_LIST_HEAD(&ceudev->capture);
+
+ spin_unlock_irqrestore(&ceudev->lock, irqflags);
+
+ ceu_soft_reset(ceudev);
+}
+
+static const struct vb2_ops ceu_vb2_ops = {
+ .queue_setup = ceu_vb2_setup,
+ .buf_queue = ceu_vb2_queue,
+ .buf_prepare = ceu_vb2_prepare,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = ceu_start_streaming,
+ .stop_streaming = ceu_stop_streaming,
+};
+
+/* --- CEU image formats handling --- */
+
+/*
+ * __ceu_try_fmt() - test format on CEU and sensor
+ * @ceudev: The CEU device.
+ * @v4l2_fmt: format to test.
+ * @sd_mbus_code: the media bus code accepted by the subdevice; output param.
+ *
+ * Returns 0 for success, < 0 for errors.
+ */
+static int __ceu_try_fmt(struct ceu_device *ceudev, struct v4l2_format *v4l2_fmt,
+ u32 *sd_mbus_code)
+{
+ struct ceu_subdev *ceu_sd = ceudev->sd;
+ struct v4l2_pix_format_mplane *pix = &v4l2_fmt->fmt.pix_mp;
+ struct v4l2_subdev *v4l2_sd = ceu_sd->v4l2_sd;
+ struct v4l2_subdev_pad_config pad_cfg;
+ struct v4l2_subdev_state pad_state = {
+ .pads = &pad_cfg,
+ };
+ const struct ceu_fmt *ceu_fmt;
+ u32 mbus_code_old;
+ u32 mbus_code;
+ int ret;
+
+ /*
+ * Set the format on the sensor subdevice: the bus format used to produce
+ * the memory format is selected according to the YUV component ordering,
+ * falling back to the one chosen at initialization time.
+ */
+ struct v4l2_subdev_format sd_format = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ };
+
+ mbus_code_old = ceu_sd->mbus_fmt.mbus_code;
+
+ switch (pix->pixelformat) {
+ case V4L2_PIX_FMT_YUYV:
+ mbus_code = MEDIA_BUS_FMT_YUYV8_2X8;
+ break;
+ case V4L2_PIX_FMT_UYVY:
+ mbus_code = MEDIA_BUS_FMT_UYVY8_2X8;
+ break;
+ case V4L2_PIX_FMT_YVYU:
+ mbus_code = MEDIA_BUS_FMT_YVYU8_2X8;
+ break;
+ case V4L2_PIX_FMT_VYUY:
+ mbus_code = MEDIA_BUS_FMT_VYUY8_2X8;
+ break;
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ mbus_code = ceu_sd->mbus_fmt.mbus_code;
+ break;
+
+ default:
+ pix->pixelformat = V4L2_PIX_FMT_NV16;
+ mbus_code = ceu_sd->mbus_fmt.mbus_code;
+ break;
+ }
+
+ ceu_fmt = get_ceu_fmt_from_fourcc(pix->pixelformat);
+
+ /* CFSZR requires height and width to be 4-pixel aligned. */
+ v4l_bound_align_image(&pix->width, 2, CEU_MAX_WIDTH, 4,
+ &pix->height, 4, CEU_MAX_HEIGHT, 4, 0);
+
+ v4l2_fill_mbus_format_mplane(&sd_format.format, pix);
+
+ /*
+ * Try with the mbus_code matching YUYV components ordering first,
+ * if that one fails, fallback to default selected at initialization
+ * time.
+ */
+ sd_format.format.code = mbus_code;
+ ret = v4l2_subdev_call(v4l2_sd, pad, set_fmt, &pad_state, &sd_format);
+ if (ret) {
+ if (ret == -EINVAL) {
+ /* fallback */
+ sd_format.format.code = mbus_code_old;
+ ret = v4l2_subdev_call(v4l2_sd, pad, set_fmt,
+ &pad_state, &sd_format);
+ }
+
+ if (ret)
+ return ret;
+ }
+
+ /* Apply size returned by sensor as the CEU can't scale. */
+ v4l2_fill_pix_format_mplane(pix, &sd_format.format);
+
+ /* Calculate per-plane sizes based on image format. */
+ ceu_calc_plane_sizes(ceudev, ceu_fmt, pix);
+
+ /* Report to caller the configured mbus format. */
+ *sd_mbus_code = sd_format.format.code;
+
+ return 0;
+}
+
+/*
+ * ceu_try_fmt() - Wrapper for __ceu_try_fmt; discard configured mbus_fmt
+ */
+static int ceu_try_fmt(struct ceu_device *ceudev, struct v4l2_format *v4l2_fmt)
+{
+ u32 mbus_code;
+
+ return __ceu_try_fmt(ceudev, v4l2_fmt, &mbus_code);
+}
+
+/*
+ * ceu_set_fmt() - Apply the supplied format to both sensor and CEU
+ */
+static int ceu_set_fmt(struct ceu_device *ceudev, struct v4l2_format *v4l2_fmt)
+{
+ struct ceu_subdev *ceu_sd = ceudev->sd;
+ struct v4l2_subdev *v4l2_sd = ceu_sd->v4l2_sd;
+ u32 mbus_code;
+ int ret;
+
+ /*
+ * Set format on sensor sub device: bus format used to produce memory
+ * format is selected at initialization time.
+ */
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ ret = __ceu_try_fmt(ceudev, v4l2_fmt, &mbus_code);
+ if (ret)
+ return ret;
+
+ format.format.code = mbus_code;
+ v4l2_fill_mbus_format_mplane(&format.format, &v4l2_fmt->fmt.pix_mp);
+ ret = v4l2_subdev_call(v4l2_sd, pad, set_fmt, NULL, &format);
+ if (ret)
+ return ret;
+
+ ceudev->v4l2_pix = v4l2_fmt->fmt.pix_mp;
+ ceudev->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+/*
+ * ceu_set_default_fmt() - Apply default NV16 memory output format with VGA
+ * sizes.
+ */
+static int ceu_set_default_fmt(struct ceu_device *ceudev)
+{
+ int ret;
+
+ struct v4l2_format v4l2_fmt = {
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ .fmt.pix_mp = {
+ .width = VGA_WIDTH,
+ .height = VGA_HEIGHT,
+ .field = V4L2_FIELD_NONE,
+ .pixelformat = V4L2_PIX_FMT_NV16,
+ .num_planes = 2,
+ .plane_fmt = {
+ [0] = {
+ .sizeimage = VGA_WIDTH * VGA_HEIGHT * 2,
+ .bytesperline = VGA_WIDTH * 2,
+ },
+ [1] = {
+ .sizeimage = VGA_WIDTH * VGA_HEIGHT * 2,
+ .bytesperline = VGA_WIDTH * 2,
+ },
+ },
+ },
+ };
+
+ ret = ceu_try_fmt(ceudev, &v4l2_fmt);
+ if (ret)
+ return ret;
+
+ ceudev->v4l2_pix = v4l2_fmt.fmt.pix_mp;
+ ceudev->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+/*
+ * ceu_init_mbus_fmt() - Query sensor for supported formats and initialize
+ * CEU media bus format used to produce memory formats.
+ *
+ * Find out if the sensor can produce a permutation of the 8-bit YUYV bus
+ * format. From a single 8-bit YUYV bus format the CEU can produce several
+ * memory output formats:
+ * - NV[12|21|16|61] through image fetch mode;
+ * - YUYV422 if sensor provides YUYV422
+ *
+ * TODO: Other YUYV422 permutations through data fetch sync mode and DTARY
+ * TODO: Binary data (eg. JPEG) and raw formats through data fetch sync mode
+ */
+static int ceu_init_mbus_fmt(struct ceu_device *ceudev)
+{
+ struct ceu_subdev *ceu_sd = ceudev->sd;
+ struct ceu_mbus_fmt *mbus_fmt = &ceu_sd->mbus_fmt;
+ struct v4l2_subdev *v4l2_sd = ceu_sd->v4l2_sd;
+ bool yuyv_bus_fmt = false;
+
+ struct v4l2_subdev_mbus_code_enum sd_mbus_fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .index = 0,
+ };
+
+ /* Find out if the sensor can produce any permutation of 8-bit YUYV422. */
+ while (!yuyv_bus_fmt &&
+ !v4l2_subdev_call(v4l2_sd, pad, enum_mbus_code,
+ NULL, &sd_mbus_fmt)) {
+ switch (sd_mbus_fmt.code) {
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ yuyv_bus_fmt = true;
+ break;
+ default:
+ /*
+ * Only 8-bit YUYV bus formats are supported at the moment;
+ *
+ * TODO: add support for binary formats (data sync
+ * fetch mode).
+ */
+ break;
+ }
+
+ sd_mbus_fmt.index++;
+ }
+
+ if (!yuyv_bus_fmt)
+ return -ENXIO;
+
+ /*
+ * Save the first encountered YUYV format as "mbus_fmt" and use it
+ * to output all planar YUV422 and YUV420 (NV*) formats to memory as
+ * well as for data sync fetch mode (YUYV, YVYU, etc.).
+ */
+ mbus_fmt->mbus_code = sd_mbus_fmt.code;
+ mbus_fmt->bps = 8;
+
+ /* Annotate the selected bus format components ordering. */
+ switch (sd_mbus_fmt.code) {
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ mbus_fmt->fmt_order = CEU_CAMCR_DTARY_8_YUYV;
+ mbus_fmt->fmt_order_swap = CEU_CAMCR_DTARY_8_YVYU;
+ mbus_fmt->swapped = false;
+ mbus_fmt->bpp = 16;
+ break;
+
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ mbus_fmt->fmt_order = CEU_CAMCR_DTARY_8_YVYU;
+ mbus_fmt->fmt_order_swap = CEU_CAMCR_DTARY_8_YUYV;
+ mbus_fmt->swapped = true;
+ mbus_fmt->bpp = 16;
+ break;
+
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ mbus_fmt->fmt_order = CEU_CAMCR_DTARY_8_UYVY;
+ mbus_fmt->fmt_order_swap = CEU_CAMCR_DTARY_8_VYUY;
+ mbus_fmt->swapped = false;
+ mbus_fmt->bpp = 16;
+ break;
+
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ mbus_fmt->fmt_order = CEU_CAMCR_DTARY_8_VYUY;
+ mbus_fmt->fmt_order_swap = CEU_CAMCR_DTARY_8_UYVY;
+ mbus_fmt->swapped = true;
+ mbus_fmt->bpp = 16;
+ break;
+ }
+
+ return 0;
+}
+
+/* --- Runtime PM Handlers --- */
+
+/*
+ * ceu_runtime_resume() - soft-reset the interface and turn sensor power on.
+ */
+static int __maybe_unused ceu_runtime_resume(struct device *dev)
+{
+ struct ceu_device *ceudev = dev_get_drvdata(dev);
+ struct v4l2_subdev *v4l2_sd = ceudev->sd->v4l2_sd;
+
+ v4l2_subdev_call(v4l2_sd, core, s_power, 1);
+
+ ceu_soft_reset(ceudev);
+
+ return 0;
+}
+
+/*
+ * ceu_runtime_suspend() - disable capture and interrupts and soft-reset.
+ * Turn sensor power off.
+ */
+static int __maybe_unused ceu_runtime_suspend(struct device *dev)
+{
+ struct ceu_device *ceudev = dev_get_drvdata(dev);
+ struct v4l2_subdev *v4l2_sd = ceudev->sd->v4l2_sd;
+
+ v4l2_subdev_call(v4l2_sd, core, s_power, 0);
+
+ ceu_write(ceudev, CEU_CEIER, 0);
+ ceu_soft_reset(ceudev);
+
+ return 0;
+}
+
+/* --- File Operations --- */
+
+static int ceu_open(struct file *file)
+{
+ struct ceu_device *ceudev = video_drvdata(file);
+ int ret;
+
+ ret = v4l2_fh_open(file);
+ if (ret)
+ return ret;
+
+ mutex_lock(&ceudev->mlock);
+ /* Causes a soft reset and sensor power-on on first open. */
+ ret = pm_runtime_resume_and_get(ceudev->dev);
+ mutex_unlock(&ceudev->mlock);
+
+ return ret;
+}
+
+static int ceu_release(struct file *file)
+{
+ struct ceu_device *ceudev = video_drvdata(file);
+
+ vb2_fop_release(file);
+
+ mutex_lock(&ceudev->mlock);
+ /* Causes a soft reset and sensor power-down on last close. */
+ pm_runtime_put(ceudev->dev);
+ mutex_unlock(&ceudev->mlock);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations ceu_fops = {
+ .owner = THIS_MODULE,
+ .open = ceu_open,
+ .release = ceu_release,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+ .poll = vb2_fop_poll,
+};
+
+/* --- Video Device IOCTLs --- */
+
+static int ceu_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct ceu_device *ceudev = video_drvdata(file);
+
+ strscpy(cap->card, "Renesas CEU", sizeof(cap->card));
+ strscpy(cap->driver, DRIVER_NAME, sizeof(cap->driver));
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:renesas-ceu-%s", dev_name(ceudev->dev));
+
+ return 0;
+}
+
+static int ceu_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ const struct ceu_fmt *fmt;
+
+ if (f->index >= ARRAY_SIZE(ceu_fmt_list))
+ return -EINVAL;
+
+ fmt = &ceu_fmt_list[f->index];
+ f->pixelformat = fmt->fourcc;
+
+ return 0;
+}
+
+static int ceu_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct ceu_device *ceudev = video_drvdata(file);
+
+ return ceu_try_fmt(ceudev, f);
+}
+
+static int ceu_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct ceu_device *ceudev = video_drvdata(file);
+
+ if (vb2_is_streaming(&ceudev->vb2_vq))
+ return -EBUSY;
+
+ return ceu_set_fmt(ceudev, f);
+}
+
+static int ceu_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct ceu_device *ceudev = video_drvdata(file);
+
+ f->fmt.pix_mp = ceudev->v4l2_pix;
+
+ return 0;
+}
+
+static int ceu_enum_input(struct file *file, void *priv,
+ struct v4l2_input *inp)
+{
+ struct ceu_device *ceudev = video_drvdata(file);
+ struct ceu_subdev *ceusd;
+
+ if (inp->index >= ceudev->num_sd)
+ return -EINVAL;
+
+ ceusd = ceudev->subdevs[inp->index];
+
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ inp->std = 0;
+ snprintf(inp->name, sizeof(inp->name), "Camera%u: %s",
+ inp->index, ceusd->v4l2_sd->name);
+
+ return 0;
+}
+
+static int ceu_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ struct ceu_device *ceudev = video_drvdata(file);
+
+ *i = ceudev->sd_index;
+
+ return 0;
+}
+
+static int ceu_s_input(struct file *file, void *priv, unsigned int i)
+{
+ struct ceu_device *ceudev = video_drvdata(file);
+ struct ceu_subdev *ceu_sd_old;
+ int ret;
+
+ if (i >= ceudev->num_sd)
+ return -EINVAL;
+
+ if (vb2_is_streaming(&ceudev->vb2_vq))
+ return -EBUSY;
+
+ if (i == ceudev->sd_index)
+ return 0;
+
+ ceu_sd_old = ceudev->sd;
+ ceudev->sd = ceudev->subdevs[i];
+
+ /*
+ * Make sure we can generate output image formats and apply
+ * default one.
+ */
+ ret = ceu_init_mbus_fmt(ceudev);
+ if (ret) {
+ ceudev->sd = ceu_sd_old;
+ return -EINVAL;
+ }
+
+ ret = ceu_set_default_fmt(ceudev);
+ if (ret) {
+ ceudev->sd = ceu_sd_old;
+ return -EINVAL;
+ }
+
+ /* Now that we're sure we can use the sensor, power off the old one. */
+ v4l2_subdev_call(ceu_sd_old->v4l2_sd, core, s_power, 0);
+ v4l2_subdev_call(ceudev->sd->v4l2_sd, core, s_power, 1);
+
+ ceudev->sd_index = i;
+
+ return 0;
+}
+
+static int ceu_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+ struct ceu_device *ceudev = video_drvdata(file);
+
+ return v4l2_g_parm_cap(video_devdata(file), ceudev->sd->v4l2_sd, a);
+}
+
+static int ceu_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+ struct ceu_device *ceudev = video_drvdata(file);
+
+ return v4l2_s_parm_cap(video_devdata(file), ceudev->sd->v4l2_sd, a);
+}
+
+static int ceu_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct ceu_device *ceudev = video_drvdata(file);
+ struct ceu_subdev *ceu_sd = ceudev->sd;
+ const struct ceu_fmt *ceu_fmt;
+ struct v4l2_subdev *v4l2_sd = ceu_sd->v4l2_sd;
+ int ret;
+
+ struct v4l2_subdev_frame_size_enum fse = {
+ .code = ceu_sd->mbus_fmt.mbus_code,
+ .index = fsize->index,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ /* Just check if user supplied pixel format is supported. */
+ ceu_fmt = get_ceu_fmt_from_fourcc(fsize->pixel_format);
+ if (!ceu_fmt)
+ return -EINVAL;
+
+ ret = v4l2_subdev_call(v4l2_sd, pad, enum_frame_size,
+ NULL, &fse);
+ if (ret)
+ return ret;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete.width = CEU_W_MAX(fse.max_width);
+ fsize->discrete.height = CEU_H_MAX(fse.max_height);
+
+ return 0;
+}
+
+static int ceu_enum_frameintervals(struct file *file, void *fh,
+ struct v4l2_frmivalenum *fival)
+{
+ struct ceu_device *ceudev = video_drvdata(file);
+ struct ceu_subdev *ceu_sd = ceudev->sd;
+ const struct ceu_fmt *ceu_fmt;
+ struct v4l2_subdev *v4l2_sd = ceu_sd->v4l2_sd;
+ int ret;
+
+ struct v4l2_subdev_frame_interval_enum fie = {
+ .code = ceu_sd->mbus_fmt.mbus_code,
+ .index = fival->index,
+ .width = fival->width,
+ .height = fival->height,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ /* Just check if user supplied pixel format is supported. */
+ ceu_fmt = get_ceu_fmt_from_fourcc(fival->pixel_format);
+ if (!ceu_fmt)
+ return -EINVAL;
+
+ ret = v4l2_subdev_call(v4l2_sd, pad, enum_frame_interval, NULL,
+ &fie);
+ if (ret)
+ return ret;
+
+ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ fival->discrete = fie.interval;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops ceu_ioctl_ops = {
+ .vidioc_querycap = ceu_querycap,
+
+ .vidioc_enum_fmt_vid_cap = ceu_enum_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap_mplane = ceu_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap_mplane = ceu_s_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap_mplane = ceu_g_fmt_vid_cap,
+
+ .vidioc_enum_input = ceu_enum_input,
+ .vidioc_g_input = ceu_g_input,
+ .vidioc_s_input = ceu_s_input,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_g_parm = ceu_g_parm,
+ .vidioc_s_parm = ceu_s_parm,
+ .vidioc_enum_framesizes = ceu_enum_framesizes,
+ .vidioc_enum_frameintervals = ceu_enum_frameintervals,
+
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/*
+ * ceu_vdev_release() - release the CEU device memory when the last
+ * reference to the video device is closed
+ */
+static void ceu_vdev_release(struct video_device *vdev)
+{
+ struct ceu_device *ceudev = video_get_drvdata(vdev);
+
+ kfree(ceudev);
+}
+
+static int ceu_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *v4l2_sd,
+ struct v4l2_async_connection *asd)
+{
+ struct v4l2_device *v4l2_dev = notifier->v4l2_dev;
+ struct ceu_device *ceudev = v4l2_to_ceu(v4l2_dev);
+ struct ceu_subdev *ceu_sd = to_ceu_subdev(asd);
+
+ ceu_sd->v4l2_sd = v4l2_sd;
+ ceudev->num_sd++;
+
+ return 0;
+}
+
+static int ceu_notify_complete(struct v4l2_async_notifier *notifier)
+{
+ struct v4l2_device *v4l2_dev = notifier->v4l2_dev;
+ struct ceu_device *ceudev = v4l2_to_ceu(v4l2_dev);
+ struct video_device *vdev = &ceudev->vdev;
+ struct vb2_queue *q = &ceudev->vb2_vq;
+ struct v4l2_subdev *v4l2_sd;
+ int ret;
+
+ /* Initialize vb2 queue. */
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ q->io_modes = VB2_MMAP | VB2_DMABUF;
+ q->drv_priv = ceudev;
+ q->ops = &ceu_vb2_ops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct ceu_buffer);
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 2;
+ q->lock = &ceudev->mlock;
+ q->dev = ceudev->v4l2_dev.dev;
+
+ ret = vb2_queue_init(q);
+ if (ret)
+ return ret;
+
+ /*
+ * If no sensor has been selected as primary yet, pick the first one
+ * and use it to initialize the CEU formats.
+ */
+ if (!ceudev->sd) {
+ ceudev->sd = ceudev->subdevs[0];
+ ceudev->sd_index = 0;
+ }
+
+ v4l2_sd = ceudev->sd->v4l2_sd;
+
+ ret = ceu_init_mbus_fmt(ceudev);
+ if (ret)
+ return ret;
+
+ ret = ceu_set_default_fmt(ceudev);
+ if (ret)
+ return ret;
+
+ /* Register the video device. */
+ strscpy(vdev->name, DRIVER_NAME, sizeof(vdev->name));
+ vdev->v4l2_dev = v4l2_dev;
+ vdev->lock = &ceudev->mlock;
+ vdev->queue = &ceudev->vb2_vq;
+ vdev->ctrl_handler = v4l2_sd->ctrl_handler;
+ vdev->fops = &ceu_fops;
+ vdev->ioctl_ops = &ceu_ioctl_ops;
+ vdev->release = ceu_vdev_release;
+ vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+ V4L2_CAP_STREAMING;
+ video_set_drvdata(vdev, ceudev);
+
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
+ if (ret < 0) {
+ v4l2_err(vdev->v4l2_dev,
+ "video_register_device failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_async_notifier_operations ceu_notify_ops = {
+ .bound = ceu_notify_bound,
+ .complete = ceu_notify_complete,
+};
+
+/*
+ * ceu_init_async_subdevs() - Initialize CEU subdevices and async_subdevs in
+ * ceu device. Both DT and platform data parsing use
+ * this routine.
+ *
+ * Returns 0 for success, -ENOMEM for failure.
+ */
+static int ceu_init_async_subdevs(struct ceu_device *ceudev, unsigned int n_sd)
+{
+ /* Reserve memory for 'n_sd' ceu_subdev descriptors. */
+ ceudev->subdevs = devm_kcalloc(ceudev->dev, n_sd,
+ sizeof(*ceudev->subdevs), GFP_KERNEL);
+ if (!ceudev->subdevs)
+ return -ENOMEM;
+
+ ceudev->sd = NULL;
+ ceudev->sd_index = 0;
+ ceudev->num_sd = 0;
+
+ return 0;
+}
+
+/*
+ * ceu_parse_platform_data() - Initialize async_subdevices using platform
+ * device provided data.
+ */
+static int ceu_parse_platform_data(struct ceu_device *ceudev,
+ const struct ceu_platform_data *pdata)
+{
+ const struct ceu_async_subdev *async_sd;
+ struct ceu_subdev *ceu_sd;
+ unsigned int i;
+ int ret;
+
+ if (pdata->num_subdevs == 0)
+ return -ENODEV;
+
+ ret = ceu_init_async_subdevs(ceudev, pdata->num_subdevs);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < pdata->num_subdevs; i++) {
+
+ /* Setup the ceu subdevice and the async subdevice. */
+ async_sd = &pdata->subdevs[i];
+ ceu_sd = v4l2_async_nf_add_i2c(&ceudev->notifier,
+ async_sd->i2c_adapter_id,
+ async_sd->i2c_address,
+ struct ceu_subdev);
+ if (IS_ERR(ceu_sd)) {
+ v4l2_async_nf_cleanup(&ceudev->notifier);
+ return PTR_ERR(ceu_sd);
+ }
+ ceu_sd->mbus_flags = async_sd->flags;
+ ceudev->subdevs[i] = ceu_sd;
+ }
+
+ return pdata->num_subdevs;
+}
+
+/*
+ * ceu_parse_dt() - Initialize async_subdevs parsing device tree graph.
+ */
+static int ceu_parse_dt(struct ceu_device *ceudev)
+{
+ struct device_node *of = ceudev->dev->of_node;
+ struct device_node *ep;
+ struct ceu_subdev *ceu_sd;
+ unsigned int i;
+ int num_ep;
+ int ret;
+
+ num_ep = of_graph_get_endpoint_count(of);
+ if (!num_ep)
+ return -ENODEV;
+
+ ret = ceu_init_async_subdevs(ceudev, num_ep);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_ep; i++) {
+ struct v4l2_fwnode_endpoint fw_ep = {
+ .bus_type = V4L2_MBUS_PARALLEL,
+ .bus = {
+ .parallel = {
+ .flags = V4L2_MBUS_HSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_VSYNC_ACTIVE_HIGH,
+ .bus_width = 8,
+ },
+ },
+ };
+
+ ep = of_graph_get_endpoint_by_regs(of, 0, i);
+ if (!ep) {
+ dev_err(ceudev->dev,
+ "No subdevice connected on endpoint %u.\n", i);
+ ret = -ENODEV;
+ goto error_cleanup;
+ }
+
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep), &fw_ep);
+ if (ret) {
+ dev_err(ceudev->dev,
+ "Unable to parse endpoint #%u: %d.\n", i, ret);
+ goto error_cleanup;
+ }
+
+ /* Setup the ceu subdevice and the async subdevice. */
+ ceu_sd = v4l2_async_nf_add_fwnode_remote(&ceudev->notifier,
+ of_fwnode_handle(ep),
+ struct ceu_subdev);
+ if (IS_ERR(ceu_sd)) {
+ ret = PTR_ERR(ceu_sd);
+ goto error_cleanup;
+ }
+ ceu_sd->mbus_flags = fw_ep.bus.parallel.flags;
+ ceudev->subdevs[i] = ceu_sd;
+
+ of_node_put(ep);
+ }
+
+ return num_ep;
+
+error_cleanup:
+ v4l2_async_nf_cleanup(&ceudev->notifier);
+ of_node_put(ep);
+ return ret;
+}
+
+/*
+ * struct ceu_data - Platform specific CEU data
+ * @irq_mask: CETCR mask with all interrupt sources enabled. The mask differs
+ * between SH4 and RZ platforms.
+ */
+struct ceu_data {
+ u32 irq_mask;
+};
+
+static const struct ceu_data ceu_data_sh4 = {
+ .irq_mask = CEU_CETCR_ALL_IRQS_SH4,
+};
+
+#if IS_ENABLED(CONFIG_OF)
+static const struct ceu_data ceu_data_rz = {
+ .irq_mask = CEU_CETCR_ALL_IRQS_RZ,
+};
+
+static const struct of_device_id ceu_of_match[] = {
+ { .compatible = "renesas,r7s72100-ceu", .data = &ceu_data_rz },
+ { .compatible = "renesas,r8a7740-ceu", .data = &ceu_data_rz },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ceu_of_match);
+#endif
+
+static int ceu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct ceu_data *ceu_data;
+ struct ceu_device *ceudev;
+ unsigned int irq;
+ int num_subdevs;
+ int ret;
+
+ ceudev = kzalloc(sizeof(*ceudev), GFP_KERNEL);
+ if (!ceudev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ceudev);
+ ceudev->dev = dev;
+
+ INIT_LIST_HEAD(&ceudev->capture);
+ spin_lock_init(&ceudev->lock);
+ mutex_init(&ceudev->mlock);
+
+ ceudev->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ceudev->base)) {
+ ret = PTR_ERR(ceudev->base);
+ goto error_free_ceudev;
+ }
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto error_free_ceudev;
+ irq = ret;
+
+ ret = devm_request_irq(dev, irq, ceu_irq,
+ 0, dev_name(dev), ceudev);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to request CEU interrupt.\n");
+ goto error_free_ceudev;
+ }
+
+ pm_runtime_enable(dev);
+
+ ret = v4l2_device_register(dev, &ceudev->v4l2_dev);
+ if (ret)
+ goto error_pm_disable;
+
+ v4l2_async_nf_init(&ceudev->notifier, &ceudev->v4l2_dev);
+
+ if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
+ ceu_data = of_device_get_match_data(dev);
+ num_subdevs = ceu_parse_dt(ceudev);
+ } else if (dev->platform_data) {
+ /* Assume SH4 if booting with platform data. */
+ ceu_data = &ceu_data_sh4;
+ num_subdevs = ceu_parse_platform_data(ceudev,
+ dev->platform_data);
+ } else {
+ num_subdevs = -EINVAL;
+ }
+
+ if (num_subdevs < 0) {
+ ret = num_subdevs;
+ goto error_v4l2_unregister;
+ }
+ ceudev->irq_mask = ceu_data->irq_mask;
+
+ ceudev->notifier.v4l2_dev = &ceudev->v4l2_dev;
+ ceudev->notifier.ops = &ceu_notify_ops;
+ ret = v4l2_async_nf_register(&ceudev->notifier);
+ if (ret)
+ goto error_cleanup;
+
+ dev_info(dev, "Renesas Capture Engine Unit %s\n", dev_name(dev));
+
+ return 0;
+
+error_cleanup:
+ v4l2_async_nf_cleanup(&ceudev->notifier);
+error_v4l2_unregister:
+ v4l2_device_unregister(&ceudev->v4l2_dev);
+error_pm_disable:
+ pm_runtime_disable(dev);
+error_free_ceudev:
+ kfree(ceudev);
+
+ return ret;
+}
+
+static void ceu_remove(struct platform_device *pdev)
+{
+ struct ceu_device *ceudev = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(ceudev->dev);
+
+ v4l2_async_nf_unregister(&ceudev->notifier);
+
+ v4l2_async_nf_cleanup(&ceudev->notifier);
+
+ v4l2_device_unregister(&ceudev->v4l2_dev);
+
+ video_unregister_device(&ceudev->vdev);
+}
+
+static const struct dev_pm_ops ceu_pm_ops = {
+ SET_RUNTIME_PM_OPS(ceu_runtime_suspend,
+ ceu_runtime_resume,
+ NULL)
+};
+
+static struct platform_driver ceu_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .pm = &ceu_pm_ops,
+ .of_match_table = of_match_ptr(ceu_of_match),
+ },
+ .probe = ceu_probe,
+ .remove_new = ceu_remove,
+};
+
+module_platform_driver(ceu_driver);
+
+MODULE_DESCRIPTION("Renesas CEU camera driver");
+MODULE_AUTHOR("Jacopo Mondi <jacopo+renesas@jmondi.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/renesas/rzg2l-cru/Kconfig b/drivers/media/platform/renesas/rzg2l-cru/Kconfig
new file mode 100644
index 0000000000..b39818c1f0
--- /dev/null
+++ b/drivers/media/platform/renesas/rzg2l-cru/Kconfig
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config VIDEO_RZG2L_CSI2
+ tristate "RZ/G2L MIPI CSI-2 Receiver"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV && OF
+ select MEDIA_CONTROLLER
+ select RESET_CONTROLLER
+ select V4L2_FWNODE
+ select VIDEO_V4L2_SUBDEV_API
+ help
+ Support for the Renesas RZ/G2L (and similar SoCs) MIPI CSI-2
+ receiver driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rzg2l-csi2.
+
+config VIDEO_RZG2L_CRU
+ tristate "RZ/G2L Camera Receiving Unit (CRU) Driver"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV && OF
+ select MEDIA_CONTROLLER
+ select V4L2_FWNODE
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEO_V4L2_SUBDEV_API
+ help
+ Support for the Renesas RZ/G2L (and similar SoCs) Camera Receiving
+ Unit (CRU) driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rzg2l-cru.
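+
+# Example configuration fragment (illustrative only); building both options
+# as modules produces the rzg2l-csi2 and rzg2l-cru modules named above:
+#   CONFIG_VIDEO_RZG2L_CSI2=m
+#   CONFIG_VIDEO_RZG2L_CRU=m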
diff --git a/drivers/media/platform/renesas/rzg2l-cru/Makefile b/drivers/media/platform/renesas/rzg2l-cru/Makefile
new file mode 100644
index 0000000000..c4db263287
--- /dev/null
+++ b/drivers/media/platform/renesas/rzg2l-cru/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_VIDEO_RZG2L_CSI2) += rzg2l-csi2.o
+
+rzg2l-cru-objs = rzg2l-core.o rzg2l-ip.o rzg2l-video.o
+obj-$(CONFIG_VIDEO_RZG2L_CRU) += rzg2l-cru.o
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c
new file mode 100644
index 0000000000..280efd2a81
--- /dev/null
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for Renesas RZ/G2L CRU
+ *
+ * Copyright (C) 2022 Renesas Electronics Corp.
+ *
+ * Based on Renesas R-Car VIN
+ * Copyright (C) 2011-2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Cogent Embedded, Inc., <source@cogentembedded.com>
+ * Copyright (C) 2008 Magnus Damm
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mc.h>
+
+#include "rzg2l-cru.h"
+
+static inline struct rzg2l_cru_dev *notifier_to_cru(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct rzg2l_cru_dev, notifier);
+}
+
+static const struct media_device_ops rzg2l_cru_media_ops = {
+ .link_notify = v4l2_pipeline_link_notify,
+};
+
+/* -----------------------------------------------------------------------------
+ * Group async notifier
+ */
+
+static int rzg2l_cru_group_notify_complete(struct v4l2_async_notifier *notifier)
+{
+ struct rzg2l_cru_dev *cru = notifier_to_cru(notifier);
+ struct media_entity *source, *sink;
+ int ret;
+
+ ret = rzg2l_cru_ip_subdev_register(cru);
+ if (ret)
+ return ret;
+
+ ret = v4l2_device_register_subdev_nodes(&cru->v4l2_dev);
+ if (ret) {
+ dev_err(cru->dev, "Failed to register subdev nodes\n");
+ return ret;
+ }
+
+ ret = rzg2l_cru_video_register(cru);
+ if (ret)
+ return ret;
+
+ /*
+ * The CRU can be connected to either a CSI-2 or a parallel device.
+ * For now only CSI-2 is supported.
+ *
+ * Create the media link CSI-2 <-> CRU IP.
+ */
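+ /*
+ * Resulting topology, as a sketch of the two pad links created below
+ * (pad numbers follow the media_create_pad_link() calls):
+ *
+ *   CSI-2 subdev [src pad 1] -> [sink pad 0] CRU IP [src pad 1] -> [pad 0] video device
+ */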
+ source = &cru->csi.subdev->entity;
+ sink = &cru->ip.subdev.entity;
+ ret = media_create_pad_link(source, 1, sink, 0,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret) {
+ dev_err(cru->dev, "Error creating link from %s to %s\n",
+ source->name, sink->name);
+ return ret;
+ }
+ cru->csi.channel = 0;
+ cru->ip.remote = cru->csi.subdev;
+
+ /* Create media device link between CRU IP <-> CRU OUTPUT */
+ source = &cru->ip.subdev.entity;
+ sink = &cru->vdev.entity;
+ ret = media_create_pad_link(source, 1, sink, 0,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret) {
+ dev_err(cru->dev, "Error creating link from %s to %s\n",
+ source->name, sink->name);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void rzg2l_cru_group_notify_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_connection *asd)
+{
+ struct rzg2l_cru_dev *cru = notifier_to_cru(notifier);
+
+ rzg2l_cru_ip_subdev_unregister(cru);
+
+ mutex_lock(&cru->mdev_lock);
+
+ if (cru->csi.asd == asd) {
+ cru->csi.subdev = NULL;
+ dev_dbg(cru->dev, "Unbind CSI-2 %s\n", subdev->name);
+ }
+
+ mutex_unlock(&cru->mdev_lock);
+}
+
+static int rzg2l_cru_group_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_connection *asd)
+{
+ struct rzg2l_cru_dev *cru = notifier_to_cru(notifier);
+
+ mutex_lock(&cru->mdev_lock);
+
+ if (cru->csi.asd == asd) {
+ cru->csi.subdev = subdev;
+ dev_dbg(cru->dev, "Bound CSI-2 %s\n", subdev->name);
+ }
+
+ mutex_unlock(&cru->mdev_lock);
+
+ return 0;
+}
+
+static const struct v4l2_async_notifier_operations rzg2l_cru_async_ops = {
+ .bound = rzg2l_cru_group_notify_bound,
+ .unbind = rzg2l_cru_group_notify_unbind,
+ .complete = rzg2l_cru_group_notify_complete,
+};
+
+static int rzg2l_cru_mc_parse_of(struct rzg2l_cru_dev *cru)
+{
+ struct v4l2_fwnode_endpoint vep = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY,
+ };
+ struct fwnode_handle *ep, *fwnode;
+ struct v4l2_async_connection *asd;
+ int ret;
+
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(cru->dev), 1, 0, 0);
+ if (!ep)
+ return 0;
+
+ fwnode = fwnode_graph_get_remote_endpoint(ep);
+ ret = v4l2_fwnode_endpoint_parse(ep, &vep);
+ fwnode_handle_put(ep);
+ if (ret) {
+ dev_err(cru->dev, "Failed to parse %pOF\n", to_of_node(fwnode));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!of_device_is_available(to_of_node(fwnode))) {
+ dev_dbg(cru->dev, "OF device %pOF disabled, ignoring\n",
+ to_of_node(fwnode));
+ ret = -ENOTCONN;
+ goto out;
+ }
+
+ asd = v4l2_async_nf_add_fwnode(&cru->notifier, fwnode,
+ struct v4l2_async_connection);
+ if (IS_ERR(asd)) {
+ ret = PTR_ERR(asd);
+ goto out;
+ }
+
+ cru->csi.asd = asd;
+
+ dev_dbg(cru->dev, "Added OF device %pOF to slot %u\n",
+ to_of_node(fwnode), vep.base.id);
+out:
+ fwnode_handle_put(fwnode);
+
+ return ret;
+}
+
+static int rzg2l_cru_mc_parse_of_graph(struct rzg2l_cru_dev *cru)
+{
+ int ret;
+
+ v4l2_async_nf_init(&cru->notifier, &cru->v4l2_dev);
+
+ ret = rzg2l_cru_mc_parse_of(cru);
+ if (ret)
+ return ret;
+
+ cru->notifier.ops = &rzg2l_cru_async_ops;
+
+ if (list_empty(&cru->notifier.waiting_list))
+ return 0;
+
+ ret = v4l2_async_nf_register(&cru->notifier);
+ if (ret < 0) {
+ dev_err(cru->dev, "Notifier registration failed\n");
+ v4l2_async_nf_cleanup(&cru->notifier);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rzg2l_cru_media_init(struct rzg2l_cru_dev *cru)
+{
+ struct media_device *mdev = NULL;
+ const struct of_device_id *match;
+ int ret;
+
+ cru->pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&cru->vdev.entity, 1, &cru->pad);
+ if (ret)
+ return ret;
+
+ mutex_init(&cru->mdev_lock);
+ mdev = &cru->mdev;
+ mdev->dev = cru->dev;
+ mdev->ops = &rzg2l_cru_media_ops;
+
+ match = of_match_node(cru->dev->driver->of_match_table,
+ cru->dev->of_node);
+
+ strscpy(mdev->driver_name, KBUILD_MODNAME, sizeof(mdev->driver_name));
+ strscpy(mdev->model, match->compatible, sizeof(mdev->model));
+
+ cru->v4l2_dev.mdev = &cru->mdev;
+
+ media_device_init(mdev);
+
+ ret = rzg2l_cru_mc_parse_of_graph(cru);
+ if (ret) {
+ mutex_lock(&cru->mdev_lock);
+ cru->v4l2_dev.mdev = NULL;
+ mutex_unlock(&cru->mdev_lock);
+ }
+
+ return 0;
+}
+
+static int rzg2l_cru_probe(struct platform_device *pdev)
+{
+ struct rzg2l_cru_dev *cru;
+ int ret;
+
+ cru = devm_kzalloc(&pdev->dev, sizeof(*cru), GFP_KERNEL);
+ if (!cru)
+ return -ENOMEM;
+
+ cru->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(cru->base))
+ return PTR_ERR(cru->base);
+
+ cru->presetn = devm_reset_control_get_shared(&pdev->dev, "presetn");
+ if (IS_ERR(cru->presetn))
+ return dev_err_probe(&pdev->dev, PTR_ERR(cru->presetn),
+ "Failed to get cpg presetn\n");
+
+ cru->aresetn = devm_reset_control_get_exclusive(&pdev->dev, "aresetn");
+ if (IS_ERR(cru->aresetn))
+ return dev_err_probe(&pdev->dev, PTR_ERR(cru->aresetn),
+ "Failed to get cpg aresetn\n");
+
+ cru->vclk = devm_clk_get(&pdev->dev, "video");
+ if (IS_ERR(cru->vclk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(cru->vclk),
+ "Failed to get video clock\n");
+
+ cru->dev = &pdev->dev;
+ cru->info = of_device_get_match_data(&pdev->dev);
+
+ cru->image_conv_irq = platform_get_irq(pdev, 0);
+ if (cru->image_conv_irq < 0)
+ return cru->image_conv_irq;
+
+ platform_set_drvdata(pdev, cru);
+
+ ret = rzg2l_cru_dma_register(cru);
+ if (ret)
+ return ret;
+
+ cru->num_buf = RZG2L_CRU_HW_BUFFER_DEFAULT;
+ pm_suspend_ignore_children(&pdev->dev, true);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = rzg2l_cru_media_init(cru);
+ if (ret)
+ goto error_dma_unregister;
+
+ return 0;
+
+error_dma_unregister:
+ rzg2l_cru_dma_unregister(cru);
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static void rzg2l_cru_remove(struct platform_device *pdev)
+{
+ struct rzg2l_cru_dev *cru = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+
+ v4l2_async_nf_unregister(&cru->notifier);
+ v4l2_async_nf_cleanup(&cru->notifier);
+
+ rzg2l_cru_video_unregister(cru);
+ media_device_cleanup(&cru->mdev);
+ mutex_destroy(&cru->mdev_lock);
+
+ rzg2l_cru_dma_unregister(cru);
+}
+
+static const struct of_device_id rzg2l_cru_of_id_table[] = {
+ { .compatible = "renesas,rzg2l-cru", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rzg2l_cru_of_id_table);
+
+static struct platform_driver rzg2l_cru_driver = {
+ .driver = {
+ .name = "rzg2l-cru",
+ .of_match_table = rzg2l_cru_of_id_table,
+ },
+ .probe = rzg2l_cru_probe,
+ .remove_new = rzg2l_cru_remove,
+};
+
+module_platform_driver(rzg2l_cru_driver);
+
+MODULE_AUTHOR("Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>");
+MODULE_DESCRIPTION("Renesas RZ/G2L CRU driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h
new file mode 100644
index 0000000000..811603f18a
--- /dev/null
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Driver for Renesas RZ/G2L CRU
+ *
+ * Copyright (C) 2022 Renesas Electronics Corp.
+ */
+
+#ifndef __RZG2L_CRU__
+#define __RZG2L_CRU__
+
+#include <linux/reset.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/videobuf2-v4l2.h>
+
+/* Number of HW buffers */
+#define RZG2L_CRU_HW_BUFFER_MAX 8
+#define RZG2L_CRU_HW_BUFFER_DEFAULT 3
+
+/* Address alignment mask for HW buffers */
+#define RZG2L_CRU_HW_BUFFER_MASK 0x1ff
+
+/* Maximum number of CSI2 virtual channels */
+#define RZG2L_CRU_CSI2_VCHANNEL 4
+
+#define RZG2L_CRU_MIN_INPUT_WIDTH 320
+#define RZG2L_CRU_MAX_INPUT_WIDTH 2800
+#define RZG2L_CRU_MIN_INPUT_HEIGHT 240
+#define RZG2L_CRU_MAX_INPUT_HEIGHT 4095
+
+/**
+ * enum rzg2l_cru_dma_state - DMA states
+ * @RZG2L_CRU_DMA_STOPPED: No operation in progress
+ * @RZG2L_CRU_DMA_STARTING: Capture starting up
+ * @RZG2L_CRU_DMA_RUNNING: Operation in progress, buffers are being captured
+ * @RZG2L_CRU_DMA_STOPPING: Stopping operation
+ */
+enum rzg2l_cru_dma_state {
+ RZG2L_CRU_DMA_STOPPED = 0,
+ RZG2L_CRU_DMA_STARTING,
+ RZG2L_CRU_DMA_RUNNING,
+ RZG2L_CRU_DMA_STOPPING,
+};
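+
+/*
+ * Typical flow, as a sketch inferred from the descriptions above:
+ * STOPPED -> STARTING when capture is started, STARTING -> RUNNING once
+ * buffers are being captured, RUNNING -> STOPPING when capture is being
+ * stopped, then back to STOPPED.
+ */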
+
+struct rzg2l_cru_csi {
+ struct v4l2_async_connection *asd;
+ struct v4l2_subdev *subdev;
+ u32 channel;
+};
+
+struct rzg2l_cru_ip {
+ struct v4l2_subdev subdev;
+ struct media_pad pads[2];
+ struct v4l2_async_notifier notifier;
+ struct v4l2_subdev *remote;
+};
+
+/**
+ * struct rzg2l_cru_dev - Renesas CRU device structure
+ * @dev: (OF) device
+ * @base: device I/O register space remapped to virtual memory
+ * @info: info about CRU instance
+ *
+ * @presetn: CRU_PRESETN reset line
+ * @aresetn: CRU_ARESETN reset line
+ *
+ * @vclk: CRU Main clock
+ *
+ * @image_conv_irq: Holds image conversion interrupt number
+ *
+ * @vdev: V4L2 video device associated with CRU
+ * @v4l2_dev: V4L2 device
+ * @num_buf: Holds the current number of buffers enabled
+ * @notifier: V4L2 asynchronous subdevs notifier
+ *
+ * @ip: Image processing subdev info
+ * @csi: CSI info
+ * @mdev: media device
+ * @mdev_lock: protects the count, notifier and csi members
+ * @pad: media pad for the video device entity
+ *
+ * @lock: protects @queue
+ * @queue: vb2 buffers queue
+ * @scratch: cpu address for scratch buffer
+ * @scratch_phys: physical address of the scratch buffer
+ *
+ * @qlock: protects @queue_buf, @buf_list, @sequence and @state
+ * @queue_buf: Keeps track of buffers given to HW slots
+ * @buf_list: list of queued buffers
+ * @sequence: V4L2 buffers sequence number
+ * @state: keeps track of operation state
+ *
+ * @format: active V4L2 pixel format
+ */
+struct rzg2l_cru_dev {
+ struct device *dev;
+ void __iomem *base;
+ const struct rzg2l_cru_info *info;
+
+ struct reset_control *presetn;
+ struct reset_control *aresetn;
+
+ struct clk *vclk;
+
+ int image_conv_irq;
+
+ struct video_device vdev;
+ struct v4l2_device v4l2_dev;
+ u8 num_buf;
+
+ struct v4l2_async_notifier notifier;
+
+ struct rzg2l_cru_ip ip;
+ struct rzg2l_cru_csi csi;
+ struct media_device mdev;
+ struct mutex mdev_lock;
+ struct media_pad pad;
+
+ struct mutex lock;
+ struct vb2_queue queue;
+ void *scratch;
+ dma_addr_t scratch_phys;
+
+ spinlock_t qlock;
+ struct vb2_v4l2_buffer *queue_buf[RZG2L_CRU_HW_BUFFER_MAX];
+ struct list_head buf_list;
+ unsigned int sequence;
+ enum rzg2l_cru_dma_state state;
+
+ struct v4l2_pix_format format;
+};
+
+void rzg2l_cru_vclk_unprepare(struct rzg2l_cru_dev *cru);
+int rzg2l_cru_vclk_prepare(struct rzg2l_cru_dev *cru);
+
+int rzg2l_cru_start_image_processing(struct rzg2l_cru_dev *cru);
+void rzg2l_cru_stop_image_processing(struct rzg2l_cru_dev *cru);
+
+int rzg2l_cru_dma_register(struct rzg2l_cru_dev *cru);
+void rzg2l_cru_dma_unregister(struct rzg2l_cru_dev *cru);
+
+int rzg2l_cru_video_register(struct rzg2l_cru_dev *cru);
+void rzg2l_cru_video_unregister(struct rzg2l_cru_dev *cru);
+
+const struct v4l2_format_info *rzg2l_cru_format_from_pixel(u32 format);
+
+int rzg2l_cru_ip_subdev_register(struct rzg2l_cru_dev *cru);
+void rzg2l_cru_ip_subdev_unregister(struct rzg2l_cru_dev *cru);
+struct v4l2_mbus_framefmt *rzg2l_cru_ip_get_src_fmt(struct rzg2l_cru_dev *cru);
+
+#endif
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
new file mode 100644
index 0000000000..ad2bd71037
--- /dev/null
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
@@ -0,0 +1,872 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Renesas RZ/G2L MIPI CSI-2 Receiver
+ *
+ * Copyright (C) 2022 Renesas Electronics Corp.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/sys_soc.h>
+#include <linux/units.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-subdev.h>
+
+/* LINK registers */
+/* Module Configuration Register */
+#define CSI2nMCG 0x0
+#define CSI2nMCG_SDLN GENMASK(11, 8)
+
+/* Module Control Register 0 */
+#define CSI2nMCT0 0x10
+#define CSI2nMCT0_VDLN(x) ((x) << 0)
+
+/* Module Control Register 2 */
+#define CSI2nMCT2 0x18
+#define CSI2nMCT2_FRRSKW(x) ((x) << 16)
+#define CSI2nMCT2_FRRCLK(x) ((x) << 0)
+
+/* Module Control Register 3 */
+#define CSI2nMCT3 0x1c
+#define CSI2nMCT3_RXEN BIT(0)
+
+/* Reset Control Register */
+#define CSI2nRTCT 0x28
+#define CSI2nRTCT_VSRST BIT(0)
+
+/* Reset Status Register */
+#define CSI2nRTST 0x2c
+#define CSI2nRTST_VSRSTS BIT(0)
+
+/* Receive Data Type Enable Low Register */
+#define CSI2nDTEL 0x60
+
+/* Receive Data Type Enable High Register */
+#define CSI2nDTEH 0x64
+
+/* DPHY registers */
+/* D-PHY Control Register 0 */
+#define CSIDPHYCTRL0 0x400
+#define CSIDPHYCTRL0_EN_LDO1200 BIT(1)
+#define CSIDPHYCTRL0_EN_BGR BIT(0)
+
+/* D-PHY Timing Register 0 */
+#define CSIDPHYTIM0 0x404
+#define CSIDPHYTIM0_TCLK_MISS(x) ((x) << 24)
+#define CSIDPHYTIM0_T_INIT(x) ((x) << 0)
+
+/* D-PHY Timing Register 1 */
+#define CSIDPHYTIM1 0x408
+#define CSIDPHYTIM1_THS_PREPARE(x) ((x) << 24)
+#define CSIDPHYTIM1_TCLK_PREPARE(x) ((x) << 16)
+#define CSIDPHYTIM1_THS_SETTLE(x) ((x) << 8)
+#define CSIDPHYTIM1_TCLK_SETTLE(x) ((x) << 0)
+
+/* D-PHY Skew Adjustment Function */
+#define CSIDPHYSKW0 0x460
+#define CSIDPHYSKW0_UTIL_DL0_SKW_ADJ(x) ((x) & 0x3)
+#define CSIDPHYSKW0_UTIL_DL1_SKW_ADJ(x) (((x) & 0x3) << 4)
+#define CSIDPHYSKW0_UTIL_DL2_SKW_ADJ(x) (((x) & 0x3) << 8)
+#define CSIDPHYSKW0_UTIL_DL3_SKW_ADJ(x) (((x) & 0x3) << 12)
+#define CSIDPHYSKW0_DEFAULT_SKW (CSIDPHYSKW0_UTIL_DL0_SKW_ADJ(1) | \
+ CSIDPHYSKW0_UTIL_DL1_SKW_ADJ(1) | \
+ CSIDPHYSKW0_UTIL_DL2_SKW_ADJ(1) | \
+ CSIDPHYSKW0_UTIL_DL3_SKW_ADJ(1))
+
+#define VSRSTS_RETRIES 20
+
+#define RZG2L_CSI2_MIN_WIDTH 320
+#define RZG2L_CSI2_MIN_HEIGHT 240
+#define RZG2L_CSI2_MAX_WIDTH 2800
+#define RZG2L_CSI2_MAX_HEIGHT 4095
+
+#define RZG2L_CSI2_DEFAULT_WIDTH RZG2L_CSI2_MIN_WIDTH
+#define RZG2L_CSI2_DEFAULT_HEIGHT RZG2L_CSI2_MIN_HEIGHT
+#define RZG2L_CSI2_DEFAULT_FMT MEDIA_BUS_FMT_UYVY8_1X16
+
+enum rzg2l_csi2_pads {
+ RZG2L_CSI2_SINK = 0,
+ RZG2L_CSI2_SOURCE,
+ NR_OF_RZG2L_CSI2_PAD,
+};
+
+struct rzg2l_csi2 {
+ struct device *dev;
+ void __iomem *base;
+ struct reset_control *presetn;
+ struct reset_control *cmn_rstb;
+ struct clk *sysclk;
+ unsigned long vclk_rate;
+
+ struct v4l2_subdev subdev;
+ struct media_pad pads[NR_OF_RZG2L_CSI2_PAD];
+
+ struct v4l2_async_notifier notifier;
+ struct v4l2_subdev *remote_source;
+
+ unsigned short lanes;
+ unsigned long hsfreq;
+
+ bool dphy_enabled;
+};
+
+struct rzg2l_csi2_timings {
+ u32 t_init;
+ u32 tclk_miss;
+ u32 tclk_settle;
+ u32 ths_settle;
+ u32 tclk_prepare;
+ u32 ths_prepare;
+ u32 max_hsfreq;
+};
+
+static const struct rzg2l_csi2_timings rzg2l_csi2_global_timings[] = {
+ {
+ .max_hsfreq = 80,
+ .t_init = 79801,
+ .tclk_miss = 4,
+ .tclk_settle = 23,
+ .ths_settle = 31,
+ .tclk_prepare = 10,
+ .ths_prepare = 19,
+ },
+ {
+ .max_hsfreq = 125,
+ .t_init = 79801,
+ .tclk_miss = 4,
+ .tclk_settle = 23,
+ .ths_settle = 28,
+ .tclk_prepare = 10,
+ .ths_prepare = 19,
+ },
+ {
+ .max_hsfreq = 250,
+ .t_init = 79801,
+ .tclk_miss = 4,
+ .tclk_settle = 23,
+ .ths_settle = 22,
+ .tclk_prepare = 10,
+ .ths_prepare = 16,
+ },
+ {
+ .max_hsfreq = 360,
+ .t_init = 79801,
+ .tclk_miss = 4,
+ .tclk_settle = 18,
+ .ths_settle = 19,
+ .tclk_prepare = 10,
+ .ths_prepare = 10,
+ },
+ {
+ .max_hsfreq = 1500,
+ .t_init = 79801,
+ .tclk_miss = 4,
+ .tclk_settle = 18,
+ .ths_settle = 18,
+ .tclk_prepare = 10,
+ .ths_prepare = 10,
+ },
+};
+
+struct rzg2l_csi2_format {
+ u32 code;
+ unsigned int datatype;
+ unsigned int bpp;
+};
+
+static const struct rzg2l_csi2_format rzg2l_csi2_formats[] = {
+ { .code = MEDIA_BUS_FMT_UYVY8_1X16, .datatype = 0x1e, .bpp = 16 },
+};
+
+static inline struct rzg2l_csi2 *sd_to_csi2(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct rzg2l_csi2, subdev);
+}
+
+static const struct rzg2l_csi2_format *rzg2l_csi2_code_to_fmt(unsigned int code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(rzg2l_csi2_formats); i++)
+ if (rzg2l_csi2_formats[i].code == code)
+ return &rzg2l_csi2_formats[i];
+
+ return NULL;
+}
+
+static inline struct rzg2l_csi2 *notifier_to_csi2(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct rzg2l_csi2, notifier);
+}
+
+static u32 rzg2l_csi2_read(struct rzg2l_csi2 *csi2, unsigned int reg)
+{
+ return ioread32(csi2->base + reg);
+}
+
+static void rzg2l_csi2_write(struct rzg2l_csi2 *csi2, unsigned int reg,
+ u32 data)
+{
+ iowrite32(data, csi2->base + reg);
+}
+
+static void rzg2l_csi2_set(struct rzg2l_csi2 *csi2, unsigned int reg, u32 set)
+{
+ rzg2l_csi2_write(csi2, reg, rzg2l_csi2_read(csi2, reg) | set);
+}
+
+static void rzg2l_csi2_clr(struct rzg2l_csi2 *csi2, unsigned int reg, u32 clr)
+{
+ rzg2l_csi2_write(csi2, reg, rzg2l_csi2_read(csi2, reg) & ~clr);
+}
+
+static int rzg2l_csi2_calc_mbps(struct rzg2l_csi2 *csi2)
+{
+ struct v4l2_subdev *source = csi2->remote_source;
+ const struct rzg2l_csi2_format *format;
+ const struct v4l2_mbus_framefmt *fmt;
+ struct v4l2_subdev_state *state;
+ struct v4l2_ctrl *ctrl;
+ u64 mbps;
+
+ /* Read the pixel rate control from remote. */
+ ctrl = v4l2_ctrl_find(source->ctrl_handler, V4L2_CID_PIXEL_RATE);
+ if (!ctrl) {
+ dev_err(csi2->dev, "no pixel rate control in subdev %s\n",
+ source->name);
+ return -EINVAL;
+ }
+
+ state = v4l2_subdev_lock_and_get_active_state(&csi2->subdev);
+ fmt = v4l2_subdev_get_pad_format(&csi2->subdev, state, RZG2L_CSI2_SINK);
+ format = rzg2l_csi2_code_to_fmt(fmt->code);
+ v4l2_subdev_unlock_state(state);
+
+ /*
+ * Calculate hsfreq in Mbps
+ * hsfreq = (pixel_rate * bits_per_sample) / number_of_lanes
+ */
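+ /*
+ * Worked example (illustrative numbers, not taken from any datasheet):
+ * a 74250000 Hz pixel rate with a 16 bpp bus format on 2 data lanes
+ * gives (74250000 * 16) / (2 * 1000000) = 594 Mbps per lane.
+ */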
+ mbps = v4l2_ctrl_g_ctrl_int64(ctrl) * format->bpp;
+ do_div(mbps, csi2->lanes * 1000000);
+
+ return mbps;
+}
+
+/* -----------------------------------------------------------------------------
+ * DPHY setting
+ */
+
+static int rzg2l_csi2_dphy_disable(struct rzg2l_csi2 *csi2)
+{
+ int ret;
+
+ /* Reset the CRU (D-PHY) */
+ ret = reset_control_assert(csi2->cmn_rstb);
+ if (ret)
+ return ret;
+
+ /* Stop the D-PHY clock */
+ clk_disable_unprepare(csi2->sysclk);
+
+ /* Clear the EN_LDO1200 bit */
+ rzg2l_csi2_clr(csi2, CSIDPHYCTRL0, CSIDPHYCTRL0_EN_LDO1200);
+
+ /* Clear the EN_BGR bit */
+ rzg2l_csi2_clr(csi2, CSIDPHYCTRL0, CSIDPHYCTRL0_EN_BGR);
+
+ csi2->dphy_enabled = false;
+
+ return 0;
+}
+
+static int rzg2l_csi2_dphy_enable(struct rzg2l_csi2 *csi2)
+{
+ const struct rzg2l_csi2_timings *dphy_timing;
+ u32 dphytim0, dphytim1;
+ unsigned int i;
+ int mbps;
+ int ret;
+
+ mbps = rzg2l_csi2_calc_mbps(csi2);
+ if (mbps < 0)
+ return mbps;
+
+ csi2->hsfreq = mbps;
+
+ /* Set DPHY timing parameters */
+ for (i = 0; i < ARRAY_SIZE(rzg2l_csi2_global_timings); ++i) {
+ dphy_timing = &rzg2l_csi2_global_timings[i];
+
+ if (csi2->hsfreq <= dphy_timing->max_hsfreq)
+ break;
+ }
+
+ if (i >= ARRAY_SIZE(rzg2l_csi2_global_timings))
+ return -EINVAL;
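+
+ /*
+ * For instance (illustrative value only), an hsfreq of 594 Mbps falls
+ * through the 80/125/250/360 Mbps entries and selects the 1500 Mbps
+ * timing set from rzg2l_csi2_global_timings above.
+ */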
+
+ /* Set D-PHY timing parameters */
+ dphytim0 = CSIDPHYTIM0_TCLK_MISS(dphy_timing->tclk_miss) |
+ CSIDPHYTIM0_T_INIT(dphy_timing->t_init);
+ dphytim1 = CSIDPHYTIM1_THS_PREPARE(dphy_timing->ths_prepare) |
+ CSIDPHYTIM1_TCLK_PREPARE(dphy_timing->tclk_prepare) |
+ CSIDPHYTIM1_THS_SETTLE(dphy_timing->ths_settle) |
+ CSIDPHYTIM1_TCLK_SETTLE(dphy_timing->tclk_settle);
+ rzg2l_csi2_write(csi2, CSIDPHYTIM0, dphytim0);
+ rzg2l_csi2_write(csi2, CSIDPHYTIM1, dphytim1);
+
+ /* Apply the default D-PHY skew adjustment settings */
+ rzg2l_csi2_write(csi2, CSIDPHYSKW0, CSIDPHYSKW0_DEFAULT_SKW);
+
+ /* Set the EN_BGR bit */
+ rzg2l_csi2_set(csi2, CSIDPHYCTRL0, CSIDPHYCTRL0_EN_BGR);
+
+ /* Wait 20us for the setting to stabilize */
+ usleep_range(20, 40);
+
+ /* Enable D-PHY power control 1 */
+ rzg2l_csi2_set(csi2, CSIDPHYCTRL0, CSIDPHYCTRL0_EN_LDO1200);
+
+ /* Wait 10us for the setting to stabilize */
+ usleep_range(10, 20);
+
+ /* Start supplying the internal clock for the D-PHY block */
+ ret = clk_prepare_enable(csi2->sysclk);
+ if (ret) {
+ rzg2l_csi2_dphy_disable(csi2);
+ return ret;
+ }
+
+ csi2->dphy_enabled = true;
+
+ return 0;
+}
+
+static int rzg2l_csi2_dphy_setting(struct v4l2_subdev *sd, bool on)
+{
+ struct rzg2l_csi2 *csi2 = sd_to_csi2(sd);
+
+ if (on)
+ return rzg2l_csi2_dphy_enable(csi2);
+
+ return rzg2l_csi2_dphy_disable(csi2);
+}
+
+static void rzg2l_csi2_mipi_link_enable(struct rzg2l_csi2 *csi2)
+{
+ unsigned long vclk_rate = csi2->vclk_rate / HZ_PER_MHZ;
+ u32 frrskw, frrclk, frrskw_coeff, frrclk_coeff;
+
+ /* Select data lanes */
+ rzg2l_csi2_write(csi2, CSI2nMCT0, CSI2nMCT0_VDLN(csi2->lanes));
+
+ frrskw_coeff = 3 * vclk_rate * 8;
+ frrclk_coeff = frrskw_coeff / 2;
+ frrskw = DIV_ROUND_UP(frrskw_coeff, csi2->hsfreq);
+ frrclk = DIV_ROUND_UP(frrclk_coeff, csi2->hsfreq);
+ rzg2l_csi2_write(csi2, CSI2nMCT2, CSI2nMCT2_FRRSKW(frrskw) |
+ CSI2nMCT2_FRRCLK(frrclk));
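+
+ /*
+ * Sketch of the arithmetic above (assumed example values): with
+ * vclk_rate at 148 MHz and hsfreq at 594 Mbps, frrskw_coeff is
+ * 3 * 148 * 8 = 3552 and frrclk_coeff is 1776, so frrskw becomes
+ * DIV_ROUND_UP(3552, 594) = 6 and frrclk DIV_ROUND_UP(1776, 594) = 3.
+ */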
+
+ /*
+ * Select data type.
+ * FS, FE, LS, LE, Generic Short Packet Codes 1 to 8,
+ * Generic Long Packet Data Types 1 to 4, YUV422 8-bit,
+ * RGB565, RGB888, RAW8 to RAW20, User-defined 8-bit
+ * data types 1 to 8
+ */
+ rzg2l_csi2_write(csi2, CSI2nDTEL, 0xf778ff0f);
+ rzg2l_csi2_write(csi2, CSI2nDTEH, 0x00ffff1f);
+
+ /* Enable LINK reception */
+ rzg2l_csi2_write(csi2, CSI2nMCT3, CSI2nMCT3_RXEN);
+}
+
+static void rzg2l_csi2_mipi_link_disable(struct rzg2l_csi2 *csi2)
+{
+ unsigned int timeout = VSRSTS_RETRIES;
+
+ /* Stop LINK reception */
+ rzg2l_csi2_clr(csi2, CSI2nMCT3, CSI2nMCT3_RXEN);
+
+ /* Request a software reset of the LINK Video Pixel Interface */
+ rzg2l_csi2_write(csi2, CSI2nRTCT, CSI2nRTCT_VSRST);
+
+ /* Make sure CSI2nRTST.VSRSTS bit is cleared */
+ while (--timeout) {
+ if (!(rzg2l_csi2_read(csi2, CSI2nRTST) & CSI2nRTST_VSRSTS))
+ break;
+ usleep_range(100, 200);
+ }
+
+ if (!timeout)
+ dev_err(csi2->dev, "Clearing CSI2nRTST.VSRSTS timed out\n");
+}
+
+static int rzg2l_csi2_mipi_link_setting(struct v4l2_subdev *sd, bool on)
+{
+ struct rzg2l_csi2 *csi2 = sd_to_csi2(sd);
+
+ if (on)
+ rzg2l_csi2_mipi_link_enable(csi2);
+ else
+ rzg2l_csi2_mipi_link_disable(csi2);
+
+ return 0;
+}
+
+static int rzg2l_csi2_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct rzg2l_csi2 *csi2 = sd_to_csi2(sd);
+ int s_stream_ret = 0;
+ int ret;
+
+ if (enable) {
+ ret = pm_runtime_resume_and_get(csi2->dev);
+ if (ret)
+ return ret;
+
+ ret = rzg2l_csi2_mipi_link_setting(sd, 1);
+ if (ret)
+ goto err_pm_put;
+
+ ret = reset_control_deassert(csi2->cmn_rstb);
+ if (ret)
+ goto err_mipi_link_disable;
+ }
+
+ ret = v4l2_subdev_call(csi2->remote_source, video, s_stream, enable);
+ if (ret)
+ s_stream_ret = ret;
+
+ if (enable && ret)
+ goto err_assert_rstb;
+
+ if (!enable) {
+ ret = rzg2l_csi2_dphy_setting(sd, 0);
+ if (ret && !s_stream_ret)
+ s_stream_ret = ret;
+ ret = rzg2l_csi2_mipi_link_setting(sd, 0);
+ if (ret && !s_stream_ret)
+ s_stream_ret = ret;
+
+ pm_runtime_put_sync(csi2->dev);
+ }
+
+ return s_stream_ret;
+
+err_assert_rstb:
+ reset_control_assert(csi2->cmn_rstb);
+err_mipi_link_disable:
+ rzg2l_csi2_mipi_link_setting(sd, 0);
+err_pm_put:
+ pm_runtime_put_sync(csi2->dev);
+ return ret;
+}
+
+static int rzg2l_csi2_pre_streamon(struct v4l2_subdev *sd, u32 flags)
+{
+ return rzg2l_csi2_dphy_setting(sd, 1);
+}
+
+static int rzg2l_csi2_post_streamoff(struct v4l2_subdev *sd)
+{
+ struct rzg2l_csi2 *csi2 = sd_to_csi2(sd);
+
+ /*
+ * In the ideal case the D-PHY is disabled in the s_stream(0) callback,
+ * as described in the HW manual. The code below only runs when
+ * pre_streamon succeeded but s_stream(1) failed later on, so undo the
+ * D-PHY setup here in post_streamoff.
+ */
+ if (csi2->dphy_enabled)
+ return rzg2l_csi2_dphy_setting(sd, 0);
+
+ return 0;
+}
+
+static int rzg2l_csi2_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct v4l2_mbus_framefmt *src_format;
+ struct v4l2_mbus_framefmt *sink_format;
+
+ src_format = v4l2_subdev_get_pad_format(sd, state, RZG2L_CSI2_SOURCE);
+ if (fmt->pad == RZG2L_CSI2_SOURCE) {
+ fmt->format = *src_format;
+ return 0;
+ }
+
+ sink_format = v4l2_subdev_get_pad_format(sd, state, RZG2L_CSI2_SINK);
+
+ if (!rzg2l_csi2_code_to_fmt(fmt->format.code))
+ sink_format->code = rzg2l_csi2_formats[0].code;
+ else
+ sink_format->code = fmt->format.code;
+
+ sink_format->field = V4L2_FIELD_NONE;
+ sink_format->colorspace = fmt->format.colorspace;
+ sink_format->xfer_func = fmt->format.xfer_func;
+ sink_format->ycbcr_enc = fmt->format.ycbcr_enc;
+ sink_format->quantization = fmt->format.quantization;
+ sink_format->width = clamp_t(u32, fmt->format.width,
+ RZG2L_CSI2_MIN_WIDTH, RZG2L_CSI2_MAX_WIDTH);
+ sink_format->height = clamp_t(u32, fmt->format.height,
+ RZG2L_CSI2_MIN_HEIGHT, RZG2L_CSI2_MAX_HEIGHT);
+ fmt->format = *sink_format;
+
+ /* propagate format to source pad */
+ *src_format = *sink_format;
+
+ return 0;
+}
+
+static int rzg2l_csi2_init_config(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
+{
+ struct v4l2_subdev_format fmt = { .pad = RZG2L_CSI2_SINK, };
+
+ fmt.format.width = RZG2L_CSI2_DEFAULT_WIDTH;
+ fmt.format.height = RZG2L_CSI2_DEFAULT_HEIGHT;
+ fmt.format.field = V4L2_FIELD_NONE;
+ fmt.format.code = RZG2L_CSI2_DEFAULT_FMT;
+ fmt.format.colorspace = V4L2_COLORSPACE_SRGB;
+ fmt.format.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ fmt.format.quantization = V4L2_QUANTIZATION_DEFAULT;
+ fmt.format.xfer_func = V4L2_XFER_FUNC_DEFAULT;
+
+ return rzg2l_csi2_set_format(sd, sd_state, &fmt);
+}
+
+static int rzg2l_csi2_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= ARRAY_SIZE(rzg2l_csi2_formats))
+ return -EINVAL;
+
+ code->code = rzg2l_csi2_formats[code->index].code;
+
+ return 0;
+}
+
+static int rzg2l_csi2_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index != 0)
+ return -EINVAL;
+
+ fse->min_width = RZG2L_CSI2_MIN_WIDTH;
+ fse->min_height = RZG2L_CSI2_MIN_HEIGHT;
+ fse->max_width = RZG2L_CSI2_MAX_WIDTH;
+ fse->max_height = RZG2L_CSI2_MAX_HEIGHT;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops rzg2l_csi2_video_ops = {
+ .s_stream = rzg2l_csi2_s_stream,
+ .pre_streamon = rzg2l_csi2_pre_streamon,
+ .post_streamoff = rzg2l_csi2_post_streamoff,
+};
+
+static const struct v4l2_subdev_pad_ops rzg2l_csi2_pad_ops = {
+ .enum_mbus_code = rzg2l_csi2_enum_mbus_code,
+ .init_cfg = rzg2l_csi2_init_config,
+ .enum_frame_size = rzg2l_csi2_enum_frame_size,
+ .set_fmt = rzg2l_csi2_set_format,
+ .get_fmt = v4l2_subdev_get_fmt,
+};
+
+static const struct v4l2_subdev_ops rzg2l_csi2_subdev_ops = {
+ .video = &rzg2l_csi2_video_ops,
+ .pad = &rzg2l_csi2_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * Async handling and registration of subdevices and links.
+ */
+
+static int rzg2l_csi2_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_connection *asd)
+{
+ struct rzg2l_csi2 *csi2 = notifier_to_csi2(notifier);
+
+ csi2->remote_source = subdev;
+
+ dev_dbg(csi2->dev, "Bound subdev: %s pad\n", subdev->name);
+
+ return media_create_pad_link(&subdev->entity, RZG2L_CSI2_SINK,
+ &csi2->subdev.entity, 0,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+}
+
+static void rzg2l_csi2_notify_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_connection *asd)
+{
+ struct rzg2l_csi2 *csi2 = notifier_to_csi2(notifier);
+
+ csi2->remote_source = NULL;
+
+ dev_dbg(csi2->dev, "Unbind subdev %s\n", subdev->name);
+}
+
+static const struct v4l2_async_notifier_operations rzg2l_csi2_notify_ops = {
+ .bound = rzg2l_csi2_notify_bound,
+ .unbind = rzg2l_csi2_notify_unbind,
+};
+
+static int rzg2l_csi2_parse_v4l2(struct rzg2l_csi2 *csi2,
+ struct v4l2_fwnode_endpoint *vep)
+{
+ /* Only port 0 endpoint 0 is valid. */
+ if (vep->base.port || vep->base.id)
+ return -ENOTCONN;
+
+ csi2->lanes = vep->bus.mipi_csi2.num_data_lanes;
+
+ return 0;
+}
+
+static int rzg2l_csi2_parse_dt(struct rzg2l_csi2 *csi2)
+{
+ struct v4l2_fwnode_endpoint v4l2_ep = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY
+ };
+ struct v4l2_async_connection *asd;
+ struct fwnode_handle *fwnode;
+ struct fwnode_handle *ep;
+ int ret;
+
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(csi2->dev), 0, 0, 0);
+ if (!ep) {
+ dev_err(csi2->dev, "Not connected to subdevice\n");
+ return -EINVAL;
+ }
+
+ ret = v4l2_fwnode_endpoint_parse(ep, &v4l2_ep);
+ if (ret) {
+ dev_err(csi2->dev, "Could not parse v4l2 endpoint\n");
+ fwnode_handle_put(ep);
+ return -EINVAL;
+ }
+
+ ret = rzg2l_csi2_parse_v4l2(csi2, &v4l2_ep);
+ if (ret) {
+ fwnode_handle_put(ep);
+ return ret;
+ }
+
+ fwnode = fwnode_graph_get_remote_endpoint(ep);
+ fwnode_handle_put(ep);
+
+ v4l2_async_subdev_nf_init(&csi2->notifier, &csi2->subdev);
+ csi2->notifier.ops = &rzg2l_csi2_notify_ops;
+
+ asd = v4l2_async_nf_add_fwnode(&csi2->notifier, fwnode,
+ struct v4l2_async_connection);
+ fwnode_handle_put(fwnode);
+ if (IS_ERR(asd))
+ return PTR_ERR(asd);
+
+ ret = v4l2_async_nf_register(&csi2->notifier);
+ if (ret)
+ v4l2_async_nf_cleanup(&csi2->notifier);
+
+ return ret;
+}
+
+static int rzg2l_validate_csi2_lanes(struct rzg2l_csi2 *csi2)
+{
+ int lanes;
+ int ret;
+
+ if (csi2->lanes != 1 && csi2->lanes != 2 && csi2->lanes != 4) {
+ dev_err(csi2->dev, "Unsupported number of data-lanes: %u\n",
+ csi2->lanes);
+ return -EINVAL;
+ }
+
+ ret = pm_runtime_resume_and_get(csi2->dev);
+ if (ret)
+ return ret;
+
+ /* Check the maximum number of data lanes supported by the CSI-2 module. */
+ lanes = (rzg2l_csi2_read(csi2, CSI2nMCG) & CSI2nMCG_SDLN) >> 8;
+ if (lanes < csi2->lanes) {
+ dev_err(csi2->dev,
+ "Failed to support %d data lanes\n", csi2->lanes);
+ ret = -EINVAL;
+ }
+
+ pm_runtime_put_sync(csi2->dev);
+
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * Platform Device Driver.
+ */
+
+static const struct media_entity_operations rzg2l_csi2_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static int rzg2l_csi2_probe(struct platform_device *pdev)
+{
+ struct rzg2l_csi2 *csi2;
+ struct clk *vclk;
+ int ret;
+
+ csi2 = devm_kzalloc(&pdev->dev, sizeof(*csi2), GFP_KERNEL);
+ if (!csi2)
+ return -ENOMEM;
+
+ csi2->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(csi2->base))
+ return PTR_ERR(csi2->base);
+
+ csi2->cmn_rstb = devm_reset_control_get_exclusive(&pdev->dev, "cmn-rstb");
+ if (IS_ERR(csi2->cmn_rstb))
+ return dev_err_probe(&pdev->dev, PTR_ERR(csi2->cmn_rstb),
+ "Failed to get cpg cmn-rstb\n");
+
+ csi2->presetn = devm_reset_control_get_shared(&pdev->dev, "presetn");
+ if (IS_ERR(csi2->presetn))
+ return dev_err_probe(&pdev->dev, PTR_ERR(csi2->presetn),
+ "Failed to get cpg presetn\n");
+
+ csi2->sysclk = devm_clk_get(&pdev->dev, "system");
+ if (IS_ERR(csi2->sysclk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(csi2->sysclk),
+ "Failed to get system clk\n");
+
+ vclk = clk_get(&pdev->dev, "video");
+ if (IS_ERR(vclk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(vclk),
+ "Failed to get video clock\n");
+ csi2->vclk_rate = clk_get_rate(vclk);
+ clk_put(vclk);
+
+ csi2->dev = &pdev->dev;
+
+ platform_set_drvdata(pdev, csi2);
+
+ ret = rzg2l_csi2_parse_dt(csi2);
+ if (ret)
+ return ret;
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = rzg2l_validate_csi2_lanes(csi2);
+ if (ret)
+ goto error_pm;
+
+ csi2->subdev.dev = &pdev->dev;
+ v4l2_subdev_init(&csi2->subdev, &rzg2l_csi2_subdev_ops);
+ v4l2_set_subdevdata(&csi2->subdev, &pdev->dev);
+ snprintf(csi2->subdev.name, sizeof(csi2->subdev.name),
+ "csi-%s", dev_name(&pdev->dev));
+ csi2->subdev.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ csi2->subdev.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ csi2->subdev.entity.ops = &rzg2l_csi2_entity_ops;
+
+ csi2->pads[RZG2L_CSI2_SINK].flags = MEDIA_PAD_FL_SINK;
+ /*
+ * TODO: RZ/G2L CSI-2 supports 4 virtual channels. Virtual channels
+ * should be implemented with the streams API, which is still under
+ * development, so hardcode VC0 for now.
+ */
+ csi2->pads[RZG2L_CSI2_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&csi2->subdev.entity, 2, csi2->pads);
+ if (ret)
+ goto error_pm;
+
+ ret = v4l2_subdev_init_finalize(&csi2->subdev);
+ if (ret < 0)
+ goto error_async;
+
+ ret = v4l2_async_register_subdev(&csi2->subdev);
+ if (ret < 0)
+ goto error_subdev;
+
+ return 0;
+
+error_subdev:
+ v4l2_subdev_cleanup(&csi2->subdev);
+error_async:
+ v4l2_async_nf_unregister(&csi2->notifier);
+ v4l2_async_nf_cleanup(&csi2->notifier);
+ media_entity_cleanup(&csi2->subdev.entity);
+error_pm:
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static void rzg2l_csi2_remove(struct platform_device *pdev)
+{
+ struct rzg2l_csi2 *csi2 = platform_get_drvdata(pdev);
+
+ v4l2_async_nf_unregister(&csi2->notifier);
+ v4l2_async_nf_cleanup(&csi2->notifier);
+ v4l2_async_unregister_subdev(&csi2->subdev);
+ v4l2_subdev_cleanup(&csi2->subdev);
+ media_entity_cleanup(&csi2->subdev.entity);
+ pm_runtime_disable(&pdev->dev);
+}
+
+static int __maybe_unused rzg2l_csi2_pm_runtime_suspend(struct device *dev)
+{
+ struct rzg2l_csi2 *csi2 = dev_get_drvdata(dev);
+
+ reset_control_assert(csi2->presetn);
+
+ return 0;
+}
+
+static int __maybe_unused rzg2l_csi2_pm_runtime_resume(struct device *dev)
+{
+ struct rzg2l_csi2 *csi2 = dev_get_drvdata(dev);
+
+ return reset_control_deassert(csi2->presetn);
+}
+
+static const struct dev_pm_ops rzg2l_csi2_pm_ops = {
+ SET_RUNTIME_PM_OPS(rzg2l_csi2_pm_runtime_suspend, rzg2l_csi2_pm_runtime_resume, NULL)
+};
+
+static const struct of_device_id rzg2l_csi2_of_table[] = {
+ { .compatible = "renesas,rzg2l-csi2", },
+ { /* sentinel */ }
+};
+
+static struct platform_driver rzg2l_csi2_pdrv = {
+ .remove_new = rzg2l_csi2_remove,
+ .probe = rzg2l_csi2_probe,
+ .driver = {
+ .name = "rzg2l-csi2",
+ .of_match_table = rzg2l_csi2_of_table,
+ .pm = &rzg2l_csi2_pm_ops,
+ },
+};
+
+module_platform_driver(rzg2l_csi2_pdrv);
+
+MODULE_AUTHOR("Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>");
+MODULE_DESCRIPTION("Renesas RZ/G2L MIPI CSI2 receiver driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-ip.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-ip.c
new file mode 100644
index 0000000000..4dcd2faff5
--- /dev/null
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-ip.c
@@ -0,0 +1,255 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Renesas RZ/G2L CRU
+ *
+ * Copyright (C) 2022 Renesas Electronics Corp.
+ */
+
+#include "rzg2l-cru.h"
+
+struct rzg2l_cru_ip_format {
+ u32 code;
+ unsigned int datatype;
+ unsigned int bpp;
+};
+
+static const struct rzg2l_cru_ip_format rzg2l_cru_ip_formats[] = {
+ { .code = MEDIA_BUS_FMT_UYVY8_1X16, .datatype = 0x1e, .bpp = 16 },
+};
+
+enum rzg2l_csi2_pads {
+ RZG2L_CRU_IP_SINK = 0,
+ RZG2L_CRU_IP_SOURCE,
+};
+
+static const struct rzg2l_cru_ip_format *rzg2l_cru_ip_code_to_fmt(unsigned int code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(rzg2l_cru_ip_formats); i++)
+ if (rzg2l_cru_ip_formats[i].code == code)
+ return &rzg2l_cru_ip_formats[i];
+
+ return NULL;
+}
+
+struct v4l2_mbus_framefmt *rzg2l_cru_ip_get_src_fmt(struct rzg2l_cru_dev *cru)
+{
+ struct v4l2_subdev_state *state;
+ struct v4l2_mbus_framefmt *fmt;
+
+ state = v4l2_subdev_lock_and_get_active_state(&cru->ip.subdev);
+ fmt = v4l2_subdev_get_pad_format(&cru->ip.subdev, state, 1);
+ v4l2_subdev_unlock_state(state);
+
+ return fmt;
+}
+
+static int rzg2l_cru_ip_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct rzg2l_cru_dev *cru;
+ int s_stream_ret = 0;
+ int ret;
+
+ cru = v4l2_get_subdevdata(sd);
+
+ if (!enable) {
+ ret = v4l2_subdev_call(cru->ip.remote, video, s_stream, enable);
+ if (ret)
+ s_stream_ret = ret;
+
+ ret = v4l2_subdev_call(cru->ip.remote, video, post_streamoff);
+ if (ret == -ENOIOCTLCMD)
+ ret = 0;
+ if (ret && !s_stream_ret)
+ s_stream_ret = ret;
+ rzg2l_cru_stop_image_processing(cru);
+ } else {
+ ret = v4l2_subdev_call(cru->ip.remote, video, pre_streamon, 0);
+ if (ret == -ENOIOCTLCMD)
+ ret = 0;
+ if (ret)
+ return ret;
+
+ ret = rzg2l_cru_start_image_processing(cru);
+ if (ret) {
+ v4l2_subdev_call(cru->ip.remote, video, post_streamoff);
+ return ret;
+ }
+
+ rzg2l_cru_vclk_unprepare(cru);
+
+ ret = v4l2_subdev_call(cru->ip.remote, video, s_stream, enable);
+ if (ret == -ENOIOCTLCMD)
+ ret = 0;
+ if (!ret) {
+ ret = rzg2l_cru_vclk_prepare(cru);
+ if (!ret)
+ return 0;
+ } else {
+ /* Re-enable vclk so that the error path's s_stream(0) can disable it. */
+ if (rzg2l_cru_vclk_prepare(cru))
+ dev_err(cru->dev, "Failed to enable vclk\n");
+ }
+
+ s_stream_ret = ret;
+
+ v4l2_subdev_call(cru->ip.remote, video, post_streamoff);
+ rzg2l_cru_stop_image_processing(cru);
+ }
+
+ return s_stream_ret;
+}
+
+static int rzg2l_cru_ip_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct v4l2_mbus_framefmt *src_format;
+ struct v4l2_mbus_framefmt *sink_format;
+
+ src_format = v4l2_subdev_get_pad_format(sd, state, RZG2L_CRU_IP_SOURCE);
+ if (fmt->pad == RZG2L_CRU_IP_SOURCE) {
+ fmt->format = *src_format;
+ return 0;
+ }
+
+ sink_format = v4l2_subdev_get_pad_format(sd, state, fmt->pad);
+
+ if (!rzg2l_cru_ip_code_to_fmt(fmt->format.code))
+ sink_format->code = rzg2l_cru_ip_formats[0].code;
+ else
+ sink_format->code = fmt->format.code;
+
+ sink_format->field = V4L2_FIELD_NONE;
+ sink_format->colorspace = fmt->format.colorspace;
+ sink_format->xfer_func = fmt->format.xfer_func;
+ sink_format->ycbcr_enc = fmt->format.ycbcr_enc;
+ sink_format->quantization = fmt->format.quantization;
+ sink_format->width = clamp_t(u32, fmt->format.width,
+ RZG2L_CRU_MIN_INPUT_WIDTH, RZG2L_CRU_MAX_INPUT_WIDTH);
+ sink_format->height = clamp_t(u32, fmt->format.height,
+ RZG2L_CRU_MIN_INPUT_HEIGHT, RZG2L_CRU_MAX_INPUT_HEIGHT);
+
+ fmt->format = *sink_format;
+
+ /* propagate format to source pad */
+ *src_format = *sink_format;
+
+ return 0;
+}
+
+static int rzg2l_cru_ip_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= ARRAY_SIZE(rzg2l_cru_ip_formats))
+ return -EINVAL;
+
+ code->code = rzg2l_cru_ip_formats[code->index].code;
+ return 0;
+}
+
+static int rzg2l_cru_ip_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index != 0)
+ return -EINVAL;
+
+ if (fse->code != MEDIA_BUS_FMT_UYVY8_1X16)
+ return -EINVAL;
+
+ fse->min_width = RZG2L_CRU_MIN_INPUT_WIDTH;
+ fse->min_height = RZG2L_CRU_MIN_INPUT_HEIGHT;
+ fse->max_width = RZG2L_CRU_MAX_INPUT_WIDTH;
+ fse->max_height = RZG2L_CRU_MAX_INPUT_HEIGHT;
+
+ return 0;
+}
+
+static int rzg2l_cru_ip_init_config(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
+{
+ struct v4l2_subdev_format fmt = { .pad = RZG2L_CRU_IP_SINK, };
+
+ fmt.format.width = RZG2L_CRU_MIN_INPUT_WIDTH;
+ fmt.format.height = RZG2L_CRU_MIN_INPUT_HEIGHT;
+ fmt.format.field = V4L2_FIELD_NONE;
+ fmt.format.code = MEDIA_BUS_FMT_UYVY8_1X16;
+ fmt.format.colorspace = V4L2_COLORSPACE_SRGB;
+ fmt.format.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ fmt.format.quantization = V4L2_QUANTIZATION_DEFAULT;
+ fmt.format.xfer_func = V4L2_XFER_FUNC_DEFAULT;
+
+ return rzg2l_cru_ip_set_format(sd, sd_state, &fmt);
+}
+
+static const struct v4l2_subdev_video_ops rzg2l_cru_ip_video_ops = {
+ .s_stream = rzg2l_cru_ip_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops rzg2l_cru_ip_pad_ops = {
+ .enum_mbus_code = rzg2l_cru_ip_enum_mbus_code,
+ .enum_frame_size = rzg2l_cru_ip_enum_frame_size,
+ .init_cfg = rzg2l_cru_ip_init_config,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = rzg2l_cru_ip_set_format,
+};
+
+static const struct v4l2_subdev_ops rzg2l_cru_ip_subdev_ops = {
+ .video = &rzg2l_cru_ip_video_ops,
+ .pad = &rzg2l_cru_ip_pad_ops,
+};
+
+static const struct media_entity_operations rzg2l_cru_ip_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+int rzg2l_cru_ip_subdev_register(struct rzg2l_cru_dev *cru)
+{
+ struct rzg2l_cru_ip *ip = &cru->ip;
+ int ret;
+
+ ip->subdev.dev = cru->dev;
+ v4l2_subdev_init(&ip->subdev, &rzg2l_cru_ip_subdev_ops);
+ v4l2_set_subdevdata(&ip->subdev, cru);
+ snprintf(ip->subdev.name, sizeof(ip->subdev.name),
+ "cru-ip-%s", dev_name(cru->dev));
+ ip->subdev.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ ip->subdev.entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
+ ip->subdev.entity.ops = &rzg2l_cru_ip_entity_ops;
+
+ ip->pads[0].flags = MEDIA_PAD_FL_SINK;
+ ip->pads[1].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&ip->subdev.entity, 2, ip->pads);
+ if (ret)
+ return ret;
+
+ ret = v4l2_subdev_init_finalize(&ip->subdev);
+ if (ret < 0)
+ goto entity_cleanup;
+
+ ret = v4l2_device_register_subdev(&cru->v4l2_dev, &ip->subdev);
+ if (ret < 0)
+ goto error_subdev;
+
+ return 0;
+error_subdev:
+ v4l2_subdev_cleanup(&ip->subdev);
+entity_cleanup:
+ media_entity_cleanup(&ip->subdev.entity);
+
+ return ret;
+}
+
+void rzg2l_cru_ip_subdev_unregister(struct rzg2l_cru_dev *cru)
+{
+ struct rzg2l_cru_ip *ip = &cru->ip;
+
+ media_entity_cleanup(&ip->subdev.entity);
+ v4l2_subdev_cleanup(&ip->subdev);
+ v4l2_device_unregister_subdev(&ip->subdev);
+}
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
new file mode 100644
index 0000000000..e6eedd65b7
--- /dev/null
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
@@ -0,0 +1,1058 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for Renesas RZ/G2L CRU
+ *
+ * Copyright (C) 2022 Renesas Electronics Corp.
+ *
+ * Based on Renesas R-Car VIN
+ * Copyright (C) 2016 Renesas Electronics Corp.
+ * Copyright (C) 2011-2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Cogent Embedded, Inc., <source@cogentembedded.com>
+ * Copyright (C) 2008 Magnus Damm
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "rzg2l-cru.h"
+
+/* HW CRU Registers Definition */
+
+/* CRU Control Register */
+#define CRUnCTRL 0x0
+#define CRUnCTRL_VINSEL(x) ((x) << 0)
+
+/* CRU Interrupt Enable Register */
+#define CRUnIE 0x4
+#define CRUnIE_EFE BIT(17)
+
+/* CRU Interrupt Status Register */
+#define CRUnINTS 0x8
+#define CRUnINTS_SFS BIT(16)
+
+/* CRU Reset Register */
+#define CRUnRST 0xc
+#define CRUnRST_VRESETN BIT(0)
+
+/* Memory Bank Base Address (Lower) Register for CRU Image Data */
+#define AMnMBxADDRL(x) (0x100 + ((x) * 8))
+
+/* Memory Bank Base Address (Higher) Register for CRU Image Data */
+#define AMnMBxADDRH(x) (0x104 + ((x) * 8))
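+
+/*
+ * For example, by the two macros above memory bank 2 uses the lower
+ * address register at 0x100 + 2 * 8 = 0x110 and the higher one at
+ * 0x104 + 2 * 8 = 0x114.
+ */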
+
+/* Memory Bank Enable Register for CRU Image Data */
+#define AMnMBVALID 0x148
+#define AMnMBVALID_MBVALID(x) GENMASK(x, 0)
+
+/* Memory Bank Status Register for CRU Image Data */
+#define AMnMBS 0x14c
+#define AMnMBS_MBSTS 0x7
+
+/* AXI Master FIFO Pointer Register for CRU Image Data */
+#define AMnFIFOPNTR 0x168
+#define AMnFIFOPNTR_FIFOWPNTR GENMASK(7, 0)
+#define AMnFIFOPNTR_FIFORPNTR_Y GENMASK(23, 16)
+
+/* AXI Master Transfer Stop Register for CRU Image Data */
+#define AMnAXISTP 0x174
+#define AMnAXISTP_AXI_STOP BIT(0)
+
+/* AXI Master Transfer Stop Status Register for CRU Image Data */
+#define AMnAXISTPACK 0x178
+#define AMnAXISTPACK_AXI_STOP_ACK BIT(0)
+
+/* CRU Image Processing Enable Register */
+#define ICnEN 0x200
+#define ICnEN_ICEN BIT(0)
+
+/* CRU Image Processing Main Control Register */
+#define ICnMC 0x208
+#define ICnMC_CSCTHR BIT(5)
+#define ICnMC_INF_YUV8_422 (0x1e << 16)
+#define ICnMC_INF_USER (0x30 << 16)
+#define ICnMC_VCSEL(x) ((x) << 22)
+#define ICnMC_INF_MASK GENMASK(21, 16)
+
+/* CRU Module Status Register */
+#define ICnMS 0x254
+#define ICnMS_IA BIT(2)
+
+/* CRU Data Output Mode Register */
+#define ICnDMR 0x26c
+#define ICnDMR_YCMODE_UYVY (1 << 4)
+
+#define RZG2L_TIMEOUT_MS 100
+#define RZG2L_RETRIES 10
+
+#define RZG2L_CRU_DEFAULT_FORMAT V4L2_PIX_FMT_UYVY
+#define RZG2L_CRU_DEFAULT_WIDTH RZG2L_CRU_MIN_INPUT_WIDTH
+#define RZG2L_CRU_DEFAULT_HEIGHT RZG2L_CRU_MIN_INPUT_HEIGHT
+#define RZG2L_CRU_DEFAULT_FIELD V4L2_FIELD_NONE
+#define RZG2L_CRU_DEFAULT_COLORSPACE V4L2_COLORSPACE_SRGB
+
+struct rzg2l_cru_buffer {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+};
+
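+/*
+ * Map a vb2_v4l2_buffer back to the list head of its enclosing
+ * rzg2l_cru_buffer.
+ */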
+#define to_buf_list(vb2_buffer) \
+ (&container_of(vb2_buffer, struct rzg2l_cru_buffer, vb)->list)
+
+/* -----------------------------------------------------------------------------
+ * DMA operations
+ */
+static void rzg2l_cru_write(struct rzg2l_cru_dev *cru, u32 offset, u32 value)
+{
+ iowrite32(value, cru->base + offset);
+}
+
+static u32 rzg2l_cru_read(struct rzg2l_cru_dev *cru, u32 offset)
+{
+ return ioread32(cru->base + offset);
+}
+
+/* The qlock is taken internally; do not call with qlock held */
+static void return_unused_buffers(struct rzg2l_cru_dev *cru,
+ enum vb2_buffer_state state)
+{
+ struct rzg2l_cru_buffer *buf, *node;
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&cru->qlock, flags);
+ for (i = 0; i < cru->num_buf; i++) {
+ if (cru->queue_buf[i]) {
+ vb2_buffer_done(&cru->queue_buf[i]->vb2_buf,
+ state);
+ cru->queue_buf[i] = NULL;
+ }
+ }
+
+ list_for_each_entry_safe(buf, node, &cru->buf_list, list) {
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ list_del(&buf->list);
+ }
+ spin_unlock_irqrestore(&cru->qlock, flags);
+}
+
+static int rzg2l_cru_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct rzg2l_cru_dev *cru = vb2_get_drv_priv(vq);
+
+ /* Make sure the image size is large enough. */
+ if (*nplanes)
+ return sizes[0] < cru->format.sizeimage ? -EINVAL : 0;
+
+ *nplanes = 1;
+ sizes[0] = cru->format.sizeimage;
+
+ return 0;
+}
+
+static int rzg2l_cru_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct rzg2l_cru_dev *cru = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long size = cru->format.sizeimage;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ dev_err(cru->dev, "buffer too small (%lu < %lu)\n",
+ vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, size);
+
+ return 0;
+}
+
+static void rzg2l_cru_buffer_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct rzg2l_cru_dev *cru = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long flags;
+
+ spin_lock_irqsave(&cru->qlock, flags);
+
+ list_add_tail(to_buf_list(vbuf), &cru->buf_list);
+
+ spin_unlock_irqrestore(&cru->qlock, flags);
+}
+
+static int rzg2l_cru_mc_validate_format(struct rzg2l_cru_dev *cru,
+ struct v4l2_subdev *sd,
+ struct media_pad *pad)
+{
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ fmt.pad = pad->index;
+ if (v4l2_subdev_call_state_active(sd, pad, get_fmt, &fmt))
+ return -EPIPE;
+
+ switch (fmt.format.code) {
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ break;
+ default:
+ return -EPIPE;
+ }
+
+ switch (fmt.format.field) {
+ case V4L2_FIELD_TOP:
+ case V4L2_FIELD_BOTTOM:
+ case V4L2_FIELD_NONE:
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ case V4L2_FIELD_INTERLACED:
+ case V4L2_FIELD_SEQ_TB:
+ case V4L2_FIELD_SEQ_BT:
+ break;
+ default:
+ return -EPIPE;
+ }
+
+ if (fmt.format.width != cru->format.width ||
+ fmt.format.height != cru->format.height)
+ return -EPIPE;
+
+ return 0;
+}
+
+static void rzg2l_cru_set_slot_addr(struct rzg2l_cru_dev *cru,
+ int slot, dma_addr_t addr)
+{
+ /*
+	 * The address must be 512-byte aligned. The driver should never accept
+	 * settings that do not satisfy this requirement in the first place...
+ */
+ if (WARN_ON((addr) & RZG2L_CRU_HW_BUFFER_MASK))
+ return;
+
+	/* Currently, only 32-bit addresses are used; the upper address word is always zero */
+ rzg2l_cru_write(cru, AMnMBxADDRL(slot), addr);
+ rzg2l_cru_write(cru, AMnMBxADDRH(slot), 0);
+}
+
+/*
+ * Move a buffer from the queue to the HW slot. If no buffer is
+ * available, use the scratch buffer. The scratch buffer is never
+ * returned to userspace; its only purpose is to keep the capture
+ * loop running.
+ */
+static void rzg2l_cru_fill_hw_slot(struct rzg2l_cru_dev *cru, int slot)
+{
+ struct vb2_v4l2_buffer *vbuf;
+ struct rzg2l_cru_buffer *buf;
+ dma_addr_t phys_addr;
+
+	/* An already populated slot shall never be overwritten. */
+ if (WARN_ON(cru->queue_buf[slot]))
+ return;
+
+ dev_dbg(cru->dev, "Filling HW slot: %d\n", slot);
+
+ if (list_empty(&cru->buf_list)) {
+ cru->queue_buf[slot] = NULL;
+ phys_addr = cru->scratch_phys;
+ } else {
+ /* Keep track of buffer we give to HW */
+ buf = list_entry(cru->buf_list.next,
+ struct rzg2l_cru_buffer, list);
+ vbuf = &buf->vb;
+ list_del_init(to_buf_list(vbuf));
+ cru->queue_buf[slot] = vbuf;
+
+ /* Setup DMA */
+ phys_addr = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 0);
+ }
+
+ rzg2l_cru_set_slot_addr(cru, slot, phys_addr);
+}
+
+static void rzg2l_cru_initialize_axi(struct rzg2l_cru_dev *cru)
+{
+ unsigned int slot;
+
+ /*
+	 * Set the image data memory banks.
+	 * Currently, all memory banks used by the driver are enabled.
+ */
+ rzg2l_cru_write(cru, AMnMBVALID, AMnMBVALID_MBVALID(cru->num_buf - 1));
+
+ for (slot = 0; slot < cru->num_buf; slot++)
+ rzg2l_cru_fill_hw_slot(cru, slot);
+}
+
+static void rzg2l_cru_csi2_setup(struct rzg2l_cru_dev *cru, bool *input_is_yuv,
+ struct v4l2_mbus_framefmt *ip_sd_fmt)
+{
+ u32 icnmc;
+
+ switch (ip_sd_fmt->code) {
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ icnmc = ICnMC_INF_YUV8_422;
+ *input_is_yuv = true;
+ break;
+ default:
+ *input_is_yuv = false;
+ icnmc = ICnMC_INF_USER;
+ break;
+ }
+
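+	/* Preserve the other ICnMC bits and replace only the input format field */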
+ icnmc |= (rzg2l_cru_read(cru, ICnMC) & ~ICnMC_INF_MASK);
+
+	/* Select the CSI-2 virtual channel */
+ icnmc |= ICnMC_VCSEL(cru->csi.channel);
+
+ rzg2l_cru_write(cru, ICnMC, icnmc);
+}
+
+static int rzg2l_cru_initialize_image_conv(struct rzg2l_cru_dev *cru,
+ struct v4l2_mbus_framefmt *ip_sd_fmt)
+{
+ bool output_is_yuv = false;
+ bool input_is_yuv = false;
+ u32 icndmr;
+
+ rzg2l_cru_csi2_setup(cru, &input_is_yuv, ip_sd_fmt);
+
+ /* Output format */
+ switch (cru->format.pixelformat) {
+ case V4L2_PIX_FMT_UYVY:
+ icndmr = ICnDMR_YCMODE_UYVY;
+ output_is_yuv = true;
+ break;
+ default:
+ dev_err(cru->dev, "Invalid pixelformat (0x%x)\n",
+ cru->format.pixelformat);
+ return -EINVAL;
+ }
+
+	/* If the input and output use the same colorspace, use bypass mode */
+ if (output_is_yuv == input_is_yuv)
+ rzg2l_cru_write(cru, ICnMC,
+ rzg2l_cru_read(cru, ICnMC) | ICnMC_CSCTHR);
+ else
+ rzg2l_cru_write(cru, ICnMC,
+ rzg2l_cru_read(cru, ICnMC) & (~ICnMC_CSCTHR));
+
+ /* Set output data format */
+ rzg2l_cru_write(cru, ICnDMR, icndmr);
+
+ return 0;
+}
+
+void rzg2l_cru_stop_image_processing(struct rzg2l_cru_dev *cru)
+{
+ u32 amnfifopntr, amnfifopntr_w, amnfifopntr_r_y;
+ unsigned int retries = 0;
+ unsigned long flags;
+ u32 icnms;
+
+ spin_lock_irqsave(&cru->qlock, flags);
+
+ /* Disable and clear the interrupt */
+ rzg2l_cru_write(cru, CRUnIE, 0);
+ rzg2l_cru_write(cru, CRUnINTS, 0x001F0F0F);
+
+ /* Stop the operation of image conversion */
+ rzg2l_cru_write(cru, ICnEN, 0);
+
+ /* Wait for streaming to stop */
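+	/* The qlock is dropped across msleep(); sleeping with a spinlock held is not allowed */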
+ while ((rzg2l_cru_read(cru, ICnMS) & ICnMS_IA) && retries++ < RZG2L_RETRIES) {
+ spin_unlock_irqrestore(&cru->qlock, flags);
+ msleep(RZG2L_TIMEOUT_MS);
+ spin_lock_irqsave(&cru->qlock, flags);
+ }
+
+ icnms = rzg2l_cru_read(cru, ICnMS) & ICnMS_IA;
+ if (icnms)
+ dev_err(cru->dev, "Failed stop HW, something is seriously broken\n");
+
+ cru->state = RZG2L_CRU_DMA_STOPPED;
+
+ /* Wait until the FIFO becomes empty */
+ for (retries = 5; retries > 0; retries--) {
+ amnfifopntr = rzg2l_cru_read(cru, AMnFIFOPNTR);
+
+ amnfifopntr_w = amnfifopntr & AMnFIFOPNTR_FIFOWPNTR;
+ amnfifopntr_r_y =
+ (amnfifopntr & AMnFIFOPNTR_FIFORPNTR_Y) >> 16;
+ if (amnfifopntr_w == amnfifopntr_r_y)
+ break;
+
+ usleep_range(10, 20);
+ }
+
+	/* Report if the FIFO did not become empty */
+ if (!retries)
+ dev_err(cru->dev, "Failed to empty FIFO\n");
+
+ /* Stop AXI bus */
+ rzg2l_cru_write(cru, AMnAXISTP, AMnAXISTP_AXI_STOP);
+
+	/* Wait until the AXI bus stops */
+ for (retries = 5; retries > 0; retries--) {
+ if (rzg2l_cru_read(cru, AMnAXISTPACK) &
+ AMnAXISTPACK_AXI_STOP_ACK)
+ break;
+
+ usleep_range(10, 20);
+ }
+
+	/* Report if the AXI bus could not be stopped */
+ if (!retries)
+ dev_err(cru->dev, "Failed to stop AXI bus\n");
+
+ /* Cancel the AXI bus stop request */
+ rzg2l_cru_write(cru, AMnAXISTP, 0);
+
+ /* Reset the CRU (AXI-master) */
+ reset_control_assert(cru->aresetn);
+
+ /* Resets the image processing module */
+ rzg2l_cru_write(cru, CRUnRST, 0);
+
+ spin_unlock_irqrestore(&cru->qlock, flags);
+}
+
+int rzg2l_cru_start_image_processing(struct rzg2l_cru_dev *cru)
+{
+ struct v4l2_mbus_framefmt *fmt = rzg2l_cru_ip_get_src_fmt(cru);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&cru->qlock, flags);
+
+ /* Initialize image convert */
+ ret = rzg2l_cru_initialize_image_conv(cru, fmt);
+ if (ret) {
+ spin_unlock_irqrestore(&cru->qlock, flags);
+ return ret;
+ }
+
+ /* Select a video input */
+ rzg2l_cru_write(cru, CRUnCTRL, CRUnCTRL_VINSEL(0));
+
+ /* Cancel the software reset for image processing block */
+ rzg2l_cru_write(cru, CRUnRST, CRUnRST_VRESETN);
+
+ /* Disable and clear the interrupt before using */
+ rzg2l_cru_write(cru, CRUnIE, 0);
+ rzg2l_cru_write(cru, CRUnINTS, 0x001f000f);
+
+ /* Initialize the AXI master */
+ rzg2l_cru_initialize_axi(cru);
+
+ /* Enable interrupt */
+ rzg2l_cru_write(cru, CRUnIE, CRUnIE_EFE);
+
+ /* Enable image processing reception */
+ rzg2l_cru_write(cru, ICnEN, ICnEN_ICEN);
+
+ spin_unlock_irqrestore(&cru->qlock, flags);
+
+ return 0;
+}
+
+void rzg2l_cru_vclk_unprepare(struct rzg2l_cru_dev *cru)
+{
+ clk_disable_unprepare(cru->vclk);
+}
+
+int rzg2l_cru_vclk_prepare(struct rzg2l_cru_dev *cru)
+{
+ return clk_prepare_enable(cru->vclk);
+}
+
+static int rzg2l_cru_set_stream(struct rzg2l_cru_dev *cru, int on)
+{
+ struct media_pipeline *pipe;
+ struct v4l2_subdev *sd;
+ struct media_pad *pad;
+ int ret;
+
+ pad = media_pad_remote_pad_first(&cru->pad);
+ if (!pad)
+ return -EPIPE;
+
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+
+ if (!on) {
+ int stream_off_ret = 0;
+
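+		/* Record the first error but run the full stop sequence regardless */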
+ ret = v4l2_subdev_call(sd, video, s_stream, 0);
+ if (ret)
+ stream_off_ret = ret;
+
+ ret = v4l2_subdev_call(sd, video, post_streamoff);
+ if (ret == -ENOIOCTLCMD)
+ ret = 0;
+ if (ret && !stream_off_ret)
+ stream_off_ret = ret;
+
+ video_device_pipeline_stop(&cru->vdev);
+
+ pm_runtime_put_sync(cru->dev);
+ clk_disable_unprepare(cru->vclk);
+
+ return stream_off_ret;
+ }
+
+ ret = pm_runtime_resume_and_get(cru->dev);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(cru->vclk);
+ if (ret)
+ goto err_pm_put;
+
+ ret = rzg2l_cru_mc_validate_format(cru, sd, pad);
+ if (ret)
+ goto err_vclk_disable;
+
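+	/* Reuse an existing pipeline if the subdev is already part of one */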
+ pipe = media_entity_pipeline(&sd->entity) ? : &cru->vdev.pipe;
+ ret = video_device_pipeline_start(&cru->vdev, pipe);
+ if (ret)
+ goto err_vclk_disable;
+
+ ret = v4l2_subdev_call(sd, video, pre_streamon, 0);
+ if (ret == -ENOIOCTLCMD)
+ ret = 0;
+ if (ret)
+ goto pipe_line_stop;
+
+ ret = v4l2_subdev_call(sd, video, s_stream, 1);
+ if (ret == -ENOIOCTLCMD)
+ ret = 0;
+ if (ret)
+ goto err_s_stream;
+
+ return 0;
+
+err_s_stream:
+ v4l2_subdev_call(sd, video, post_streamoff);
+
+pipe_line_stop:
+ video_device_pipeline_stop(&cru->vdev);
+
+err_vclk_disable:
+ clk_disable_unprepare(cru->vclk);
+
+err_pm_put:
+ pm_runtime_put_sync(cru->dev);
+
+ return ret;
+}
+
+static void rzg2l_cru_stop_streaming(struct rzg2l_cru_dev *cru)
+{
+ cru->state = RZG2L_CRU_DMA_STOPPING;
+
+ rzg2l_cru_set_stream(cru, 0);
+}
+
+static irqreturn_t rzg2l_cru_irq(int irq, void *data)
+{
+ struct rzg2l_cru_dev *cru = data;
+ unsigned int handled = 0;
+ unsigned long flags;
+ u32 irq_status;
+ u32 amnmbs;
+ int slot;
+
+ spin_lock_irqsave(&cru->qlock, flags);
+
+ irq_status = rzg2l_cru_read(cru, CRUnINTS);
+ if (!irq_status)
+ goto done;
+
+ handled = 1;
+
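+	/* Acknowledge the pending interrupts by writing the status back */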
+ rzg2l_cru_write(cru, CRUnINTS, rzg2l_cru_read(cru, CRUnINTS));
+
+ /* Nothing to do if capture status is 'RZG2L_CRU_DMA_STOPPED' */
+ if (cru->state == RZG2L_CRU_DMA_STOPPED) {
+ dev_dbg(cru->dev, "IRQ while state stopped\n");
+ goto done;
+ }
+
+	/* Streaming is being stopped ('RZG2L_CRU_DMA_STOPPING'); just acknowledge the interrupt */
+ if (cru->state == RZG2L_CRU_DMA_STOPPING) {
+ if (irq_status & CRUnINTS_SFS)
+ dev_dbg(cru->dev, "IRQ while state stopping\n");
+ goto done;
+ }
+
+ /* Prepare for capture and update state */
+ amnmbs = rzg2l_cru_read(cru, AMnMBS);
+ slot = amnmbs & AMnMBS_MBSTS;
+
+ /*
+ * AMnMBS.MBSTS indicates the destination of Memory Bank (MB).
+ * Recalculate to get the current transfer complete MB.
+ */
+ if (slot == 0)
+ slot = cru->num_buf - 1;
+ else
+ slot--;
+
+ /*
+	 * To hand buffers back to userspace in a known order, start
+	 * capturing from slot 0.
+ */
+ if (cru->state == RZG2L_CRU_DMA_STARTING) {
+ if (slot != 0) {
+ dev_dbg(cru->dev, "Starting sync slot: %d\n", slot);
+ goto done;
+ }
+
+ dev_dbg(cru->dev, "Capture start synced!\n");
+ cru->state = RZG2L_CRU_DMA_RUNNING;
+ }
+
+ /* Capture frame */
+ if (cru->queue_buf[slot]) {
+ cru->queue_buf[slot]->field = cru->format.field;
+ cru->queue_buf[slot]->sequence = cru->sequence;
+ cru->queue_buf[slot]->vb2_buf.timestamp = ktime_get_ns();
+ vb2_buffer_done(&cru->queue_buf[slot]->vb2_buf,
+ VB2_BUF_STATE_DONE);
+ cru->queue_buf[slot] = NULL;
+ } else {
+ /* Scratch buffer was used, dropping frame. */
+ dev_dbg(cru->dev, "Dropping frame %u\n", cru->sequence);
+ }
+
+ cru->sequence++;
+
+ /* Prepare for next frame */
+ rzg2l_cru_fill_hw_slot(cru, slot);
+
+done:
+ spin_unlock_irqrestore(&cru->qlock, flags);
+
+ return IRQ_RETVAL(handled);
+}
+
+static int rzg2l_cru_start_streaming_vq(struct vb2_queue *vq, unsigned int count)
+{
+ struct rzg2l_cru_dev *cru = vb2_get_drv_priv(vq);
+ int ret;
+
+ /* Release reset state */
+ ret = reset_control_deassert(cru->aresetn);
+ if (ret) {
+ dev_err(cru->dev, "failed to deassert aresetn\n");
+ return ret;
+ }
+
+ ret = reset_control_deassert(cru->presetn);
+ if (ret) {
+ reset_control_assert(cru->aresetn);
+ dev_err(cru->dev, "failed to deassert presetn\n");
+ return ret;
+ }
+
+ ret = request_irq(cru->image_conv_irq, rzg2l_cru_irq,
+ IRQF_SHARED, KBUILD_MODNAME, cru);
+ if (ret) {
+ dev_err(cru->dev, "failed to request irq\n");
+ goto assert_resets;
+ }
+
+ /* Allocate scratch buffer. */
+ cru->scratch = dma_alloc_coherent(cru->dev, cru->format.sizeimage,
+ &cru->scratch_phys, GFP_KERNEL);
+ if (!cru->scratch) {
+ return_unused_buffers(cru, VB2_BUF_STATE_QUEUED);
+ dev_err(cru->dev, "Failed to allocate scratch buffer\n");
+ ret = -ENOMEM;
+ goto free_image_conv_irq;
+ }
+
+ cru->sequence = 0;
+
+ ret = rzg2l_cru_set_stream(cru, 1);
+ if (ret) {
+ return_unused_buffers(cru, VB2_BUF_STATE_QUEUED);
+ goto out;
+ }
+
+ cru->state = RZG2L_CRU_DMA_STARTING;
+ dev_dbg(cru->dev, "Starting to capture\n");
+ return 0;
+
+out:
+ if (ret)
+ dma_free_coherent(cru->dev, cru->format.sizeimage, cru->scratch,
+ cru->scratch_phys);
+free_image_conv_irq:
+ free_irq(cru->image_conv_irq, cru);
+
+assert_resets:
+ reset_control_assert(cru->presetn);
+ reset_control_assert(cru->aresetn);
+
+ return ret;
+}
+
+static void rzg2l_cru_stop_streaming_vq(struct vb2_queue *vq)
+{
+ struct rzg2l_cru_dev *cru = vb2_get_drv_priv(vq);
+
+ rzg2l_cru_stop_streaming(cru);
+
+ /* Free scratch buffer */
+ dma_free_coherent(cru->dev, cru->format.sizeimage,
+ cru->scratch, cru->scratch_phys);
+
+ free_irq(cru->image_conv_irq, cru);
+ reset_control_assert(cru->presetn);
+
+ return_unused_buffers(cru, VB2_BUF_STATE_ERROR);
+}
+
+static const struct vb2_ops rzg2l_cru_qops = {
+ .queue_setup = rzg2l_cru_queue_setup,
+ .buf_prepare = rzg2l_cru_buffer_prepare,
+ .buf_queue = rzg2l_cru_buffer_queue,
+ .start_streaming = rzg2l_cru_start_streaming_vq,
+ .stop_streaming = rzg2l_cru_stop_streaming_vq,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+void rzg2l_cru_dma_unregister(struct rzg2l_cru_dev *cru)
+{
+ mutex_destroy(&cru->lock);
+
+ v4l2_device_unregister(&cru->v4l2_dev);
+ vb2_queue_release(&cru->queue);
+}
+
+int rzg2l_cru_dma_register(struct rzg2l_cru_dev *cru)
+{
+ struct vb2_queue *q = &cru->queue;
+ unsigned int i;
+ int ret;
+
+ /* Initialize the top-level structure */
+ ret = v4l2_device_register(cru->dev, &cru->v4l2_dev);
+ if (ret)
+ return ret;
+
+ mutex_init(&cru->lock);
+ INIT_LIST_HEAD(&cru->buf_list);
+
+ spin_lock_init(&cru->qlock);
+
+ cru->state = RZG2L_CRU_DMA_STOPPED;
+
+ for (i = 0; i < RZG2L_CRU_HW_BUFFER_MAX; i++)
+ cru->queue_buf[i] = NULL;
+
+ /* buffer queue */
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_DMABUF;
+ q->lock = &cru->lock;
+ q->drv_priv = cru;
+ q->buf_struct_size = sizeof(struct rzg2l_cru_buffer);
+ q->ops = &rzg2l_cru_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 4;
+ q->dev = cru->dev;
+
+ ret = vb2_queue_init(q);
+ if (ret < 0) {
+ dev_err(cru->dev, "failed to initialize VB2 queue\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ mutex_destroy(&cru->lock);
+ v4l2_device_unregister(&cru->v4l2_dev);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 video device operations
+ */
+
+static const struct v4l2_format_info rzg2l_cru_formats[] = {
+ {
+ .format = V4L2_PIX_FMT_UYVY,
+ .bpp[0] = 2,
+ },
+};
+
+const struct v4l2_format_info *rzg2l_cru_format_from_pixel(u32 format)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(rzg2l_cru_formats); i++)
+ if (rzg2l_cru_formats[i].format == format)
+ return rzg2l_cru_formats + i;
+
+ return NULL;
+}
+
+static u32 rzg2l_cru_format_bytesperline(struct v4l2_pix_format *pix)
+{
+ const struct v4l2_format_info *fmt;
+
+ fmt = rzg2l_cru_format_from_pixel(pix->pixelformat);
+
+ if (WARN_ON(!fmt))
+ return -EINVAL;
+
+ return pix->width * fmt->bpp[0];
+}
+
+static u32 rzg2l_cru_format_sizeimage(struct v4l2_pix_format *pix)
+{
+ return pix->bytesperline * pix->height;
+}
+
+static void rzg2l_cru_format_align(struct rzg2l_cru_dev *cru,
+ struct v4l2_pix_format *pix)
+{
+ if (!rzg2l_cru_format_from_pixel(pix->pixelformat))
+ pix->pixelformat = RZG2L_CRU_DEFAULT_FORMAT;
+
+ switch (pix->field) {
+ case V4L2_FIELD_TOP:
+ case V4L2_FIELD_BOTTOM:
+ case V4L2_FIELD_NONE:
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ case V4L2_FIELD_INTERLACED:
+ break;
+ default:
+ pix->field = RZG2L_CRU_DEFAULT_FIELD;
+ break;
+ }
+
+ /* Limit to CRU capabilities */
+ v4l_bound_align_image(&pix->width, 320, RZG2L_CRU_MAX_INPUT_WIDTH, 1,
+ &pix->height, 240, RZG2L_CRU_MAX_INPUT_HEIGHT, 2, 0);
+
+ pix->bytesperline = rzg2l_cru_format_bytesperline(pix);
+ pix->sizeimage = rzg2l_cru_format_sizeimage(pix);
+
+ dev_dbg(cru->dev, "Format %ux%u bpl: %u size: %u\n",
+ pix->width, pix->height, pix->bytesperline, pix->sizeimage);
+}
+
+static void rzg2l_cru_try_format(struct rzg2l_cru_dev *cru,
+ struct v4l2_pix_format *pix)
+{
+ /*
+ * The V4L2 specification clearly documents the colorspace fields
+ * as being set by drivers for capture devices. Using the values
+ * supplied by userspace thus wouldn't comply with the API. Until
+	 * the API is updated, force fixed values.
+ */
+ pix->colorspace = RZG2L_CRU_DEFAULT_COLORSPACE;
+ pix->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(pix->colorspace);
+ pix->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(pix->colorspace);
+ pix->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true, pix->colorspace,
+ pix->ycbcr_enc);
+
+ rzg2l_cru_format_align(cru, pix);
+}
+
+static int rzg2l_cru_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
+ strscpy(cap->card, "RZG2L_CRU", sizeof(cap->card));
+
+ return 0;
+}
+
+static int rzg2l_cru_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct rzg2l_cru_dev *cru = video_drvdata(file);
+
+ rzg2l_cru_try_format(cru, &f->fmt.pix);
+
+ return 0;
+}
+
+static int rzg2l_cru_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct rzg2l_cru_dev *cru = video_drvdata(file);
+
+ if (vb2_is_busy(&cru->queue))
+ return -EBUSY;
+
+ rzg2l_cru_try_format(cru, &f->fmt.pix);
+
+ cru->format = f->fmt.pix;
+
+ return 0;
+}
+
+static int rzg2l_cru_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct rzg2l_cru_dev *cru = video_drvdata(file);
+
+ f->fmt.pix = cru->format;
+
+ return 0;
+}
+
+static int rzg2l_cru_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index >= ARRAY_SIZE(rzg2l_cru_formats))
+ return -EINVAL;
+
+ f->pixelformat = rzg2l_cru_formats[f->index].format;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops rzg2l_cru_ioctl_ops = {
+ .vidioc_querycap = rzg2l_cru_querycap,
+ .vidioc_try_fmt_vid_cap = rzg2l_cru_try_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = rzg2l_cru_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = rzg2l_cru_s_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_cap = rzg2l_cru_enum_fmt_vid_cap,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media controller file operations
+ */
+
+static int rzg2l_cru_open(struct file *file)
+{
+ struct rzg2l_cru_dev *cru = video_drvdata(file);
+ int ret;
+
+ ret = mutex_lock_interruptible(&cru->lock);
+ if (ret)
+ return ret;
+
+ file->private_data = cru;
+ ret = v4l2_fh_open(file);
+ if (ret)
+ goto err_unlock;
+
+ mutex_unlock(&cru->lock);
+
+ return 0;
+
+err_unlock:
+ mutex_unlock(&cru->lock);
+
+ return ret;
+}
+
+static int rzg2l_cru_release(struct file *file)
+{
+ struct rzg2l_cru_dev *cru = video_drvdata(file);
+ int ret;
+
+ mutex_lock(&cru->lock);
+
+	/* The release helper will clean up any ongoing streaming. */
+ ret = _vb2_fop_release(file, NULL);
+
+ mutex_unlock(&cru->lock);
+
+ return ret;
+}
+
+static const struct v4l2_file_operations rzg2l_cru_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = video_ioctl2,
+ .open = rzg2l_cru_open,
+ .release = rzg2l_cru_release,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+ .read = vb2_fop_read,
+};
+
+static void rzg2l_cru_v4l2_init(struct rzg2l_cru_dev *cru)
+{
+ struct video_device *vdev = &cru->vdev;
+
+ vdev->v4l2_dev = &cru->v4l2_dev;
+ vdev->queue = &cru->queue;
+ snprintf(vdev->name, sizeof(vdev->name), "CRU output");
+ vdev->release = video_device_release_empty;
+ vdev->lock = &cru->lock;
+ vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ vdev->device_caps |= V4L2_CAP_IO_MC;
+ vdev->fops = &rzg2l_cru_fops;
+ vdev->ioctl_ops = &rzg2l_cru_ioctl_ops;
+
+ /* Set a default format */
+ cru->format.pixelformat = RZG2L_CRU_DEFAULT_FORMAT;
+ cru->format.width = RZG2L_CRU_DEFAULT_WIDTH;
+ cru->format.height = RZG2L_CRU_DEFAULT_HEIGHT;
+ cru->format.field = RZG2L_CRU_DEFAULT_FIELD;
+ cru->format.colorspace = RZG2L_CRU_DEFAULT_COLORSPACE;
+ rzg2l_cru_format_align(cru, &cru->format);
+}
+
+void rzg2l_cru_video_unregister(struct rzg2l_cru_dev *cru)
+{
+ media_device_unregister(&cru->mdev);
+ video_unregister_device(&cru->vdev);
+}
+
+int rzg2l_cru_video_register(struct rzg2l_cru_dev *cru)
+{
+ struct video_device *vdev = &cru->vdev;
+ int ret;
+
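+	/* Already registered: just link the entity to the media device if needed */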
+ if (video_is_registered(&cru->vdev)) {
+ struct media_entity *entity;
+
+ entity = &cru->vdev.entity;
+ if (!entity->graph_obj.mdev)
+ entity->graph_obj.mdev = &cru->mdev;
+ return 0;
+ }
+
+ rzg2l_cru_v4l2_init(cru);
+ video_set_drvdata(vdev, cru);
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
+ if (ret) {
+ dev_err(cru->dev, "Failed to register video device\n");
+ return ret;
+ }
+
+ ret = media_device_register(&cru->mdev);
+ if (ret) {
+ video_unregister_device(&cru->vdev);
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/media/platform/renesas/sh_vou.c b/drivers/media/platform/renesas/sh_vou.c
new file mode 100644
index 0000000000..f792aedc9d
--- /dev/null
+++ b/drivers/media/platform/renesas/sh_vou.c
@@ -0,0 +1,1374 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SuperH Video Output Unit (VOU) driver
+ *
+ * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <linux/module.h>
+
+#include <media/drv-intf/sh_vou.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mediabus.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+
+/* Mirror addresses are not available for all registers */
+#define VOUER 0
+#define VOUCR 4
+#define VOUSTR 8
+#define VOUVCR 0xc
+#define VOUISR 0x10
+#define VOUBCR 0x14
+#define VOUDPR 0x18
+#define VOUDSR 0x1c
+#define VOUVPR 0x20
+#define VOUIR 0x24
+#define VOUSRR 0x28
+#define VOUMSR 0x2c
+#define VOUHIR 0x30
+#define VOUDFR 0x34
+#define VOUAD1R 0x38
+#define VOUAD2R 0x3c
+#define VOUAIR 0x40
+#define VOUSWR 0x44
+#define VOURCR 0x48
+#define VOURPR 0x50
+
+enum sh_vou_status {
+ SH_VOU_IDLE,
+ SH_VOU_INITIALISING,
+ SH_VOU_RUNNING,
+};
+
+#define VOU_MIN_IMAGE_WIDTH 16
+#define VOU_MAX_IMAGE_WIDTH 720
+#define VOU_MIN_IMAGE_HEIGHT 16
+
+struct sh_vou_buffer {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+};
+
+static inline struct sh_vou_buffer *
+to_sh_vou_buffer(struct vb2_v4l2_buffer *vb2)
+{
+ return container_of(vb2, struct sh_vou_buffer, vb);
+}
+
+struct sh_vou_device {
+ struct v4l2_device v4l2_dev;
+ struct video_device vdev;
+ struct sh_vou_pdata *pdata;
+ spinlock_t lock;
+ void __iomem *base;
+ /* State information */
+ struct v4l2_pix_format pix;
+ struct v4l2_rect rect;
+ struct list_head buf_list;
+ v4l2_std_id std;
+ int pix_idx;
+ struct vb2_queue queue;
+ struct sh_vou_buffer *active;
+ enum sh_vou_status status;
+ unsigned sequence;
+ struct mutex fop_lock;
+};
+
+/* Register access routines for sides A, B and mirror addresses */
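+/* Side B registers live at offset +0x1000 from side A, mirror addresses at +0x2000 */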
+static void sh_vou_reg_a_write(struct sh_vou_device *vou_dev, unsigned int reg,
+ u32 value)
+{
+ __raw_writel(value, vou_dev->base + reg);
+}
+
+static void sh_vou_reg_ab_write(struct sh_vou_device *vou_dev, unsigned int reg,
+ u32 value)
+{
+ __raw_writel(value, vou_dev->base + reg);
+ __raw_writel(value, vou_dev->base + reg + 0x1000);
+}
+
+static void sh_vou_reg_m_write(struct sh_vou_device *vou_dev, unsigned int reg,
+ u32 value)
+{
+ __raw_writel(value, vou_dev->base + reg + 0x2000);
+}
+
+static u32 sh_vou_reg_a_read(struct sh_vou_device *vou_dev, unsigned int reg)
+{
+ return __raw_readl(vou_dev->base + reg);
+}
+
+static void sh_vou_reg_a_set(struct sh_vou_device *vou_dev, unsigned int reg,
+ u32 value, u32 mask)
+{
+ u32 old = __raw_readl(vou_dev->base + reg);
+
+ value = (value & mask) | (old & ~mask);
+ __raw_writel(value, vou_dev->base + reg);
+}
+
+static void sh_vou_reg_b_set(struct sh_vou_device *vou_dev, unsigned int reg,
+ u32 value, u32 mask)
+{
+ sh_vou_reg_a_set(vou_dev, reg + 0x1000, value, mask);
+}
+
+static void sh_vou_reg_ab_set(struct sh_vou_device *vou_dev, unsigned int reg,
+ u32 value, u32 mask)
+{
+ sh_vou_reg_a_set(vou_dev, reg, value, mask);
+ sh_vou_reg_b_set(vou_dev, reg, value, mask);
+}
+
+struct sh_vou_fmt {
+ u32 pfmt;
+ unsigned char bpp;
+ unsigned char bpl;
+ unsigned char rgb;
+ unsigned char yf;
+ unsigned char pkf;
+};
+
+/* Further pixel formats can be added */
+static struct sh_vou_fmt vou_fmt[] = {
+ {
+ .pfmt = V4L2_PIX_FMT_NV12,
+ .bpp = 12,
+ .bpl = 1,
+ .yf = 0,
+ .rgb = 0,
+ },
+ {
+ .pfmt = V4L2_PIX_FMT_NV16,
+ .bpp = 16,
+ .bpl = 1,
+ .yf = 1,
+ .rgb = 0,
+ },
+ {
+ .pfmt = V4L2_PIX_FMT_RGB24,
+ .bpp = 24,
+ .bpl = 3,
+ .pkf = 2,
+ .rgb = 1,
+ },
+ {
+ .pfmt = V4L2_PIX_FMT_RGB565,
+ .bpp = 16,
+ .bpl = 2,
+ .pkf = 3,
+ .rgb = 1,
+ },
+ {
+ .pfmt = V4L2_PIX_FMT_RGB565X,
+ .bpp = 16,
+ .bpl = 2,
+ .pkf = 3,
+ .rgb = 1,
+ },
+};
+
+static void sh_vou_schedule_next(struct sh_vou_device *vou_dev,
+ struct vb2_v4l2_buffer *vbuf)
+{
+ dma_addr_t addr1, addr2;
+
+ addr1 = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 0);
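+	/* For semi-planar NV12/NV16 the chroma plane immediately follows the luma plane */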
+ switch (vou_dev->pix.pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
+ addr2 = addr1 + vou_dev->pix.width * vou_dev->pix.height;
+ break;
+ default:
+ addr2 = 0;
+ }
+
+ sh_vou_reg_m_write(vou_dev, VOUAD1R, addr1);
+ sh_vou_reg_m_write(vou_dev, VOUAD2R, addr2);
+}
+
+static void sh_vou_stream_config(struct sh_vou_device *vou_dev)
+{
+ unsigned int row_coeff;
+#ifdef __LITTLE_ENDIAN
+ u32 dataswap = 7;
+#else
+ u32 dataswap = 0;
+#endif
+
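+	/*
+	 * row_coeff is the number of bytes per pixel in the first plane;
+	 * VOUAIR below is programmed with width * row_coeff bytes per line.
+	 * RGB565 additionally toggles the data-swap setting relative to RGB565X.
+	 */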
+ switch (vou_dev->pix.pixelformat) {
+ default:
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
+ row_coeff = 1;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ dataswap ^= 1;
+ fallthrough;
+ case V4L2_PIX_FMT_RGB565X:
+ row_coeff = 2;
+ break;
+ case V4L2_PIX_FMT_RGB24:
+ row_coeff = 3;
+ break;
+ }
+
+ sh_vou_reg_a_write(vou_dev, VOUSWR, dataswap);
+ sh_vou_reg_ab_write(vou_dev, VOUAIR, vou_dev->pix.width * row_coeff);
+}
+
+/* Locking: caller holds fop_lock mutex */
+static int sh_vou_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct sh_vou_device *vou_dev = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format *pix = &vou_dev->pix;
+ int bytes_per_line = vou_fmt[vou_dev->pix_idx].bpp * pix->width / 8;
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
+
+ if (*nplanes)
+ return sizes[0] < pix->height * bytes_per_line ? -EINVAL : 0;
+ *nplanes = 1;
+ sizes[0] = pix->height * bytes_per_line;
+ return 0;
+}
+
+static int sh_vou_buf_prepare(struct vb2_buffer *vb)
+{
+ struct sh_vou_device *vou_dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct v4l2_pix_format *pix = &vou_dev->pix;
+ unsigned bytes_per_line = vou_fmt[vou_dev->pix_idx].bpp * pix->width / 8;
+ unsigned size = pix->height * bytes_per_line;
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
+
+ if (vb2_plane_size(vb, 0) < size) {
+ /* User buffer too small */
+ dev_warn(vou_dev->v4l2_dev.dev, "buffer too small (%lu < %u)\n",
+ vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, size);
+ return 0;
+}
+
+/* Locking: caller holds fop_lock mutex and vq->irqlock spinlock */
+static void sh_vou_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct sh_vou_device *vou_dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct sh_vou_buffer *shbuf = to_sh_vou_buffer(vbuf);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vou_dev->lock, flags);
+ list_add_tail(&shbuf->list, &vou_dev->buf_list);
+ spin_unlock_irqrestore(&vou_dev->lock, flags);
+}
+
+static int sh_vou_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct sh_vou_device *vou_dev = vb2_get_drv_priv(vq);
+ struct sh_vou_buffer *buf, *node;
+ int ret;
+
+ vou_dev->sequence = 0;
+ ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0,
+ video, s_stream, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ list_for_each_entry_safe(buf, node, &vou_dev->buf_list, list) {
+ vb2_buffer_done(&buf->vb.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
+ list_del(&buf->list);
+ }
+ vou_dev->active = NULL;
+ return ret;
+ }
+
+ buf = list_entry(vou_dev->buf_list.next, struct sh_vou_buffer, list);
+
+ vou_dev->active = buf;
+
+	/* Start from side A: we use mirror addresses, so set B */
+ sh_vou_reg_a_write(vou_dev, VOURPR, 1);
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s: first buffer status 0x%x\n",
+ __func__, sh_vou_reg_a_read(vou_dev, VOUSTR));
+ sh_vou_schedule_next(vou_dev, &buf->vb);
+
+ buf = list_entry(buf->list.next, struct sh_vou_buffer, list);
+
+ /* Second buffer - initialise register side B */
+ sh_vou_reg_a_write(vou_dev, VOURPR, 0);
+ sh_vou_schedule_next(vou_dev, &buf->vb);
+
+ /* Register side switching with frame VSYNC */
+ sh_vou_reg_a_write(vou_dev, VOURCR, 5);
+
+ sh_vou_stream_config(vou_dev);
+ /* Enable End-of-Frame (VSYNC) interrupts */
+ sh_vou_reg_a_write(vou_dev, VOUIR, 0x10004);
+
+ /* Two buffers on the queue - activate the hardware */
+ vou_dev->status = SH_VOU_RUNNING;
+ sh_vou_reg_a_write(vou_dev, VOUER, 0x107);
+ return 0;
+}
+
+static void sh_vou_stop_streaming(struct vb2_queue *vq)
+{
+ struct sh_vou_device *vou_dev = vb2_get_drv_priv(vq);
+ struct sh_vou_buffer *buf, *node;
+ unsigned long flags;
+
+ v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0,
+ video, s_stream, 0);
+ /* disable output */
+ sh_vou_reg_a_set(vou_dev, VOUER, 0, 1);
+ /* ...but the current frame will complete */
+ sh_vou_reg_a_set(vou_dev, VOUIR, 0, 0x30000);
+ msleep(50);
+ spin_lock_irqsave(&vou_dev->lock, flags);
+ list_for_each_entry_safe(buf, node, &vou_dev->buf_list, list) {
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ list_del(&buf->list);
+ }
+ vou_dev->active = NULL;
+ spin_unlock_irqrestore(&vou_dev->lock, flags);
+}
+
+static const struct vb2_ops sh_vou_qops = {
+ .queue_setup = sh_vou_queue_setup,
+ .buf_prepare = sh_vou_buf_prepare,
+ .buf_queue = sh_vou_buf_queue,
+ .start_streaming = sh_vou_start_streaming,
+ .stop_streaming = sh_vou_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+/* Video IOCTLs */
+static int sh_vou_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct sh_vou_device *vou_dev = video_drvdata(file);
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
+
+ strscpy(cap->card, "SuperH VOU", sizeof(cap->card));
+ strscpy(cap->driver, "sh-vou", sizeof(cap->driver));
+ strscpy(cap->bus_info, "platform:sh-vou", sizeof(cap->bus_info));
+ return 0;
+}
+
+/* Enumerate the formats that the device can accept from the user */
+static int sh_vou_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *fmt)
+{
+ struct sh_vou_device *vou_dev = video_drvdata(file);
+
+ if (fmt->index >= ARRAY_SIZE(vou_fmt))
+ return -EINVAL;
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
+
+ fmt->pixelformat = vou_fmt[fmt->index].pfmt;
+
+ return 0;
+}
+
+static int sh_vou_g_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct sh_vou_device *vou_dev = video_drvdata(file);
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
+
+ fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ fmt->fmt.pix = vou_dev->pix;
+
+ return 0;
+}
+
+static const unsigned char vou_scale_h_num[] = {1, 9, 2, 9, 4};
+static const unsigned char vou_scale_h_den[] = {1, 8, 1, 4, 1};
+static const unsigned char vou_scale_h_fld[] = {0, 2, 1, 3};
+static const unsigned char vou_scale_v_num[] = {1, 2, 4};
+static const unsigned char vou_scale_v_den[] = {1, 1, 1};
+static const unsigned char vou_scale_v_fld[] = {0, 1};
+
+static void sh_vou_configure_geometry(struct sh_vou_device *vou_dev,
+ int pix_idx, int w_idx, int h_idx)
+{
+ struct sh_vou_fmt *fmt = vou_fmt + pix_idx;
+ unsigned int black_left, black_top, width_max,
+ frame_in_height, frame_out_height, frame_out_top;
+ struct v4l2_rect *rect = &vou_dev->rect;
+ struct v4l2_pix_format *pix = &vou_dev->pix;
+ u32 vouvcr = 0, dsr_h, dsr_v;
+
+ if (vou_dev->std & V4L2_STD_525_60) {
+ width_max = 858;
+ /* height_max = 262; */
+ } else {
+ width_max = 864;
+ /* height_max = 312; */
+ }
+
+ frame_in_height = pix->height / 2;
+ frame_out_height = rect->height / 2;
+ frame_out_top = rect->top / 2;
+
+ /*
+ * Cropping scheme: max useful image is 720x480, and the total video
+ * area is 858x525 (NTSC) or 864x625 (PAL). AK8813 / 8814 starts
+ * sampling data beginning with fixed 276th (NTSC) / 288th (PAL) clock,
+ * of which the first 33 / 25 clocks HSYNC must be held active. This
+ * has to be configured in CR[HW]. 1 pixel equals 2 clock periods.
+ * This gives CR[HW] = 16 / 12, VPR[HVP] = 138 / 144, which gives
+ * exactly 858 - 138 = 864 - 144 = 720! We call the out-of-display area,
+ * beyond DSR, specified on the left and top by the VPR register "black
+ * pixels" and out-of-image area (DPR) "background pixels." We fix VPR
+ * at 138 / 144 : 20, because that's the HSYNC timing, that our first
+ * client requires, and that's exactly what leaves us 720 pixels for the
+ * image; we leave VPR[VVP] at default 20 for now, because the client
+ * doesn't seem to have any special requirements for it. Otherwise we
+ * could also set it to max - 240 = 22 / 72. Thus VPR depends only on
+ * the selected standard, and DPR and DSR are selected according to
+ * cropping. Q: how does the client detect the first valid line? Does
+ * HSYNC stay inactive during invalid (black) lines?
+ */
+ black_left = width_max - VOU_MAX_IMAGE_WIDTH;
+ black_top = 20;
+
+ dsr_h = rect->width + rect->left;
+ dsr_v = frame_out_height + frame_out_top;
+
+ dev_dbg(vou_dev->v4l2_dev.dev,
+ "image %ux%u, black %u:%u, offset %u:%u, display %ux%u\n",
+ pix->width, frame_in_height, black_left, black_top,
+ rect->left, frame_out_top, dsr_h, dsr_v);
+
+ /* VOUISR height - half of a frame height in frame mode */
+ sh_vou_reg_ab_write(vou_dev, VOUISR, (pix->width << 16) | frame_in_height);
+ sh_vou_reg_ab_write(vou_dev, VOUVPR, (black_left << 16) | black_top);
+ sh_vou_reg_ab_write(vou_dev, VOUDPR, (rect->left << 16) | frame_out_top);
+ sh_vou_reg_ab_write(vou_dev, VOUDSR, (dsr_h << 16) | dsr_v);
+
+ /*
+ * if necessary, we could set VOUHIR to
+ * max(black_left + dsr_h, width_max) here
+ */
+
+ if (w_idx)
+ vouvcr |= (1 << 15) | (vou_scale_h_fld[w_idx - 1] << 4);
+ if (h_idx)
+ vouvcr |= (1 << 14) | vou_scale_v_fld[h_idx - 1];
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "0x%08x: scaling 0x%x\n",
+ fmt->pfmt, vouvcr);
+
+	/* To produce a colour bar for testing, set bit 23 of VOUVCR */
+ sh_vou_reg_ab_write(vou_dev, VOUVCR, vouvcr);
+ sh_vou_reg_ab_write(vou_dev, VOUDFR,
+ fmt->pkf | (fmt->yf << 8) | (fmt->rgb << 16));
+}
+
+struct sh_vou_geometry {
+ struct v4l2_rect output;
+ unsigned int in_width;
+ unsigned int in_height;
+ int scale_idx_h;
+ int scale_idx_v;
+};
+
+/*
+ * Find the input geometry that, using VOU scaling, produces output closest
+ * to the requested rectangle
+ */
+static void vou_adjust_input(struct sh_vou_geometry *geo, v4l2_std_id std)
+{
+	/* The compiler cannot know that best and idx will indeed be set */
+ unsigned int best_err = UINT_MAX, best = 0, img_height_max;
+ int i, idx = 0;
+
+ if (std & V4L2_STD_525_60)
+ img_height_max = 480;
+ else
+ img_height_max = 576;
+
+ /* Image width must be a multiple of 4 */
+ v4l_bound_align_image(&geo->in_width,
+ VOU_MIN_IMAGE_WIDTH, VOU_MAX_IMAGE_WIDTH, 2,
+ &geo->in_height,
+ VOU_MIN_IMAGE_HEIGHT, img_height_max, 1, 0);
+
+ /* Select scales to come as close as possible to the output image */
+ for (i = ARRAY_SIZE(vou_scale_h_num) - 1; i >= 0; i--) {
+ unsigned int err;
+ unsigned int found = geo->output.width * vou_scale_h_den[i] /
+ vou_scale_h_num[i];
+
+ if (found > VOU_MAX_IMAGE_WIDTH)
+ /* scales increase */
+ break;
+
+ err = abs(found - geo->in_width);
+ if (err < best_err) {
+ best_err = err;
+ idx = i;
+ best = found;
+ }
+ if (!err)
+ break;
+ }
+
+ geo->in_width = best;
+ geo->scale_idx_h = idx;
+
+ best_err = UINT_MAX;
+
+ /* This loop can be replaced with one division */
+ for (i = ARRAY_SIZE(vou_scale_v_num) - 1; i >= 0; i--) {
+ unsigned int err;
+ unsigned int found = geo->output.height * vou_scale_v_den[i] /
+ vou_scale_v_num[i];
+
+ if (found > img_height_max)
+ /* scales increase */
+ break;
+
+ err = abs(found - geo->in_height);
+ if (err < best_err) {
+ best_err = err;
+ idx = i;
+ best = found;
+ }
+ if (!err)
+ break;
+ }
+
+ geo->in_height = best;
+ geo->scale_idx_v = idx;
+}
+
+/*
+ * Find the output geometry that we can produce, using VOU scaling, closest to
+ * the requested rectangle
+ */
+static void vou_adjust_output(struct sh_vou_geometry *geo, v4l2_std_id std)
+{
+ unsigned int best_err = UINT_MAX, best = geo->in_width,
+ width_max, height_max, img_height_max;
+ int i, idx_h = 0, idx_v = 0;
+
+ if (std & V4L2_STD_525_60) {
+ width_max = 858;
+ height_max = 262 * 2;
+ img_height_max = 480;
+ } else {
+ width_max = 864;
+ height_max = 312 * 2;
+ img_height_max = 576;
+ }
+
+ /* Select scales to come as close as possible to the output image */
+ for (i = 0; i < ARRAY_SIZE(vou_scale_h_num); i++) {
+ unsigned int err;
+ unsigned int found = geo->in_width * vou_scale_h_num[i] /
+ vou_scale_h_den[i];
+
+ if (found > VOU_MAX_IMAGE_WIDTH)
+ /* scales increase */
+ break;
+
+ err = abs(found - geo->output.width);
+ if (err < best_err) {
+ best_err = err;
+ idx_h = i;
+ best = found;
+ }
+ if (!err)
+ break;
+ }
+
+ geo->output.width = best;
+ geo->scale_idx_h = idx_h;
+ if (geo->output.left + best > width_max)
+ geo->output.left = width_max - best;
+
+ pr_debug("%s(): W %u * %u/%u = %u\n", __func__, geo->in_width,
+ vou_scale_h_num[idx_h], vou_scale_h_den[idx_h], best);
+
+ best_err = UINT_MAX;
+
+ /* This loop can be replaced with one division */
+ for (i = 0; i < ARRAY_SIZE(vou_scale_v_num); i++) {
+ unsigned int err;
+ unsigned int found = geo->in_height * vou_scale_v_num[i] /
+ vou_scale_v_den[i];
+
+ if (found > img_height_max)
+ /* scales increase */
+ break;
+
+ err = abs(found - geo->output.height);
+ if (err < best_err) {
+ best_err = err;
+ idx_v = i;
+ best = found;
+ }
+ if (!err)
+ break;
+ }
+
+ geo->output.height = best;
+ geo->scale_idx_v = idx_v;
+ if (geo->output.top + best > height_max)
+ geo->output.top = height_max - best;
+
+ pr_debug("%s(): H %u * %u/%u = %u\n", __func__, geo->in_height,
+ vou_scale_v_num[idx_v], vou_scale_v_den[idx_v], best);
+}
+
+static int sh_vou_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct sh_vou_device *vou_dev = video_drvdata(file);
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+ unsigned int img_height_max;
+ int pix_idx;
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
+
+ pix->field = V4L2_FIELD_INTERLACED;
+ pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
+ pix->ycbcr_enc = pix->quantization = 0;
+
+ for (pix_idx = 0; pix_idx < ARRAY_SIZE(vou_fmt); pix_idx++)
+ if (vou_fmt[pix_idx].pfmt == pix->pixelformat)
+ break;
+
+ if (pix_idx == ARRAY_SIZE(vou_fmt))
+ return -EINVAL;
+
+ if (vou_dev->std & V4L2_STD_525_60)
+ img_height_max = 480;
+ else
+ img_height_max = 576;
+
+ v4l_bound_align_image(&pix->width,
+ VOU_MIN_IMAGE_WIDTH, VOU_MAX_IMAGE_WIDTH, 2,
+ &pix->height,
+ VOU_MIN_IMAGE_HEIGHT, img_height_max, 1, 0);
+ pix->bytesperline = pix->width * vou_fmt[pix_idx].bpl;
+ pix->sizeimage = pix->height * ((pix->width * vou_fmt[pix_idx].bpp) >> 3);
+
+ return 0;
+}
+
+static int sh_vou_set_fmt_vid_out(struct sh_vou_device *vou_dev,
+ struct v4l2_pix_format *pix)
+{
+ unsigned int img_height_max;
+ struct sh_vou_geometry geo;
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ /* Revisit: is this the correct code? */
+ .format.code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .format.field = V4L2_FIELD_INTERLACED,
+ .format.colorspace = V4L2_COLORSPACE_SMPTE170M,
+ };
+ struct v4l2_mbus_framefmt *mbfmt = &format.format;
+ int pix_idx;
+ int ret;
+
+ if (vb2_is_busy(&vou_dev->queue))
+ return -EBUSY;
+
+ for (pix_idx = 0; pix_idx < ARRAY_SIZE(vou_fmt); pix_idx++)
+ if (vou_fmt[pix_idx].pfmt == pix->pixelformat)
+ break;
+
+ geo.in_width = pix->width;
+ geo.in_height = pix->height;
+ geo.output = vou_dev->rect;
+
+ vou_adjust_output(&geo, vou_dev->std);
+
+ mbfmt->width = geo.output.width;
+ mbfmt->height = geo.output.height;
+ ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, pad,
+ set_fmt, NULL, &format);
+	/* Must be implemented, so don't check for -ENOIOCTLCMD */
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s(): %ux%u -> %ux%u\n", __func__,
+ geo.output.width, geo.output.height, mbfmt->width, mbfmt->height);
+
+ if (vou_dev->std & V4L2_STD_525_60)
+ img_height_max = 480;
+ else
+ img_height_max = 576;
+
+ /* Sanity checks */
+ if ((unsigned)mbfmt->width > VOU_MAX_IMAGE_WIDTH ||
+ (unsigned)mbfmt->height > img_height_max ||
+ mbfmt->code != MEDIA_BUS_FMT_YUYV8_2X8)
+ return -EIO;
+
+ if (mbfmt->width != geo.output.width ||
+ mbfmt->height != geo.output.height) {
+ geo.output.width = mbfmt->width;
+ geo.output.height = mbfmt->height;
+
+ vou_adjust_input(&geo, vou_dev->std);
+ }
+
+ /* We tried to preserve output rectangle, but it could have changed */
+ vou_dev->rect = geo.output;
+ pix->width = geo.in_width;
+ pix->height = geo.in_height;
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s(): %ux%u\n", __func__,
+ pix->width, pix->height);
+
+ vou_dev->pix_idx = pix_idx;
+
+ vou_dev->pix = *pix;
+
+ sh_vou_configure_geometry(vou_dev, pix_idx,
+ geo.scale_idx_h, geo.scale_idx_v);
+
+ return 0;
+}
+
+static int sh_vou_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct sh_vou_device *vou_dev = video_drvdata(file);
+ int ret = sh_vou_try_fmt_vid_out(file, priv, fmt);
+
+ if (ret)
+ return ret;
+ return sh_vou_set_fmt_vid_out(vou_dev, &fmt->fmt.pix);
+}
+
+static int sh_vou_enum_output(struct file *file, void *fh,
+ struct v4l2_output *a)
+{
+ struct sh_vou_device *vou_dev = video_drvdata(file);
+
+ if (a->index)
+ return -EINVAL;
+ strscpy(a->name, "Video Out", sizeof(a->name));
+ a->type = V4L2_OUTPUT_TYPE_ANALOG;
+ a->std = vou_dev->vdev.tvnorms;
+ return 0;
+}
+
+static int sh_vou_g_output(struct file *file, void *fh, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int sh_vou_s_output(struct file *file, void *fh, unsigned int i)
+{
+ return i ? -EINVAL : 0;
+}
+
+static u32 sh_vou_ntsc_mode(enum sh_vou_bus_fmt bus_fmt)
+{
+ switch (bus_fmt) {
+ default:
+ pr_warn("%s(): Invalid bus-format code %d, using default 8-bit\n",
+ __func__, bus_fmt);
+ fallthrough;
+ case SH_VOU_BUS_8BIT:
+ return 1;
+ case SH_VOU_BUS_16BIT:
+ return 0;
+ case SH_VOU_BUS_BT656:
+ return 3;
+ }
+}
+
+static int sh_vou_s_std(struct file *file, void *priv, v4l2_std_id std_id)
+{
+ struct sh_vou_device *vou_dev = video_drvdata(file);
+ int ret;
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s(): 0x%llx\n", __func__, std_id);
+
+ if (std_id == vou_dev->std)
+ return 0;
+
+ if (vb2_is_busy(&vou_dev->queue))
+ return -EBUSY;
+
+ ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video,
+ s_std_output, std_id);
+	/* Shall we continue if the subdev doesn't support .s_std_output()? */
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+
+ vou_dev->rect.top = vou_dev->rect.left = 0;
+ vou_dev->rect.width = VOU_MAX_IMAGE_WIDTH;
+ if (std_id & V4L2_STD_525_60) {
+ sh_vou_reg_ab_set(vou_dev, VOUCR,
+ sh_vou_ntsc_mode(vou_dev->pdata->bus_fmt) << 29, 7 << 29);
+ vou_dev->rect.height = 480;
+ } else {
+ sh_vou_reg_ab_set(vou_dev, VOUCR, 5 << 29, 7 << 29);
+ vou_dev->rect.height = 576;
+ }
+
+ vou_dev->pix.width = vou_dev->rect.width;
+ vou_dev->pix.height = vou_dev->rect.height;
+ vou_dev->pix.bytesperline =
+ vou_dev->pix.width * vou_fmt[vou_dev->pix_idx].bpl;
+ vou_dev->pix.sizeimage = vou_dev->pix.height *
+ ((vou_dev->pix.width * vou_fmt[vou_dev->pix_idx].bpp) >> 3);
+ vou_dev->std = std_id;
+ sh_vou_set_fmt_vid_out(vou_dev, &vou_dev->pix);
+
+ return 0;
+}
+
+static int sh_vou_g_std(struct file *file, void *priv, v4l2_std_id *std)
+{
+ struct sh_vou_device *vou_dev = video_drvdata(file);
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
+
+ *std = vou_dev->std;
+
+ return 0;
+}
+
+static int sh_vou_log_status(struct file *file, void *priv)
+{
+ struct sh_vou_device *vou_dev = video_drvdata(file);
+
+ pr_info("VOUER: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUER));
+ pr_info("VOUCR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUCR));
+ pr_info("VOUSTR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUSTR));
+ pr_info("VOUVCR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUVCR));
+ pr_info("VOUISR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUISR));
+ pr_info("VOUBCR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUBCR));
+ pr_info("VOUDPR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUDPR));
+ pr_info("VOUDSR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUDSR));
+ pr_info("VOUVPR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUVPR));
+ pr_info("VOUIR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUIR));
+ pr_info("VOUSRR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUSRR));
+ pr_info("VOUMSR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUMSR));
+ pr_info("VOUHIR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUHIR));
+ pr_info("VOUDFR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUDFR));
+ pr_info("VOUAD1R: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUAD1R));
+ pr_info("VOUAD2R: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUAD2R));
+ pr_info("VOUAIR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUAIR));
+ pr_info("VOUSWR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUSWR));
+ pr_info("VOURCR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOURCR));
+ pr_info("VOURPR: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOURPR));
+ return 0;
+}
+
+static int sh_vou_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *sel)
+{
+ struct sh_vou_device *vou_dev = video_drvdata(file);
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ switch (sel->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ sel->r = vou_dev->rect;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = VOU_MAX_IMAGE_WIDTH;
+ if (vou_dev->std & V4L2_STD_525_60)
+ sel->r.height = 480;
+ else
+ sel->r.height = 576;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Assume a dull encoder, do all the work ourselves. */
+static int sh_vou_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *sel)
+{
+ struct v4l2_rect *rect = &sel->r;
+ struct sh_vou_device *vou_dev = video_drvdata(file);
+ struct v4l2_subdev_selection sd_sel = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .target = V4L2_SEL_TGT_COMPOSE,
+ };
+ struct v4l2_pix_format *pix = &vou_dev->pix;
+ struct sh_vou_geometry geo;
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ /* Revisit: is this the correct code? */
+ .format.code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .format.field = V4L2_FIELD_INTERLACED,
+ .format.colorspace = V4L2_COLORSPACE_SMPTE170M,
+ };
+ unsigned int img_height_max;
+ int ret;
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
+ sel->target != V4L2_SEL_TGT_COMPOSE)
+ return -EINVAL;
+
+ if (vb2_is_busy(&vou_dev->queue))
+ return -EBUSY;
+
+ if (vou_dev->std & V4L2_STD_525_60)
+ img_height_max = 480;
+ else
+ img_height_max = 576;
+
+ v4l_bound_align_image(&rect->width,
+ VOU_MIN_IMAGE_WIDTH, VOU_MAX_IMAGE_WIDTH, 1,
+ &rect->height,
+ VOU_MIN_IMAGE_HEIGHT, img_height_max, 1, 0);
+
+ if (rect->width + rect->left > VOU_MAX_IMAGE_WIDTH)
+ rect->left = VOU_MAX_IMAGE_WIDTH - rect->width;
+
+ if (rect->height + rect->top > img_height_max)
+ rect->top = img_height_max - rect->height;
+
+ geo.output = *rect;
+ geo.in_width = pix->width;
+ geo.in_height = pix->height;
+
+ /* Configure the encoder one-to-one, position at 0, ignore errors */
+ sd_sel.r.width = geo.output.width;
+ sd_sel.r.height = geo.output.height;
+ /*
+	 * We first issue an S_SELECTION, so that the subsequent S_FMT delivers the
+ * final encoder configuration.
+ */
+ v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, pad,
+ set_selection, NULL, &sd_sel);
+ format.format.width = geo.output.width;
+ format.format.height = geo.output.height;
+ ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, pad,
+ set_fmt, NULL, &format);
+	/* Must be implemented, so don't check for -ENOIOCTLCMD */
+ if (ret < 0)
+ return ret;
+
+ /* Sanity checks */
+ if ((unsigned)format.format.width > VOU_MAX_IMAGE_WIDTH ||
+ (unsigned)format.format.height > img_height_max ||
+ format.format.code != MEDIA_BUS_FMT_YUYV8_2X8)
+ return -EIO;
+
+ geo.output.width = format.format.width;
+ geo.output.height = format.format.height;
+
+ /*
+	 * No down-scaling. According to the API, the current call has precedence:
+ * https://linuxtv.org/downloads/v4l-dvb-apis/uapi/v4l/crop.html#cropping-structures
+ */
+ vou_adjust_input(&geo, vou_dev->std);
+
+ /* We tried to preserve output rectangle, but it could have changed */
+ vou_dev->rect = geo.output;
+ pix->width = geo.in_width;
+ pix->height = geo.in_height;
+
+ sh_vou_configure_geometry(vou_dev, vou_dev->pix_idx,
+ geo.scale_idx_h, geo.scale_idx_v);
+
+ return 0;
+}
+
+static irqreturn_t sh_vou_isr(int irq, void *dev_id)
+{
+ struct sh_vou_device *vou_dev = dev_id;
+ static unsigned long j;
+ struct sh_vou_buffer *vb;
+ static int cnt;
+ u32 irq_status = sh_vou_reg_a_read(vou_dev, VOUIR), masked;
+ u32 vou_status = sh_vou_reg_a_read(vou_dev, VOUSTR);
+
+ if (!(irq_status & 0x300)) {
+ if (printk_timed_ratelimit(&j, 500))
+ dev_warn(vou_dev->v4l2_dev.dev, "IRQ status 0x%x!\n",
+ irq_status);
+ return IRQ_NONE;
+ }
+
+ spin_lock(&vou_dev->lock);
+ if (!vou_dev->active || list_empty(&vou_dev->buf_list)) {
+ if (printk_timed_ratelimit(&j, 500))
+ dev_warn(vou_dev->v4l2_dev.dev,
+ "IRQ without active buffer: %x!\n", irq_status);
+ /* Just ack: buf_release will disable further interrupts */
+ sh_vou_reg_a_set(vou_dev, VOUIR, 0, 0x300);
+ spin_unlock(&vou_dev->lock);
+ return IRQ_HANDLED;
+ }
+
+ masked = ~(0x300 & irq_status) & irq_status & 0x30304;
+ dev_dbg(vou_dev->v4l2_dev.dev,
+ "IRQ status 0x%x -> 0x%x, VOU status 0x%x, cnt %d\n",
+ irq_status, masked, vou_status, cnt);
+
+ cnt++;
+ /* side = vou_status & 0x10000; */
+
+ /* Clear only set interrupts */
+ sh_vou_reg_a_write(vou_dev, VOUIR, masked);
+
+ vb = vou_dev->active;
+ if (list_is_singular(&vb->list)) {
+ /* Keep cycling while no next buffer is available */
+ sh_vou_schedule_next(vou_dev, &vb->vb);
+ spin_unlock(&vou_dev->lock);
+ return IRQ_HANDLED;
+ }
+
+ list_del(&vb->list);
+
+ vb->vb.vb2_buf.timestamp = ktime_get_ns();
+ vb->vb.sequence = vou_dev->sequence++;
+ vb->vb.field = V4L2_FIELD_INTERLACED;
+ vb2_buffer_done(&vb->vb.vb2_buf, VB2_BUF_STATE_DONE);
+
+ vou_dev->active = list_entry(vou_dev->buf_list.next,
+ struct sh_vou_buffer, list);
+
+ if (list_is_singular(&vou_dev->buf_list)) {
+ /* Keep cycling while no next buffer is available */
+ sh_vou_schedule_next(vou_dev, &vou_dev->active->vb);
+ } else {
+ struct sh_vou_buffer *new = list_entry(vou_dev->active->list.next,
+ struct sh_vou_buffer, list);
+ sh_vou_schedule_next(vou_dev, &new->vb);
+ }
+
+ spin_unlock(&vou_dev->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int sh_vou_hw_init(struct sh_vou_device *vou_dev)
+{
+ struct sh_vou_pdata *pdata = vou_dev->pdata;
+ u32 voucr = sh_vou_ntsc_mode(pdata->bus_fmt) << 29;
+ int i = 100;
+
+ /* Disable all IRQs */
+ sh_vou_reg_a_write(vou_dev, VOUIR, 0);
+
+ /* Reset VOU interfaces - registers unaffected */
+ sh_vou_reg_a_write(vou_dev, VOUSRR, 0x101);
+ while (--i && (sh_vou_reg_a_read(vou_dev, VOUSRR) & 0x101))
+ udelay(1);
+
+ if (!i)
+ return -ETIMEDOUT;
+
+ dev_dbg(vou_dev->v4l2_dev.dev, "Reset took %dus\n", 100 - i);
+
+ if (pdata->flags & SH_VOU_PCLK_FALLING)
+ voucr |= 1 << 28;
+ if (pdata->flags & SH_VOU_HSYNC_LOW)
+ voucr |= 1 << 27;
+ if (pdata->flags & SH_VOU_VSYNC_LOW)
+ voucr |= 1 << 26;
+ sh_vou_reg_ab_set(vou_dev, VOUCR, voucr, 0xfc000000);
+
+ /* Manual register side switching at first */
+ sh_vou_reg_a_write(vou_dev, VOURCR, 4);
+	/* Default - fixed HSYNC length; can be made configurable if required */
+ sh_vou_reg_ab_write(vou_dev, VOUMSR, 0x800000);
+
+ sh_vou_set_fmt_vid_out(vou_dev, &vou_dev->pix);
+
+ return 0;
+}
+
+/* File operations */
+static int sh_vou_open(struct file *file)
+{
+ struct sh_vou_device *vou_dev = video_drvdata(file);
+ int err;
+
+ if (mutex_lock_interruptible(&vou_dev->fop_lock))
+ return -ERESTARTSYS;
+
+ err = v4l2_fh_open(file);
+ if (err)
+ goto done_open;
+ if (v4l2_fh_is_singular_file(file) &&
+ vou_dev->status == SH_VOU_INITIALISING) {
+ /* First open */
+ err = pm_runtime_resume_and_get(vou_dev->v4l2_dev.dev);
+ if (err < 0) {
+ v4l2_fh_release(file);
+ goto done_open;
+ }
+ err = sh_vou_hw_init(vou_dev);
+ if (err < 0) {
+ pm_runtime_put(vou_dev->v4l2_dev.dev);
+ v4l2_fh_release(file);
+ } else {
+ vou_dev->status = SH_VOU_IDLE;
+ }
+ }
+done_open:
+ mutex_unlock(&vou_dev->fop_lock);
+ return err;
+}
+
+static int sh_vou_release(struct file *file)
+{
+ struct sh_vou_device *vou_dev = video_drvdata(file);
+ bool is_last;
+
+ mutex_lock(&vou_dev->fop_lock);
+ is_last = v4l2_fh_is_singular_file(file);
+ _vb2_fop_release(file, NULL);
+ if (is_last) {
+ /* Last close */
+ vou_dev->status = SH_VOU_INITIALISING;
+ sh_vou_reg_a_set(vou_dev, VOUER, 0, 0x101);
+ pm_runtime_put(vou_dev->v4l2_dev.dev);
+ }
+ mutex_unlock(&vou_dev->fop_lock);
+ return 0;
+}
+
+/* sh_vou display ioctl operations */
+static const struct v4l2_ioctl_ops sh_vou_ioctl_ops = {
+ .vidioc_querycap = sh_vou_querycap,
+ .vidioc_enum_fmt_vid_out = sh_vou_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = sh_vou_g_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = sh_vou_s_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = sh_vou_try_fmt_vid_out,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_g_output = sh_vou_g_output,
+ .vidioc_s_output = sh_vou_s_output,
+ .vidioc_enum_output = sh_vou_enum_output,
+ .vidioc_s_std = sh_vou_s_std,
+ .vidioc_g_std = sh_vou_g_std,
+ .vidioc_g_selection = sh_vou_g_selection,
+ .vidioc_s_selection = sh_vou_s_selection,
+ .vidioc_log_status = sh_vou_log_status,
+};
+
+static const struct v4l2_file_operations sh_vou_fops = {
+ .owner = THIS_MODULE,
+ .open = sh_vou_open,
+ .release = sh_vou_release,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+ .poll = vb2_fop_poll,
+ .write = vb2_fop_write,
+};
+
+static const struct video_device sh_vou_video_template = {
+ .name = "sh_vou",
+ .fops = &sh_vou_fops,
+ .ioctl_ops = &sh_vou_ioctl_ops,
+ .tvnorms = V4L2_STD_525_60, /* PAL only supported in 8-bit non-bt656 mode */
+ .vfl_dir = VFL_DIR_TX,
+ .device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING,
+};
+
+static int sh_vou_probe(struct platform_device *pdev)
+{
+ struct sh_vou_pdata *vou_pdata = pdev->dev.platform_data;
+ struct v4l2_rect *rect;
+ struct v4l2_pix_format *pix;
+ struct i2c_adapter *i2c_adap;
+ struct video_device *vdev;
+ struct sh_vou_device *vou_dev;
+ struct v4l2_subdev *subdev;
+ struct vb2_queue *q;
+ int irq, ret;
+
+ if (!vou_pdata) {
+ dev_err(&pdev->dev, "Insufficient VOU platform information.\n");
+ return -ENODEV;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ vou_dev = devm_kzalloc(&pdev->dev, sizeof(*vou_dev), GFP_KERNEL);
+ if (!vou_dev)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&vou_dev->buf_list);
+ spin_lock_init(&vou_dev->lock);
+ mutex_init(&vou_dev->fop_lock);
+ vou_dev->pdata = vou_pdata;
+ vou_dev->status = SH_VOU_INITIALISING;
+ vou_dev->pix_idx = 1;
+
+ rect = &vou_dev->rect;
+ pix = &vou_dev->pix;
+
+ /* Fill in defaults */
+ vou_dev->std = V4L2_STD_NTSC_M;
+ rect->left = 0;
+ rect->top = 0;
+ rect->width = VOU_MAX_IMAGE_WIDTH;
+ rect->height = 480;
+ pix->width = VOU_MAX_IMAGE_WIDTH;
+ pix->height = 480;
+ pix->pixelformat = V4L2_PIX_FMT_NV16;
+ pix->field = V4L2_FIELD_INTERLACED;
+ pix->bytesperline = VOU_MAX_IMAGE_WIDTH;
+ pix->sizeimage = VOU_MAX_IMAGE_WIDTH * 2 * 480;
+ pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
+
+ vou_dev->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(vou_dev->base))
+ return PTR_ERR(vou_dev->base);
+
+ ret = devm_request_irq(&pdev->dev, irq, sh_vou_isr, 0, "vou", vou_dev);
+ if (ret < 0)
+ return ret;
+
+ ret = v4l2_device_register(&pdev->dev, &vou_dev->v4l2_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Error registering v4l2 device\n");
+ return ret;
+ }
+
+ vdev = &vou_dev->vdev;
+ *vdev = sh_vou_video_template;
+ if (vou_pdata->bus_fmt == SH_VOU_BUS_8BIT)
+ vdev->tvnorms |= V4L2_STD_PAL;
+ vdev->v4l2_dev = &vou_dev->v4l2_dev;
+ vdev->release = video_device_release_empty;
+ vdev->lock = &vou_dev->fop_lock;
+
+ video_set_drvdata(vdev, vou_dev);
+
+ /* Initialize the vb2 queue */
+ q = &vou_dev->queue;
+ q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_WRITE;
+ q->drv_priv = vou_dev;
+ q->buf_struct_size = sizeof(struct sh_vou_buffer);
+ q->ops = &sh_vou_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 2;
+ q->lock = &vou_dev->fop_lock;
+ q->dev = &pdev->dev;
+ ret = vb2_queue_init(q);
+ if (ret)
+ goto ei2cgadap;
+
+ vdev->queue = q;
+ INIT_LIST_HEAD(&vou_dev->buf_list);
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_resume(&pdev->dev);
+
+ i2c_adap = i2c_get_adapter(vou_pdata->i2c_adap);
+ if (!i2c_adap) {
+ ret = -ENODEV;
+ goto ei2cgadap;
+ }
+
+ ret = sh_vou_hw_init(vou_dev);
+ if (ret < 0)
+ goto ereset;
+
+ subdev = v4l2_i2c_new_subdev_board(&vou_dev->v4l2_dev, i2c_adap,
+ vou_pdata->board_info, NULL);
+ if (!subdev) {
+ ret = -ENOMEM;
+ goto ei2cnd;
+ }
+
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
+ if (ret < 0)
+ goto evregdev;
+
+ return 0;
+
+evregdev:
+ei2cnd:
+ereset:
+ i2c_put_adapter(i2c_adap);
+ei2cgadap:
+ pm_runtime_disable(&pdev->dev);
+ v4l2_device_unregister(&vou_dev->v4l2_dev);
+ return ret;
+}
+
+static void sh_vou_remove(struct platform_device *pdev)
+{
+ struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+ struct sh_vou_device *vou_dev = container_of(v4l2_dev,
+ struct sh_vou_device, v4l2_dev);
+ struct v4l2_subdev *sd = list_entry(v4l2_dev->subdevs.next,
+ struct v4l2_subdev, list);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ pm_runtime_disable(&pdev->dev);
+ video_unregister_device(&vou_dev->vdev);
+ i2c_put_adapter(client->adapter);
+ v4l2_device_unregister(&vou_dev->v4l2_dev);
+}
+
+static struct platform_driver sh_vou = {
+ .remove_new = sh_vou_remove,
+ .driver = {
+ .name = "sh-vou",
+ },
+};
+
+module_platform_driver_probe(sh_vou, sh_vou_probe);
+
+MODULE_DESCRIPTION("SuperH VOU driver");
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.1.0");
+MODULE_ALIAS("platform:sh-vou");
diff --git a/drivers/media/platform/renesas/vsp1/Makefile b/drivers/media/platform/renesas/vsp1/Makefile
new file mode 100644
index 0000000000..4bb4dcbef7
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+vsp1-y := vsp1_drv.o vsp1_entity.o vsp1_pipe.o
+vsp1-y += vsp1_dl.o vsp1_drm.o vsp1_video.o
+vsp1-y += vsp1_rpf.o vsp1_rwpf.o vsp1_wpf.o
+vsp1-y += vsp1_clu.o vsp1_hsit.o vsp1_lut.o
+vsp1-y += vsp1_brx.o vsp1_sru.o vsp1_uds.o
+vsp1-y += vsp1_hgo.o vsp1_hgt.o vsp1_histo.o
+vsp1-y += vsp1_lif.o vsp1_uif.o
+
+obj-$(CONFIG_VIDEO_RENESAS_VSP1) += vsp1.o
diff --git a/drivers/media/platform/renesas/vsp1/vsp1.h b/drivers/media/platform/renesas/vsp1/vsp1.h
new file mode 100644
index 0000000000..2f6f0c6ae5
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1.h -- R-Car VSP1 Driver
+ *
+ * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+#ifndef __VSP1_H__
+#define __VSP1_H__
+
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+
+#include <media/media-device.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1_regs.h"
+
+struct clk;
+struct device;
+struct rcar_fcp_device;
+struct reset_control;
+
+struct vsp1_drm;
+struct vsp1_entity;
+struct vsp1_platform_data;
+struct vsp1_brx;
+struct vsp1_clu;
+struct vsp1_hgo;
+struct vsp1_hgt;
+struct vsp1_hsit;
+struct vsp1_lif;
+struct vsp1_lut;
+struct vsp1_rwpf;
+struct vsp1_sru;
+struct vsp1_uds;
+struct vsp1_uif;
+
+#define VSP1_MAX_LIF 2
+#define VSP1_MAX_RPF 5
+#define VSP1_MAX_UDS 3
+#define VSP1_MAX_UIF 2
+#define VSP1_MAX_WPF 4
+
+#define VSP1_HAS_LUT BIT(1)
+#define VSP1_HAS_SRU BIT(2)
+#define VSP1_HAS_BRU BIT(3)
+#define VSP1_HAS_CLU BIT(4)
+#define VSP1_HAS_WPF_VFLIP BIT(5)
+#define VSP1_HAS_WPF_HFLIP BIT(6)
+#define VSP1_HAS_HGO BIT(7)
+#define VSP1_HAS_HGT BIT(8)
+#define VSP1_HAS_BRS BIT(9)
+#define VSP1_HAS_EXT_DL BIT(10)
+#define VSP1_HAS_NON_ZERO_LBA BIT(11)
+
+struct vsp1_device_info {
+ u32 version;
+ const char *model;
+ unsigned int gen;
+ unsigned int features;
+ unsigned int lif_count;
+ unsigned int rpf_count;
+ unsigned int uds_count;
+ unsigned int uif_count;
+ unsigned int wpf_count;
+ unsigned int num_bru_inputs;
+ u8 soc;
+ bool uapi;
+};
+
+#define vsp1_feature(vsp1, f) ((vsp1)->info->features & (f))
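+
+/*
+ * Example (illustrative only):
+ *
+ *	if (vsp1_feature(vsp1, VSP1_HAS_BRS))
+ *		... this VSP instance has a BRS blend unit ...
+ */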
+
+struct vsp1_device {
+ struct device *dev;
+ const struct vsp1_device_info *info;
+ u32 version;
+
+ void __iomem *mmio;
+ struct rcar_fcp_device *fcp;
+ struct device *bus_master;
+ struct reset_control *rstc;
+
+ struct vsp1_brx *brs;
+ struct vsp1_brx *bru;
+ struct vsp1_clu *clu;
+ struct vsp1_hgo *hgo;
+ struct vsp1_hgt *hgt;
+ struct vsp1_hsit *hsi;
+ struct vsp1_hsit *hst;
+ struct vsp1_lif *lif[VSP1_MAX_LIF];
+ struct vsp1_lut *lut;
+ struct vsp1_rwpf *rpf[VSP1_MAX_RPF];
+ struct vsp1_sru *sru;
+ struct vsp1_uds *uds[VSP1_MAX_UDS];
+ struct vsp1_uif *uif[VSP1_MAX_UIF];
+ struct vsp1_rwpf *wpf[VSP1_MAX_WPF];
+
+ struct list_head entities;
+ struct list_head videos;
+
+ struct v4l2_device v4l2_dev;
+ struct media_device media_dev;
+ struct media_entity_operations media_ops;
+
+ struct vsp1_drm *drm;
+};
+
+int vsp1_device_get(struct vsp1_device *vsp1);
+void vsp1_device_put(struct vsp1_device *vsp1);
+
+int vsp1_reset_wpf(struct vsp1_device *vsp1, unsigned int index);
+
+static inline u32 vsp1_read(struct vsp1_device *vsp1, u32 reg)
+{
+ return ioread32(vsp1->mmio + reg);
+}
+
+static inline void vsp1_write(struct vsp1_device *vsp1, u32 reg, u32 data)
+{
+ iowrite32(data, vsp1->mmio + reg);
+}
+
+#endif /* __VSP1_H__ */
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_brx.c b/drivers/media/platform/renesas/vsp1/vsp1_brx.c
new file mode 100644
index 0000000000..89385b4cab
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_brx.c
@@ -0,0 +1,455 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_brx.c -- R-Car VSP1 Blend ROP Unit (BRU and BRS)
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_brx.h"
+#include "vsp1_dl.h"
+#include "vsp1_pipe.h"
+#include "vsp1_rwpf.h"
+#include "vsp1_video.h"
+
+#define BRX_MIN_SIZE 1U
+#define BRX_MAX_SIZE 8190U
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline void vsp1_brx_write(struct vsp1_brx *brx,
+ struct vsp1_dl_body *dlb, u32 reg, u32 data)
+{
+ vsp1_dl_body_write(dlb, brx->base + reg, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * Controls
+ */
+
+static int brx_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vsp1_brx *brx =
+ container_of(ctrl->handler, struct vsp1_brx, ctrls);
+
+ switch (ctrl->id) {
+ case V4L2_CID_BG_COLOR:
+ brx->bgcolor = ctrl->val;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops brx_ctrl_ops = {
+ .s_ctrl = brx_s_ctrl,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+/*
+ * The BRx can't perform format conversion, all sink and source formats must be
+ * identical. We pick the format on the first sink pad (pad 0) and propagate it
+ * to all other pads.
+ */
+
+static int brx_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ static const unsigned int codes[] = {
+ MEDIA_BUS_FMT_ARGB8888_1X32,
+ MEDIA_BUS_FMT_AYUV8_1X32,
+ };
+
+ return vsp1_subdev_enum_mbus_code(subdev, sd_state, code, codes,
+ ARRAY_SIZE(codes));
+}
+
+static int brx_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index)
+ return -EINVAL;
+
+ if (fse->code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
+ fse->code != MEDIA_BUS_FMT_AYUV8_1X32)
+ return -EINVAL;
+
+ fse->min_width = BRX_MIN_SIZE;
+ fse->max_width = BRX_MAX_SIZE;
+ fse->min_height = BRX_MIN_SIZE;
+ fse->max_height = BRX_MAX_SIZE;
+
+ return 0;
+}
+
+static struct v4l2_rect *brx_get_compose(struct vsp1_brx *brx,
+ struct v4l2_subdev_state *sd_state,
+ unsigned int pad)
+{
+ return v4l2_subdev_get_try_compose(&brx->entity.subdev, sd_state, pad);
+}
+
+static void brx_try_format(struct vsp1_brx *brx,
+ struct v4l2_subdev_state *sd_state,
+ unsigned int pad, struct v4l2_mbus_framefmt *fmt)
+{
+ struct v4l2_mbus_framefmt *format;
+
+ switch (pad) {
+ case BRX_PAD_SINK(0):
+ /* Default to YUV if the requested format is not supported. */
+ if (fmt->code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
+ fmt->code != MEDIA_BUS_FMT_AYUV8_1X32)
+ fmt->code = MEDIA_BUS_FMT_AYUV8_1X32;
+ break;
+
+ default:
+ /* The BRx can't perform format conversion. */
+ format = vsp1_entity_get_pad_format(&brx->entity, sd_state,
+ BRX_PAD_SINK(0));
+ fmt->code = format->code;
+ break;
+ }
+
+ fmt->width = clamp(fmt->width, BRX_MIN_SIZE, BRX_MAX_SIZE);
+ fmt->height = clamp(fmt->height, BRX_MIN_SIZE, BRX_MAX_SIZE);
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+}
+
+static int brx_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vsp1_brx *brx = to_brx(subdev);
+ struct v4l2_subdev_state *config;
+ struct v4l2_mbus_framefmt *format;
+ int ret = 0;
+
+ mutex_lock(&brx->entity.lock);
+
+ config = vsp1_entity_get_pad_config(&brx->entity, sd_state,
+ fmt->which);
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ brx_try_format(brx, config, fmt->pad, &fmt->format);
+
+ format = vsp1_entity_get_pad_format(&brx->entity, config, fmt->pad);
+ *format = fmt->format;
+
+ /* Reset the compose rectangle. */
+ if (fmt->pad != brx->entity.source_pad) {
+ struct v4l2_rect *compose;
+
+ compose = brx_get_compose(brx, config, fmt->pad);
+ compose->left = 0;
+ compose->top = 0;
+ compose->width = format->width;
+ compose->height = format->height;
+ }
+
+ /* Propagate the format code to all pads. */
+ if (fmt->pad == BRX_PAD_SINK(0)) {
+ unsigned int i;
+
+ for (i = 0; i <= brx->entity.source_pad; ++i) {
+ format = vsp1_entity_get_pad_format(&brx->entity,
+ config, i);
+ format->code = fmt->format.code;
+ }
+ }
+
+done:
+ mutex_unlock(&brx->entity.lock);
+ return ret;
+}
+
+static int brx_get_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct vsp1_brx *brx = to_brx(subdev);
+ struct v4l2_subdev_state *config;
+
+ if (sel->pad == brx->entity.source_pad)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = BRX_MAX_SIZE;
+ sel->r.height = BRX_MAX_SIZE;
+ return 0;
+
+ case V4L2_SEL_TGT_COMPOSE:
+ config = vsp1_entity_get_pad_config(&brx->entity, sd_state,
+ sel->which);
+ if (!config)
+ return -EINVAL;
+
+ mutex_lock(&brx->entity.lock);
+ sel->r = *brx_get_compose(brx, config, sel->pad);
+ mutex_unlock(&brx->entity.lock);
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int brx_set_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct vsp1_brx *brx = to_brx(subdev);
+ struct v4l2_subdev_state *config;
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *compose;
+ int ret = 0;
+
+ if (sel->pad == brx->entity.source_pad)
+ return -EINVAL;
+
+ if (sel->target != V4L2_SEL_TGT_COMPOSE)
+ return -EINVAL;
+
+ mutex_lock(&brx->entity.lock);
+
+ config = vsp1_entity_get_pad_config(&brx->entity, sd_state,
+ sel->which);
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /*
+ * The compose rectangle top left corner must be inside the output
+ * frame.
+ */
+ format = vsp1_entity_get_pad_format(&brx->entity, config,
+ brx->entity.source_pad);
+ sel->r.left = clamp_t(unsigned int, sel->r.left, 0, format->width - 1);
+ sel->r.top = clamp_t(unsigned int, sel->r.top, 0, format->height - 1);
+
+ /*
+ * Scaling isn't supported, the compose rectangle size must be identical
+ * to the sink format size.
+ */
+ format = vsp1_entity_get_pad_format(&brx->entity, config, sel->pad);
+ sel->r.width = format->width;
+ sel->r.height = format->height;
+
+ compose = brx_get_compose(brx, config, sel->pad);
+ *compose = sel->r;
+
+done:
+ mutex_unlock(&brx->entity.lock);
+ return ret;
+}
+
+static const struct v4l2_subdev_pad_ops brx_pad_ops = {
+ .init_cfg = vsp1_entity_init_cfg,
+ .enum_mbus_code = brx_enum_mbus_code,
+ .enum_frame_size = brx_enum_frame_size,
+ .get_fmt = vsp1_subdev_get_pad_format,
+ .set_fmt = brx_set_format,
+ .get_selection = brx_get_selection,
+ .set_selection = brx_set_selection,
+};
+
+static const struct v4l2_subdev_ops brx_ops = {
+ .pad = &brx_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void brx_configure_stream(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb)
+{
+ struct vsp1_brx *brx = to_brx(&entity->subdev);
+ struct v4l2_mbus_framefmt *format;
+ unsigned int flags;
+ unsigned int i;
+
+ format = vsp1_entity_get_pad_format(&brx->entity, brx->entity.config,
+ brx->entity.source_pad);
+
+ /*
+ * The hardware is extremely flexible but we have no userspace API to
+ * expose all the parameters, nor is it clear whether we would have use
+ * cases for all the supported modes. Let's just hardcode the parameters
+ * to sane default values for now.
+ */
+
+ /*
+ * Disable dithering and enable color data normalization unless the
+ * format at the pipeline output is premultiplied.
+ */
+ flags = pipe->output ? pipe->output->format.flags : 0;
+ vsp1_brx_write(brx, dlb, VI6_BRU_INCTRL,
+ flags & V4L2_PIX_FMT_FLAG_PREMUL_ALPHA ?
+ 0 : VI6_BRU_INCTRL_NRM);
+
+ /*
+ * Set the background position to cover the whole output image and
+ * configure its color.
+ */
+ vsp1_brx_write(brx, dlb, VI6_BRU_VIRRPF_SIZE,
+ (format->width << VI6_BRU_VIRRPF_SIZE_HSIZE_SHIFT) |
+ (format->height << VI6_BRU_VIRRPF_SIZE_VSIZE_SHIFT));
+ vsp1_brx_write(brx, dlb, VI6_BRU_VIRRPF_LOC, 0);
+
+ vsp1_brx_write(brx, dlb, VI6_BRU_VIRRPF_COL, brx->bgcolor |
+ (0xff << VI6_BRU_VIRRPF_COL_A_SHIFT));
+
+ /*
+ * Route BRU input 1 as SRC input to the ROP unit and configure the ROP
+ * unit with a NOP operation to make BRU input 1 available as the
+ * Blend/ROP unit B SRC input. Only needed for BRU, the BRS has no ROP
+ * unit.
+ */
+ if (entity->type == VSP1_ENTITY_BRU)
+ vsp1_brx_write(brx, dlb, VI6_BRU_ROP,
+ VI6_BRU_ROP_DSTSEL_BRUIN(1) |
+ VI6_BRU_ROP_CROP(VI6_ROP_NOP) |
+ VI6_BRU_ROP_AROP(VI6_ROP_NOP));
+
+ for (i = 0; i < brx->entity.source_pad; ++i) {
+ bool premultiplied = false;
+ u32 ctrl = 0;
+
+ /*
+ * Configure all Blend/ROP units corresponding to an enabled BRx
+ * input for alpha blending. Blend/ROP units corresponding to
+ * disabled BRx inputs are used in ROP NOP mode to ignore the
+ * SRC input.
+ */
+ if (brx->inputs[i].rpf) {
+ ctrl |= VI6_BRU_CTRL_RBC;
+
+ premultiplied = brx->inputs[i].rpf->format.flags
+ & V4L2_PIX_FMT_FLAG_PREMUL_ALPHA;
+ } else {
+ ctrl |= VI6_BRU_CTRL_CROP(VI6_ROP_NOP)
+ | VI6_BRU_CTRL_AROP(VI6_ROP_NOP);
+ }
+
+ /*
+ * Select the virtual RPF as the Blend/ROP unit A DST input to
+ * serve as a background color.
+ */
+ if (i == 0)
+ ctrl |= VI6_BRU_CTRL_DSTSEL_VRPF;
+
+ /*
+ * Route inputs 0 to 3 as SRC inputs to Blend/ROP units A to D
+ * in that order. In the BRU the Blend/ROP unit B SRC is
+ * hardwired to the ROP unit output, the corresponding register
+ * bits must be set to 0. The BRS has no ROP unit and doesn't
+ * need any special processing.
+ */
+ if (!(entity->type == VSP1_ENTITY_BRU && i == 1))
+ ctrl |= VI6_BRU_CTRL_SRCSEL_BRUIN(i);
+
+ vsp1_brx_write(brx, dlb, VI6_BRU_CTRL(i), ctrl);
+
+ /*
+ * Hardcode the blending formula to
+ *
+ * DSTc = DSTc * (1 - SRCa) + SRCc * SRCa
+ * DSTa = DSTa * (1 - SRCa) + SRCa
+ *
+ * when the SRC input isn't premultiplied, and to
+ *
+ * DSTc = DSTc * (1 - SRCa) + SRCc
+ * DSTa = DSTa * (1 - SRCa) + SRCa
+ *
+ * otherwise.
+ */
+ vsp1_brx_write(brx, dlb, VI6_BRU_BLD(i),
+ VI6_BRU_BLD_CCMDX_255_SRC_A |
+ (premultiplied ? VI6_BRU_BLD_CCMDY_COEFY :
+ VI6_BRU_BLD_CCMDY_SRC_A) |
+ VI6_BRU_BLD_ACMDX_255_SRC_A |
+ VI6_BRU_BLD_ACMDY_COEFY |
+ (0xff << VI6_BRU_BLD_COEFY_SHIFT));
+ }
+}
+
+static const struct vsp1_entity_operations brx_entity_ops = {
+ .configure_stream = brx_configure_stream,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+struct vsp1_brx *vsp1_brx_create(struct vsp1_device *vsp1,
+ enum vsp1_entity_type type)
+{
+ struct vsp1_brx *brx;
+ unsigned int num_pads;
+ const char *name;
+ int ret;
+
+ brx = devm_kzalloc(vsp1->dev, sizeof(*brx), GFP_KERNEL);
+ if (brx == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ brx->base = type == VSP1_ENTITY_BRU ? VI6_BRU_BASE : VI6_BRS_BASE;
+ brx->entity.ops = &brx_entity_ops;
+ brx->entity.type = type;
+
+ if (type == VSP1_ENTITY_BRU) {
+ num_pads = vsp1->info->num_bru_inputs + 1;
+ name = "bru";
+ } else {
+ num_pads = 3;
+ name = "brs";
+ }
+
+ ret = vsp1_entity_init(vsp1, &brx->entity, name, num_pads, &brx_ops,
+ MEDIA_ENT_F_PROC_VIDEO_COMPOSER);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ /* Initialize the control handler. */
+ v4l2_ctrl_handler_init(&brx->ctrls, 1);
+ v4l2_ctrl_new_std(&brx->ctrls, &brx_ctrl_ops, V4L2_CID_BG_COLOR,
+ 0, 0xffffff, 1, 0);
+
+ brx->bgcolor = 0;
+
+ brx->entity.subdev.ctrl_handler = &brx->ctrls;
+
+ if (brx->ctrls.error) {
+ dev_err(vsp1->dev, "%s: failed to initialize controls\n", name);
+ ret = brx->ctrls.error;
+ vsp1_entity_destroy(&brx->entity);
+ return ERR_PTR(ret);
+ }
+
+ return brx;
+}
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_brx.h b/drivers/media/platform/renesas/vsp1/vsp1_brx.h
new file mode 100644
index 0000000000..6abbb8c334
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_brx.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_brx.h -- R-Car VSP1 Blend ROP Unit (BRU and BRS)
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+#ifndef __VSP1_BRX_H__
+#define __VSP1_BRX_H__
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1_entity.h"
+
+struct vsp1_device;
+struct vsp1_rwpf;
+
+#define BRX_PAD_SINK(n) (n)
+
+struct vsp1_brx {
+ struct vsp1_entity entity;
+ unsigned int base;
+
+ struct v4l2_ctrl_handler ctrls;
+
+ struct {
+ struct vsp1_rwpf *rpf;
+ } inputs[VSP1_MAX_RPF];
+
+ u32 bgcolor;
+};
+
+static inline struct vsp1_brx *to_brx(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_brx, entity.subdev);
+}
+
+struct vsp1_brx *vsp1_brx_create(struct vsp1_device *vsp1,
+ enum vsp1_entity_type type);
+
+#endif /* __VSP1_BRX_H__ */
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_clu.c b/drivers/media/platform/renesas/vsp1/vsp1_clu.c
new file mode 100644
index 0000000000..c5217fee24
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_clu.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_clu.c -- R-Car VSP1 Cubic Look-Up Table
+ *
+ * Copyright (C) 2015-2016 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_clu.h"
+#include "vsp1_dl.h"
+
+#define CLU_MIN_SIZE 4U
+#define CLU_MAX_SIZE 8190U
+
+#define CLU_SIZE (17 * 17 * 17)
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline void vsp1_clu_write(struct vsp1_clu *clu,
+ struct vsp1_dl_body *dlb, u32 reg, u32 data)
+{
+ vsp1_dl_body_write(dlb, reg, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * Controls
+ */
+
+#define V4L2_CID_VSP1_CLU_TABLE (V4L2_CID_USER_BASE | 0x1001)
+#define V4L2_CID_VSP1_CLU_MODE (V4L2_CID_USER_BASE | 0x1002)
+#define V4L2_CID_VSP1_CLU_MODE_2D 0
+#define V4L2_CID_VSP1_CLU_MODE_3D 1
+
+static int clu_set_table(struct vsp1_clu *clu, struct v4l2_ctrl *ctrl)
+{
+ struct vsp1_dl_body *dlb;
+ unsigned int i;
+
+ dlb = vsp1_dl_body_get(clu->pool);
+ if (!dlb)
+ return -ENOMEM;
+
+ vsp1_dl_body_write(dlb, VI6_CLU_ADDR, 0);
+ for (i = 0; i < CLU_SIZE; ++i)
+ vsp1_dl_body_write(dlb, VI6_CLU_DATA, ctrl->p_new.p_u32[i]);
+
+ spin_lock_irq(&clu->lock);
+ swap(clu->clu, dlb);
+ spin_unlock_irq(&clu->lock);
+
+ vsp1_dl_body_put(dlb);
+ return 0;
+}
+
+static int clu_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vsp1_clu *clu =
+ container_of(ctrl->handler, struct vsp1_clu, ctrls);
+
+ switch (ctrl->id) {
+ case V4L2_CID_VSP1_CLU_TABLE:
+ clu_set_table(clu, ctrl);
+ break;
+
+ case V4L2_CID_VSP1_CLU_MODE:
+ clu->mode = ctrl->val;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops clu_ctrl_ops = {
+ .s_ctrl = clu_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config clu_table_control = {
+ .ops = &clu_ctrl_ops,
+ .id = V4L2_CID_VSP1_CLU_TABLE,
+ .name = "Look-Up Table",
+ .type = V4L2_CTRL_TYPE_U32,
+ .min = 0x00000000,
+ .max = 0x00ffffff,
+ .step = 1,
+ .def = 0,
+ .dims = { 17, 17, 17 },
+};
+
+static const char * const clu_mode_menu[] = {
+ "2D",
+ "3D",
+ NULL,
+};
+
+static const struct v4l2_ctrl_config clu_mode_control = {
+ .ops = &clu_ctrl_ops,
+ .id = V4L2_CID_VSP1_CLU_MODE,
+ .name = "Mode",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .min = 0,
+ .max = 1,
+ .def = 1,
+ .qmenu = clu_mode_menu,
+};
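+
+/*
+ * Userspace programs the 17x17x17 table through the V4L2_CID_VSP1_CLU_TABLE
+ * array control defined above. A rough, non-normative sketch using the
+ * extended control API (file descriptor and buffer names are placeholders):
+ *
+ *	u32 table[17 * 17 * 17];
+ *	struct v4l2_ext_control ctrl = {
+ *		.id = V4L2_CID_VSP1_CLU_TABLE,
+ *		.size = sizeof(table),
+ *		.p_u32 = table,
+ *	};
+ *	struct v4l2_ext_controls ctrls = {
+ *		.count = 1,
+ *		.controls = &ctrl,
+ *	};
+ *
+ *	ioctl(subdev_fd, VIDIOC_S_EXT_CTRLS, &ctrls);
+ */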
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+static const unsigned int clu_codes[] = {
+ MEDIA_BUS_FMT_ARGB8888_1X32,
+ MEDIA_BUS_FMT_AHSV8888_1X32,
+ MEDIA_BUS_FMT_AYUV8_1X32,
+};
+
+static int clu_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ return vsp1_subdev_enum_mbus_code(subdev, sd_state, code, clu_codes,
+ ARRAY_SIZE(clu_codes));
+}
+
+static int clu_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ return vsp1_subdev_enum_frame_size(subdev, sd_state, fse,
+ CLU_MIN_SIZE,
+ CLU_MIN_SIZE, CLU_MAX_SIZE,
+ CLU_MAX_SIZE);
+}
+
+static int clu_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ return vsp1_subdev_set_pad_format(subdev, sd_state, fmt, clu_codes,
+ ARRAY_SIZE(clu_codes),
+ CLU_MIN_SIZE, CLU_MIN_SIZE,
+ CLU_MAX_SIZE, CLU_MAX_SIZE);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static const struct v4l2_subdev_pad_ops clu_pad_ops = {
+ .init_cfg = vsp1_entity_init_cfg,
+ .enum_mbus_code = clu_enum_mbus_code,
+ .enum_frame_size = clu_enum_frame_size,
+ .get_fmt = vsp1_subdev_get_pad_format,
+ .set_fmt = clu_set_format,
+};
+
+static const struct v4l2_subdev_ops clu_ops = {
+ .pad = &clu_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void clu_configure_stream(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb)
+{
+ struct vsp1_clu *clu = to_clu(&entity->subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ /*
+ * The yuv_mode can't be changed during streaming. Cache it internally
+ * for future runtime configuration calls.
+ */
+ format = vsp1_entity_get_pad_format(&clu->entity,
+ clu->entity.config,
+ CLU_PAD_SINK);
+ clu->yuv_mode = format->code == MEDIA_BUS_FMT_AYUV8_1X32;
+}
+
+static void clu_configure_frame(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb)
+{
+ struct vsp1_clu *clu = to_clu(&entity->subdev);
+ struct vsp1_dl_body *clu_dlb;
+ unsigned long flags;
+ u32 ctrl = VI6_CLU_CTRL_AAI | VI6_CLU_CTRL_MVS | VI6_CLU_CTRL_EN;
+
+ /* 2D mode can only be used with the YCbCr pixel encoding. */
+ if (clu->mode == V4L2_CID_VSP1_CLU_MODE_2D && clu->yuv_mode)
+ ctrl |= VI6_CLU_CTRL_AX1I_2D | VI6_CLU_CTRL_AX2I_2D
+ | VI6_CLU_CTRL_OS0_2D | VI6_CLU_CTRL_OS1_2D
+ | VI6_CLU_CTRL_OS2_2D | VI6_CLU_CTRL_M2D;
+
+ vsp1_clu_write(clu, dlb, VI6_CLU_CTRL, ctrl);
+
+ spin_lock_irqsave(&clu->lock, flags);
+ clu_dlb = clu->clu;
+ clu->clu = NULL;
+ spin_unlock_irqrestore(&clu->lock, flags);
+
+ if (clu_dlb) {
+ vsp1_dl_list_add_body(dl, clu_dlb);
+
+ /* Release our local reference. */
+ vsp1_dl_body_put(clu_dlb);
+ }
+}
+
+static void clu_destroy(struct vsp1_entity *entity)
+{
+ struct vsp1_clu *clu = to_clu(&entity->subdev);
+
+ vsp1_dl_body_pool_destroy(clu->pool);
+}
+
+static const struct vsp1_entity_operations clu_entity_ops = {
+ .configure_stream = clu_configure_stream,
+ .configure_frame = clu_configure_frame,
+ .destroy = clu_destroy,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+struct vsp1_clu *vsp1_clu_create(struct vsp1_device *vsp1)
+{
+ struct vsp1_clu *clu;
+ int ret;
+
+ clu = devm_kzalloc(vsp1->dev, sizeof(*clu), GFP_KERNEL);
+ if (clu == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&clu->lock);
+
+ clu->entity.ops = &clu_entity_ops;
+ clu->entity.type = VSP1_ENTITY_CLU;
+
+ ret = vsp1_entity_init(vsp1, &clu->entity, "clu", 2, &clu_ops,
+ MEDIA_ENT_F_PROC_VIDEO_LUT);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ /*
+ * Pre-allocate a body pool, with 3 bodies allowing a userspace update
+ * before the hardware has committed a previous set of tables, handling
+ * both the queued and pending dl entries. One extra entry is added to
+ * the CLU_SIZE to allow for the VI6_CLU_ADDR header.
+ */
+ clu->pool = vsp1_dl_body_pool_create(clu->entity.vsp1, 3, CLU_SIZE + 1,
+ 0);
+ if (!clu->pool)
+ return ERR_PTR(-ENOMEM);
+
+ /* Initialize the control handler. */
+ v4l2_ctrl_handler_init(&clu->ctrls, 2);
+ v4l2_ctrl_new_custom(&clu->ctrls, &clu_table_control, NULL);
+ v4l2_ctrl_new_custom(&clu->ctrls, &clu_mode_control, NULL);
+
+ clu->entity.subdev.ctrl_handler = &clu->ctrls;
+
+ if (clu->ctrls.error) {
+ dev_err(vsp1->dev, "clu: failed to initialize controls\n");
+ ret = clu->ctrls.error;
+ vsp1_entity_destroy(&clu->entity);
+ return ERR_PTR(ret);
+ }
+
+ v4l2_ctrl_handler_setup(&clu->ctrls);
+
+ return clu;
+}
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_clu.h b/drivers/media/platform/renesas/vsp1/vsp1_clu.h
new file mode 100644
index 0000000000..cef2f44481
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_clu.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_clu.h -- R-Car VSP1 Cubic Look-Up Table
+ *
+ * Copyright (C) 2015 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+#ifndef __VSP1_CLU_H__
+#define __VSP1_CLU_H__
+
+#include <linux/spinlock.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1_entity.h"
+
+struct vsp1_device;
+struct vsp1_dl_body;
+
+#define CLU_PAD_SINK 0
+#define CLU_PAD_SOURCE 1
+
+struct vsp1_clu {
+ struct vsp1_entity entity;
+
+ struct v4l2_ctrl_handler ctrls;
+
+ bool yuv_mode;
+ spinlock_t lock;
+ unsigned int mode;
+ struct vsp1_dl_body *clu;
+ struct vsp1_dl_body_pool *pool;
+};
+
+static inline struct vsp1_clu *to_clu(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_clu, entity.subdev);
+}
+
+struct vsp1_clu *vsp1_clu_create(struct vsp1_device *vsp1);
+
+#endif /* __VSP1_CLU_H__ */
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_dl.c b/drivers/media/platform/renesas/vsp1/vsp1_dl.c
new file mode 100644
index 0000000000..ad3fa1c9cc
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_dl.c
@@ -0,0 +1,1169 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_dl.c -- R-Car VSP1 Display List
+ *
+ * Copyright (C) 2015 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+#include <linux/refcount.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include "vsp1.h"
+#include "vsp1_dl.h"
+
+#define VSP1_DL_NUM_ENTRIES 256
+
+#define VSP1_DLH_INT_ENABLE (1 << 1)
+#define VSP1_DLH_AUTO_START (1 << 0)
+
+#define VSP1_DLH_EXT_PRE_CMD_EXEC (1 << 9)
+#define VSP1_DLH_EXT_POST_CMD_EXEC (1 << 8)
+
+struct vsp1_dl_header_list {
+ u32 num_bytes;
+ u32 addr;
+} __packed;
+
+struct vsp1_dl_header {
+ u32 num_lists;
+ struct vsp1_dl_header_list lists[8];
+ u32 next_header;
+ u32 flags;
+} __packed;
+
+/**
+ * struct vsp1_dl_ext_header - Extended display list header
+ * @padding: padding zero bytes for alignment
+ * @pre_ext_dl_num_cmd: number of pre-extended command bodies to parse
+ * @flags: enables or disables execution of the pre and post command
+ * @pre_ext_dl_plist: start address of pre-extended display list bodies
+ * @post_ext_dl_num_cmd: number of post-extended command bodies to parse
+ * @post_ext_dl_plist: start address of post-extended display list bodies
+ */
+struct vsp1_dl_ext_header {
+ u32 padding;
+
+ /*
+ * The datasheet represents flags as stored before pre_ext_dl_num_cmd,
+ * expecting 32-bit accesses. The flags are appropriate to the whole
+ * header, not just the pre_ext command, and thus warrant being
+ * separated out. Due to byte ordering, and representing as 16 bit
+ * values here, the flags must be positioned after the
+ * pre_ext_dl_num_cmd.
+ */
+ u16 pre_ext_dl_num_cmd;
+ u16 flags;
+ u32 pre_ext_dl_plist;
+
+ u32 post_ext_dl_num_cmd;
+ u32 post_ext_dl_plist;
+} __packed;
+
+struct vsp1_dl_header_extended {
+ struct vsp1_dl_header header;
+ struct vsp1_dl_ext_header ext;
+} __packed;
+
+struct vsp1_dl_entry {
+ u32 addr;
+ u32 data;
+} __packed;
+
+/**
+ * struct vsp1_pre_ext_dl_body - Pre Extended Display List Body
+ * @opcode: Extended display list command operation code
+ * @flags: Pre-extended command flags. These are specific to each command
+ * @address_set: Source address set pointer. Must have 16-byte alignment
+ * @reserved: Zero bits for alignment.
+ */
+struct vsp1_pre_ext_dl_body {
+ u32 opcode;
+ u32 flags;
+ u32 address_set;
+ u32 reserved;
+} __packed;
+
+/**
+ * struct vsp1_dl_body - Display list body
+ * @list: entry in the display list list of bodies
+ * @free: entry in the pool free body list
+ * @refcnt: reference tracking for the body
+ * @pool: pool to which this body belongs
+ * @entries: array of entries
+ * @dma: DMA address of the entries
+ * @size: size of the DMA memory in bytes
+ * @num_entries: number of stored entries
+ * @max_entries: number of entries available
+ */
+struct vsp1_dl_body {
+ struct list_head list;
+ struct list_head free;
+
+ refcount_t refcnt;
+
+ struct vsp1_dl_body_pool *pool;
+
+ struct vsp1_dl_entry *entries;
+ dma_addr_t dma;
+ size_t size;
+
+ unsigned int num_entries;
+ unsigned int max_entries;
+};
+
+/**
+ * struct vsp1_dl_body_pool - display list body pool
+ * @dma: DMA address of the entries
+ * @size: size of the full DMA memory pool in bytes
+ * @mem: CPU memory pointer for the pool
+ * @bodies: Array of DLB structures for the pool
+ * @free: List of free DLB entries
+ * @lock: Protects the free list
+ * @vsp1: the VSP1 device
+ */
+struct vsp1_dl_body_pool {
+ /* DMA allocation */
+ dma_addr_t dma;
+ size_t size;
+ void *mem;
+
+ /* Body management */
+ struct vsp1_dl_body *bodies;
+ struct list_head free;
+ spinlock_t lock;
+
+ struct vsp1_device *vsp1;
+};
+
+/**
+ * struct vsp1_dl_cmd_pool - Display List commands pool
+ * @dma: DMA address of the entries
+ * @size: size of the full DMA memory pool in bytes
+ * @mem: CPU memory pointer for the pool
+ * @cmds: Array of command structures for the pool
+ * @free: Free pool entries
+ * @lock: Protects the free list
+ * @vsp1: the VSP1 device
+ */
+struct vsp1_dl_cmd_pool {
+ /* DMA allocation */
+ dma_addr_t dma;
+ size_t size;
+ void *mem;
+
+ struct vsp1_dl_ext_cmd *cmds;
+ struct list_head free;
+
+ spinlock_t lock;
+
+ struct vsp1_device *vsp1;
+};
+
+/**
+ * struct vsp1_dl_list - Display list
+ * @list: entry in the display list manager lists
+ * @dlm: the display list manager
+ * @header: display list header
+ * @extension: extended display list header. NULL for normal lists
+ * @dma: DMA address for the header
+ * @body0: first display list body
+ * @bodies: list of extra display list bodies
+ * @pre_cmd: pre command to be issued through extended dl header
+ * @post_cmd: post command to be issued through extended dl header
+ * @has_chain: if true, indicates that there's a partition chain
+ * @chain: entry in the display list partition chain
+ * @flags: display list flags, a combination of VSP1_DL_FRAME_END_*
+ */
+struct vsp1_dl_list {
+ struct list_head list;
+ struct vsp1_dl_manager *dlm;
+
+ struct vsp1_dl_header *header;
+ struct vsp1_dl_ext_header *extension;
+ dma_addr_t dma;
+
+ struct vsp1_dl_body *body0;
+ struct list_head bodies;
+
+ struct vsp1_dl_ext_cmd *pre_cmd;
+ struct vsp1_dl_ext_cmd *post_cmd;
+
+ bool has_chain;
+ struct list_head chain;
+
+ unsigned int flags;
+};
+
+/**
+ * struct vsp1_dl_manager - Display List manager
+ * @index: index of the related WPF
+ * @singleshot: execute the display list in single-shot mode
+ * @vsp1: the VSP1 device
+ * @lock: protects the free, active, queued, and pending lists
+ * @free: array of all free display lists
+ * @active: list currently being processed (loaded) by hardware
+ * @queued: list queued to the hardware (written to the DL registers)
+ * @pending: list waiting to be queued to the hardware
+ * @pool: body pool for the display list bodies
+ * @cmdpool: commands pool for extended display list
+ */
+struct vsp1_dl_manager {
+ unsigned int index;
+ bool singleshot;
+ struct vsp1_device *vsp1;
+
+ spinlock_t lock;
+ struct list_head free;
+ struct vsp1_dl_list *active;
+ struct vsp1_dl_list *queued;
+ struct vsp1_dl_list *pending;
+
+ struct vsp1_dl_body_pool *pool;
+ struct vsp1_dl_cmd_pool *cmdpool;
+};
+
+/* -----------------------------------------------------------------------------
+ * Display List Body Management
+ */
+
+/**
+ * vsp1_dl_body_pool_create - Create a pool of bodies from a single allocation
+ * @vsp1: The VSP1 device
+ * @num_bodies: The number of bodies to allocate
+ * @num_entries: The maximum number of entries that a body can contain
+ * @extra_size: Extra allocation provided for the bodies
+ *
+ * Allocate a pool of display list bodies each with enough memory to contain the
+ * requested number of entries plus the @extra_size.
+ *
+ * Return a pointer to a pool on success or NULL if memory can't be allocated.
+ */
+struct vsp1_dl_body_pool *
+vsp1_dl_body_pool_create(struct vsp1_device *vsp1, unsigned int num_bodies,
+ unsigned int num_entries, size_t extra_size)
+{
+ struct vsp1_dl_body_pool *pool;
+ size_t dlb_size;
+ unsigned int i;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return NULL;
+
+ pool->vsp1 = vsp1;
+
+ /*
+ * TODO: 'extra_size' is only used by vsp1_dlm_create(), to allocate
+ * extra memory for the display list header. We need only one header per
+ * display list, not per display list body, thus this allocation is
+ * extraneous and should be reworked in the future.
+ */
+ dlb_size = num_entries * sizeof(struct vsp1_dl_entry) + extra_size;
+ pool->size = dlb_size * num_bodies;
+
+ pool->bodies = kcalloc(num_bodies, sizeof(*pool->bodies), GFP_KERNEL);
+ if (!pool->bodies) {
+ kfree(pool);
+ return NULL;
+ }
+
+ pool->mem = dma_alloc_wc(vsp1->bus_master, pool->size, &pool->dma,
+ GFP_KERNEL);
+ if (!pool->mem) {
+ kfree(pool->bodies);
+ kfree(pool);
+ return NULL;
+ }
+
+ spin_lock_init(&pool->lock);
+ INIT_LIST_HEAD(&pool->free);
+
+ for (i = 0; i < num_bodies; ++i) {
+ struct vsp1_dl_body *dlb = &pool->bodies[i];
+
+ dlb->pool = pool;
+ dlb->max_entries = num_entries;
+
+ dlb->dma = pool->dma + i * dlb_size;
+ dlb->entries = pool->mem + i * dlb_size;
+
+ list_add_tail(&dlb->free, &pool->free);
+ }
+
+ return pool;
+}
+
+/**
+ * vsp1_dl_body_pool_destroy - Release a body pool
+ * @pool: The body pool
+ *
+ * Release all components of a pool allocation.
+ */
+void vsp1_dl_body_pool_destroy(struct vsp1_dl_body_pool *pool)
+{
+ if (!pool)
+ return;
+
+ if (pool->mem)
+ dma_free_wc(pool->vsp1->bus_master, pool->size, pool->mem,
+ pool->dma);
+
+ kfree(pool->bodies);
+ kfree(pool);
+}
+
+/**
+ * vsp1_dl_body_get - Obtain a body from a pool
+ * @pool: The body pool
+ *
+ * Obtain a body from the pool without blocking.
+ *
+ * Returns a display list body or NULL if there are none available.
+ */
+struct vsp1_dl_body *vsp1_dl_body_get(struct vsp1_dl_body_pool *pool)
+{
+ struct vsp1_dl_body *dlb = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->lock, flags);
+
+ if (!list_empty(&pool->free)) {
+ dlb = list_first_entry(&pool->free, struct vsp1_dl_body, free);
+ list_del(&dlb->free);
+ refcount_set(&dlb->refcnt, 1);
+ }
+
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ return dlb;
+}
+
+/**
+ * vsp1_dl_body_put - Return a body back to its pool
+ * @dlb: The display list body
+ *
+ * Return a body back to the pool, and reset the num_entries to clear the list.
+ */
+void vsp1_dl_body_put(struct vsp1_dl_body *dlb)
+{
+ unsigned long flags;
+
+ if (!dlb)
+ return;
+
+ if (!refcount_dec_and_test(&dlb->refcnt))
+ return;
+
+ dlb->num_entries = 0;
+
+ spin_lock_irqsave(&dlb->pool->lock, flags);
+ list_add_tail(&dlb->free, &dlb->pool->free);
+ spin_unlock_irqrestore(&dlb->pool->lock, flags);
+}
+
+/**
+ * vsp1_dl_body_write - Write a register to a display list body
+ * @dlb: The body
+ * @reg: The register address
+ * @data: The register value
+ *
+ * Write the given register and value to the display list body. The maximum
+ * number of entries that can be written in a body is specified when the body
+ * pool is created by vsp1_dl_body_pool_create().
+ */
+void vsp1_dl_body_write(struct vsp1_dl_body *dlb, u32 reg, u32 data)
+{
+ if (WARN_ONCE(dlb->num_entries >= dlb->max_entries,
+ "DLB size exceeded (max %u)", dlb->max_entries))
+ return;
+
+ dlb->entries[dlb->num_entries].addr = reg;
+ dlb->entries[dlb->num_entries].data = data;
+ dlb->num_entries++;
+}
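+
+/*
+ * Typical body life cycle, as a non-normative sketch (identifiers and the
+ * register address/value are placeholders):
+ *
+ *	pool = vsp1_dl_body_pool_create(vsp1, num_bodies, num_entries, 0);
+ *	...
+ *	dlb = vsp1_dl_body_get(pool);
+ *	vsp1_dl_body_write(dlb, reg, data);
+ *	vsp1_dl_body_put(dlb);
+ *	...
+ *	vsp1_dl_body_pool_destroy(pool);
+ */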
+
+/* -----------------------------------------------------------------------------
+ * Display List Extended Command Management
+ */
+
+enum vsp1_extcmd_type {
+ VSP1_EXTCMD_AUTODISP,
+ VSP1_EXTCMD_AUTOFLD,
+};
+
+struct vsp1_extended_command_info {
+ u16 opcode;
+ size_t body_size;
+};
+
+static const struct vsp1_extended_command_info vsp1_extended_commands[] = {
+ [VSP1_EXTCMD_AUTODISP] = { 0x02, 96 },
+ [VSP1_EXTCMD_AUTOFLD] = { 0x03, 160 },
+};
+
+/**
+ * vsp1_dl_cmd_pool_create - Create a pool of commands from a single allocation
+ * @vsp1: The VSP1 device
+ * @type: The command pool type
+ * @num_cmds: The number of commands to allocate
+ *
+ * Allocate a pool of commands each with enough memory to contain the private
+ * data of each command. The allocation sizes are dependent upon the command
+ * type.
+ *
+ * Return a pointer to the pool on success or NULL if memory can't be allocated.
+ */
+static struct vsp1_dl_cmd_pool *
+vsp1_dl_cmd_pool_create(struct vsp1_device *vsp1, enum vsp1_extcmd_type type,
+ unsigned int num_cmds)
+{
+ struct vsp1_dl_cmd_pool *pool;
+ unsigned int i;
+ size_t cmd_size;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return NULL;
+
+ pool->vsp1 = vsp1;
+
+ spin_lock_init(&pool->lock);
+ INIT_LIST_HEAD(&pool->free);
+
+ pool->cmds = kcalloc(num_cmds, sizeof(*pool->cmds), GFP_KERNEL);
+ if (!pool->cmds) {
+ kfree(pool);
+ return NULL;
+ }
+
+ cmd_size = sizeof(struct vsp1_pre_ext_dl_body) +
+ vsp1_extended_commands[type].body_size;
+ cmd_size = ALIGN(cmd_size, 16);
+
+ pool->size = cmd_size * num_cmds;
+ pool->mem = dma_alloc_wc(vsp1->bus_master, pool->size, &pool->dma,
+ GFP_KERNEL);
+ if (!pool->mem) {
+ kfree(pool->cmds);
+ kfree(pool);
+ return NULL;
+ }
+
+ for (i = 0; i < num_cmds; ++i) {
+ struct vsp1_dl_ext_cmd *cmd = &pool->cmds[i];
+ size_t cmd_offset = i * cmd_size;
+ /* data_offset must be 16 byte aligned for DMA. */
+ size_t data_offset = sizeof(struct vsp1_pre_ext_dl_body) +
+ cmd_offset;
+
+ cmd->pool = pool;
+ cmd->opcode = vsp1_extended_commands[type].opcode;
+
+ /*
+ * TODO: Auto-disp can utilise more than one extended body
+ * command per cmd.
+ */
+ cmd->num_cmds = 1;
+ cmd->cmds = pool->mem + cmd_offset;
+ cmd->cmd_dma = pool->dma + cmd_offset;
+
+ cmd->data = pool->mem + data_offset;
+ cmd->data_dma = pool->dma + data_offset;
+
+ list_add_tail(&cmd->free, &pool->free);
+ }
+
+ return pool;
+}
+
+static
+struct vsp1_dl_ext_cmd *vsp1_dl_ext_cmd_get(struct vsp1_dl_cmd_pool *pool)
+{
+ struct vsp1_dl_ext_cmd *cmd = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->lock, flags);
+
+ if (!list_empty(&pool->free)) {
+ cmd = list_first_entry(&pool->free, struct vsp1_dl_ext_cmd,
+ free);
+ list_del(&cmd->free);
+ }
+
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ return cmd;
+}
+
+static void vsp1_dl_ext_cmd_put(struct vsp1_dl_ext_cmd *cmd)
+{
+ unsigned long flags;
+
+ if (!cmd)
+ return;
+
+ /* Reset flags, these mark data usage. */
+ cmd->flags = 0;
+
+ spin_lock_irqsave(&cmd->pool->lock, flags);
+ list_add_tail(&cmd->free, &cmd->pool->free);
+ spin_unlock_irqrestore(&cmd->pool->lock, flags);
+}
+
+static void vsp1_dl_ext_cmd_pool_destroy(struct vsp1_dl_cmd_pool *pool)
+{
+ if (!pool)
+ return;
+
+ if (pool->mem)
+ dma_free_wc(pool->vsp1->bus_master, pool->size, pool->mem,
+ pool->dma);
+
+ kfree(pool->cmds);
+ kfree(pool);
+}
+
+struct vsp1_dl_ext_cmd *vsp1_dl_get_pre_cmd(struct vsp1_dl_list *dl)
+{
+ struct vsp1_dl_manager *dlm = dl->dlm;
+
+ if (dl->pre_cmd)
+ return dl->pre_cmd;
+
+ dl->pre_cmd = vsp1_dl_ext_cmd_get(dlm->cmdpool);
+
+ return dl->pre_cmd;
+}
+
+/* ----------------------------------------------------------------------------
+ * Display List Transaction Management
+ */
+
+static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
+{
+ struct vsp1_dl_list *dl;
+ size_t header_offset;
+
+ dl = kzalloc(sizeof(*dl), GFP_KERNEL);
+ if (!dl)
+ return NULL;
+
+ INIT_LIST_HEAD(&dl->bodies);
+ dl->dlm = dlm;
+
+ /* Get a default body for our list. */
+ dl->body0 = vsp1_dl_body_get(dlm->pool);
+ if (!dl->body0) {
+ kfree(dl);
+ return NULL;
+ }
+
+ header_offset = dl->body0->max_entries * sizeof(*dl->body0->entries);
+
+ dl->header = ((void *)dl->body0->entries) + header_offset;
+ dl->dma = dl->body0->dma + header_offset;
+
+ memset(dl->header, 0, sizeof(*dl->header));
+ dl->header->lists[0].addr = dl->body0->dma;
+
+ return dl;
+}
+
+static void vsp1_dl_list_bodies_put(struct vsp1_dl_list *dl)
+{
+ struct vsp1_dl_body *dlb, *tmp;
+
+ list_for_each_entry_safe(dlb, tmp, &dl->bodies, list) {
+ list_del(&dlb->list);
+ vsp1_dl_body_put(dlb);
+ }
+}
+
+static void vsp1_dl_list_free(struct vsp1_dl_list *dl)
+{
+ vsp1_dl_body_put(dl->body0);
+ vsp1_dl_list_bodies_put(dl);
+
+ kfree(dl);
+}
+
+/**
+ * vsp1_dl_list_get - Get a free display list
+ * @dlm: The display list manager
+ *
+ * Get a display list from the pool of free lists and return it.
+ *
+ * This function must be called without the display list manager lock held.
+ */
+struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm)
+{
+ struct vsp1_dl_list *dl = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dlm->lock, flags);
+
+ if (!list_empty(&dlm->free)) {
+ dl = list_first_entry(&dlm->free, struct vsp1_dl_list, list);
+ list_del(&dl->list);
+
+ /*
+ * The display list chain must be initialised to ensure every
+ * display list can assert list_empty() if it is not in a chain.
+ */
+ INIT_LIST_HEAD(&dl->chain);
+ }
+
+ spin_unlock_irqrestore(&dlm->lock, flags);
+
+ return dl;
+}
+
+/* This function must be called with the display list manager lock held. */
+static void __vsp1_dl_list_put(struct vsp1_dl_list *dl)
+{
+ struct vsp1_dl_list *dl_next;
+
+ if (!dl)
+ return;
+
+ /*
+ * Release any linked display-lists which were chained for a single
+ * hardware operation.
+ */
+ if (dl->has_chain) {
+ list_for_each_entry(dl_next, &dl->chain, chain)
+ __vsp1_dl_list_put(dl_next);
+ }
+
+ dl->has_chain = false;
+
+ vsp1_dl_list_bodies_put(dl);
+
+ vsp1_dl_ext_cmd_put(dl->pre_cmd);
+ vsp1_dl_ext_cmd_put(dl->post_cmd);
+
+ dl->pre_cmd = NULL;
+ dl->post_cmd = NULL;
+
+ /*
+ * body0 is reused as an optimisation, as presently every display list
+ * has at least one body; thus we reinitialise the entries list.
+ */
+ dl->body0->num_entries = 0;
+
+ list_add_tail(&dl->list, &dl->dlm->free);
+}
+
+/**
+ * vsp1_dl_list_put - Release a display list
+ * @dl: The display list
+ *
+ * Release the display list and return it to the pool of free lists.
+ *
+ * Passing a NULL pointer to this function is safe, in that case no operation
+ * will be performed.
+ */
+void vsp1_dl_list_put(struct vsp1_dl_list *dl)
+{
+ unsigned long flags;
+
+ if (!dl)
+ return;
+
+ spin_lock_irqsave(&dl->dlm->lock, flags);
+ __vsp1_dl_list_put(dl);
+ spin_unlock_irqrestore(&dl->dlm->lock, flags);
+}
+
+/**
+ * vsp1_dl_list_get_body0 - Obtain the default body for the display list
+ * @dl: The display list
+ *
+ * Obtain a pointer to the internal display list body allowing this to be passed
+ * directly to configure operations.
+ */
+struct vsp1_dl_body *vsp1_dl_list_get_body0(struct vsp1_dl_list *dl)
+{
+ return dl->body0;
+}
+
+/**
+ * vsp1_dl_list_add_body - Add a body to the display list
+ * @dl: The display list
+ * @dlb: The body
+ *
+ * Add a display list body to a display list. Registers contained in bodies are
+ * processed after registers contained in the main display list, in the order in
+ * which bodies are added.
+ *
+ * Adding a body to a display list passes ownership of the body to the list. The
+ * caller retains its reference to the body when adding it to the display list,
+ * but is not allowed to add new entries to the body.
+ *
+ * The reference must be explicitly released by a call to vsp1_dl_body_put()
+ * when the body isn't needed anymore.
+ */
+int vsp1_dl_list_add_body(struct vsp1_dl_list *dl, struct vsp1_dl_body *dlb)
+{
+ refcount_inc(&dlb->refcnt);
+
+ list_add_tail(&dlb->list, &dl->bodies);
+
+ return 0;
+}
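+
+/*
+ * Illustrative sequence (mirroring, e.g., the CLU user of this API): fill a
+ * body obtained from a pool, attach it to a display list, then drop the
+ * local reference. Identifiers are placeholders.
+ *
+ *	dlb = vsp1_dl_body_get(pool);
+ *	vsp1_dl_body_write(dlb, reg, data);
+ *	vsp1_dl_list_add_body(dl, dlb);
+ *	vsp1_dl_body_put(dlb);
+ */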
+
+/**
+ * vsp1_dl_list_add_chain - Add a display list to a chain
+ * @head: The head display list
+ * @dl: The new display list
+ *
+ * Add a display list to an existing display list chain. The chained lists
+ * will be automatically processed by the hardware without intervention from
+ * the CPU. A display list end interrupt will only complete after the last
+ * display list in the chain has completed processing.
+ *
+ * Adding a display list to a chain passes ownership of the display list to
+ * the head display list item. The chain is released when the head dl item is
+ * put back with __vsp1_dl_list_put().
+ */
+int vsp1_dl_list_add_chain(struct vsp1_dl_list *head,
+ struct vsp1_dl_list *dl)
+{
+ head->has_chain = true;
+ list_add_tail(&dl->chain, &head->chain);
+ return 0;
+}
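+
+/*
+ * Chaining sketch (illustrative, placeholder identifiers): in single-shot
+ * mode each partition of a frame gets its own display list, chained to a
+ * head list and committed as one unit.
+ *
+ *	head = vsp1_dl_list_get(dlm);
+ *	dl = vsp1_dl_list_get(dlm);
+ *	vsp1_dl_list_add_chain(head, dl);
+ *	vsp1_dl_list_commit(head, 0);
+ */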
+
+static void vsp1_dl_ext_cmd_fill_header(struct vsp1_dl_ext_cmd *cmd)
+{
+ cmd->cmds[0].opcode = cmd->opcode;
+ cmd->cmds[0].flags = cmd->flags;
+ cmd->cmds[0].address_set = cmd->data_dma;
+ cmd->cmds[0].reserved = 0;
+}
+
+static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last)
+{
+ struct vsp1_dl_manager *dlm = dl->dlm;
+ struct vsp1_dl_header_list *hdr = dl->header->lists;
+ struct vsp1_dl_body *dlb;
+ unsigned int num_lists = 0;
+
+ /*
+ * Fill the header with the display list bodies addresses and sizes. The
+ * address of the first body has already been filled when the display
+ * list was allocated.
+ */
+
+ hdr->num_bytes = dl->body0->num_entries
+ * sizeof(*dl->header->lists);
+
+ list_for_each_entry(dlb, &dl->bodies, list) {
+ num_lists++;
+ hdr++;
+
+ hdr->addr = dlb->dma;
+ hdr->num_bytes = dlb->num_entries
+ * sizeof(*dl->header->lists);
+ }
+
+ dl->header->num_lists = num_lists;
+ dl->header->flags = 0;
+
+ /*
+ * Enable the interrupt for the end of each frame. In continuous mode
+ * chained lists are used with one list per frame, so enable the
+ * interrupt for each list. In singleshot mode chained lists are used
+ * to partition a single frame, so enable the interrupt for the last
+ * list only.
+ */
+ if (!dlm->singleshot || is_last)
+ dl->header->flags |= VSP1_DLH_INT_ENABLE;
+
+ /*
+ * In continuous mode enable auto-start for all lists, as the VSP must
+ * loop on the same list until a new one is queued. In singleshot mode
+ * enable auto-start for all lists but the last to chain processing of
+ * partitions without software intervention.
+ */
+ if (!dlm->singleshot || !is_last)
+ dl->header->flags |= VSP1_DLH_AUTO_START;
+
+ if (!is_last) {
+ /*
+ * If this is not the last display list in the chain, queue the
+ * next item for automatic processing by the hardware.
+ */
+ struct vsp1_dl_list *next = list_next_entry(dl, chain);
+
+ dl->header->next_header = next->dma;
+ } else if (!dlm->singleshot) {
+ /*
+ * If the display list manager works in continuous mode, the VSP
+ * should loop over the display list continuously until
+ * instructed to do otherwise.
+ */
+ dl->header->next_header = dl->dma;
+ }
+
+ if (!dl->extension)
+ return;
+
+ dl->extension->flags = 0;
+
+ if (dl->pre_cmd) {
+ dl->extension->pre_ext_dl_plist = dl->pre_cmd->cmd_dma;
+ dl->extension->pre_ext_dl_num_cmd = dl->pre_cmd->num_cmds;
+ dl->extension->flags |= VSP1_DLH_EXT_PRE_CMD_EXEC;
+
+ vsp1_dl_ext_cmd_fill_header(dl->pre_cmd);
+ }
+
+ if (dl->post_cmd) {
+ dl->extension->post_ext_dl_plist = dl->post_cmd->cmd_dma;
+ dl->extension->post_ext_dl_num_cmd = dl->post_cmd->num_cmds;
+ dl->extension->flags |= VSP1_DLH_EXT_POST_CMD_EXEC;
+
+ vsp1_dl_ext_cmd_fill_header(dl->post_cmd);
+ }
+}
+
+static bool vsp1_dl_list_hw_update_pending(struct vsp1_dl_manager *dlm)
+{
+ struct vsp1_device *vsp1 = dlm->vsp1;
+
+ if (!dlm->queued)
+ return false;
+
+ /*
+ * Check whether the VSP1 has taken the update. The hardware indicates
+ * this by clearing the UPDHDR bit in the CMD register.
+ */
+ return !!(vsp1_read(vsp1, VI6_CMD(dlm->index)) & VI6_CMD_UPDHDR);
+}
+
+static void vsp1_dl_list_hw_enqueue(struct vsp1_dl_list *dl)
+{
+ struct vsp1_dl_manager *dlm = dl->dlm;
+ struct vsp1_device *vsp1 = dlm->vsp1;
+
+ /*
+ * Program the display list header address. If the hardware is idle
+ * (single-shot mode or first frame in continuous mode) it will then be
+ * started independently. If the hardware is operating, the
+ * VI6_DL_HDR_REF_ADDR register will be updated with the display list
+ * address.
+ */
+ vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma);
+}
+
+static void vsp1_dl_list_commit_continuous(struct vsp1_dl_list *dl)
+{
+ struct vsp1_dl_manager *dlm = dl->dlm;
+
+ /*
+ * If a previous display list has been queued to the hardware but not
+ * processed yet, the VSP can start processing it at any time. In that
+ * case we can't replace the queued list by the new one, as we could
+ * race with the hardware. We thus mark the update as pending, it will
+ * be queued up to the hardware by the frame end interrupt handler.
+ *
+ * If a display list is already pending we simply drop it as the new
+ * display list is assumed to contain a more recent configuration. It is
+ * an error if the already pending list has the
+ * VSP1_DL_FRAME_END_INTERNAL flag set, as there is then a process
+ * waiting for that list to complete. This shouldn't happen as the
+ * waiting process should perform proper locking, but warn just in
+ * case.
+ */
+ if (vsp1_dl_list_hw_update_pending(dlm)) {
+ WARN_ON(dlm->pending &&
+ (dlm->pending->flags & VSP1_DL_FRAME_END_INTERNAL));
+ __vsp1_dl_list_put(dlm->pending);
+ dlm->pending = dl;
+ return;
+ }
+
+ /*
+ * Pass the new display list to the hardware and mark it as queued. It
+ * will become active when the hardware starts processing it.
+ */
+ vsp1_dl_list_hw_enqueue(dl);
+
+ __vsp1_dl_list_put(dlm->queued);
+ dlm->queued = dl;
+}
+
+static void vsp1_dl_list_commit_singleshot(struct vsp1_dl_list *dl)
+{
+ struct vsp1_dl_manager *dlm = dl->dlm;
+
+ /*
+ * When working in single-shot mode, the caller guarantees that the
+ * hardware is idle at this point. Just commit the head display list
+ * to hardware. Chained lists will be started automatically.
+ */
+ vsp1_dl_list_hw_enqueue(dl);
+
+ dlm->active = dl;
+}
+
+void vsp1_dl_list_commit(struct vsp1_dl_list *dl, unsigned int dl_flags)
+{
+ struct vsp1_dl_manager *dlm = dl->dlm;
+ struct vsp1_dl_list *dl_next;
+ unsigned long flags;
+
+ /* Fill the header for the head and chained display lists. */
+ vsp1_dl_list_fill_header(dl, list_empty(&dl->chain));
+
+ list_for_each_entry(dl_next, &dl->chain, chain) {
+ bool last = list_is_last(&dl_next->chain, &dl->chain);
+
+ vsp1_dl_list_fill_header(dl_next, last);
+ }
+
+ dl->flags = dl_flags & ~VSP1_DL_FRAME_END_COMPLETED;
+
+ spin_lock_irqsave(&dlm->lock, flags);
+
+ if (dlm->singleshot)
+ vsp1_dl_list_commit_singleshot(dl);
+ else
+ vsp1_dl_list_commit_continuous(dl);
+
+ spin_unlock_irqrestore(&dlm->lock, flags);
+}
+
+/* -----------------------------------------------------------------------------
+ * Display List Manager
+ */
+
+/**
+ * vsp1_dlm_irq_frame_end - Display list handler for the frame end interrupt
+ * @dlm: the display list manager
+ *
+ * Return a set of flags that indicates display list completion status.
+ *
+ * The VSP1_DL_FRAME_END_COMPLETED flag indicates that the previous display list
+ * has completed at frame end. If the flag is not returned, display list
+ * completion has been delayed by one frame because the display list commit
+ * raced with the frame end interrupt. The function always returns with the flag
+ * set in single-shot mode as display list processing is then not continuous and
+ * races never occur.
+ *
+ * The following flags are only supported for continuous mode.
+ *
+ * The VSP1_DL_FRAME_END_INTERNAL flag indicates that the display list that just
+ * became active had been queued with the internal notification flag.
+ *
+ * The VSP1_DL_FRAME_END_WRITEBACK flag indicates that the previously active
+ * display list had been queued with the writeback flag.
+ */
+unsigned int vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
+{
+ struct vsp1_device *vsp1 = dlm->vsp1;
+ u32 status = vsp1_read(vsp1, VI6_STATUS);
+ unsigned int flags = 0;
+
+ spin_lock(&dlm->lock);
+
+ /*
+ * The mem-to-mem pipelines work in single-shot mode. No new display
+ * list can be queued; we don't have to do anything.
+ */
+ if (dlm->singleshot) {
+ __vsp1_dl_list_put(dlm->active);
+ dlm->active = NULL;
+ flags |= VSP1_DL_FRAME_END_COMPLETED;
+ goto done;
+ }
+
+ /*
+ * If the commit operation raced with the interrupt and occurred after
+ * the frame end event but before interrupt processing, the hardware
+ * hasn't taken the update into account yet. We have to skip one frame
+ * and retry.
+ */
+ if (vsp1_dl_list_hw_update_pending(dlm))
+ goto done;
+
+ /*
+ * Progressive streams report only TOP fields. If we have a BOTTOM
+ * field, we are interlaced, and expect the frame to complete on the
+ * next frame end interrupt.
+ */
+ if (status & VI6_STATUS_FLD_STD(dlm->index))
+ goto done;
+
+ /*
+ * If the active display list has the writeback flag set, the frame
+ * completion marks the end of the writeback capture. Return the
+ * VSP1_DL_FRAME_END_WRITEBACK flag and reset the display list's
+ * writeback flag.
+ */
+ if (dlm->active && (dlm->active->flags & VSP1_DL_FRAME_END_WRITEBACK)) {
+ flags |= VSP1_DL_FRAME_END_WRITEBACK;
+ dlm->active->flags &= ~VSP1_DL_FRAME_END_WRITEBACK;
+ }
+
+ /*
+ * The device starts processing the queued display list right after the
+ * frame end interrupt. The display list thus becomes active.
+ */
+ if (dlm->queued) {
+ if (dlm->queued->flags & VSP1_DL_FRAME_END_INTERNAL)
+ flags |= VSP1_DL_FRAME_END_INTERNAL;
+ dlm->queued->flags &= ~VSP1_DL_FRAME_END_INTERNAL;
+
+ __vsp1_dl_list_put(dlm->active);
+ dlm->active = dlm->queued;
+ dlm->queued = NULL;
+ flags |= VSP1_DL_FRAME_END_COMPLETED;
+ }
+
+ /*
+ * Now that the VSP has started processing the queued display list, we
+ * can queue the pending display list to the hardware if one has been
+ * prepared.
+ */
+ if (dlm->pending) {
+ vsp1_dl_list_hw_enqueue(dlm->pending);
+ dlm->queued = dlm->pending;
+ dlm->pending = NULL;
+ }
+
+done:
+ spin_unlock(&dlm->lock);
+
+ return flags;
+}
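+
+/*
+ * Illustration only (not part of the driver): a minimal sketch of how a
+ * caller may translate the flags returned by vsp1_dlm_irq_frame_end() into
+ * the VSP1_DU_STATUS_* values exposed to the DU driver, relying on the two
+ * flag sets being kept in sync as documented in vsp1_dl.h. dlm is a
+ * placeholder for a valid display list manager.
+ *
+ *	unsigned int flags = vsp1_dlm_irq_frame_end(dlm);
+ *	unsigned int status = 0;
+ *
+ *	if (flags & VSP1_DL_FRAME_END_COMPLETED)
+ *		status |= VSP1_DU_STATUS_COMPLETE;
+ *	if (flags & VSP1_DL_FRAME_END_WRITEBACK)
+ *		status |= VSP1_DU_STATUS_WRITEBACK;
+ */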
+
+/* Hardware Setup */
+void vsp1_dlm_setup(struct vsp1_device *vsp1)
+{
+ unsigned int i;
+ u32 ctrl = (256 << VI6_DL_CTRL_AR_WAIT_SHIFT)
+ | VI6_DL_CTRL_DC2 | VI6_DL_CTRL_DC1 | VI6_DL_CTRL_DC0
+ | VI6_DL_CTRL_DLE;
+ u32 ext_dl = (0x02 << VI6_DL_EXT_CTRL_POLINT_SHIFT)
+ | VI6_DL_EXT_CTRL_DLPRI | VI6_DL_EXT_CTRL_EXT;
+
+ if (vsp1_feature(vsp1, VSP1_HAS_EXT_DL)) {
+ for (i = 0; i < vsp1->info->wpf_count; ++i)
+ vsp1_write(vsp1, VI6_DL_EXT_CTRL(i), ext_dl);
+ }
+
+ vsp1_write(vsp1, VI6_DL_CTRL, ctrl);
+ vsp1_write(vsp1, VI6_DL_SWAP, VI6_DL_SWAP_LWS);
+}
+
+void vsp1_dlm_reset(struct vsp1_dl_manager *dlm)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dlm->lock, flags);
+
+ __vsp1_dl_list_put(dlm->active);
+ __vsp1_dl_list_put(dlm->queued);
+ __vsp1_dl_list_put(dlm->pending);
+
+ spin_unlock_irqrestore(&dlm->lock, flags);
+
+ dlm->active = NULL;
+ dlm->queued = NULL;
+ dlm->pending = NULL;
+}
+
+struct vsp1_dl_body *vsp1_dlm_dl_body_get(struct vsp1_dl_manager *dlm)
+{
+ return vsp1_dl_body_get(dlm->pool);
+}
+
+struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
+ unsigned int index,
+ unsigned int prealloc)
+{
+ struct vsp1_dl_manager *dlm;
+ size_t header_size;
+ unsigned int i;
+
+ dlm = devm_kzalloc(vsp1->dev, sizeof(*dlm), GFP_KERNEL);
+ if (!dlm)
+ return NULL;
+
+ dlm->index = index;
+ dlm->singleshot = vsp1->info->uapi;
+ dlm->vsp1 = vsp1;
+
+ spin_lock_init(&dlm->lock);
+ INIT_LIST_HEAD(&dlm->free);
+
+ /*
+ * Initialize the display list body and allocate DMA memory for the body
+ * and the header. Both are allocated together to avoid memory
+ * fragmentation, with the header located right after the body in
+ * memory. An extra body is allocated on top of the prealloc to account
+ * for the cached body used by the vsp1_pipeline object.
+ */
+ header_size = vsp1_feature(vsp1, VSP1_HAS_EXT_DL) ?
+ sizeof(struct vsp1_dl_header_extended) :
+ sizeof(struct vsp1_dl_header);
+
+ header_size = ALIGN(header_size, 8);
+
+ dlm->pool = vsp1_dl_body_pool_create(vsp1, prealloc + 1,
+ VSP1_DL_NUM_ENTRIES, header_size);
+ if (!dlm->pool)
+ return NULL;
+
+ for (i = 0; i < prealloc; ++i) {
+ struct vsp1_dl_list *dl;
+
+ dl = vsp1_dl_list_alloc(dlm);
+ if (!dl) {
+ vsp1_dlm_destroy(dlm);
+ return NULL;
+ }
+
+ /* The extended header immediately follows the header. */
+ if (vsp1_feature(vsp1, VSP1_HAS_EXT_DL))
+ dl->extension = (void *)dl->header
+ + sizeof(*dl->header);
+
+ list_add_tail(&dl->list, &dlm->free);
+ }
+
+ if (vsp1_feature(vsp1, VSP1_HAS_EXT_DL)) {
+ dlm->cmdpool = vsp1_dl_cmd_pool_create(vsp1,
+ VSP1_EXTCMD_AUTOFLD, prealloc);
+ if (!dlm->cmdpool) {
+ vsp1_dlm_destroy(dlm);
+ return NULL;
+ }
+ }
+
+ return dlm;
+}
+
+void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm)
+{
+ struct vsp1_dl_list *dl, *next;
+
+ if (!dlm)
+ return;
+
+ list_for_each_entry_safe(dl, next, &dlm->free, list) {
+ list_del(&dl->list);
+ vsp1_dl_list_free(dl);
+ }
+
+ vsp1_dl_body_pool_destroy(dlm->pool);
+ vsp1_dl_ext_cmd_pool_destroy(dlm->cmdpool);
+}
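+
+/*
+ * Illustration only (not part of the driver): a condensed sketch of the
+ * display list life cycle as seen from the rest of the driver. The index,
+ * prealloc, reg and value identifiers are placeholders.
+ *
+ *	struct vsp1_dl_manager *dlm;
+ *	struct vsp1_dl_list *dl;
+ *	struct vsp1_dl_body *dlb;
+ *
+ *	dlm = vsp1_dlm_create(vsp1, index, prealloc);
+ *
+ *	dl = vsp1_dl_list_get(dlm);
+ *	dlb = vsp1_dl_list_get_body0(dl);
+ *	vsp1_dl_body_write(dlb, reg, value);
+ *	vsp1_dl_list_commit(dl, 0);
+ *
+ *	vsp1_dlm_destroy(dlm);
+ */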
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_dl.h b/drivers/media/platform/renesas/vsp1/vsp1_dl.h
new file mode 100644
index 0000000000..bebe16483c
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_dl.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_dl.h -- R-Car VSP1 Display List
+ *
+ * Copyright (C) 2015 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+#ifndef __VSP1_DL_H__
+#define __VSP1_DL_H__
+
+#include <linux/types.h>
+
+struct vsp1_device;
+struct vsp1_dl_body;
+struct vsp1_dl_body_pool;
+struct vsp1_dl_list;
+struct vsp1_dl_manager;
+
+/* Keep these flags in sync with VSP1_DU_STATUS_* in include/media/vsp1.h. */
+#define VSP1_DL_FRAME_END_COMPLETED BIT(0)
+#define VSP1_DL_FRAME_END_WRITEBACK BIT(1)
+#define VSP1_DL_FRAME_END_INTERNAL BIT(2)
+
+/**
+ * struct vsp1_dl_ext_cmd - Extended Display command
+ * @pool: pool to which this command belongs
+ * @free: entry in the pool of free commands list
+ * @opcode: command type opcode
+ * @flags: flags used by the command
+ * @cmds: array of command bodies for this extended cmd
+ * @num_cmds: quantity of commands in @cmds array
+ * @cmd_dma: DMA address of the command body
+ * @data: memory allocation for command-specific data
+ * @data_dma: DMA address for command-specific data
+ */
+struct vsp1_dl_ext_cmd {
+ struct vsp1_dl_cmd_pool *pool;
+ struct list_head free;
+
+ u8 opcode;
+ u32 flags;
+
+ struct vsp1_pre_ext_dl_body *cmds;
+ unsigned int num_cmds;
+ dma_addr_t cmd_dma;
+
+ void *data;
+ dma_addr_t data_dma;
+};
+
+void vsp1_dlm_setup(struct vsp1_device *vsp1);
+
+struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
+ unsigned int index,
+ unsigned int prealloc);
+void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm);
+void vsp1_dlm_reset(struct vsp1_dl_manager *dlm);
+unsigned int vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm);
+struct vsp1_dl_body *vsp1_dlm_dl_body_get(struct vsp1_dl_manager *dlm);
+
+struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm);
+void vsp1_dl_list_put(struct vsp1_dl_list *dl);
+struct vsp1_dl_body *vsp1_dl_list_get_body0(struct vsp1_dl_list *dl);
+struct vsp1_dl_ext_cmd *vsp1_dl_get_pre_cmd(struct vsp1_dl_list *dl);
+void vsp1_dl_list_commit(struct vsp1_dl_list *dl, unsigned int dl_flags);
+
+struct vsp1_dl_body_pool *
+vsp1_dl_body_pool_create(struct vsp1_device *vsp1, unsigned int num_bodies,
+ unsigned int num_entries, size_t extra_size);
+void vsp1_dl_body_pool_destroy(struct vsp1_dl_body_pool *pool);
+struct vsp1_dl_body *vsp1_dl_body_get(struct vsp1_dl_body_pool *pool);
+void vsp1_dl_body_put(struct vsp1_dl_body *dlb);
+
+void vsp1_dl_body_write(struct vsp1_dl_body *dlb, u32 reg, u32 data);
+int vsp1_dl_list_add_body(struct vsp1_dl_list *dl, struct vsp1_dl_body *dlb);
+int vsp1_dl_list_add_chain(struct vsp1_dl_list *head, struct vsp1_dl_list *dl);
+
+#endif /* __VSP1_DL_H__ */
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_drm.c b/drivers/media/platform/renesas/vsp1/vsp1_drm.c
new file mode 100644
index 0000000000..9b087bd8df
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_drm.c
@@ -0,0 +1,1000 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_drm.c -- R-Car VSP1 DRM/KMS Interface
+ *
+ * Copyright (C) 2015 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-subdev.h>
+#include <media/vsp1.h>
+
+#include "vsp1.h"
+#include "vsp1_brx.h"
+#include "vsp1_dl.h"
+#include "vsp1_drm.h"
+#include "vsp1_lif.h"
+#include "vsp1_pipe.h"
+#include "vsp1_rwpf.h"
+#include "vsp1_uif.h"
+
+#define BRX_NAME(e) (e)->type == VSP1_ENTITY_BRU ? "BRU" : "BRS"
+
+/* -----------------------------------------------------------------------------
+ * Interrupt Handling
+ */
+
+static void vsp1_du_pipeline_frame_end(struct vsp1_pipeline *pipe,
+ unsigned int completion)
+{
+ struct vsp1_drm_pipeline *drm_pipe = to_vsp1_drm_pipeline(pipe);
+
+ if (drm_pipe->du_complete) {
+ struct vsp1_entity *uif = drm_pipe->uif;
+ unsigned int status = completion
+ & (VSP1_DU_STATUS_COMPLETE |
+ VSP1_DU_STATUS_WRITEBACK);
+ u32 crc;
+
+ crc = uif ? vsp1_uif_get_crc(to_uif(&uif->subdev)) : 0;
+ drm_pipe->du_complete(drm_pipe->du_private, status, crc);
+ }
+
+ if (completion & VSP1_DL_FRAME_END_INTERNAL) {
+ drm_pipe->force_brx_release = false;
+ wake_up(&drm_pipe->wait_queue);
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ * Pipeline Configuration
+ */
+
+/*
+ * Insert the UIF in the pipeline between the prev and next entities. If no UIF
+ * is available, connect the two entities directly.
+ */
+static int vsp1_du_insert_uif(struct vsp1_device *vsp1,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_entity *uif,
+ struct vsp1_entity *prev, unsigned int prev_pad,
+ struct vsp1_entity *next, unsigned int next_pad)
+{
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ if (!uif) {
+ /*
+ * If there's no UIF to be inserted, connect the previous and
+ * next entities directly.
+ */
+ prev->sink = next;
+ prev->sink_pad = next_pad;
+ return 0;
+ }
+
+ prev->sink = uif;
+ prev->sink_pad = UIF_PAD_SINK;
+
+ format.pad = prev_pad;
+
+ ret = v4l2_subdev_call(&prev->subdev, pad, get_fmt, NULL, &format);
+ if (ret < 0)
+ return ret;
+
+ format.pad = UIF_PAD_SINK;
+
+ ret = v4l2_subdev_call(&uif->subdev, pad, set_fmt, NULL, &format);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on UIF sink\n",
+ __func__, format.format.width, format.format.height,
+ format.format.code);
+
+ /*
+ * The UIF doesn't mangle the format between its sink and source pads,
+ * so there is no need to retrieve the format on its source pad.
+ */
+
+ uif->sink = next;
+ uif->sink_pad = next_pad;
+
+ return 0;
+}
+
+/* Setup one RPF and the connected BRx sink pad. */
+static int vsp1_du_pipeline_setup_rpf(struct vsp1_device *vsp1,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_rwpf *rpf,
+ struct vsp1_entity *uif,
+ unsigned int brx_input)
+{
+ struct v4l2_subdev_selection sel = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ const struct v4l2_rect *crop;
+ int ret;
+
+ /*
+ * Configure the format on the RPF sink pad and propagate it up to the
+ * BRx sink pad.
+ */
+ crop = &vsp1->drm->inputs[rpf->entity.index].crop;
+
+ format.pad = RWPF_PAD_SINK;
+ format.format.width = crop->width + crop->left;
+ format.format.height = crop->height + crop->top;
+ format.format.code = rpf->fmtinfo->mbus;
+ format.format.field = V4L2_FIELD_NONE;
+
+ ret = v4l2_subdev_call(&rpf->entity.subdev, pad, set_fmt, NULL,
+ &format);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(vsp1->dev,
+ "%s: set format %ux%u (%x) on RPF%u sink\n",
+ __func__, format.format.width, format.format.height,
+ format.format.code, rpf->entity.index);
+
+ sel.pad = RWPF_PAD_SINK;
+ sel.target = V4L2_SEL_TGT_CROP;
+ sel.r = *crop;
+
+ ret = v4l2_subdev_call(&rpf->entity.subdev, pad, set_selection, NULL,
+ &sel);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(vsp1->dev,
+ "%s: set selection (%u,%u)/%ux%u on RPF%u sink\n",
+ __func__, sel.r.left, sel.r.top, sel.r.width, sel.r.height,
+ rpf->entity.index);
+
+ /*
+ * RPF source, hardcode the format to ARGB8888 to turn on format
+ * conversion if needed.
+ */
+ format.pad = RWPF_PAD_SOURCE;
+
+ ret = v4l2_subdev_call(&rpf->entity.subdev, pad, get_fmt, NULL,
+ &format);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(vsp1->dev,
+ "%s: got format %ux%u (%x) on RPF%u source\n",
+ __func__, format.format.width, format.format.height,
+ format.format.code, rpf->entity.index);
+
+ format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32;
+
+ ret = v4l2_subdev_call(&rpf->entity.subdev, pad, set_fmt, NULL,
+ &format);
+ if (ret < 0)
+ return ret;
+
+ /* Insert and configure the UIF if available. */
+ ret = vsp1_du_insert_uif(vsp1, pipe, uif, &rpf->entity, RWPF_PAD_SOURCE,
+ pipe->brx, brx_input);
+ if (ret < 0)
+ return ret;
+
+ /* BRx sink, propagate the format from the RPF source. */
+ format.pad = brx_input;
+
+ ret = v4l2_subdev_call(&pipe->brx->subdev, pad, set_fmt, NULL,
+ &format);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on %s pad %u\n",
+ __func__, format.format.width, format.format.height,
+ format.format.code, BRX_NAME(pipe->brx), format.pad);
+
+ sel.pad = brx_input;
+ sel.target = V4L2_SEL_TGT_COMPOSE;
+ sel.r = vsp1->drm->inputs[rpf->entity.index].compose;
+
+ ret = v4l2_subdev_call(&pipe->brx->subdev, pad, set_selection, NULL,
+ &sel);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(vsp1->dev, "%s: set selection (%u,%u)/%ux%u on %s pad %u\n",
+ __func__, sel.r.left, sel.r.top, sel.r.width, sel.r.height,
+ BRX_NAME(pipe->brx), sel.pad);
+
+ return 0;
+}
+
+/* Setup the BRx source pad. */
+static int vsp1_du_pipeline_setup_inputs(struct vsp1_device *vsp1,
+ struct vsp1_pipeline *pipe);
+static void vsp1_du_pipeline_configure(struct vsp1_pipeline *pipe);
+
+static int vsp1_du_pipeline_setup_brx(struct vsp1_device *vsp1,
+ struct vsp1_pipeline *pipe)
+{
+ struct vsp1_drm_pipeline *drm_pipe = to_vsp1_drm_pipeline(pipe);
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ struct vsp1_entity *brx;
+ int ret;
+
+ /*
+ * Pick a BRx:
+ * - If we need more than two inputs, use the BRU.
+ * - Otherwise, if we are not forced to release our BRx, keep it.
+ * - Else, use any free BRx (arbitrarily starting with the BRU).
+ */
+ if (pipe->num_inputs > 2)
+ brx = &vsp1->bru->entity;
+ else if (pipe->brx && !drm_pipe->force_brx_release)
+ brx = pipe->brx;
+ else if (vsp1_feature(vsp1, VSP1_HAS_BRU) && !vsp1->bru->entity.pipe)
+ brx = &vsp1->bru->entity;
+ else
+ brx = &vsp1->brs->entity;
+
+ /* Switch BRx if needed. */
+ if (brx != pipe->brx) {
+ struct vsp1_entity *released_brx = NULL;
+
+ /* Release our BRx if we have one. */
+ if (pipe->brx) {
+ dev_dbg(vsp1->dev, "%s: pipe %u: releasing %s\n",
+ __func__, pipe->lif->index,
+ BRX_NAME(pipe->brx));
+
+ /*
+ * The BRx might be acquired by the other pipeline in
+ * the next step. We must thus remove it from the list
+ * of entities for this pipeline. The other pipeline's
+ * hardware configuration will reconfigure the BRx
+ * routing.
+ *
+ * However, if the other pipeline doesn't acquire our
+ * BRx, we need to keep it in the list, otherwise the
+ * hardware configuration step won't disconnect it from
+ * the pipeline. To solve this, store the released BRx
+ * pointer to add it back to the list of entities later
+ * if it isn't acquired by the other pipeline.
+ */
+ released_brx = pipe->brx;
+
+ list_del(&pipe->brx->list_pipe);
+ pipe->brx->sink = NULL;
+ pipe->brx->pipe = NULL;
+ pipe->brx = NULL;
+ }
+
+ /*
+ * If the BRx we need is in use, force the owner pipeline to
+ * switch to the other BRx and wait until the switch completes.
+ */
+ if (brx->pipe) {
+ struct vsp1_drm_pipeline *owner_pipe;
+
+ dev_dbg(vsp1->dev, "%s: pipe %u: waiting for %s\n",
+ __func__, pipe->lif->index, BRX_NAME(brx));
+
+ owner_pipe = to_vsp1_drm_pipeline(brx->pipe);
+ owner_pipe->force_brx_release = true;
+
+ vsp1_du_pipeline_setup_inputs(vsp1, &owner_pipe->pipe);
+ vsp1_du_pipeline_configure(&owner_pipe->pipe);
+
+ ret = wait_event_timeout(owner_pipe->wait_queue,
+ !owner_pipe->force_brx_release,
+ msecs_to_jiffies(500));
+ if (ret == 0)
+ dev_warn(vsp1->dev,
+ "DRM pipeline %u reconfiguration timeout\n",
+ owner_pipe->pipe.lif->index);
+ }
+
+ /*
+ * If the BRx we have released previously hasn't been acquired
+ * by the other pipeline, add it back to the entities list (with
+ * the pipe pointer NULL) to let vsp1_du_pipeline_configure()
+ * disconnect it from the hardware pipeline.
+ */
+ if (released_brx && !released_brx->pipe)
+ list_add_tail(&released_brx->list_pipe,
+ &pipe->entities);
+
+ /* Add the BRx to the pipeline. */
+ dev_dbg(vsp1->dev, "%s: pipe %u: acquired %s\n",
+ __func__, pipe->lif->index, BRX_NAME(brx));
+
+ pipe->brx = brx;
+ pipe->brx->pipe = pipe;
+ pipe->brx->sink = &pipe->output->entity;
+ pipe->brx->sink_pad = 0;
+
+ list_add_tail(&pipe->brx->list_pipe, &pipe->entities);
+ }
+
+ /*
+ * Configure the format on the BRx source and verify that it matches the
+ * requested format. We don't set the media bus code as it is configured
+ * on the BRx sink pad 0 and propagated inside the entity, not on the
+ * source pad.
+ */
+ format.pad = brx->source_pad;
+ format.format.width = drm_pipe->width;
+ format.format.height = drm_pipe->height;
+ format.format.field = V4L2_FIELD_NONE;
+
+ ret = v4l2_subdev_call(&brx->subdev, pad, set_fmt, NULL,
+ &format);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on %s pad %u\n",
+ __func__, format.format.width, format.format.height,
+ format.format.code, BRX_NAME(brx), brx->source_pad);
+
+ if (format.format.width != drm_pipe->width ||
+ format.format.height != drm_pipe->height) {
+ dev_dbg(vsp1->dev, "%s: format mismatch\n", __func__);
+ return -EPIPE;
+ }
+
+ return 0;
+}
+
+static unsigned int rpf_zpos(struct vsp1_device *vsp1, struct vsp1_rwpf *rpf)
+{
+ return vsp1->drm->inputs[rpf->entity.index].zpos;
+}
+
+/* Setup the input side of the pipeline (RPFs and BRx). */
+static int vsp1_du_pipeline_setup_inputs(struct vsp1_device *vsp1,
+ struct vsp1_pipeline *pipe)
+{
+ struct vsp1_drm_pipeline *drm_pipe = to_vsp1_drm_pipeline(pipe);
+ struct vsp1_rwpf *inputs[VSP1_MAX_RPF] = { NULL, };
+ struct vsp1_entity *uif;
+ bool use_uif = false;
+ struct vsp1_brx *brx;
+ unsigned int i;
+ int ret;
+
+ /* Count the number of enabled inputs and sort them by Z-order. */
+ pipe->num_inputs = 0;
+
+ for (i = 0; i < vsp1->info->rpf_count; ++i) {
+ struct vsp1_rwpf *rpf = vsp1->rpf[i];
+ unsigned int j;
+
+ if (!pipe->inputs[i])
+ continue;
+
+ /* Insert the RPF in the sorted RPFs array. */
+ for (j = pipe->num_inputs++; j > 0; --j) {
+ if (rpf_zpos(vsp1, inputs[j-1]) <= rpf_zpos(vsp1, rpf))
+ break;
+ inputs[j] = inputs[j-1];
+ }
+
+ inputs[j] = rpf;
+ }
+
+ /*
+ * Setup the BRx. This must be done before setting up the RPF input
+ * pipelines as the BRx sink compose rectangles depend on the BRx source
+ * format.
+ */
+ ret = vsp1_du_pipeline_setup_brx(vsp1, pipe);
+ if (ret < 0) {
+ dev_err(vsp1->dev, "%s: failed to setup %s source\n", __func__,
+ BRX_NAME(pipe->brx));
+ return ret;
+ }
+
+ brx = to_brx(&pipe->brx->subdev);
+
+ /* Setup the RPF input pipeline for every enabled input. */
+ for (i = 0; i < pipe->brx->source_pad; ++i) {
+ struct vsp1_rwpf *rpf = inputs[i];
+
+ if (!rpf) {
+ brx->inputs[i].rpf = NULL;
+ continue;
+ }
+
+ if (!rpf->entity.pipe) {
+ rpf->entity.pipe = pipe;
+ list_add_tail(&rpf->entity.list_pipe, &pipe->entities);
+ }
+
+ brx->inputs[i].rpf = rpf;
+ rpf->brx_input = i;
+ rpf->entity.sink = pipe->brx;
+ rpf->entity.sink_pad = i;
+
+ dev_dbg(vsp1->dev, "%s: connecting RPF.%u to %s:%u\n",
+ __func__, rpf->entity.index, BRX_NAME(pipe->brx), i);
+
+ uif = drm_pipe->crc.source == VSP1_DU_CRC_PLANE &&
+ drm_pipe->crc.index == i ? drm_pipe->uif : NULL;
+ if (uif)
+ use_uif = true;
+ ret = vsp1_du_pipeline_setup_rpf(vsp1, pipe, rpf, uif, i);
+ if (ret < 0) {
+ dev_err(vsp1->dev,
+ "%s: failed to setup RPF.%u\n",
+ __func__, rpf->entity.index);
+ return ret;
+ }
+ }
+
+ /* Insert and configure the UIF at the BRx output if available. */
+ uif = drm_pipe->crc.source == VSP1_DU_CRC_OUTPUT ? drm_pipe->uif : NULL;
+ if (uif)
+ use_uif = true;
+ ret = vsp1_du_insert_uif(vsp1, pipe, uif,
+ pipe->brx, pipe->brx->source_pad,
+ &pipe->output->entity, 0);
+ if (ret < 0)
+ dev_err(vsp1->dev, "%s: failed to setup UIF after %s\n",
+ __func__, BRX_NAME(pipe->brx));
+
+ /* If the DRM pipe does not have a UIF there is nothing we can update. */
+ if (!drm_pipe->uif)
+ return 0;
+
+ /*
+ * If the UIF is not in use schedule it for removal by setting its pipe
+ * pointer to NULL, vsp1_du_pipeline_configure() will remove it from the
+ * hardware pipeline and from the pipeline's list of entities. Otherwise
+ * make sure it is present in the pipeline's list of entities if it
+ * wasn't already.
+ */
+ if (!use_uif) {
+ drm_pipe->uif->pipe = NULL;
+ } else if (!drm_pipe->uif->pipe) {
+ drm_pipe->uif->pipe = pipe;
+ list_add_tail(&drm_pipe->uif->list_pipe, &pipe->entities);
+ }
+
+ return 0;
+}
+
+/* Setup the output side of the pipeline (WPF and LIF). */
+static int vsp1_du_pipeline_setup_output(struct vsp1_device *vsp1,
+ struct vsp1_pipeline *pipe)
+{
+ struct vsp1_drm_pipeline *drm_pipe = to_vsp1_drm_pipeline(pipe);
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ format.pad = RWPF_PAD_SINK;
+ format.format.width = drm_pipe->width;
+ format.format.height = drm_pipe->height;
+ format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32;
+ format.format.field = V4L2_FIELD_NONE;
+
+ ret = v4l2_subdev_call(&pipe->output->entity.subdev, pad, set_fmt, NULL,
+ &format);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on WPF%u sink\n",
+ __func__, format.format.width, format.format.height,
+ format.format.code, pipe->output->entity.index);
+
+ format.pad = RWPF_PAD_SOURCE;
+ ret = v4l2_subdev_call(&pipe->output->entity.subdev, pad, get_fmt, NULL,
+ &format);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(vsp1->dev, "%s: got format %ux%u (%x) on WPF%u source\n",
+ __func__, format.format.width, format.format.height,
+ format.format.code, pipe->output->entity.index);
+
+ format.pad = LIF_PAD_SINK;
+ ret = v4l2_subdev_call(&pipe->lif->subdev, pad, set_fmt, NULL,
+ &format);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on LIF%u sink\n",
+ __func__, format.format.width, format.format.height,
+ format.format.code, pipe->lif->index);
+
+ /*
+ * Verify that the format at the output of the pipeline matches the
+ * requested frame size and media bus code.
+ */
+ if (format.format.width != drm_pipe->width ||
+ format.format.height != drm_pipe->height ||
+ format.format.code != MEDIA_BUS_FMT_ARGB8888_1X32) {
+ dev_dbg(vsp1->dev, "%s: format mismatch on LIF%u\n", __func__,
+ pipe->lif->index);
+ return -EPIPE;
+ }
+
+ return 0;
+}
+
+/* Configure all entities in the pipeline. */
+static void vsp1_du_pipeline_configure(struct vsp1_pipeline *pipe)
+{
+ struct vsp1_drm_pipeline *drm_pipe = to_vsp1_drm_pipeline(pipe);
+ struct vsp1_entity *entity;
+ struct vsp1_entity *next;
+ struct vsp1_dl_list *dl;
+ struct vsp1_dl_body *dlb;
+ unsigned int dl_flags = 0;
+
+ if (drm_pipe->force_brx_release)
+ dl_flags |= VSP1_DL_FRAME_END_INTERNAL;
+ if (pipe->output->writeback)
+ dl_flags |= VSP1_DL_FRAME_END_WRITEBACK;
+
+ dl = vsp1_dl_list_get(pipe->output->dlm);
+ dlb = vsp1_dl_list_get_body0(dl);
+
+ list_for_each_entry_safe(entity, next, &pipe->entities, list_pipe) {
+ /* Disconnect unused entities from the pipeline. */
+ if (!entity->pipe) {
+ vsp1_dl_body_write(dlb, entity->route->reg,
+ VI6_DPR_NODE_UNUSED);
+
+ entity->sink = NULL;
+ list_del(&entity->list_pipe);
+
+ continue;
+ }
+
+ vsp1_entity_route_setup(entity, pipe, dlb);
+ vsp1_entity_configure_stream(entity, pipe, dl, dlb);
+ vsp1_entity_configure_frame(entity, pipe, dl, dlb);
+ vsp1_entity_configure_partition(entity, pipe, dl, dlb);
+ }
+
+ vsp1_dl_list_commit(dl, dl_flags);
+}
+
+static int vsp1_du_pipeline_set_rwpf_format(struct vsp1_device *vsp1,
+ struct vsp1_rwpf *rwpf,
+ u32 pixelformat, unsigned int pitch)
+{
+ const struct vsp1_format_info *fmtinfo;
+ unsigned int chroma_hsub;
+
+ fmtinfo = vsp1_get_format_info(vsp1, pixelformat);
+ if (!fmtinfo) {
+ dev_dbg(vsp1->dev, "Unsupported pixel format %08x\n",
+ pixelformat);
+ return -EINVAL;
+ }
+
+ /*
+ * Only formats with three planes can affect the chroma planes pitch.
+ * All formats with two planes have a horizontal subsampling value of 2,
+ * but combine U and V in a single chroma plane, which thus results in
+ * the luma plane and chroma plane having the same pitch.
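+ *
+ * For instance, NV12 (two planes) keeps the chroma pitch equal to the luma
+ * pitch, while a three-plane format such as YUV420M halves it.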
+ */
+ chroma_hsub = (fmtinfo->planes == 3) ? fmtinfo->hsub : 1;
+
+ rwpf->fmtinfo = fmtinfo;
+ rwpf->format.num_planes = fmtinfo->planes;
+ rwpf->format.plane_fmt[0].bytesperline = pitch;
+ rwpf->format.plane_fmt[1].bytesperline = pitch / chroma_hsub;
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * DU Driver API
+ */
+
+int vsp1_du_init(struct device *dev)
+{
+ struct vsp1_device *vsp1 = dev_get_drvdata(dev);
+
+ if (!vsp1)
+ return -EPROBE_DEFER;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vsp1_du_init);
+
+/**
+ * vsp1_du_setup_lif - Setup the output part of the VSP pipeline
+ * @dev: the VSP device
+ * @pipe_index: the DRM pipeline index
+ * @cfg: the LIF configuration
+ *
+ * Configure the output part of VSP DRM pipeline for the given frame @cfg.width
+ * and @cfg.height. This sets up formats on the BRx source pad, the WPF sink and
+ * source pads, and the LIF sink pad.
+ *
+ * The @pipe_index argument selects which DRM pipeline to setup. The number of
+ * available pipelines depends on the VSP instance.
+ *
+ * As the media bus code on the blend unit source pad is conditioned by the
+ * configuration of its sink 0 pad, we also set up the formats on all blend unit
+ * sinks, even if the configuration will be overwritten later by
+ * vsp1_du_setup_rpf(). This ensures that the blend unit configuration is set to
+ * a well defined state.
+ *
+ * Return 0 on success or a negative error code on failure.
+ */
+int vsp1_du_setup_lif(struct device *dev, unsigned int pipe_index,
+ const struct vsp1_du_lif_config *cfg)
+{
+ struct vsp1_device *vsp1 = dev_get_drvdata(dev);
+ struct vsp1_drm_pipeline *drm_pipe;
+ struct vsp1_pipeline *pipe;
+ unsigned long flags;
+ unsigned int i;
+ int ret;
+
+ if (pipe_index >= vsp1->info->lif_count)
+ return -EINVAL;
+
+ drm_pipe = &vsp1->drm->pipe[pipe_index];
+ pipe = &drm_pipe->pipe;
+
+ if (!cfg) {
+ struct vsp1_brx *brx;
+
+ mutex_lock(&vsp1->drm->lock);
+
+ brx = to_brx(&pipe->brx->subdev);
+
+ /*
+ * A NULL configuration means the CRTC is being disabled; stop
+ * the pipeline and turn the light off.
+ */
+ ret = vsp1_pipeline_stop(pipe);
+ if (ret == -ETIMEDOUT)
+ dev_err(vsp1->dev, "DRM pipeline stop timeout\n");
+
+ for (i = 0; i < ARRAY_SIZE(pipe->inputs); ++i) {
+ struct vsp1_rwpf *rpf = pipe->inputs[i];
+
+ if (!rpf)
+ continue;
+
+ /*
+ * Remove the RPF from the pipe and the list of BRx
+ * inputs.
+ */
+ WARN_ON(!rpf->entity.pipe);
+ rpf->entity.pipe = NULL;
+ list_del(&rpf->entity.list_pipe);
+ pipe->inputs[i] = NULL;
+
+ brx->inputs[rpf->brx_input].rpf = NULL;
+ }
+
+ drm_pipe->du_complete = NULL;
+ pipe->num_inputs = 0;
+
+ dev_dbg(vsp1->dev, "%s: pipe %u: releasing %s\n",
+ __func__, pipe->lif->index,
+ BRX_NAME(pipe->brx));
+
+ list_del(&pipe->brx->list_pipe);
+ pipe->brx->pipe = NULL;
+ pipe->brx = NULL;
+
+ mutex_unlock(&vsp1->drm->lock);
+
+ vsp1_dlm_reset(pipe->output->dlm);
+ vsp1_device_put(vsp1);
+
+ dev_dbg(vsp1->dev, "%s: pipeline disabled\n", __func__);
+
+ return 0;
+ }
+
+ /* Reset the underrun counter */
+ pipe->underrun_count = 0;
+
+ drm_pipe->width = cfg->width;
+ drm_pipe->height = cfg->height;
+ pipe->interlaced = cfg->interlaced;
+
+ dev_dbg(vsp1->dev, "%s: configuring LIF%u with format %ux%u%s\n",
+ __func__, pipe_index, cfg->width, cfg->height,
+ pipe->interlaced ? "i" : "");
+
+ mutex_lock(&vsp1->drm->lock);
+
+ /* Setup formats through the pipeline. */
+ ret = vsp1_du_pipeline_setup_inputs(vsp1, pipe);
+ if (ret < 0)
+ goto unlock;
+
+ ret = vsp1_du_pipeline_setup_output(vsp1, pipe);
+ if (ret < 0)
+ goto unlock;
+
+ /* Enable the VSP1. */
+ ret = vsp1_device_get(vsp1);
+ if (ret < 0)
+ goto unlock;
+
+ /*
+ * Register a callback to allow us to notify the DRM driver of frame
+ * completion events.
+ */
+ drm_pipe->du_complete = cfg->callback;
+ drm_pipe->du_private = cfg->callback_data;
+
+ /* Disable the display interrupts. */
+ vsp1_write(vsp1, VI6_DISP_IRQ_STA(pipe_index), 0);
+ vsp1_write(vsp1, VI6_DISP_IRQ_ENB(pipe_index), 0);
+
+ /* Configure all entities in the pipeline. */
+ vsp1_du_pipeline_configure(pipe);
+
+unlock:
+ mutex_unlock(&vsp1->drm->lock);
+
+ if (ret < 0)
+ return ret;
+
+ /* Start the pipeline. */
+ spin_lock_irqsave(&pipe->irqlock, flags);
+ vsp1_pipeline_run(pipe);
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
+
+ dev_dbg(vsp1->dev, "%s: pipeline enabled\n", __func__);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vsp1_du_setup_lif);
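+
+/*
+ * Illustration only (not part of the driver): a hypothetical DU-side caller
+ * enabling DRM pipeline 0 and later disabling it with a NULL configuration.
+ * The field names follow struct vsp1_du_lif_config; vsp_dev, du_crtc and
+ * du_frame_done are placeholders.
+ *
+ *	static void du_frame_done(void *data, unsigned int status, u32 crc);
+ *
+ *	struct vsp1_du_lif_config cfg = {
+ *		.width = 1920,
+ *		.height = 1080,
+ *		.interlaced = false,
+ *		.callback = du_frame_done,
+ *		.callback_data = du_crtc,
+ *	};
+ *
+ *	vsp1_du_setup_lif(vsp_dev, 0, &cfg);
+ *	...
+ *	vsp1_du_setup_lif(vsp_dev, 0, NULL);
+ */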
+
+/**
+ * vsp1_du_atomic_begin - Prepare for an atomic update
+ * @dev: the VSP device
+ * @pipe_index: the DRM pipeline index
+ */
+void vsp1_du_atomic_begin(struct device *dev, unsigned int pipe_index)
+{
+}
+EXPORT_SYMBOL_GPL(vsp1_du_atomic_begin);
+
+/**
+ * vsp1_du_atomic_update - Setup one RPF input of the VSP pipeline
+ * @dev: the VSP device
+ * @pipe_index: the DRM pipeline index
+ * @rpf_index: index of the RPF to setup (0-based)
+ * @cfg: the RPF configuration
+ *
+ * Configure the VSP to perform image composition through RPF @rpf_index as
+ * described by the @cfg configuration. The image to compose is referenced by
+ * @cfg.mem and composed using the @cfg.src crop rectangle and the @cfg.dst
+ * composition rectangle. The Z-order is configurable with higher @zpos values
+ * displayed on top.
+ *
+ * If the @cfg configuration is NULL, the RPF will be disabled. Calling the
+ * function on a disabled RPF is allowed.
+ *
+ * Image format as stored in memory is expressed as a V4L2 @cfg.pixelformat
+ * value. The memory pitch is configurable to allow for padding at end of lines,
+ * or simply for images that extend beyond the crop rectangle boundaries. The
+ * @cfg.pitch value is expressed in bytes and applies to all planes for
+ * multiplanar formats.
+ *
+ * The source memory buffer is referenced by the DMA address of its planes in
+ * the @cfg.mem array. Up to two planes are supported. The second plane DMA
+ * address is ignored for formats using a single plane.
+ *
+ * This function isn't reentrant; the caller needs to serialize calls.
+ *
+ * Return 0 on success or a negative error code on failure.
+ */
+int vsp1_du_atomic_update(struct device *dev, unsigned int pipe_index,
+ unsigned int rpf_index,
+ const struct vsp1_du_atomic_config *cfg)
+{
+ struct vsp1_device *vsp1 = dev_get_drvdata(dev);
+ struct vsp1_drm_pipeline *drm_pipe = &vsp1->drm->pipe[pipe_index];
+ struct vsp1_rwpf *rpf;
+ int ret;
+
+ if (rpf_index >= vsp1->info->rpf_count)
+ return -EINVAL;
+
+ rpf = vsp1->rpf[rpf_index];
+
+ if (!cfg) {
+ dev_dbg(vsp1->dev, "%s: RPF%u: disable requested\n", __func__,
+ rpf_index);
+
+ /*
+ * Remove the RPF from the pipeline's inputs. Keep it in the
+ * pipeline's entity list to let vsp1_du_pipeline_configure()
+ * remove it from the hardware pipeline.
+ */
+ rpf->entity.pipe = NULL;
+ drm_pipe->pipe.inputs[rpf_index] = NULL;
+ return 0;
+ }
+
+ dev_dbg(vsp1->dev,
+ "%s: RPF%u: (%u,%u)/%ux%u -> (%u,%u)/%ux%u (%08x), pitch %u dma { %pad, %pad, %pad } zpos %u\n",
+ __func__, rpf_index,
+ cfg->src.left, cfg->src.top, cfg->src.width, cfg->src.height,
+ cfg->dst.left, cfg->dst.top, cfg->dst.width, cfg->dst.height,
+ cfg->pixelformat, cfg->pitch, &cfg->mem[0], &cfg->mem[1],
+ &cfg->mem[2], cfg->zpos);
+
+ /*
+ * Store the format, stride, memory buffer address, crop and compose
+ * rectangles, and Z-order position for the input.
+ */
+ ret = vsp1_du_pipeline_set_rwpf_format(vsp1, rpf, cfg->pixelformat,
+ cfg->pitch);
+ if (ret < 0)
+ return ret;
+
+ rpf->alpha = cfg->alpha;
+
+ rpf->mem.addr[0] = cfg->mem[0];
+ rpf->mem.addr[1] = cfg->mem[1];
+ rpf->mem.addr[2] = cfg->mem[2];
+
+ rpf->format.flags = cfg->premult ? V4L2_PIX_FMT_FLAG_PREMUL_ALPHA : 0;
+
+ vsp1->drm->inputs[rpf_index].crop = cfg->src;
+ vsp1->drm->inputs[rpf_index].compose = cfg->dst;
+ vsp1->drm->inputs[rpf_index].zpos = cfg->zpos;
+
+ drm_pipe->pipe.inputs[rpf_index] = rpf;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vsp1_du_atomic_update);
+
+/**
+ * vsp1_du_atomic_flush - Commit an atomic update
+ * @dev: the VSP device
+ * @pipe_index: the DRM pipeline index
+ * @cfg: atomic pipe configuration
+ */
+void vsp1_du_atomic_flush(struct device *dev, unsigned int pipe_index,
+ const struct vsp1_du_atomic_pipe_config *cfg)
+{
+ struct vsp1_device *vsp1 = dev_get_drvdata(dev);
+ struct vsp1_drm_pipeline *drm_pipe = &vsp1->drm->pipe[pipe_index];
+ struct vsp1_pipeline *pipe = &drm_pipe->pipe;
+ int ret;
+
+ drm_pipe->crc = cfg->crc;
+
+ mutex_lock(&vsp1->drm->lock);
+
+ if (cfg->writeback.pixelformat) {
+ const struct vsp1_du_writeback_config *wb_cfg = &cfg->writeback;
+
+ ret = vsp1_du_pipeline_set_rwpf_format(vsp1, pipe->output,
+ wb_cfg->pixelformat,
+ wb_cfg->pitch);
+ if (WARN_ON(ret < 0))
+ goto done;
+
+ pipe->output->mem.addr[0] = wb_cfg->mem[0];
+ pipe->output->mem.addr[1] = wb_cfg->mem[1];
+ pipe->output->mem.addr[2] = wb_cfg->mem[2];
+ pipe->output->writeback = true;
+ }
+
+ vsp1_du_pipeline_setup_inputs(vsp1, pipe);
+ vsp1_du_pipeline_configure(pipe);
+
+done:
+ mutex_unlock(&vsp1->drm->lock);
+}
+EXPORT_SYMBOL_GPL(vsp1_du_atomic_flush);
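+
+/*
+ * Illustration only (not part of the driver): a hypothetical atomic update
+ * sequence composing a single full-screen plane on RPF 0 of pipeline 0. The
+ * field names follow struct vsp1_du_atomic_config and
+ * struct vsp1_du_atomic_pipe_config; the pixel format choice, plane_dma and
+ * vsp_dev are placeholders.
+ *
+ *	struct vsp1_du_atomic_config rpf_cfg = {
+ *		.pixelformat = V4L2_PIX_FMT_ARGB32,
+ *		.pitch = 1920 * 4,
+ *		.mem = { plane_dma },
+ *		.src = { .left = 0, .top = 0, .width = 1920, .height = 1080 },
+ *		.dst = { .left = 0, .top = 0, .width = 1920, .height = 1080 },
+ *		.alpha = 255,
+ *		.zpos = 0,
+ *	};
+ *	struct vsp1_du_atomic_pipe_config pipe_cfg = { };
+ *
+ *	vsp1_du_atomic_begin(vsp_dev, 0);
+ *	vsp1_du_atomic_update(vsp_dev, 0, 0, &rpf_cfg);
+ *	vsp1_du_atomic_flush(vsp_dev, 0, &pipe_cfg);
+ */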
+
+int vsp1_du_map_sg(struct device *dev, struct sg_table *sgt)
+{
+ struct vsp1_device *vsp1 = dev_get_drvdata(dev);
+
+ /*
+ * As all the buffers allocated by the DU driver are coherent, we can
+ * skip cache sync. This will need to be revisited when support for
+ * non-coherent buffers is added to the DU driver.
+ */
+ return dma_map_sgtable(vsp1->bus_master, sgt, DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+}
+EXPORT_SYMBOL_GPL(vsp1_du_map_sg);
+
+void vsp1_du_unmap_sg(struct device *dev, struct sg_table *sgt)
+{
+ struct vsp1_device *vsp1 = dev_get_drvdata(dev);
+
+ dma_unmap_sgtable(vsp1->bus_master, sgt, DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+}
+EXPORT_SYMBOL_GPL(vsp1_du_unmap_sg);
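+
+/*
+ * Illustration only (not part of the driver): a hypothetical importer maps a
+ * framebuffer's sg_table for the VSP before scanout and unmaps it when the
+ * framebuffer is destroyed. vsp_dev and sgt are placeholders.
+ *
+ *	ret = vsp1_du_map_sg(vsp_dev, sgt);
+ *	if (ret < 0)
+ *		return ret;
+ *	...
+ *	vsp1_du_unmap_sg(vsp_dev, sgt);
+ */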
+
+/* -----------------------------------------------------------------------------
+ * Initialization
+ */
+
+int vsp1_drm_init(struct vsp1_device *vsp1)
+{
+ unsigned int i;
+
+ vsp1->drm = devm_kzalloc(vsp1->dev, sizeof(*vsp1->drm), GFP_KERNEL);
+ if (!vsp1->drm)
+ return -ENOMEM;
+
+ mutex_init(&vsp1->drm->lock);
+
+ /* Create one DRM pipeline per LIF. */
+ for (i = 0; i < vsp1->info->lif_count; ++i) {
+ struct vsp1_drm_pipeline *drm_pipe = &vsp1->drm->pipe[i];
+ struct vsp1_pipeline *pipe = &drm_pipe->pipe;
+
+ init_waitqueue_head(&drm_pipe->wait_queue);
+
+ vsp1_pipeline_init(pipe);
+
+ pipe->frame_end = vsp1_du_pipeline_frame_end;
+
+ /*
+ * The output side of the DRM pipeline is static, add the
+ * corresponding entities manually.
+ */
+ pipe->output = vsp1->wpf[i];
+ pipe->lif = &vsp1->lif[i]->entity;
+
+ pipe->output->entity.pipe = pipe;
+ pipe->output->entity.sink = pipe->lif;
+ pipe->output->entity.sink_pad = 0;
+ list_add_tail(&pipe->output->entity.list_pipe, &pipe->entities);
+
+ pipe->lif->pipe = pipe;
+ list_add_tail(&pipe->lif->list_pipe, &pipe->entities);
+
+ /*
+ * CRC computation is initially disabled, don't add the UIF to
+ * the pipeline.
+ */
+ if (i < vsp1->info->uif_count)
+ drm_pipe->uif = &vsp1->uif[i]->entity;
+ }
+
+ /* Disable all RPFs initially. */
+ for (i = 0; i < vsp1->info->rpf_count; ++i) {
+ struct vsp1_rwpf *input = vsp1->rpf[i];
+
+ INIT_LIST_HEAD(&input->entity.list_pipe);
+ }
+
+ return 0;
+}
+
+void vsp1_drm_cleanup(struct vsp1_device *vsp1)
+{
+ mutex_destroy(&vsp1->drm->lock);
+}
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_drm.h b/drivers/media/platform/renesas/vsp1/vsp1_drm.h
new file mode 100644
index 0000000000..ab8b7e3161
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_drm.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_drm.h -- R-Car VSP1 DRM/KMS Interface
+ *
+ * Copyright (C) 2015 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+#ifndef __VSP1_DRM_H__
+#define __VSP1_DRM_H__
+
+#include <linux/mutex.h>
+#include <linux/videodev2.h>
+#include <linux/wait.h>
+
+#include <media/vsp1.h>
+
+#include "vsp1_pipe.h"
+
+/**
+ * struct vsp1_drm_pipeline - State for the API exposed to the DRM driver
+ * @pipe: the VSP1 pipeline used for display
+ * @width: output display width
+ * @height: output display height
+ * @force_brx_release: when set, release the BRx during the next reconfiguration
+ * @wait_queue: wait queue to wait for BRx release completion
+ * @uif: UIF entity if available for the pipeline
+ * @crc: CRC computation configuration
+ * @du_complete: frame completion callback for the DU driver (optional)
+ * @du_private: data to be passed to the du_complete callback
+ */
+struct vsp1_drm_pipeline {
+ struct vsp1_pipeline pipe;
+
+ unsigned int width;
+ unsigned int height;
+
+ bool force_brx_release;
+ wait_queue_head_t wait_queue;
+
+ struct vsp1_entity *uif;
+ struct vsp1_du_crc_config crc;
+
+ /* Frame synchronisation */
+ void (*du_complete)(void *data, unsigned int status, u32 crc);
+ void *du_private;
+};
+
+/**
+ * struct vsp1_drm - State for the API exposed to the DRM driver
+ * @pipe: the VSP1 DRM pipeline used for display
+ * @lock: protects the BRU and BRS allocation
+ * @inputs: source crop rectangle, destination compose rectangle and z-order
+ * position for every input (indexed by RPF index)
+ */
+struct vsp1_drm {
+ struct vsp1_drm_pipeline pipe[VSP1_MAX_LIF];
+ struct mutex lock;
+
+ struct {
+ struct v4l2_rect crop;
+ struct v4l2_rect compose;
+ unsigned int zpos;
+ } inputs[VSP1_MAX_RPF];
+};
+
+static inline struct vsp1_drm_pipeline *
+to_vsp1_drm_pipeline(struct vsp1_pipeline *pipe)
+{
+ return container_of(pipe, struct vsp1_drm_pipeline, pipe);
+}
+
+int vsp1_drm_init(struct vsp1_device *vsp1);
+void vsp1_drm_cleanup(struct vsp1_device *vsp1);
+
+#endif /* __VSP1_DRM_H__ */
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_drv.c b/drivers/media/platform/renesas/vsp1/vsp1_drv.c
new file mode 100644
index 0000000000..1aac44d687
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_drv.c
@@ -0,0 +1,1021 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_drv.c -- R-Car VSP1 Driver
+ *
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/videodev2.h>
+
+#include <media/rcar-fcp.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_brx.h"
+#include "vsp1_clu.h"
+#include "vsp1_dl.h"
+#include "vsp1_drm.h"
+#include "vsp1_hgo.h"
+#include "vsp1_hgt.h"
+#include "vsp1_hsit.h"
+#include "vsp1_lif.h"
+#include "vsp1_lut.h"
+#include "vsp1_pipe.h"
+#include "vsp1_rwpf.h"
+#include "vsp1_sru.h"
+#include "vsp1_uds.h"
+#include "vsp1_uif.h"
+#include "vsp1_video.h"
+
+/* -----------------------------------------------------------------------------
+ * Interrupt Handling
+ */
+
+static irqreturn_t vsp1_irq_handler(int irq, void *data)
+{
+ u32 mask = VI6_WPF_IRQ_STA_DFE | VI6_WPF_IRQ_STA_FRE |
+ VI6_WPF_IRQ_STA_UND;
+ struct vsp1_device *vsp1 = data;
+ irqreturn_t ret = IRQ_NONE;
+ unsigned int i;
+ u32 status;
+
+ for (i = 0; i < vsp1->info->wpf_count; ++i) {
+ struct vsp1_rwpf *wpf = vsp1->wpf[i];
+
+ if (wpf == NULL)
+ continue;
+
+ status = vsp1_read(vsp1, VI6_WPF_IRQ_STA(i));
+ vsp1_write(vsp1, VI6_WPF_IRQ_STA(i), ~status & mask);
+
+ if ((status & VI6_WPF_IRQ_STA_UND) && wpf->entity.pipe) {
+ wpf->entity.pipe->underrun_count++;
+
+ dev_warn_ratelimited(vsp1->dev,
+ "Underrun occurred at WPF%u (total underruns %u)\n",
+ i, wpf->entity.pipe->underrun_count);
+ }
+
+ if (status & VI6_WPF_IRQ_STA_DFE) {
+ vsp1_pipeline_frame_end(wpf->entity.pipe);
+ ret = IRQ_HANDLED;
+ }
+ }
+
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * Entities
+ */
+
+/*
+ * vsp1_create_sink_links - Create links from all sources to the given sink
+ *
+ * This function creates media links from all valid sources to the given sink
+ * pad. Links that would be invalid according to the VSP1 hardware capabilities
+ * are skipped. Those include all links
+ *
+ * - from a UDS to a UDS (UDS entities can't be chained)
+ * - from an entity to itself (no loops are allowed)
+ *
+ * Furthermore, the BRS can't be connected to histogram generators, but no
+ * special check is currently needed as all VSP instances that include a BRS
+ * have no histogram generator.
+ */
+static int vsp1_create_sink_links(struct vsp1_device *vsp1,
+ struct vsp1_entity *sink)
+{
+ struct media_entity *entity = &sink->subdev.entity;
+ struct vsp1_entity *source;
+ unsigned int pad;
+ int ret;
+
+ list_for_each_entry(source, &vsp1->entities, list_dev) {
+ u32 flags;
+
+ if (source->type == sink->type)
+ continue;
+
+ if (source->type == VSP1_ENTITY_HGO ||
+ source->type == VSP1_ENTITY_HGT ||
+ source->type == VSP1_ENTITY_LIF ||
+ source->type == VSP1_ENTITY_WPF)
+ continue;
+
+ flags = source->type == VSP1_ENTITY_RPF &&
+ sink->type == VSP1_ENTITY_WPF &&
+ source->index == sink->index
+ ? MEDIA_LNK_FL_ENABLED : 0;
+
+ for (pad = 0; pad < entity->num_pads; ++pad) {
+ if (!(entity->pads[pad].flags & MEDIA_PAD_FL_SINK))
+ continue;
+
+ ret = media_create_pad_link(&source->subdev.entity,
+ source->source_pad,
+ entity, pad, flags);
+ if (ret < 0)
+ return ret;
+
+ if (flags & MEDIA_LNK_FL_ENABLED)
+ source->sink = sink;
+ }
+ }
+
+ return 0;
+}
+
+static int vsp1_uapi_create_links(struct vsp1_device *vsp1)
+{
+ struct vsp1_entity *entity;
+ unsigned int i;
+ int ret;
+
+ list_for_each_entry(entity, &vsp1->entities, list_dev) {
+ if (entity->type == VSP1_ENTITY_LIF ||
+ entity->type == VSP1_ENTITY_RPF)
+ continue;
+
+ ret = vsp1_create_sink_links(vsp1, entity);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (vsp1->hgo) {
+ ret = media_create_pad_link(&vsp1->hgo->histo.entity.subdev.entity,
+ HISTO_PAD_SOURCE,
+ &vsp1->hgo->histo.video.entity, 0,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (vsp1->hgt) {
+ ret = media_create_pad_link(&vsp1->hgt->histo.entity.subdev.entity,
+ HISTO_PAD_SOURCE,
+ &vsp1->hgt->histo.video.entity, 0,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret < 0)
+ return ret;
+ }
+
+ for (i = 0; i < vsp1->info->lif_count; ++i) {
+ if (!vsp1->lif[i])
+ continue;
+
+ ret = media_create_pad_link(&vsp1->wpf[i]->entity.subdev.entity,
+ RWPF_PAD_SOURCE,
+ &vsp1->lif[i]->entity.subdev.entity,
+ LIF_PAD_SINK, 0);
+ if (ret < 0)
+ return ret;
+ }
+
+ for (i = 0; i < vsp1->info->rpf_count; ++i) {
+ struct vsp1_rwpf *rpf = vsp1->rpf[i];
+
+ ret = media_create_pad_link(&rpf->video->video.entity, 0,
+ &rpf->entity.subdev.entity,
+ RWPF_PAD_SINK,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret < 0)
+ return ret;
+ }
+
+ for (i = 0; i < vsp1->info->wpf_count; ++i) {
+ /*
+ * Connect the video device to the WPF. All connections are
+ * immutable.
+ */
+ struct vsp1_rwpf *wpf = vsp1->wpf[i];
+
+ ret = media_create_pad_link(&wpf->entity.subdev.entity,
+ RWPF_PAD_SOURCE,
+ &wpf->video->video.entity, 0,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void vsp1_destroy_entities(struct vsp1_device *vsp1)
+{
+ struct vsp1_entity *entity, *_entity;
+ struct vsp1_video *video, *_video;
+
+ list_for_each_entry_safe(entity, _entity, &vsp1->entities, list_dev) {
+ list_del(&entity->list_dev);
+ vsp1_entity_destroy(entity);
+ }
+
+ list_for_each_entry_safe(video, _video, &vsp1->videos, list) {
+ list_del(&video->list);
+ vsp1_video_cleanup(video);
+ }
+
+ v4l2_device_unregister(&vsp1->v4l2_dev);
+ if (vsp1->info->uapi)
+ media_device_unregister(&vsp1->media_dev);
+ media_device_cleanup(&vsp1->media_dev);
+
+ if (!vsp1->info->uapi)
+ vsp1_drm_cleanup(vsp1);
+}
+
+static int vsp1_create_entities(struct vsp1_device *vsp1)
+{
+ struct media_device *mdev = &vsp1->media_dev;
+ struct v4l2_device *vdev = &vsp1->v4l2_dev;
+ struct vsp1_entity *entity;
+ unsigned int i;
+ int ret;
+
+ mdev->dev = vsp1->dev;
+ mdev->hw_revision = vsp1->version;
+ strscpy(mdev->model, vsp1->info->model, sizeof(mdev->model));
+ media_device_init(mdev);
+
+ vsp1->media_ops.link_setup = vsp1_entity_link_setup;
+ /*
+ * Don't perform link validation when the userspace API is disabled as
+ * the pipeline is configured internally by the driver in that case, and
+ * its configuration can thus be trusted.
+ */
+ if (vsp1->info->uapi)
+ vsp1->media_ops.link_validate = v4l2_subdev_link_validate;
+
+ vdev->mdev = mdev;
+ ret = v4l2_device_register(vsp1->dev, vdev);
+ if (ret < 0) {
+ dev_err(vsp1->dev, "V4L2 device registration failed (%d)\n",
+ ret);
+ goto done;
+ }
+
+ /* Instantiate all the entities. */
+ if (vsp1_feature(vsp1, VSP1_HAS_BRS)) {
+ vsp1->brs = vsp1_brx_create(vsp1, VSP1_ENTITY_BRS);
+ if (IS_ERR(vsp1->brs)) {
+ ret = PTR_ERR(vsp1->brs);
+ goto done;
+ }
+
+ list_add_tail(&vsp1->brs->entity.list_dev, &vsp1->entities);
+ }
+
+ if (vsp1_feature(vsp1, VSP1_HAS_BRU)) {
+ vsp1->bru = vsp1_brx_create(vsp1, VSP1_ENTITY_BRU);
+ if (IS_ERR(vsp1->bru)) {
+ ret = PTR_ERR(vsp1->bru);
+ goto done;
+ }
+
+ list_add_tail(&vsp1->bru->entity.list_dev, &vsp1->entities);
+ }
+
+ if (vsp1_feature(vsp1, VSP1_HAS_CLU)) {
+ vsp1->clu = vsp1_clu_create(vsp1);
+ if (IS_ERR(vsp1->clu)) {
+ ret = PTR_ERR(vsp1->clu);
+ goto done;
+ }
+
+ list_add_tail(&vsp1->clu->entity.list_dev, &vsp1->entities);
+ }
+
+ vsp1->hsi = vsp1_hsit_create(vsp1, true);
+ if (IS_ERR(vsp1->hsi)) {
+ ret = PTR_ERR(vsp1->hsi);
+ goto done;
+ }
+
+ list_add_tail(&vsp1->hsi->entity.list_dev, &vsp1->entities);
+
+ vsp1->hst = vsp1_hsit_create(vsp1, false);
+ if (IS_ERR(vsp1->hst)) {
+ ret = PTR_ERR(vsp1->hst);
+ goto done;
+ }
+
+ list_add_tail(&vsp1->hst->entity.list_dev, &vsp1->entities);
+
+ if (vsp1_feature(vsp1, VSP1_HAS_HGO) && vsp1->info->uapi) {
+ vsp1->hgo = vsp1_hgo_create(vsp1);
+ if (IS_ERR(vsp1->hgo)) {
+ ret = PTR_ERR(vsp1->hgo);
+ goto done;
+ }
+
+ list_add_tail(&vsp1->hgo->histo.entity.list_dev,
+ &vsp1->entities);
+ }
+
+ if (vsp1_feature(vsp1, VSP1_HAS_HGT) && vsp1->info->uapi) {
+ vsp1->hgt = vsp1_hgt_create(vsp1);
+ if (IS_ERR(vsp1->hgt)) {
+ ret = PTR_ERR(vsp1->hgt);
+ goto done;
+ }
+
+ list_add_tail(&vsp1->hgt->histo.entity.list_dev,
+ &vsp1->entities);
+ }
+
+ /*
+ * The LIFs are only supported when used in conjunction with the DU, in
+ * which case the userspace API is disabled. If the userspace API is
+ * enabled, skip the LIFs, even when present.
+ */
+ if (!vsp1->info->uapi) {
+ for (i = 0; i < vsp1->info->lif_count; ++i) {
+ struct vsp1_lif *lif;
+
+ lif = vsp1_lif_create(vsp1, i);
+ if (IS_ERR(lif)) {
+ ret = PTR_ERR(lif);
+ goto done;
+ }
+
+ vsp1->lif[i] = lif;
+ list_add_tail(&lif->entity.list_dev, &vsp1->entities);
+ }
+ }
+
+ if (vsp1_feature(vsp1, VSP1_HAS_LUT)) {
+ vsp1->lut = vsp1_lut_create(vsp1);
+ if (IS_ERR(vsp1->lut)) {
+ ret = PTR_ERR(vsp1->lut);
+ goto done;
+ }
+
+ list_add_tail(&vsp1->lut->entity.list_dev, &vsp1->entities);
+ }
+
+ for (i = 0; i < vsp1->info->rpf_count; ++i) {
+ struct vsp1_rwpf *rpf;
+
+ rpf = vsp1_rpf_create(vsp1, i);
+ if (IS_ERR(rpf)) {
+ ret = PTR_ERR(rpf);
+ goto done;
+ }
+
+ vsp1->rpf[i] = rpf;
+ list_add_tail(&rpf->entity.list_dev, &vsp1->entities);
+
+ if (vsp1->info->uapi) {
+ struct vsp1_video *video = vsp1_video_create(vsp1, rpf);
+
+ if (IS_ERR(video)) {
+ ret = PTR_ERR(video);
+ goto done;
+ }
+
+ list_add_tail(&video->list, &vsp1->videos);
+ }
+ }
+
+ if (vsp1_feature(vsp1, VSP1_HAS_SRU)) {
+ vsp1->sru = vsp1_sru_create(vsp1);
+ if (IS_ERR(vsp1->sru)) {
+ ret = PTR_ERR(vsp1->sru);
+ goto done;
+ }
+
+ list_add_tail(&vsp1->sru->entity.list_dev, &vsp1->entities);
+ }
+
+ for (i = 0; i < vsp1->info->uds_count; ++i) {
+ struct vsp1_uds *uds;
+
+ uds = vsp1_uds_create(vsp1, i);
+ if (IS_ERR(uds)) {
+ ret = PTR_ERR(uds);
+ goto done;
+ }
+
+ vsp1->uds[i] = uds;
+ list_add_tail(&uds->entity.list_dev, &vsp1->entities);
+ }
+
+ for (i = 0; i < vsp1->info->uif_count; ++i) {
+ struct vsp1_uif *uif;
+
+ uif = vsp1_uif_create(vsp1, i);
+ if (IS_ERR(uif)) {
+ ret = PTR_ERR(uif);
+ goto done;
+ }
+
+ vsp1->uif[i] = uif;
+ list_add_tail(&uif->entity.list_dev, &vsp1->entities);
+ }
+
+ for (i = 0; i < vsp1->info->wpf_count; ++i) {
+ struct vsp1_rwpf *wpf;
+
+ wpf = vsp1_wpf_create(vsp1, i);
+ if (IS_ERR(wpf)) {
+ ret = PTR_ERR(wpf);
+ goto done;
+ }
+
+ vsp1->wpf[i] = wpf;
+ list_add_tail(&wpf->entity.list_dev, &vsp1->entities);
+
+ if (vsp1->info->uapi) {
+ struct vsp1_video *video = vsp1_video_create(vsp1, wpf);
+
+ if (IS_ERR(video)) {
+ ret = PTR_ERR(video);
+ goto done;
+ }
+
+ list_add_tail(&video->list, &vsp1->videos);
+ }
+ }
+
+ /* Register all subdevs. */
+ list_for_each_entry(entity, &vsp1->entities, list_dev) {
+ ret = v4l2_device_register_subdev(&vsp1->v4l2_dev,
+ &entity->subdev);
+ if (ret < 0)
+ goto done;
+ }
+
+ /*
+ * Create links and register subdev nodes if the userspace API is
+ * enabled or initialize the DRM pipeline otherwise.
+ */
+ if (vsp1->info->uapi) {
+ ret = vsp1_uapi_create_links(vsp1);
+ if (ret < 0)
+ goto done;
+
+ ret = v4l2_device_register_subdev_nodes(&vsp1->v4l2_dev);
+ if (ret < 0)
+ goto done;
+
+ ret = media_device_register(mdev);
+ } else {
+ ret = vsp1_drm_init(vsp1);
+ }
+
+done:
+ if (ret < 0)
+ vsp1_destroy_entities(vsp1);
+
+ return ret;
+}
+
+int vsp1_reset_wpf(struct vsp1_device *vsp1, unsigned int index)
+{
+ unsigned int timeout;
+ u32 status;
+
+ status = vsp1_read(vsp1, VI6_STATUS);
+ if (!(status & VI6_STATUS_SYS_ACT(index)))
+ return 0;
+
+ vsp1_write(vsp1, VI6_SRESET, VI6_SRESET_SRTS(index));
+ for (timeout = 10; timeout > 0; --timeout) {
+ status = vsp1_read(vsp1, VI6_STATUS);
+ if (!(status & VI6_STATUS_SYS_ACT(index)))
+ break;
+
+ usleep_range(1000, 2000);
+ }
+
+ if (!timeout) {
+ dev_err(vsp1->dev, "failed to reset wpf.%u\n", index);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int vsp1_device_init(struct vsp1_device *vsp1)
+{
+ unsigned int i;
+ int ret;
+
+ /* Reset any channel that might be running. */
+ for (i = 0; i < vsp1->info->wpf_count; ++i) {
+ ret = vsp1_reset_wpf(vsp1, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ vsp1_write(vsp1, VI6_CLK_DCSWT, (8 << VI6_CLK_DCSWT_CSTPW_SHIFT) |
+ (8 << VI6_CLK_DCSWT_CSTRW_SHIFT));
+
+ for (i = 0; i < vsp1->info->rpf_count; ++i)
+ vsp1_write(vsp1, VI6_DPR_RPF_ROUTE(i), VI6_DPR_NODE_UNUSED);
+
+ for (i = 0; i < vsp1->info->uds_count; ++i)
+ vsp1_write(vsp1, VI6_DPR_UDS_ROUTE(i), VI6_DPR_NODE_UNUSED);
+
+ for (i = 0; i < vsp1->info->uif_count; ++i)
+ vsp1_write(vsp1, VI6_DPR_UIF_ROUTE(i), VI6_DPR_NODE_UNUSED);
+
+ vsp1_write(vsp1, VI6_DPR_SRU_ROUTE, VI6_DPR_NODE_UNUSED);
+ vsp1_write(vsp1, VI6_DPR_LUT_ROUTE, VI6_DPR_NODE_UNUSED);
+ vsp1_write(vsp1, VI6_DPR_CLU_ROUTE, VI6_DPR_NODE_UNUSED);
+ vsp1_write(vsp1, VI6_DPR_HST_ROUTE, VI6_DPR_NODE_UNUSED);
+ vsp1_write(vsp1, VI6_DPR_HSI_ROUTE, VI6_DPR_NODE_UNUSED);
+ vsp1_write(vsp1, VI6_DPR_BRU_ROUTE, VI6_DPR_NODE_UNUSED);
+
+ if (vsp1_feature(vsp1, VSP1_HAS_BRS))
+ vsp1_write(vsp1, VI6_DPR_ILV_BRS_ROUTE, VI6_DPR_NODE_UNUSED);
+
+ vsp1_write(vsp1, VI6_DPR_HGO_SMPPT, (7 << VI6_DPR_SMPPT_TGW_SHIFT) |
+ (VI6_DPR_NODE_UNUSED << VI6_DPR_SMPPT_PT_SHIFT));
+ vsp1_write(vsp1, VI6_DPR_HGT_SMPPT, (7 << VI6_DPR_SMPPT_TGW_SHIFT) |
+ (VI6_DPR_NODE_UNUSED << VI6_DPR_SMPPT_PT_SHIFT));
+
+ vsp1_dlm_setup(vsp1);
+
+ return 0;
+}
+
+static void vsp1_mask_all_interrupts(struct vsp1_device *vsp1)
+{
+ unsigned int i;
+
+ for (i = 0; i < vsp1->info->lif_count; ++i)
+ vsp1_write(vsp1, VI6_DISP_IRQ_ENB(i), 0);
+ for (i = 0; i < vsp1->info->wpf_count; ++i)
+ vsp1_write(vsp1, VI6_WPF_IRQ_ENB(i), 0);
+}
+
+/*
+ * vsp1_device_get - Acquire the VSP1 device
+ *
+ * Make sure the device is not suspended and initialize it if needed.
+ *
+ * Return 0 on success or a negative error code otherwise.
+ */
+int vsp1_device_get(struct vsp1_device *vsp1)
+{
+ return pm_runtime_resume_and_get(vsp1->dev);
+}
+
+/*
+ * vsp1_device_put - Release the VSP1 device
+ *
+ * Decrement the VSP1 reference count and cleanup the device if the last
+ * reference is released.
+ */
+void vsp1_device_put(struct vsp1_device *vsp1)
+{
+ pm_runtime_put_sync(vsp1->dev);
+}
+
+/* -----------------------------------------------------------------------------
+ * Power Management
+ */
+
+static int __maybe_unused vsp1_pm_suspend(struct device *dev)
+{
+ struct vsp1_device *vsp1 = dev_get_drvdata(dev);
+
+ /*
+ * When used as part of a display pipeline, the VSP is stopped and
+ * restarted explicitly by the DU.
+ */
+ if (!vsp1->drm)
+ vsp1_video_suspend(vsp1);
+
+ pm_runtime_force_suspend(vsp1->dev);
+
+ return 0;
+}
+
+static int __maybe_unused vsp1_pm_resume(struct device *dev)
+{
+ struct vsp1_device *vsp1 = dev_get_drvdata(dev);
+
+ pm_runtime_force_resume(vsp1->dev);
+
+ /*
+ * When used as part of a display pipeline, the VSP is stopped and
+ * restarted explicitly by the DU.
+ */
+ if (!vsp1->drm)
+ vsp1_video_resume(vsp1);
+
+ return 0;
+}
+
+static int __maybe_unused vsp1_pm_runtime_suspend(struct device *dev)
+{
+ struct vsp1_device *vsp1 = dev_get_drvdata(dev);
+
+ rcar_fcp_disable(vsp1->fcp);
+ reset_control_assert(vsp1->rstc);
+
+ return 0;
+}
+
+static int __maybe_unused vsp1_pm_runtime_resume(struct device *dev)
+{
+ struct vsp1_device *vsp1 = dev_get_drvdata(dev);
+ int ret;
+
+ ret = reset_control_deassert(vsp1->rstc);
+ if (ret < 0)
+ return ret;
+
+ if (vsp1->info) {
+ /*
+ * On R-Car Gen2 and RZ/G1, a VSP1 register access right after
+ * reset deassertion can cause a lock-up. A short delay is needed
+ * in this special case to avoid it.
+ */
+ if (vsp1->info->gen == 2)
+ udelay(1);
+
+ ret = vsp1_device_init(vsp1);
+ if (ret < 0)
+ goto done;
+ }
+
+ ret = rcar_fcp_enable(vsp1->fcp);
+
+done:
+ if (ret < 0)
+ reset_control_assert(vsp1->rstc);
+
+ return ret;
+}
+
+static const struct dev_pm_ops vsp1_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(vsp1_pm_suspend, vsp1_pm_resume)
+ SET_RUNTIME_PM_OPS(vsp1_pm_runtime_suspend, vsp1_pm_runtime_resume, NULL)
+};
+
+/* -----------------------------------------------------------------------------
+ * Platform Driver
+ */
+
+static const struct vsp1_device_info vsp1_device_infos[] = {
+ {
+ .version = VI6_IP_VERSION_MODEL_VSPS_H2,
+ .model = "VSP1-S",
+ .gen = 2,
+ .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_HGO
+ | VSP1_HAS_HGT | VSP1_HAS_LUT | VSP1_HAS_SRU
+ | VSP1_HAS_WPF_VFLIP,
+ .rpf_count = 5,
+ .uds_count = 3,
+ .wpf_count = 4,
+ .num_bru_inputs = 4,
+ .uapi = true,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPR_H2,
+ .model = "VSP1-R",
+ .gen = 2,
+ .features = VSP1_HAS_BRU | VSP1_HAS_SRU | VSP1_HAS_WPF_VFLIP,
+ .rpf_count = 5,
+ .uds_count = 3,
+ .wpf_count = 4,
+ .num_bru_inputs = 4,
+ .uapi = true,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPD_GEN2,
+ .model = "VSP1-D",
+ .gen = 2,
+ .features = VSP1_HAS_BRU | VSP1_HAS_HGO | VSP1_HAS_LUT,
+ .lif_count = 1,
+ .rpf_count = 4,
+ .uds_count = 1,
+ .wpf_count = 1,
+ .num_bru_inputs = 4,
+ .uapi = true,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPS_M2,
+ .model = "VSP1-S",
+ .gen = 2,
+ .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_HGO
+ | VSP1_HAS_HGT | VSP1_HAS_LUT | VSP1_HAS_SRU
+ | VSP1_HAS_WPF_VFLIP,
+ .rpf_count = 5,
+ .uds_count = 1,
+ .wpf_count = 4,
+ .num_bru_inputs = 4,
+ .uapi = true,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPS_V2H,
+ .model = "VSP1V-S",
+ .gen = 2,
+ .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_LUT
+ | VSP1_HAS_SRU | VSP1_HAS_WPF_VFLIP,
+ .rpf_count = 4,
+ .uds_count = 1,
+ .wpf_count = 4,
+ .num_bru_inputs = 4,
+ .uapi = true,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPD_V2H,
+ .model = "VSP1V-D",
+ .gen = 2,
+ .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_LUT,
+ .lif_count = 1,
+ .rpf_count = 4,
+ .uds_count = 1,
+ .wpf_count = 1,
+ .num_bru_inputs = 4,
+ .uapi = true,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPI_GEN3,
+ .model = "VSP2-I",
+ .gen = 3,
+ .features = VSP1_HAS_CLU | VSP1_HAS_HGO | VSP1_HAS_HGT
+ | VSP1_HAS_LUT | VSP1_HAS_SRU | VSP1_HAS_WPF_HFLIP
+ | VSP1_HAS_WPF_VFLIP,
+ .rpf_count = 1,
+ .uds_count = 1,
+ .wpf_count = 1,
+ .uapi = true,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPBD_GEN3,
+ .model = "VSP2-BD",
+ .gen = 3,
+ .features = VSP1_HAS_BRU | VSP1_HAS_WPF_VFLIP,
+ .rpf_count = 5,
+ .wpf_count = 1,
+ .num_bru_inputs = 5,
+ .uapi = true,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPBC_GEN3,
+ .model = "VSP2-BC",
+ .gen = 3,
+ .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_HGO
+ | VSP1_HAS_LUT | VSP1_HAS_WPF_VFLIP,
+ .rpf_count = 5,
+ .wpf_count = 1,
+ .num_bru_inputs = 5,
+ .uapi = true,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPBS_GEN3,
+ .model = "VSP2-BS",
+ .gen = 3,
+ .features = VSP1_HAS_BRS | VSP1_HAS_WPF_VFLIP,
+ .rpf_count = 2,
+ .wpf_count = 1,
+ .uapi = true,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPD_GEN3,
+ .model = "VSP2-D",
+ .gen = 3,
+ .features = VSP1_HAS_BRU | VSP1_HAS_WPF_VFLIP | VSP1_HAS_EXT_DL,
+ .lif_count = 1,
+ .rpf_count = 5,
+ .uif_count = 1,
+ .wpf_count = 2,
+ .num_bru_inputs = 5,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPD_V3,
+ .model = "VSP2-D",
+ .soc = VI6_IP_VERSION_SOC_V3H,
+ .gen = 3,
+ .features = VSP1_HAS_BRS | VSP1_HAS_BRU,
+ .lif_count = 1,
+ .rpf_count = 5,
+ .uif_count = 1,
+ .wpf_count = 1,
+ .num_bru_inputs = 5,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPD_V3,
+ .model = "VSP2-D",
+ .soc = VI6_IP_VERSION_SOC_V3M,
+ .gen = 3,
+ .features = VSP1_HAS_BRS | VSP1_HAS_BRU | VSP1_HAS_NON_ZERO_LBA,
+ .lif_count = 1,
+ .rpf_count = 5,
+ .uif_count = 1,
+ .wpf_count = 1,
+ .num_bru_inputs = 5,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPDL_GEN3,
+ .model = "VSP2-DL",
+ .gen = 3,
+ .features = VSP1_HAS_BRS | VSP1_HAS_BRU | VSP1_HAS_EXT_DL,
+ .lif_count = 2,
+ .rpf_count = 5,
+ .uif_count = 2,
+ .wpf_count = 2,
+ .num_bru_inputs = 5,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPD_GEN4,
+ .model = "VSP2-D",
+ .gen = 4,
+ .features = VSP1_HAS_BRU | VSP1_HAS_EXT_DL,
+ .lif_count = 1,
+ .rpf_count = 5,
+ .uif_count = 2,
+ .wpf_count = 1,
+ .num_bru_inputs = 5,
+ },
+};
+
+static const struct vsp1_device_info rzg2l_vsp2_device_info = {
+ .version = VI6_IP_VERSION_MODEL_VSPD_RZG2L,
+ .model = "VSP2-D",
+ .soc = VI6_IP_VERSION_SOC_RZG2L,
+ .gen = 3,
+ .features = VSP1_HAS_BRS | VSP1_HAS_WPF_VFLIP | VSP1_HAS_EXT_DL
+ | VSP1_HAS_NON_ZERO_LBA,
+ .lif_count = 1,
+ .rpf_count = 2,
+ .wpf_count = 1,
+};
+
+static const struct vsp1_device_info *vsp1_lookup_info(struct vsp1_device *vsp1)
+{
+ const struct vsp1_device_info *info;
+ unsigned int i;
+ u32 model;
+ u32 soc;
+
+ /*
+ * Try the info stored in match data first for devices that don't have
+ * a version register.
+ */
+ info = of_device_get_match_data(vsp1->dev);
+ if (info) {
+ vsp1->version = VI6_IP_VERSION_VSP_SW | info->version | info->soc;
+ return info;
+ }
+
+ vsp1->version = vsp1_read(vsp1, VI6_IP_VERSION);
+ model = vsp1->version & VI6_IP_VERSION_MODEL_MASK;
+ soc = vsp1->version & VI6_IP_VERSION_SOC_MASK;
+
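+	/*
+	 * Search the table for an entry matching the model, and the SoC as
+	 * well for entries that specify one.
+	 */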
+ for (i = 0; i < ARRAY_SIZE(vsp1_device_infos); ++i) {
+ info = &vsp1_device_infos[i];
+
+ if (model == info->version && (!info->soc || soc == info->soc))
+ return info;
+ }
+
+ dev_err(vsp1->dev, "unsupported IP version 0x%08x\n", vsp1->version);
+
+ return NULL;
+}
+
+static int vsp1_probe(struct platform_device *pdev)
+{
+ struct vsp1_device *vsp1;
+ struct device_node *fcp_node;
+ int ret;
+ int irq;
+
+ vsp1 = devm_kzalloc(&pdev->dev, sizeof(*vsp1), GFP_KERNEL);
+ if (vsp1 == NULL)
+ return -ENOMEM;
+
+ vsp1->dev = &pdev->dev;
+ INIT_LIST_HEAD(&vsp1->entities);
+ INIT_LIST_HEAD(&vsp1->videos);
+
+ platform_set_drvdata(pdev, vsp1);
+
+ /* I/O and IRQ resources (clock managed by the clock PM domain). */
+ vsp1->mmio = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(vsp1->mmio))
+ return PTR_ERR(vsp1->mmio);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ vsp1->rstc = devm_reset_control_get_shared(&pdev->dev, NULL);
+ if (IS_ERR(vsp1->rstc))
+ return dev_err_probe(&pdev->dev, PTR_ERR(vsp1->rstc),
+ "failed to get reset control\n");
+
+ /* FCP (optional). */
+ fcp_node = of_parse_phandle(pdev->dev.of_node, "renesas,fcp", 0);
+ if (fcp_node) {
+ vsp1->fcp = rcar_fcp_get(fcp_node);
+ of_node_put(fcp_node);
+ if (IS_ERR(vsp1->fcp)) {
+ dev_dbg(&pdev->dev, "FCP not found (%ld)\n",
+ PTR_ERR(vsp1->fcp));
+ return PTR_ERR(vsp1->fcp);
+ }
+
+ /*
+ * When the FCP is present, it handles all bus master accesses
+ * for the VSP and must thus be used in place of the VSP device
+ * to map DMA buffers.
+ */
+ vsp1->bus_master = rcar_fcp_get_device(vsp1->fcp);
+ } else {
+ vsp1->bus_master = vsp1->dev;
+ }
+
+ /* Configure device parameters based on the version register. */
+ pm_runtime_enable(&pdev->dev);
+
+ ret = vsp1_device_get(vsp1);
+ if (ret < 0)
+ goto done;
+
+ vsp1->info = vsp1_lookup_info(vsp1);
+ if (!vsp1->info) {
+ vsp1_device_put(vsp1);
+ ret = -ENXIO;
+ goto done;
+ }
+
+ dev_dbg(&pdev->dev, "IP version 0x%08x\n", vsp1->version);
+
+ /*
+ * Previous use of the hardware (e.g. by the bootloader) could leave
+ * some interrupts enabled and pending.
+ *
+	 * TODO: Investigate whether this would be better handled by using the
+ * device reset provided by the CPG.
+ */
+ vsp1_mask_all_interrupts(vsp1);
+
+ vsp1_device_put(vsp1);
+
+ ret = devm_request_irq(&pdev->dev, irq, vsp1_irq_handler,
+ IRQF_SHARED, dev_name(&pdev->dev), vsp1);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ\n");
+ goto done;
+ }
+
+ /* Instantiate entities. */
+ ret = vsp1_create_entities(vsp1);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to create entities\n");
+ goto done;
+ }
+
+done:
+ if (ret) {
+ pm_runtime_disable(&pdev->dev);
+ rcar_fcp_put(vsp1->fcp);
+ }
+
+ return ret;
+}
+
+static void vsp1_remove(struct platform_device *pdev)
+{
+ struct vsp1_device *vsp1 = platform_get_drvdata(pdev);
+
+ vsp1_destroy_entities(vsp1);
+ rcar_fcp_put(vsp1->fcp);
+
+ pm_runtime_disable(&pdev->dev);
+}
+
+static const struct of_device_id vsp1_of_match[] = {
+ { .compatible = "renesas,vsp1" },
+ { .compatible = "renesas,vsp2" },
+ { .compatible = "renesas,r9a07g044-vsp2", .data = &rzg2l_vsp2_device_info },
+ { },
+};
+MODULE_DEVICE_TABLE(of, vsp1_of_match);
+
+static struct platform_driver vsp1_platform_driver = {
+ .probe = vsp1_probe,
+ .remove_new = vsp1_remove,
+ .driver = {
+ .name = "vsp1",
+ .pm = &vsp1_pm_ops,
+ .of_match_table = vsp1_of_match,
+ },
+};
+
+module_platform_driver(vsp1_platform_driver);
+
+MODULE_ALIAS("vsp1");
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("Renesas VSP1 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_entity.c b/drivers/media/platform/renesas/vsp1/vsp1_entity.c
new file mode 100644
index 0000000000..c31f05a80b
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_entity.c
@@ -0,0 +1,700 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_entity.c -- R-Car VSP1 Base Entity
+ *
+ * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_dl.h"
+#include "vsp1_entity.h"
+#include "vsp1_pipe.h"
+#include "vsp1_rwpf.h"
+
+void vsp1_entity_route_setup(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_body *dlb)
+{
+ struct vsp1_entity *source;
+ u32 route;
+
+ if (entity->type == VSP1_ENTITY_HGO) {
+ u32 smppt;
+
+ /*
+		 * The HGO is a special case: its routing is configured on the
+ * sink pad.
+ */
+ source = entity->sources[0];
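+		/*
+		 * The SMPPT register pairs the index of the pipeline's output
+		 * WPF (TGW field) with the DPR node of the entity feeding the
+		 * HGO (PT field).
+		 */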
+ smppt = (pipe->output->entity.index << VI6_DPR_SMPPT_TGW_SHIFT)
+ | (source->route->output << VI6_DPR_SMPPT_PT_SHIFT);
+
+ vsp1_dl_body_write(dlb, VI6_DPR_HGO_SMPPT, smppt);
+ return;
+ } else if (entity->type == VSP1_ENTITY_HGT) {
+ u32 smppt;
+
+ /*
+		 * The HGT is a special case: its routing is configured on the
+ * sink pad.
+ */
+ source = entity->sources[0];
+ smppt = (pipe->output->entity.index << VI6_DPR_SMPPT_TGW_SHIFT)
+ | (source->route->output << VI6_DPR_SMPPT_PT_SHIFT);
+
+ vsp1_dl_body_write(dlb, VI6_DPR_HGT_SMPPT, smppt);
+ return;
+ }
+
+ source = entity;
+ if (source->route->reg == 0)
+ return;
+
+ route = source->sink->route->inputs[source->sink_pad];
+ /*
+ * The ILV and BRS share the same data path route. The extra BRSSEL bit
+ * selects between the ILV and BRS.
+ */
+ if (source->type == VSP1_ENTITY_BRS)
+ route |= VI6_DPR_ROUTE_BRSSEL;
+ vsp1_dl_body_write(dlb, source->route->reg, route);
+}
+
+void vsp1_entity_configure_stream(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb)
+{
+ if (entity->ops->configure_stream)
+ entity->ops->configure_stream(entity, pipe, dl, dlb);
+}
+
+void vsp1_entity_configure_frame(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb)
+{
+ if (entity->ops->configure_frame)
+ entity->ops->configure_frame(entity, pipe, dl, dlb);
+}
+
+void vsp1_entity_configure_partition(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb)
+{
+ if (entity->ops->configure_partition)
+ entity->ops->configure_partition(entity, pipe, dl, dlb);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+/**
+ * vsp1_entity_get_pad_config - Get the pad configuration for an entity
+ * @entity: the entity
+ * @sd_state: the TRY state
+ * @which: configuration selector (ACTIVE or TRY)
+ *
+ * When called with which set to V4L2_SUBDEV_FORMAT_ACTIVE the caller must hold
+ * the entity lock to access the returned configuration.
+ *
+ * Return the pad configuration requested by the which argument. The TRY
+ * configuration is passed explicitly to the function through the sd_state
+ * argument and simply returned when requested. The ACTIVE configuration comes
+ * from the entity structure.
+ */
+struct v4l2_subdev_state *
+vsp1_entity_get_pad_config(struct vsp1_entity *entity,
+ struct v4l2_subdev_state *sd_state,
+ enum v4l2_subdev_format_whence which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return entity->config;
+ case V4L2_SUBDEV_FORMAT_TRY:
+ default:
+ return sd_state;
+ }
+}
+
+/**
+ * vsp1_entity_get_pad_format - Get a pad format from storage for an entity
+ * @entity: the entity
+ * @sd_state: the state storage
+ * @pad: the pad number
+ *
+ * Return the format stored in the given configuration for an entity's pad. The
+ * configuration can be an ACTIVE or TRY configuration.
+ */
+struct v4l2_mbus_framefmt *
+vsp1_entity_get_pad_format(struct vsp1_entity *entity,
+ struct v4l2_subdev_state *sd_state,
+ unsigned int pad)
+{
+ return v4l2_subdev_get_try_format(&entity->subdev, sd_state, pad);
+}
+
+/**
+ * vsp1_entity_get_pad_selection - Get a pad selection from storage for entity
+ * @entity: the entity
+ * @sd_state: the state storage
+ * @pad: the pad number
+ * @target: the selection target
+ *
+ * Return the selection rectangle stored in the given configuration for an
+ * entity's pad. The configuration can be an ACTIVE or TRY configuration. The
+ * selection target can be COMPOSE or CROP.
+ */
+struct v4l2_rect *
+vsp1_entity_get_pad_selection(struct vsp1_entity *entity,
+ struct v4l2_subdev_state *sd_state,
+ unsigned int pad, unsigned int target)
+{
+ switch (target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ return v4l2_subdev_get_try_compose(&entity->subdev, sd_state,
+ pad);
+ case V4L2_SEL_TGT_CROP:
+ return v4l2_subdev_get_try_crop(&entity->subdev, sd_state,
+ pad);
+ default:
+ return NULL;
+ }
+}
+
+/*
+ * vsp1_entity_init_cfg - Initialize formats on all pads
+ * @subdev: V4L2 subdevice
+ * @sd_state: V4L2 subdev state
+ *
+ * Initialize all pad formats with default values in the given pad config. This
+ * function can be used as a handler for the subdev pad::init_cfg operation.
+ */
+int vsp1_entity_init_cfg(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state)
+{
+ unsigned int pad;
+
+ for (pad = 0; pad < subdev->entity.num_pads - 1; ++pad) {
+ struct v4l2_subdev_format format = {
+ .pad = pad,
+ .which = sd_state ? V4L2_SUBDEV_FORMAT_TRY
+ : V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ v4l2_subdev_call(subdev, pad, set_fmt, sd_state, &format);
+ }
+
+ return 0;
+}
+
+/*
+ * vsp1_subdev_get_pad_format - Subdev pad get_fmt handler
+ * @subdev: V4L2 subdevice
+ * @sd_state: V4L2 subdev state
+ * @fmt: V4L2 subdev format
+ *
+ * This function implements the subdev get_fmt pad operation. It can be used as
+ * a direct drop-in for the operation handler.
+ */
+int vsp1_subdev_get_pad_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vsp1_entity *entity = to_vsp1_entity(subdev);
+ struct v4l2_subdev_state *config;
+
+ config = vsp1_entity_get_pad_config(entity, sd_state, fmt->which);
+ if (!config)
+ return -EINVAL;
+
+ mutex_lock(&entity->lock);
+ fmt->format = *vsp1_entity_get_pad_format(entity, config, fmt->pad);
+ mutex_unlock(&entity->lock);
+
+ return 0;
+}
+
+/*
+ * vsp1_subdev_enum_mbus_code - Subdev pad enum_mbus_code handler
+ * @subdev: V4L2 subdevice
+ * @sd_state: V4L2 subdev state
+ * @code: Media bus code enumeration
+ * @codes: Array of supported media bus codes
+ * @ncodes: Number of supported media bus codes
+ *
+ * This function implements the subdev enum_mbus_code pad operation for entities
+ * that do not support format conversion. It enumerates the given supported
+ * media bus codes on the sink pad and reports a source pad format identical to
+ * the sink pad.
+ */
+int vsp1_subdev_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code,
+ const unsigned int *codes, unsigned int ncodes)
+{
+ struct vsp1_entity *entity = to_vsp1_entity(subdev);
+
+ if (code->pad == 0) {
+ if (code->index >= ncodes)
+ return -EINVAL;
+
+ code->code = codes[code->index];
+ } else {
+ struct v4l2_subdev_state *config;
+ struct v4l2_mbus_framefmt *format;
+
+ /*
+ * The entity can't perform format conversion, the sink format
+ * is always identical to the source format.
+ */
+ if (code->index)
+ return -EINVAL;
+
+ config = vsp1_entity_get_pad_config(entity, sd_state,
+ code->which);
+ if (!config)
+ return -EINVAL;
+
+ mutex_lock(&entity->lock);
+ format = vsp1_entity_get_pad_format(entity, config, 0);
+ code->code = format->code;
+ mutex_unlock(&entity->lock);
+ }
+
+ return 0;
+}
+
+/*
+ * vsp1_subdev_enum_frame_size - Subdev pad enum_frame_size handler
+ * @subdev: V4L2 subdevice
+ * @sd_state: V4L2 subdev state
+ * @fse: Frame size enumeration
+ * @min_width: Minimum image width
+ * @min_height: Minimum image height
+ * @max_width: Maximum image width
+ * @max_height: Maximum image height
+ *
+ * This function implements the subdev enum_frame_size pad operation for
+ * entities that do not support scaling or cropping. It reports the given
+ * minimum and maximum frame width and height on the sink pad, and a fixed
+ * source pad size identical to the sink pad.
+ */
+int vsp1_subdev_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse,
+ unsigned int min_width, unsigned int min_height,
+ unsigned int max_width, unsigned int max_height)
+{
+ struct vsp1_entity *entity = to_vsp1_entity(subdev);
+ struct v4l2_subdev_state *config;
+ struct v4l2_mbus_framefmt *format;
+ int ret = 0;
+
+ config = vsp1_entity_get_pad_config(entity, sd_state, fse->which);
+ if (!config)
+ return -EINVAL;
+
+ format = vsp1_entity_get_pad_format(entity, config, fse->pad);
+
+ mutex_lock(&entity->lock);
+
+ if (fse->index || fse->code != format->code) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (fse->pad == 0) {
+ fse->min_width = min_width;
+ fse->max_width = max_width;
+ fse->min_height = min_height;
+ fse->max_height = max_height;
+ } else {
+ /*
+		 * The size on the source pad is fixed and always identical to
+ * the size on the sink pad.
+ */
+ fse->min_width = format->width;
+ fse->max_width = format->width;
+ fse->min_height = format->height;
+ fse->max_height = format->height;
+ }
+
+done:
+ mutex_unlock(&entity->lock);
+ return ret;
+}
+
+/*
+ * vsp1_subdev_set_pad_format - Subdev pad set_fmt handler
+ * @subdev: V4L2 subdevice
+ * @sd_state: V4L2 subdev state
+ * @fmt: V4L2 subdev format
+ * @codes: Array of supported media bus codes
+ * @ncodes: Number of supported media bus codes
+ * @min_width: Minimum image width
+ * @min_height: Minimum image height
+ * @max_width: Maximum image width
+ * @max_height: Maximum image height
+ *
+ * This function implements the subdev set_fmt pad operation for entities that
+ * do not support scaling or cropping. It defaults to the first supplied media
+ * bus code if the requested code isn't supported, clamps the size to the
+ * supplied minimum and maximum, and propagates the sink pad format to the
+ * source pad.
+ */
+int vsp1_subdev_set_pad_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt,
+ const unsigned int *codes, unsigned int ncodes,
+ unsigned int min_width, unsigned int min_height,
+ unsigned int max_width, unsigned int max_height)
+{
+ struct vsp1_entity *entity = to_vsp1_entity(subdev);
+ struct v4l2_subdev_state *config;
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *selection;
+ unsigned int i;
+ int ret = 0;
+
+ mutex_lock(&entity->lock);
+
+ config = vsp1_entity_get_pad_config(entity, sd_state, fmt->which);
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ format = vsp1_entity_get_pad_format(entity, config, fmt->pad);
+
+ if (fmt->pad == entity->source_pad) {
+ /* The output format can't be modified. */
+ fmt->format = *format;
+ goto done;
+ }
+
+ /*
+ * Default to the first media bus code if the requested format is not
+ * supported.
+ */
+ for (i = 0; i < ncodes; ++i) {
+ if (fmt->format.code == codes[i])
+ break;
+ }
+
+ format->code = i < ncodes ? codes[i] : codes[0];
+ format->width = clamp_t(unsigned int, fmt->format.width,
+ min_width, max_width);
+ format->height = clamp_t(unsigned int, fmt->format.height,
+ min_height, max_height);
+ format->field = V4L2_FIELD_NONE;
+ format->colorspace = V4L2_COLORSPACE_SRGB;
+
+ fmt->format = *format;
+
+ /* Propagate the format to the source pad. */
+ format = vsp1_entity_get_pad_format(entity, config, entity->source_pad);
+ *format = fmt->format;
+
+ /* Reset the crop and compose rectangles. */
+ selection = vsp1_entity_get_pad_selection(entity, config, fmt->pad,
+ V4L2_SEL_TGT_CROP);
+ selection->left = 0;
+ selection->top = 0;
+ selection->width = format->width;
+ selection->height = format->height;
+
+ selection = vsp1_entity_get_pad_selection(entity, config, fmt->pad,
+ V4L2_SEL_TGT_COMPOSE);
+ selection->left = 0;
+ selection->top = 0;
+ selection->width = format->width;
+ selection->height = format->height;
+
+done:
+ mutex_unlock(&entity->lock);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * Media Operations
+ */
+
+static inline struct vsp1_entity *
+media_entity_to_vsp1_entity(struct media_entity *entity)
+{
+ return container_of(entity, struct vsp1_entity, subdev.entity);
+}
+
+static int vsp1_entity_link_setup_source(const struct media_pad *source_pad,
+ const struct media_pad *sink_pad,
+ u32 flags)
+{
+ struct vsp1_entity *source;
+
+ source = media_entity_to_vsp1_entity(source_pad->entity);
+
+ if (!source->route)
+ return 0;
+
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ struct vsp1_entity *sink
+ = media_entity_to_vsp1_entity(sink_pad->entity);
+
+ /*
+ * Fan-out is limited to one for the normal data path plus
+ * optional HGO and HGT. We ignore the HGO and HGT here.
+ */
+ if (sink->type != VSP1_ENTITY_HGO &&
+ sink->type != VSP1_ENTITY_HGT) {
+ if (source->sink)
+ return -EBUSY;
+ source->sink = sink;
+ source->sink_pad = sink_pad->index;
+ }
+ } else {
+ source->sink = NULL;
+ source->sink_pad = 0;
+ }
+
+ return 0;
+}
+
+static int vsp1_entity_link_setup_sink(const struct media_pad *source_pad,
+ const struct media_pad *sink_pad,
+ u32 flags)
+{
+ struct vsp1_entity *sink;
+ struct vsp1_entity *source;
+
+ sink = media_entity_to_vsp1_entity(sink_pad->entity);
+ source = media_entity_to_vsp1_entity(source_pad->entity);
+
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ /* Fan-in is limited to one. */
+ if (sink->sources[sink_pad->index])
+ return -EBUSY;
+
+ sink->sources[sink_pad->index] = source;
+ } else {
+ sink->sources[sink_pad->index] = NULL;
+ }
+
+ return 0;
+}
+
+int vsp1_entity_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ if (local->flags & MEDIA_PAD_FL_SOURCE)
+ return vsp1_entity_link_setup_source(local, remote, flags);
+ else
+ return vsp1_entity_link_setup_sink(remote, local, flags);
+}
+
+/**
+ * vsp1_entity_remote_pad - Find the pad at the remote end of a link
+ * @pad: Pad at the local end of the link
+ *
+ * Search for a remote pad connected to the given pad by iterating over all
+ * links originating or terminating at that pad until an enabled link is found.
+ *
+ * Our link setup implementation guarantees that the output fan-out will not be
+ * higher than one for the data pipelines, except for the links to the HGO and
+ * HGT that can be enabled in addition to a regular data link. When traversing
+ * outgoing links this function ignores HGO and HGT entities and should thus be
+ * used in place of the generic media_pad_remote_pad_first() function to
+ * traverse data pipelines.
+ *
+ * Return a pointer to the pad at the remote end of the first found enabled
+ * link, or NULL if no enabled link has been found.
+ */
+struct media_pad *vsp1_entity_remote_pad(struct media_pad *pad)
+{
+ struct media_link *link;
+
+ list_for_each_entry(link, &pad->entity->links, list) {
+ struct vsp1_entity *entity;
+
+ if (!(link->flags & MEDIA_LNK_FL_ENABLED))
+ continue;
+
+		/* If we're the sink, the source will never be an HGO or HGT. */
+ if (link->sink == pad)
+ return link->source;
+
+ if (link->source != pad)
+ continue;
+
+ /* If the sink isn't a subdevice it can't be an HGO or HGT. */
+ if (!is_media_entity_v4l2_subdev(link->sink->entity))
+ return link->sink;
+
+ entity = media_entity_to_vsp1_entity(link->sink->entity);
+ if (entity->type != VSP1_ENTITY_HGO &&
+ entity->type != VSP1_ENTITY_HGT)
+ return link->sink;
+ }
+
+ return NULL;
+}
+
+/* -----------------------------------------------------------------------------
+ * Initialization
+ */
+
+#define VSP1_ENTITY_ROUTE(ent) \
+ { VSP1_ENTITY_##ent, 0, VI6_DPR_##ent##_ROUTE, \
+ { VI6_DPR_NODE_##ent }, VI6_DPR_NODE_##ent }
+
+#define VSP1_ENTITY_ROUTE_RPF(idx) \
+ { VSP1_ENTITY_RPF, idx, VI6_DPR_RPF_ROUTE(idx), \
+ { 0, }, VI6_DPR_NODE_RPF(idx) }
+
+#define VSP1_ENTITY_ROUTE_UDS(idx) \
+ { VSP1_ENTITY_UDS, idx, VI6_DPR_UDS_ROUTE(idx), \
+ { VI6_DPR_NODE_UDS(idx) }, VI6_DPR_NODE_UDS(idx) }
+
+#define VSP1_ENTITY_ROUTE_UIF(idx) \
+ { VSP1_ENTITY_UIF, idx, VI6_DPR_UIF_ROUTE(idx), \
+ { VI6_DPR_NODE_UIF(idx) }, VI6_DPR_NODE_UIF(idx) }
+
+#define VSP1_ENTITY_ROUTE_WPF(idx) \
+ { VSP1_ENTITY_WPF, idx, 0, \
+ { VI6_DPR_NODE_WPF(idx) }, VI6_DPR_NODE_WPF(idx) }
+
+static const struct vsp1_route vsp1_routes[] = {
+ { VSP1_ENTITY_BRS, 0, VI6_DPR_ILV_BRS_ROUTE,
+ { VI6_DPR_NODE_BRS_IN(0), VI6_DPR_NODE_BRS_IN(1) }, 0 },
+ { VSP1_ENTITY_BRU, 0, VI6_DPR_BRU_ROUTE,
+ { VI6_DPR_NODE_BRU_IN(0), VI6_DPR_NODE_BRU_IN(1),
+ VI6_DPR_NODE_BRU_IN(2), VI6_DPR_NODE_BRU_IN(3),
+ VI6_DPR_NODE_BRU_IN(4) }, VI6_DPR_NODE_BRU_OUT },
+ VSP1_ENTITY_ROUTE(CLU),
+ { VSP1_ENTITY_HGO, 0, 0, { 0, }, 0 },
+ { VSP1_ENTITY_HGT, 0, 0, { 0, }, 0 },
+ VSP1_ENTITY_ROUTE(HSI),
+ VSP1_ENTITY_ROUTE(HST),
+ { VSP1_ENTITY_LIF, 0, 0, { 0, }, 0 },
+ { VSP1_ENTITY_LIF, 1, 0, { 0, }, 0 },
+ VSP1_ENTITY_ROUTE(LUT),
+ VSP1_ENTITY_ROUTE_RPF(0),
+ VSP1_ENTITY_ROUTE_RPF(1),
+ VSP1_ENTITY_ROUTE_RPF(2),
+ VSP1_ENTITY_ROUTE_RPF(3),
+ VSP1_ENTITY_ROUTE_RPF(4),
+ VSP1_ENTITY_ROUTE(SRU),
+ VSP1_ENTITY_ROUTE_UDS(0),
+ VSP1_ENTITY_ROUTE_UDS(1),
+ VSP1_ENTITY_ROUTE_UDS(2),
+ VSP1_ENTITY_ROUTE_UIF(0), /* Named UIF4 in the documentation */
+ VSP1_ENTITY_ROUTE_UIF(1), /* Named UIF5 in the documentation */
+ VSP1_ENTITY_ROUTE_WPF(0),
+ VSP1_ENTITY_ROUTE_WPF(1),
+ VSP1_ENTITY_ROUTE_WPF(2),
+ VSP1_ENTITY_ROUTE_WPF(3),
+};
+
+int vsp1_entity_init(struct vsp1_device *vsp1, struct vsp1_entity *entity,
+ const char *name, unsigned int num_pads,
+ const struct v4l2_subdev_ops *ops, u32 function)
+{
+ static struct lock_class_key key;
+ struct v4l2_subdev *subdev;
+ unsigned int i;
+ int ret;
+
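+	/* Look up the DPR routing information for the entity type and index. */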
+ for (i = 0; i < ARRAY_SIZE(vsp1_routes); ++i) {
+ if (vsp1_routes[i].type == entity->type &&
+ vsp1_routes[i].index == entity->index) {
+ entity->route = &vsp1_routes[i];
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(vsp1_routes))
+ return -EINVAL;
+
+ mutex_init(&entity->lock);
+
+ entity->vsp1 = vsp1;
+ entity->source_pad = num_pads - 1;
+
+ /* Allocate and initialize pads. */
+ entity->pads = devm_kcalloc(vsp1->dev,
+ num_pads, sizeof(*entity->pads),
+ GFP_KERNEL);
+ if (entity->pads == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < num_pads - 1; ++i)
+ entity->pads[i].flags = MEDIA_PAD_FL_SINK;
+
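+	/*
+	 * The sources array stores one upstream entity per sink pad. Allocate
+	 * at least one entry, as single-pad entities only have a sink pad.
+	 */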
+ entity->sources = devm_kcalloc(vsp1->dev, max(num_pads - 1, 1U),
+ sizeof(*entity->sources), GFP_KERNEL);
+ if (entity->sources == NULL)
+ return -ENOMEM;
+
+ /* Single-pad entities only have a sink. */
+ entity->pads[num_pads - 1].flags = num_pads > 1 ? MEDIA_PAD_FL_SOURCE
+ : MEDIA_PAD_FL_SINK;
+
+ /* Initialize the media entity. */
+ ret = media_entity_pads_init(&entity->subdev.entity, num_pads,
+ entity->pads);
+ if (ret < 0)
+ return ret;
+
+ /* Initialize the V4L2 subdev. */
+ subdev = &entity->subdev;
+ v4l2_subdev_init(subdev, ops);
+
+ subdev->entity.function = function;
+ subdev->entity.ops = &vsp1->media_ops;
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ snprintf(subdev->name, sizeof(subdev->name), "%s %s",
+ dev_name(vsp1->dev), name);
+
+ vsp1_entity_init_cfg(subdev, NULL);
+
+ /*
+ * Allocate the pad configuration to store formats and selection
+ * rectangles.
+ */
+ /*
+ * FIXME: Drop this call, drivers are not supposed to use
+ * __v4l2_subdev_state_alloc().
+ */
+ entity->config = __v4l2_subdev_state_alloc(&entity->subdev,
+ "vsp1:config->lock", &key);
+ if (IS_ERR(entity->config)) {
+ media_entity_cleanup(&entity->subdev.entity);
+ return PTR_ERR(entity->config);
+ }
+
+ return 0;
+}
+
+void vsp1_entity_destroy(struct vsp1_entity *entity)
+{
+ if (entity->ops && entity->ops->destroy)
+ entity->ops->destroy(entity);
+ if (entity->subdev.ctrl_handler)
+ v4l2_ctrl_handler_free(entity->subdev.ctrl_handler);
+ __v4l2_subdev_state_free(entity->config);
+ media_entity_cleanup(&entity->subdev.entity);
+}
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_entity.h b/drivers/media/platform/renesas/vsp1/vsp1_entity.h
new file mode 100644
index 0000000000..17f98a6a97
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_entity.h
@@ -0,0 +1,192 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_entity.h -- R-Car VSP1 Base Entity
+ *
+ * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+#ifndef __VSP1_ENTITY_H__
+#define __VSP1_ENTITY_H__
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+
+#include <media/v4l2-subdev.h>
+
+struct vsp1_device;
+struct vsp1_dl_body;
+struct vsp1_dl_list;
+struct vsp1_pipeline;
+struct vsp1_partition;
+struct vsp1_partition_window;
+
+enum vsp1_entity_type {
+ VSP1_ENTITY_BRS,
+ VSP1_ENTITY_BRU,
+ VSP1_ENTITY_CLU,
+ VSP1_ENTITY_HGO,
+ VSP1_ENTITY_HGT,
+ VSP1_ENTITY_HSI,
+ VSP1_ENTITY_HST,
+ VSP1_ENTITY_LIF,
+ VSP1_ENTITY_LUT,
+ VSP1_ENTITY_RPF,
+ VSP1_ENTITY_SRU,
+ VSP1_ENTITY_UDS,
+ VSP1_ENTITY_UIF,
+ VSP1_ENTITY_WPF,
+};
+
+#define VSP1_ENTITY_MAX_INPUTS 5 /* For the BRU */
+
+/*
+ * struct vsp1_route - Entity routing configuration
+ * @type: Entity type this routing entry is associated with
+ * @index: Entity index this routing entry is associated with
+ * @reg: Output routing configuration register
+ * @inputs: Target node value for each input
+ * @output: Target node value for entity output
+ *
+ * Each vsp1_route entry describes the routing configuration for the entity
+ * specified by the entry's @type and @index. @reg indicates the register that
+ * holds the output routing configuration for the entity, and the @inputs array
+ * stores the target node value for each input of the entity. The @output field
+ * stores the target node value of the entity output when used as a source for
+ * histogram generation.
+ */
+struct vsp1_route {
+ enum vsp1_entity_type type;
+ unsigned int index;
+ unsigned int reg;
+ unsigned int inputs[VSP1_ENTITY_MAX_INPUTS];
+ unsigned int output;
+};
+
+/**
+ * struct vsp1_entity_operations - Entity operations
+ * @destroy: Destroy the entity.
+ * @configure_stream: Setup the hardware parameters for the stream which do
+ * not vary between frames (pipeline, formats). Note that
+ * the vsp1_dl_list argument is only valid for display
+ *			pipelines and will be NULL for mem-to-mem pipelines.
+ * @configure_frame: Configure the runtime parameters for each frame.
+ * @configure_partition: Configure partition specific parameters.
+ * @max_width: Return the max supported width of data that the entity can
+ * process in a single operation.
+ * @partition: Process the partition construction based on this entity's
+ * configuration.
+ */
+struct vsp1_entity_operations {
+ void (*destroy)(struct vsp1_entity *);
+ void (*configure_stream)(struct vsp1_entity *, struct vsp1_pipeline *,
+ struct vsp1_dl_list *, struct vsp1_dl_body *);
+ void (*configure_frame)(struct vsp1_entity *, struct vsp1_pipeline *,
+ struct vsp1_dl_list *, struct vsp1_dl_body *);
+ void (*configure_partition)(struct vsp1_entity *,
+ struct vsp1_pipeline *,
+ struct vsp1_dl_list *,
+ struct vsp1_dl_body *);
+ unsigned int (*max_width)(struct vsp1_entity *, struct vsp1_pipeline *);
+ void (*partition)(struct vsp1_entity *, struct vsp1_pipeline *,
+ struct vsp1_partition *, unsigned int,
+ struct vsp1_partition_window *);
+};
+
+struct vsp1_entity {
+ struct vsp1_device *vsp1;
+
+ const struct vsp1_entity_operations *ops;
+
+ enum vsp1_entity_type type;
+ unsigned int index;
+ const struct vsp1_route *route;
+
+ struct vsp1_pipeline *pipe;
+
+ struct list_head list_dev;
+ struct list_head list_pipe;
+
+ struct media_pad *pads;
+ unsigned int source_pad;
+
+ struct vsp1_entity **sources;
+ struct vsp1_entity *sink;
+ unsigned int sink_pad;
+
+ struct v4l2_subdev subdev;
+ struct v4l2_subdev_state *config;
+
+ struct mutex lock; /* Protects the pad config */
+};
+
+static inline struct vsp1_entity *to_vsp1_entity(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_entity, subdev);
+}
+
+int vsp1_entity_init(struct vsp1_device *vsp1, struct vsp1_entity *entity,
+ const char *name, unsigned int num_pads,
+ const struct v4l2_subdev_ops *ops, u32 function);
+void vsp1_entity_destroy(struct vsp1_entity *entity);
+
+int vsp1_entity_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags);
+
+struct v4l2_subdev_state *
+vsp1_entity_get_pad_config(struct vsp1_entity *entity,
+ struct v4l2_subdev_state *sd_state,
+ enum v4l2_subdev_format_whence which);
+struct v4l2_mbus_framefmt *
+vsp1_entity_get_pad_format(struct vsp1_entity *entity,
+ struct v4l2_subdev_state *sd_state,
+ unsigned int pad);
+struct v4l2_rect *
+vsp1_entity_get_pad_selection(struct vsp1_entity *entity,
+ struct v4l2_subdev_state *sd_state,
+ unsigned int pad, unsigned int target);
+int vsp1_entity_init_cfg(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state);
+
+void vsp1_entity_route_setup(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_body *dlb);
+
+void vsp1_entity_configure_stream(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb);
+
+void vsp1_entity_configure_frame(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb);
+
+void vsp1_entity_configure_partition(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb);
+
+struct media_pad *vsp1_entity_remote_pad(struct media_pad *pad);
+
+int vsp1_subdev_get_pad_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt);
+int vsp1_subdev_set_pad_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt,
+ const unsigned int *codes, unsigned int ncodes,
+ unsigned int min_width, unsigned int min_height,
+ unsigned int max_width, unsigned int max_height);
+int vsp1_subdev_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code,
+ const unsigned int *codes, unsigned int ncodes);
+int vsp1_subdev_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse,
+ unsigned int min_w, unsigned int min_h,
+ unsigned int max_w, unsigned int max_h);
+
+#endif /* __VSP1_ENTITY_H__ */
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_hgo.c b/drivers/media/platform/renesas/vsp1/vsp1_hgo.c
new file mode 100644
index 0000000000..e6492deb0a
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_hgo.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_hgo.c -- R-Car VSP1 Histogram Generator 1D
+ *
+ * Copyright (C) 2016 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-vmalloc.h>
+
+#include "vsp1.h"
+#include "vsp1_dl.h"
+#include "vsp1_hgo.h"
+
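+/*
+ * Worst-case data size: the max/min and sum words plus 256 bins of four bytes
+ * each, matching the 256-bin readout in vsp1_hgo_frame_end().
+ */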
+#define HGO_DATA_SIZE ((2 + 256) * 4)
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline u32 vsp1_hgo_read(struct vsp1_hgo *hgo, u32 reg)
+{
+ return vsp1_read(hgo->histo.entity.vsp1, reg);
+}
+
+static inline void vsp1_hgo_write(struct vsp1_hgo *hgo,
+ struct vsp1_dl_body *dlb, u32 reg, u32 data)
+{
+ vsp1_dl_body_write(dlb, reg, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * Frame End Handler
+ */
+
+void vsp1_hgo_frame_end(struct vsp1_entity *entity)
+{
+ struct vsp1_hgo *hgo = to_hgo(&entity->subdev);
+ struct vsp1_histogram_buffer *buf;
+ unsigned int i;
+ size_t size;
+ u32 *data;
+
+ buf = vsp1_histogram_buffer_get(&hgo->histo);
+ if (!buf)
+ return;
+
+ data = buf->addr;
+
+ if (hgo->num_bins == 256) {
+ *data++ = vsp1_hgo_read(hgo, VI6_HGO_G_MAXMIN);
+ *data++ = vsp1_hgo_read(hgo, VI6_HGO_G_SUM);
+
+ for (i = 0; i < 256; ++i) {
+ vsp1_write(hgo->histo.entity.vsp1,
+ VI6_HGO_EXT_HIST_ADDR, i);
+ *data++ = vsp1_hgo_read(hgo, VI6_HGO_EXT_HIST_DATA);
+ }
+
+ size = (2 + 256) * sizeof(u32);
+ } else if (hgo->max_rgb) {
+ *data++ = vsp1_hgo_read(hgo, VI6_HGO_G_MAXMIN);
+ *data++ = vsp1_hgo_read(hgo, VI6_HGO_G_SUM);
+
+ for (i = 0; i < 64; ++i)
+ *data++ = vsp1_hgo_read(hgo, VI6_HGO_G_HISTO(i));
+
+ size = (2 + 64) * sizeof(u32);
+ } else {
+ *data++ = vsp1_hgo_read(hgo, VI6_HGO_R_MAXMIN);
+ *data++ = vsp1_hgo_read(hgo, VI6_HGO_G_MAXMIN);
+ *data++ = vsp1_hgo_read(hgo, VI6_HGO_B_MAXMIN);
+
+ *data++ = vsp1_hgo_read(hgo, VI6_HGO_R_SUM);
+ *data++ = vsp1_hgo_read(hgo, VI6_HGO_G_SUM);
+ *data++ = vsp1_hgo_read(hgo, VI6_HGO_B_SUM);
+
+ for (i = 0; i < 64; ++i) {
+ data[i] = vsp1_hgo_read(hgo, VI6_HGO_R_HISTO(i));
+ data[i+64] = vsp1_hgo_read(hgo, VI6_HGO_G_HISTO(i));
+ data[i+128] = vsp1_hgo_read(hgo, VI6_HGO_B_HISTO(i));
+ }
+
+ size = (6 + 64 * 3) * sizeof(u32);
+ }
+
+ vsp1_histogram_buffer_complete(&hgo->histo, buf, size);
+}
+
+/* -----------------------------------------------------------------------------
+ * Controls
+ */
+
+#define V4L2_CID_VSP1_HGO_MAX_RGB (V4L2_CID_USER_BASE | 0x1001)
+#define V4L2_CID_VSP1_HGO_NUM_BINS (V4L2_CID_USER_BASE | 0x1002)
+
+static const struct v4l2_ctrl_config hgo_max_rgb_control = {
+ .id = V4L2_CID_VSP1_HGO_MAX_RGB,
+ .name = "Maximum RGB Mode",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = 0,
+ .max = 1,
+ .def = 0,
+ .step = 1,
+ .flags = V4L2_CTRL_FLAG_MODIFY_LAYOUT,
+};
+
+static const s64 hgo_num_bins[] = {
+ 64, 256,
+};
+
+static const struct v4l2_ctrl_config hgo_num_bins_control = {
+ .id = V4L2_CID_VSP1_HGO_NUM_BINS,
+ .name = "Number of Bins",
+ .type = V4L2_CTRL_TYPE_INTEGER_MENU,
+ .min = 0,
+ .max = 1,
+ .def = 0,
+ .qmenu_int = hgo_num_bins,
+ .flags = V4L2_CTRL_FLAG_MODIFY_LAYOUT,
+};
+
+/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void hgo_configure_stream(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb)
+{
+ struct vsp1_hgo *hgo = to_hgo(&entity->subdev);
+ struct v4l2_rect *compose;
+ struct v4l2_rect *crop;
+ unsigned int hratio;
+ unsigned int vratio;
+
+ crop = vsp1_entity_get_pad_selection(entity, entity->config,
+ HISTO_PAD_SINK, V4L2_SEL_TGT_CROP);
+ compose = vsp1_entity_get_pad_selection(entity, entity->config,
+ HISTO_PAD_SINK,
+ V4L2_SEL_TGT_COMPOSE);
+
+ vsp1_hgo_write(hgo, dlb, VI6_HGO_REGRST, VI6_HGO_REGRST_RCLEA);
+
+ vsp1_hgo_write(hgo, dlb, VI6_HGO_OFFSET,
+ (crop->left << VI6_HGO_OFFSET_HOFFSET_SHIFT) |
+ (crop->top << VI6_HGO_OFFSET_VOFFSET_SHIFT));
+ vsp1_hgo_write(hgo, dlb, VI6_HGO_SIZE,
+ (crop->width << VI6_HGO_SIZE_HSIZE_SHIFT) |
+ (crop->height << VI6_HGO_SIZE_VSIZE_SHIFT));
+
+ mutex_lock(hgo->ctrls.handler.lock);
+ hgo->max_rgb = hgo->ctrls.max_rgb->cur.val;
+ if (hgo->ctrls.num_bins)
+ hgo->num_bins = hgo_num_bins[hgo->ctrls.num_bins->cur.val];
+ mutex_unlock(hgo->ctrls.handler.lock);
+
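+	/*
+	 * The compose rectangle is restricted to 1/1, 1/2 or 1/4 of the crop
+	 * rectangle (see histo_set_compose()), so these expressions evaluate
+	 * to 0, 1 or 2 respectively.
+	 */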
+ hratio = crop->width * 2 / compose->width / 3;
+ vratio = crop->height * 2 / compose->height / 3;
+ vsp1_hgo_write(hgo, dlb, VI6_HGO_MODE,
+ (hgo->num_bins == 256 ? VI6_HGO_MODE_STEP : 0) |
+ (hgo->max_rgb ? VI6_HGO_MODE_MAXRGB : 0) |
+ (hratio << VI6_HGO_MODE_HRATIO_SHIFT) |
+ (vratio << VI6_HGO_MODE_VRATIO_SHIFT));
+}
+
+static const struct vsp1_entity_operations hgo_entity_ops = {
+ .configure_stream = hgo_configure_stream,
+ .destroy = vsp1_histogram_destroy,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+static const unsigned int hgo_mbus_formats[] = {
+ MEDIA_BUS_FMT_AYUV8_1X32,
+ MEDIA_BUS_FMT_ARGB8888_1X32,
+ MEDIA_BUS_FMT_AHSV8888_1X32,
+};
+
+struct vsp1_hgo *vsp1_hgo_create(struct vsp1_device *vsp1)
+{
+ struct vsp1_hgo *hgo;
+ int ret;
+
+ hgo = devm_kzalloc(vsp1->dev, sizeof(*hgo), GFP_KERNEL);
+ if (hgo == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ /* Initialize the control handler. */
+ v4l2_ctrl_handler_init(&hgo->ctrls.handler,
+ vsp1->info->gen >= 3 ? 2 : 1);
+ hgo->ctrls.max_rgb = v4l2_ctrl_new_custom(&hgo->ctrls.handler,
+ &hgo_max_rgb_control, NULL);
+ if (vsp1->info->gen >= 3)
+ hgo->ctrls.num_bins =
+ v4l2_ctrl_new_custom(&hgo->ctrls.handler,
+ &hgo_num_bins_control, NULL);
+
+ hgo->max_rgb = false;
+ hgo->num_bins = 64;
+
+ hgo->histo.entity.subdev.ctrl_handler = &hgo->ctrls.handler;
+
+ /* Initialize the video device and queue for statistics data. */
+ ret = vsp1_histogram_init(vsp1, &hgo->histo, VSP1_ENTITY_HGO, "hgo",
+ &hgo_entity_ops, hgo_mbus_formats,
+ ARRAY_SIZE(hgo_mbus_formats),
+ HGO_DATA_SIZE, V4L2_META_FMT_VSP1_HGO);
+ if (ret < 0) {
+ vsp1_entity_destroy(&hgo->histo.entity);
+ return ERR_PTR(ret);
+ }
+
+ return hgo;
+}
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_hgo.h b/drivers/media/platform/renesas/vsp1/vsp1_hgo.h
new file mode 100644
index 0000000000..6b0c8580e1
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_hgo.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_hgo.h -- R-Car VSP1 Histogram Generator 1D
+ *
+ * Copyright (C) 2016 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+#ifndef __VSP1_HGO_H__
+#define __VSP1_HGO_H__
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1_histo.h"
+
+struct vsp1_device;
+
+struct vsp1_hgo {
+ struct vsp1_histogram histo;
+
+ struct {
+ struct v4l2_ctrl_handler handler;
+ struct v4l2_ctrl *max_rgb;
+ struct v4l2_ctrl *num_bins;
+ } ctrls;
+
+ bool max_rgb;
+ unsigned int num_bins;
+};
+
+static inline struct vsp1_hgo *to_hgo(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_hgo, histo.entity.subdev);
+}
+
+struct vsp1_hgo *vsp1_hgo_create(struct vsp1_device *vsp1);
+void vsp1_hgo_frame_end(struct vsp1_entity *hgo);
+
+#endif /* __VSP1_HGO_H__ */
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_hgt.c b/drivers/media/platform/renesas/vsp1/vsp1_hgt.c
new file mode 100644
index 0000000000..aa1c718e04
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_hgt.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_hgt.c -- R-Car VSP1 Histogram Generator 2D
+ *
+ * Copyright (C) 2016 Renesas Electronics Corporation
+ *
+ * Contact: Niklas Söderlund (niklas.soderlund@ragnatech.se)
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-vmalloc.h>
+
+#include "vsp1.h"
+#include "vsp1_dl.h"
+#include "vsp1_hgt.h"
+
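+/*
+ * Data size: the max/min and sum words plus a 6 (hue) x 32 (brightness)
+ * histogram, four bytes per word, as read out in vsp1_hgt_frame_end().
+ */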
+#define HGT_DATA_SIZE ((2 + 6 * 32) * 4)
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline u32 vsp1_hgt_read(struct vsp1_hgt *hgt, u32 reg)
+{
+ return vsp1_read(hgt->histo.entity.vsp1, reg);
+}
+
+static inline void vsp1_hgt_write(struct vsp1_hgt *hgt,
+ struct vsp1_dl_body *dlb, u32 reg, u32 data)
+{
+ vsp1_dl_body_write(dlb, reg, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * Frame End Handler
+ */
+
+void vsp1_hgt_frame_end(struct vsp1_entity *entity)
+{
+ struct vsp1_hgt *hgt = to_hgt(&entity->subdev);
+ struct vsp1_histogram_buffer *buf;
+ unsigned int m;
+ unsigned int n;
+ u32 *data;
+
+ buf = vsp1_histogram_buffer_get(&hgt->histo);
+ if (!buf)
+ return;
+
+ data = buf->addr;
+
+ *data++ = vsp1_hgt_read(hgt, VI6_HGT_MAXMIN);
+ *data++ = vsp1_hgt_read(hgt, VI6_HGT_SUM);
+
+ for (m = 0; m < 6; ++m)
+ for (n = 0; n < 32; ++n)
+ *data++ = vsp1_hgt_read(hgt, VI6_HGT_HISTO(m, n));
+
+ vsp1_histogram_buffer_complete(&hgt->histo, buf, HGT_DATA_SIZE);
+}
+
+/* -----------------------------------------------------------------------------
+ * Controls
+ */
+
+#define V4L2_CID_VSP1_HGT_HUE_AREAS (V4L2_CID_USER_BASE | 0x1001)
+
+static int hgt_hue_areas_try_ctrl(struct v4l2_ctrl *ctrl)
+{
+ const u8 *values = ctrl->p_new.p_u8;
+ unsigned int i;
+
+ /*
+ * The hardware has constraints on the hue area boundaries beyond the
+ * control min, max and step. The values must match one of the following
+ * expressions.
+ *
+ * 0L <= 0U <= 1L <= 1U <= 2L <= 2U <= 3L <= 3U <= 4L <= 4U <= 5L <= 5U
+ * 0U <= 1L <= 1U <= 2L <= 2U <= 3L <= 3U <= 4L <= 4U <= 5L <= 5U <= 0L
+ *
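+	 * The second expression allows the first hue area to wrap around the
+	 * 255 -> 0 hue boundary.
+	 *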
+ * Start by verifying the common part...
+ */
+ for (i = 1; i < (HGT_NUM_HUE_AREAS * 2) - 1; ++i) {
+ if (values[i] > values[i+1])
+ return -EINVAL;
+ }
+
+ /* ... and handle 0L separately. */
+ if (values[0] > values[1] && values[11] > values[0])
+ return -EINVAL;
+
+ return 0;
+}
+
+static int hgt_hue_areas_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vsp1_hgt *hgt = container_of(ctrl->handler, struct vsp1_hgt,
+ ctrls);
+
+ memcpy(hgt->hue_areas, ctrl->p_new.p_u8, sizeof(hgt->hue_areas));
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops hgt_hue_areas_ctrl_ops = {
+ .try_ctrl = hgt_hue_areas_try_ctrl,
+ .s_ctrl = hgt_hue_areas_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config hgt_hue_areas = {
+ .ops = &hgt_hue_areas_ctrl_ops,
+ .id = V4L2_CID_VSP1_HGT_HUE_AREAS,
+ .name = "Boundary Values for Hue Area",
+ .type = V4L2_CTRL_TYPE_U8,
+ .min = 0,
+ .max = 255,
+ .def = 0,
+ .step = 1,
+ .dims = { 12 },
+};
+
+/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void hgt_configure_stream(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb)
+{
+ struct vsp1_hgt *hgt = to_hgt(&entity->subdev);
+ struct v4l2_rect *compose;
+ struct v4l2_rect *crop;
+ unsigned int hratio;
+ unsigned int vratio;
+ u8 lower;
+ u8 upper;
+ unsigned int i;
+
+ crop = vsp1_entity_get_pad_selection(entity, entity->config,
+ HISTO_PAD_SINK, V4L2_SEL_TGT_CROP);
+ compose = vsp1_entity_get_pad_selection(entity, entity->config,
+ HISTO_PAD_SINK,
+ V4L2_SEL_TGT_COMPOSE);
+
+ vsp1_hgt_write(hgt, dlb, VI6_HGT_REGRST, VI6_HGT_REGRST_RCLEA);
+
+ vsp1_hgt_write(hgt, dlb, VI6_HGT_OFFSET,
+ (crop->left << VI6_HGT_OFFSET_HOFFSET_SHIFT) |
+ (crop->top << VI6_HGT_OFFSET_VOFFSET_SHIFT));
+ vsp1_hgt_write(hgt, dlb, VI6_HGT_SIZE,
+ (crop->width << VI6_HGT_SIZE_HSIZE_SHIFT) |
+ (crop->height << VI6_HGT_SIZE_VSIZE_SHIFT));
+
+ mutex_lock(hgt->ctrls.lock);
+ for (i = 0; i < HGT_NUM_HUE_AREAS; ++i) {
+ lower = hgt->hue_areas[i*2 + 0];
+ upper = hgt->hue_areas[i*2 + 1];
+ vsp1_hgt_write(hgt, dlb, VI6_HGT_HUE_AREA(i),
+ (lower << VI6_HGT_HUE_AREA_LOWER_SHIFT) |
+ (upper << VI6_HGT_HUE_AREA_UPPER_SHIFT));
+ }
+ mutex_unlock(hgt->ctrls.lock);
+
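+	/*
+	 * As for the HGO, the compose rectangle is restricted to 1/1, 1/2 or
+	 * 1/4 of the crop rectangle, so these expressions evaluate to 0, 1
+	 * or 2.
+	 */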
+ hratio = crop->width * 2 / compose->width / 3;
+ vratio = crop->height * 2 / compose->height / 3;
+ vsp1_hgt_write(hgt, dlb, VI6_HGT_MODE,
+ (hratio << VI6_HGT_MODE_HRATIO_SHIFT) |
+ (vratio << VI6_HGT_MODE_VRATIO_SHIFT));
+}
+
+static const struct vsp1_entity_operations hgt_entity_ops = {
+ .configure_stream = hgt_configure_stream,
+ .destroy = vsp1_histogram_destroy,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+static const unsigned int hgt_mbus_formats[] = {
+ MEDIA_BUS_FMT_AHSV8888_1X32,
+};
+
+struct vsp1_hgt *vsp1_hgt_create(struct vsp1_device *vsp1)
+{
+ struct vsp1_hgt *hgt;
+ int ret;
+
+ hgt = devm_kzalloc(vsp1->dev, sizeof(*hgt), GFP_KERNEL);
+ if (hgt == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ /* Initialize the control handler. */
+ v4l2_ctrl_handler_init(&hgt->ctrls, 1);
+ v4l2_ctrl_new_custom(&hgt->ctrls, &hgt_hue_areas, NULL);
+
+ hgt->histo.entity.subdev.ctrl_handler = &hgt->ctrls;
+
+ /* Initialize the video device and queue for statistics data. */
+ ret = vsp1_histogram_init(vsp1, &hgt->histo, VSP1_ENTITY_HGT, "hgt",
+ &hgt_entity_ops, hgt_mbus_formats,
+ ARRAY_SIZE(hgt_mbus_formats),
+ HGT_DATA_SIZE, V4L2_META_FMT_VSP1_HGT);
+ if (ret < 0) {
+ vsp1_entity_destroy(&hgt->histo.entity);
+ return ERR_PTR(ret);
+ }
+
+ v4l2_ctrl_handler_setup(&hgt->ctrls);
+
+ return hgt;
+}
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_hgt.h b/drivers/media/platform/renesas/vsp1/vsp1_hgt.h
new file mode 100644
index 0000000000..38ec237bdd
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_hgt.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_hgt.h -- R-Car VSP1 Histogram Generator 2D
+ *
+ * Copyright (C) 2016 Renesas Electronics Corporation
+ *
+ * Contact: Niklas Söderlund (niklas.soderlund@ragnatech.se)
+ */
+#ifndef __VSP1_HGT_H__
+#define __VSP1_HGT_H__
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1_histo.h"
+
+struct vsp1_device;
+
+#define HGT_NUM_HUE_AREAS 6
+
+struct vsp1_hgt {
+ struct vsp1_histogram histo;
+
+ struct v4l2_ctrl_handler ctrls;
+
+ u8 hue_areas[HGT_NUM_HUE_AREAS * 2];
+};
+
+static inline struct vsp1_hgt *to_hgt(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_hgt, histo.entity.subdev);
+}
+
+struct vsp1_hgt *vsp1_hgt_create(struct vsp1_device *vsp1);
+void vsp1_hgt_frame_end(struct vsp1_entity *hgt);
+
+#endif /* __VSP1_HGT_H__ */
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_histo.c b/drivers/media/platform/renesas/vsp1/vsp1_histo.c
new file mode 100644
index 0000000000..f22449dd65
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_histo.c
@@ -0,0 +1,593 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_histo.c -- R-Car VSP1 Histogram API
+ *
+ * Copyright (C) 2016 Renesas Electronics Corporation
+ * Copyright (C) 2016 Laurent Pinchart
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-vmalloc.h>
+
+#include "vsp1.h"
+#include "vsp1_histo.h"
+#include "vsp1_pipe.h"
+
+#define HISTO_MIN_SIZE 4U
+#define HISTO_MAX_SIZE 8192U
+
+/* -----------------------------------------------------------------------------
+ * Buffer Operations
+ */
+
+static inline struct vsp1_histogram_buffer *
+to_vsp1_histogram_buffer(struct vb2_v4l2_buffer *vbuf)
+{
+ return container_of(vbuf, struct vsp1_histogram_buffer, buf);
+}
+
+struct vsp1_histogram_buffer *
+vsp1_histogram_buffer_get(struct vsp1_histogram *histo)
+{
+ struct vsp1_histogram_buffer *buf = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&histo->irqlock, flags);
+
+ if (list_empty(&histo->irqqueue))
+ goto done;
+
+ buf = list_first_entry(&histo->irqqueue, struct vsp1_histogram_buffer,
+ queue);
+ list_del(&buf->queue);
+ histo->readout = true;
+
+done:
+ spin_unlock_irqrestore(&histo->irqlock, flags);
+ return buf;
+}
+
+void vsp1_histogram_buffer_complete(struct vsp1_histogram *histo,
+ struct vsp1_histogram_buffer *buf,
+ size_t size)
+{
+ struct vsp1_pipeline *pipe = histo->entity.pipe;
+ unsigned long flags;
+
+ /*
+ * The pipeline pointer is guaranteed to be valid as this function is
+ * called from the frame completion interrupt handler, which can only
+ * occur when video streaming is active.
+ */
+ buf->buf.sequence = pipe->sequence;
+ buf->buf.vb2_buf.timestamp = ktime_get_ns();
+ vb2_set_plane_payload(&buf->buf.vb2_buf, 0, size);
+ vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
+
+ spin_lock_irqsave(&histo->irqlock, flags);
+ histo->readout = false;
+ wake_up(&histo->wait_queue);
+ spin_unlock_irqrestore(&histo->irqlock, flags);
+}
+
+/* -----------------------------------------------------------------------------
+ * videobuf2 Queue Operations
+ */
+
+static int histo_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct vsp1_histogram *histo = vb2_get_drv_priv(vq);
+
+ if (*nplanes) {
+ if (*nplanes != 1)
+ return -EINVAL;
+
+ if (sizes[0] < histo->data_size)
+ return -EINVAL;
+
+ return 0;
+ }
+
+ *nplanes = 1;
+ sizes[0] = histo->data_size;
+
+ return 0;
+}
+
+static int histo_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vsp1_histogram *histo = vb2_get_drv_priv(vb->vb2_queue);
+ struct vsp1_histogram_buffer *buf = to_vsp1_histogram_buffer(vbuf);
+
+ if (vb->num_planes != 1)
+ return -EINVAL;
+
+ if (vb2_plane_size(vb, 0) < histo->data_size)
+ return -EINVAL;
+
+ buf->addr = vb2_plane_vaddr(vb, 0);
+
+ return 0;
+}
+
+static void histo_buffer_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vsp1_histogram *histo = vb2_get_drv_priv(vb->vb2_queue);
+ struct vsp1_histogram_buffer *buf = to_vsp1_histogram_buffer(vbuf);
+ unsigned long flags;
+
+ spin_lock_irqsave(&histo->irqlock, flags);
+ list_add_tail(&buf->queue, &histo->irqqueue);
+ spin_unlock_irqrestore(&histo->irqlock, flags);
+}
+
+static int histo_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ return 0;
+}
+
+static void histo_stop_streaming(struct vb2_queue *vq)
+{
+ struct vsp1_histogram *histo = vb2_get_drv_priv(vq);
+ struct vsp1_histogram_buffer *buffer;
+ unsigned long flags;
+
+ spin_lock_irqsave(&histo->irqlock, flags);
+
+ /* Remove all buffers from the IRQ queue. */
+ list_for_each_entry(buffer, &histo->irqqueue, queue)
+ vb2_buffer_done(&buffer->buf.vb2_buf, VB2_BUF_STATE_ERROR);
+ INIT_LIST_HEAD(&histo->irqqueue);
+
+ /* Wait for the buffer being read out (if any) to complete. */
+ wait_event_lock_irq(histo->wait_queue, !histo->readout, histo->irqlock);
+
+ spin_unlock_irqrestore(&histo->irqlock, flags);
+}
+
+static const struct vb2_ops histo_video_queue_qops = {
+ .queue_setup = histo_queue_setup,
+ .buf_prepare = histo_buffer_prepare,
+ .buf_queue = histo_buffer_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = histo_start_streaming,
+ .stop_streaming = histo_stop_streaming,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static int histo_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct vsp1_histogram *histo = subdev_to_histo(subdev);
+
+ if (code->pad == HISTO_PAD_SOURCE) {
+ code->code = MEDIA_BUS_FMT_FIXED;
+ return 0;
+ }
+
+ return vsp1_subdev_enum_mbus_code(subdev, sd_state, code,
+ histo->formats,
+ histo->num_formats);
+}
+
+static int histo_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->pad != HISTO_PAD_SINK)
+ return -EINVAL;
+
+ return vsp1_subdev_enum_frame_size(subdev, sd_state, fse,
+ HISTO_MIN_SIZE,
+ HISTO_MIN_SIZE, HISTO_MAX_SIZE,
+ HISTO_MAX_SIZE);
+}
+
+static int histo_get_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct vsp1_histogram *histo = subdev_to_histo(subdev);
+ struct v4l2_subdev_state *config;
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *crop;
+ int ret = 0;
+
+ if (sel->pad != HISTO_PAD_SINK)
+ return -EINVAL;
+
+ mutex_lock(&histo->entity.lock);
+
+ config = vsp1_entity_get_pad_config(&histo->entity, sd_state,
+ sel->which);
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ crop = vsp1_entity_get_pad_selection(&histo->entity, config,
+ HISTO_PAD_SINK,
+ V4L2_SEL_TGT_CROP);
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = crop->width;
+ sel->r.height = crop->height;
+ break;
+
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ format = vsp1_entity_get_pad_format(&histo->entity, config,
+ HISTO_PAD_SINK);
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = format->width;
+ sel->r.height = format->height;
+ break;
+
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *vsp1_entity_get_pad_selection(&histo->entity, config,
+ sel->pad, sel->target);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+done:
+ mutex_unlock(&histo->entity.lock);
+ return ret;
+}
+
+static int histo_set_crop(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct vsp1_histogram *histo = subdev_to_histo(subdev);
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *selection;
+
+ /* The crop rectangle must be inside the input frame. */
+ format = vsp1_entity_get_pad_format(&histo->entity, sd_state,
+ HISTO_PAD_SINK);
+ sel->r.left = clamp_t(unsigned int, sel->r.left, 0, format->width - 1);
+ sel->r.top = clamp_t(unsigned int, sel->r.top, 0, format->height - 1);
+ sel->r.width = clamp_t(unsigned int, sel->r.width, HISTO_MIN_SIZE,
+ format->width - sel->r.left);
+ sel->r.height = clamp_t(unsigned int, sel->r.height, HISTO_MIN_SIZE,
+ format->height - sel->r.top);
+
+ /* Set the crop rectangle and reset the compose rectangle. */
+ selection = vsp1_entity_get_pad_selection(&histo->entity, sd_state,
+ sel->pad, V4L2_SEL_TGT_CROP);
+ *selection = sel->r;
+
+ selection = vsp1_entity_get_pad_selection(&histo->entity, sd_state,
+ sel->pad,
+ V4L2_SEL_TGT_COMPOSE);
+ *selection = sel->r;
+
+ return 0;
+}
+
+static int histo_set_compose(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct vsp1_histogram *histo = subdev_to_histo(subdev);
+ struct v4l2_rect *compose;
+ struct v4l2_rect *crop;
+ unsigned int ratio;
+
+ /*
+ * The compose rectangle is used to configure downscaling: the top left
+ * corner is fixed to (0,0) and the size is restricted to the crop
+ * rectangle size divided by 1, 2 or 4.
+ */
+ sel->r.left = 0;
+ sel->r.top = 0;
+
+ crop = vsp1_entity_get_pad_selection(&histo->entity, sd_state,
+ sel->pad,
+ V4L2_SEL_TGT_CROP);
+
+ /*
+ * Clamp the width and height to acceptable values first and then
+ * compute the closest rounded dividing ratio.
+ *
+ * Ratio Rounded ratio
+ * --------------------------
+ * [1.0 1.5[ 1
+ * [1.5 3.0[ 2
+ * [3.0 4.0] 4
+ *
+ * The rounded ratio can be computed using
+ *
+ * 1 << (ceil(ratio * 2) / 3)
+ */
+ sel->r.width = clamp(sel->r.width, crop->width / 4, crop->width);
+ ratio = 1 << (crop->width * 2 / sel->r.width / 3);
+ sel->r.width = crop->width / ratio;
+
+ sel->r.height = clamp(sel->r.height, crop->height / 4, crop->height);
+ ratio = 1 << (crop->height * 2 / sel->r.height / 3);
+ sel->r.height = crop->height / ratio;
+
+ compose = vsp1_entity_get_pad_selection(&histo->entity, sd_state,
+ sel->pad,
+ V4L2_SEL_TGT_COMPOSE);
+ *compose = sel->r;
+
+ return 0;
+}
+
+static int histo_set_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct vsp1_histogram *histo = subdev_to_histo(subdev);
+ struct v4l2_subdev_state *config;
+ int ret;
+
+ if (sel->pad != HISTO_PAD_SINK)
+ return -EINVAL;
+
+ mutex_lock(&histo->entity.lock);
+
+ config = vsp1_entity_get_pad_config(&histo->entity, sd_state,
+ sel->which);
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (sel->target == V4L2_SEL_TGT_CROP)
+ ret = histo_set_crop(subdev, config, sel);
+ else if (sel->target == V4L2_SEL_TGT_COMPOSE)
+ ret = histo_set_compose(subdev, config, sel);
+ else
+ ret = -EINVAL;
+
+done:
+ mutex_unlock(&histo->entity.lock);
+ return ret;
+}
+
+static int histo_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ if (fmt->pad == HISTO_PAD_SOURCE) {
+ fmt->format.code = MEDIA_BUS_FMT_FIXED;
+ fmt->format.width = 0;
+ fmt->format.height = 0;
+ fmt->format.field = V4L2_FIELD_NONE;
+ fmt->format.colorspace = V4L2_COLORSPACE_RAW;
+ return 0;
+ }
+
+ return vsp1_subdev_get_pad_format(subdev, sd_state, fmt);
+}
+
+static int histo_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vsp1_histogram *histo = subdev_to_histo(subdev);
+
+ if (fmt->pad != HISTO_PAD_SINK)
+ return histo_get_format(subdev, sd_state, fmt);
+
+ return vsp1_subdev_set_pad_format(subdev, sd_state, fmt,
+ histo->formats, histo->num_formats,
+ HISTO_MIN_SIZE, HISTO_MIN_SIZE,
+ HISTO_MAX_SIZE, HISTO_MAX_SIZE);
+}
+
+static const struct v4l2_subdev_pad_ops histo_pad_ops = {
+ .enum_mbus_code = histo_enum_mbus_code,
+ .enum_frame_size = histo_enum_frame_size,
+ .get_fmt = histo_get_format,
+ .set_fmt = histo_set_format,
+ .get_selection = histo_get_selection,
+ .set_selection = histo_set_selection,
+};
+
+static const struct v4l2_subdev_ops histo_ops = {
+ .pad = &histo_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 ioctls
+ */
+
+static int histo_v4l2_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct vsp1_histogram *histo = vdev_to_histo(vfh->vdev);
+
+ cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
+ | V4L2_CAP_VIDEO_CAPTURE_MPLANE
+ | V4L2_CAP_VIDEO_OUTPUT_MPLANE
+ | V4L2_CAP_META_CAPTURE;
+
+ strscpy(cap->driver, "vsp1", sizeof(cap->driver));
+ strscpy(cap->card, histo->video.name, sizeof(cap->card));
+
+ return 0;
+}
+
+static int histo_v4l2_enum_format(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct vsp1_histogram *histo = vdev_to_histo(vfh->vdev);
+
+ if (f->index > 0 || f->type != histo->queue.type)
+ return -EINVAL;
+
+ f->pixelformat = histo->meta_format;
+
+ return 0;
+}
+
+static int histo_v4l2_get_format(struct file *file, void *fh,
+ struct v4l2_format *format)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct vsp1_histogram *histo = vdev_to_histo(vfh->vdev);
+ struct v4l2_meta_format *meta = &format->fmt.meta;
+
+ if (format->type != histo->queue.type)
+ return -EINVAL;
+
+ memset(meta, 0, sizeof(*meta));
+
+ meta->dataformat = histo->meta_format;
+ meta->buffersize = histo->data_size;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops histo_v4l2_ioctl_ops = {
+ .vidioc_querycap = histo_v4l2_querycap,
+ .vidioc_enum_fmt_meta_cap = histo_v4l2_enum_format,
+ .vidioc_g_fmt_meta_cap = histo_v4l2_get_format,
+ .vidioc_s_fmt_meta_cap = histo_v4l2_get_format,
+ .vidioc_try_fmt_meta_cap = histo_v4l2_get_format,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 File Operations
+ */
+
+static const struct v4l2_file_operations histo_v4l2_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = video_ioctl2,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+};
+
+static void vsp1_histogram_cleanup(struct vsp1_histogram *histo)
+{
+ if (video_is_registered(&histo->video))
+ video_unregister_device(&histo->video);
+
+ media_entity_cleanup(&histo->video.entity);
+}
+
+void vsp1_histogram_destroy(struct vsp1_entity *entity)
+{
+ struct vsp1_histogram *histo = subdev_to_histo(&entity->subdev);
+
+ vsp1_histogram_cleanup(histo);
+}
+
+int vsp1_histogram_init(struct vsp1_device *vsp1, struct vsp1_histogram *histo,
+ enum vsp1_entity_type type, const char *name,
+ const struct vsp1_entity_operations *ops,
+ const unsigned int *formats, unsigned int num_formats,
+ size_t data_size, u32 meta_format)
+{
+ int ret;
+
+ histo->formats = formats;
+ histo->num_formats = num_formats;
+ histo->data_size = data_size;
+ histo->meta_format = meta_format;
+
+ histo->pad.flags = MEDIA_PAD_FL_SINK;
+ histo->video.vfl_dir = VFL_DIR_RX;
+
+ mutex_init(&histo->lock);
+ spin_lock_init(&histo->irqlock);
+ INIT_LIST_HEAD(&histo->irqqueue);
+ init_waitqueue_head(&histo->wait_queue);
+
+ /* Initialize the VSP entity... */
+ histo->entity.ops = ops;
+ histo->entity.type = type;
+
+ ret = vsp1_entity_init(vsp1, &histo->entity, name, 2, &histo_ops,
+ MEDIA_ENT_F_PROC_VIDEO_STATISTICS);
+ if (ret < 0)
+ return ret;
+
+ /* ... and the media entity... */
+ ret = media_entity_pads_init(&histo->video.entity, 1, &histo->pad);
+ if (ret < 0)
+ return ret;
+
+ /* ... and the video node... */
+ histo->video.v4l2_dev = &vsp1->v4l2_dev;
+ histo->video.fops = &histo_v4l2_fops;
+ snprintf(histo->video.name, sizeof(histo->video.name),
+ "%s histo", histo->entity.subdev.name);
+ histo->video.vfl_type = VFL_TYPE_VIDEO;
+ histo->video.release = video_device_release_empty;
+ histo->video.ioctl_ops = &histo_v4l2_ioctl_ops;
+ histo->video.device_caps = V4L2_CAP_META_CAPTURE | V4L2_CAP_STREAMING;
+
+ video_set_drvdata(&histo->video, histo);
+
+ /* ... and the buffers queue... */
+ histo->queue.type = V4L2_BUF_TYPE_META_CAPTURE;
+ histo->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ histo->queue.lock = &histo->lock;
+ histo->queue.drv_priv = histo;
+ histo->queue.buf_struct_size = sizeof(struct vsp1_histogram_buffer);
+ histo->queue.ops = &histo_video_queue_qops;
+ histo->queue.mem_ops = &vb2_vmalloc_memops;
+ histo->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ histo->queue.dev = vsp1->dev;
+ ret = vb2_queue_init(&histo->queue);
+ if (ret < 0) {
+ dev_err(vsp1->dev, "failed to initialize vb2 queue\n");
+ goto error;
+ }
+
+ /* ... and register the video device. */
+ histo->video.queue = &histo->queue;
+ ret = video_register_device(&histo->video, VFL_TYPE_VIDEO, -1);
+ if (ret < 0) {
+ dev_err(vsp1->dev, "failed to register video device\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ vsp1_histogram_cleanup(histo);
+ return ret;
+}
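
The compose handling in histo_set_compose() above reduces the crop size by a rounded ratio of 1, 2 or 4, following the table in its comment. A quick standalone check of that integer expression, with illustrative sizes only:

	#include <stdio.h>

	/* Same integer expression as histo_set_compose(); illustrative only. */
	static unsigned int rounded_ratio(unsigned int crop, unsigned int size)
	{
		return 1U << (crop * 2 / size / 3);
	}

	int main(void)
	{
		printf("%u\n", rounded_ratio(1024, 1024));	/* ratio 1.0  -> 1 */
		printf("%u\n", rounded_ratio(1024, 600));	/* ratio ~1.7 -> 2 */
		printf("%u\n", rounded_ratio(1024, 256));	/* ratio 4.0  -> 4 */
		return 0;
	}
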
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_histo.h b/drivers/media/platform/renesas/vsp1/vsp1_histo.h
new file mode 100644
index 0000000000..06f0298462
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_histo.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_histo.h -- R-Car VSP1 Histogram API
+ *
+ * Copyright (C) 2016 Renesas Electronics Corporation
+ * Copyright (C) 2016 Laurent Pinchart
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+#ifndef __VSP1_HISTO_H__
+#define __VSP1_HISTO_H__
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-dev.h>
+#include <media/videobuf2-v4l2.h>
+
+#include "vsp1_entity.h"
+
+struct vsp1_device;
+
+#define HISTO_PAD_SINK 0
+#define HISTO_PAD_SOURCE 1
+
+struct vsp1_histogram_buffer {
+ struct vb2_v4l2_buffer buf;
+ struct list_head queue;
+ void *addr;
+};
+
+struct vsp1_histogram {
+ struct vsp1_entity entity;
+ struct video_device video;
+ struct media_pad pad;
+
+ const u32 *formats;
+ unsigned int num_formats;
+ size_t data_size;
+ u32 meta_format;
+
+ struct mutex lock;
+ struct vb2_queue queue;
+
+ spinlock_t irqlock;
+ struct list_head irqqueue;
+
+ wait_queue_head_t wait_queue;
+ bool readout;
+};
+
+static inline struct vsp1_histogram *vdev_to_histo(struct video_device *vdev)
+{
+ return container_of(vdev, struct vsp1_histogram, video);
+}
+
+static inline struct vsp1_histogram *subdev_to_histo(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_histogram, entity.subdev);
+}
+
+int vsp1_histogram_init(struct vsp1_device *vsp1, struct vsp1_histogram *histo,
+ enum vsp1_entity_type type, const char *name,
+ const struct vsp1_entity_operations *ops,
+ const unsigned int *formats, unsigned int num_formats,
+ size_t data_size, u32 meta_format);
+void vsp1_histogram_destroy(struct vsp1_entity *entity);
+
+struct vsp1_histogram_buffer *
+vsp1_histogram_buffer_get(struct vsp1_histogram *histo);
+void vsp1_histogram_buffer_complete(struct vsp1_histogram *histo,
+ struct vsp1_histogram_buffer *buf,
+ size_t size);
+
+#endif /* __VSP1_HISTO_H__ */
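
The queue declared above is filled by the video node side in vsp1_histo.c and drained by the histogram units at frame end: a buffer is taken from the IRQ queue, its vmalloc mapping in buf->addr is filled from the statistics registers, and it is handed back with the number of bytes written. A minimal sketch of that consumer side, assuming vsp1_histo.h is included; the unit name and the register copy are placeholders, not code from this patch:

	/* Hypothetical consumer of the histogram buffer API (sketch only). */
	static void example_unit_frame_end(struct vsp1_histogram *histo)
	{
		struct vsp1_histogram_buffer *buf;
		size_t size = 0;
		u32 *data;

		buf = vsp1_histogram_buffer_get(histo);
		if (!buf)
			return;		/* No buffer queued, drop the statistics. */

		data = buf->addr;
		/* ... copy the statistics registers into data[] and set size ... */

		vsp1_histogram_buffer_complete(histo, buf, size);
	}
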
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_hsit.c b/drivers/media/platform/renesas/vsp1/vsp1_hsit.c
new file mode 100644
index 0000000000..361a870380
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_hsit.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_hsit.c -- R-Car VSP1 Hue Saturation value (Inverse) Transform
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_dl.h"
+#include "vsp1_hsit.h"
+
+#define HSIT_MIN_SIZE 4U
+#define HSIT_MAX_SIZE 8190U
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline void vsp1_hsit_write(struct vsp1_hsit *hsit,
+ struct vsp1_dl_body *dlb, u32 reg, u32 data)
+{
+ vsp1_dl_body_write(dlb, reg, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static int hsit_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct vsp1_hsit *hsit = to_hsit(subdev);
+
+ if (code->index > 0)
+ return -EINVAL;
+
+ if ((code->pad == HSIT_PAD_SINK && !hsit->inverse) ||
+ (code->pad == HSIT_PAD_SOURCE && hsit->inverse))
+ code->code = MEDIA_BUS_FMT_ARGB8888_1X32;
+ else
+ code->code = MEDIA_BUS_FMT_AHSV8888_1X32;
+
+ return 0;
+}
+
+static int hsit_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ return vsp1_subdev_enum_frame_size(subdev, sd_state, fse,
+ HSIT_MIN_SIZE,
+ HSIT_MIN_SIZE, HSIT_MAX_SIZE,
+ HSIT_MAX_SIZE);
+}
+
+static int hsit_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vsp1_hsit *hsit = to_hsit(subdev);
+ struct v4l2_subdev_state *config;
+ struct v4l2_mbus_framefmt *format;
+ int ret = 0;
+
+ mutex_lock(&hsit->entity.lock);
+
+ config = vsp1_entity_get_pad_config(&hsit->entity, sd_state,
+ fmt->which);
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ format = vsp1_entity_get_pad_format(&hsit->entity, config, fmt->pad);
+
+ if (fmt->pad == HSIT_PAD_SOURCE) {
+ /*
+ * The HST and HSI output format code and resolution can't be
+ * modified.
+ */
+ fmt->format = *format;
+ goto done;
+ }
+
+ format->code = hsit->inverse ? MEDIA_BUS_FMT_AHSV8888_1X32
+ : MEDIA_BUS_FMT_ARGB8888_1X32;
+ format->width = clamp_t(unsigned int, fmt->format.width,
+ HSIT_MIN_SIZE, HSIT_MAX_SIZE);
+ format->height = clamp_t(unsigned int, fmt->format.height,
+ HSIT_MIN_SIZE, HSIT_MAX_SIZE);
+ format->field = V4L2_FIELD_NONE;
+ format->colorspace = V4L2_COLORSPACE_SRGB;
+
+ fmt->format = *format;
+
+ /* Propagate the format to the source pad. */
+ format = vsp1_entity_get_pad_format(&hsit->entity, config,
+ HSIT_PAD_SOURCE);
+ *format = fmt->format;
+ format->code = hsit->inverse ? MEDIA_BUS_FMT_ARGB8888_1X32
+ : MEDIA_BUS_FMT_AHSV8888_1X32;
+
+done:
+ mutex_unlock(&hsit->entity.lock);
+ return ret;
+}
+
+static const struct v4l2_subdev_pad_ops hsit_pad_ops = {
+ .init_cfg = vsp1_entity_init_cfg,
+ .enum_mbus_code = hsit_enum_mbus_code,
+ .enum_frame_size = hsit_enum_frame_size,
+ .get_fmt = vsp1_subdev_get_pad_format,
+ .set_fmt = hsit_set_format,
+};
+
+static const struct v4l2_subdev_ops hsit_ops = {
+ .pad = &hsit_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void hsit_configure_stream(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb)
+{
+ struct vsp1_hsit *hsit = to_hsit(&entity->subdev);
+
+ if (hsit->inverse)
+ vsp1_hsit_write(hsit, dlb, VI6_HSI_CTRL, VI6_HSI_CTRL_EN);
+ else
+ vsp1_hsit_write(hsit, dlb, VI6_HST_CTRL, VI6_HST_CTRL_EN);
+}
+
+static const struct vsp1_entity_operations hsit_entity_ops = {
+ .configure_stream = hsit_configure_stream,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+struct vsp1_hsit *vsp1_hsit_create(struct vsp1_device *vsp1, bool inverse)
+{
+ struct vsp1_hsit *hsit;
+ int ret;
+
+ hsit = devm_kzalloc(vsp1->dev, sizeof(*hsit), GFP_KERNEL);
+ if (hsit == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ hsit->inverse = inverse;
+
+ hsit->entity.ops = &hsit_entity_ops;
+
+ if (inverse)
+ hsit->entity.type = VSP1_ENTITY_HSI;
+ else
+ hsit->entity.type = VSP1_ENTITY_HST;
+
+ ret = vsp1_entity_init(vsp1, &hsit->entity, inverse ? "hsi" : "hst",
+ 2, &hsit_ops,
+ MEDIA_ENT_F_PROC_VIDEO_PIXEL_ENC_CONV);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ return hsit;
+}
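
The pad operations above implement a fixed conversion: the HST instance takes ARGB on its sink pad and produces AHSV on its source pad, the inverse HSI instance does the opposite, and only the sink size can be changed. The bus code selection, restated as two small helpers for reference rather than as driver code:

	/* Restatement of hsit_enum_mbus_code()/hsit_set_format(); reference only. */
	static u32 hsit_sink_code(bool inverse)
	{
		/* The HSI (inverse) consumes HSV, the HST consumes RGB. */
		return inverse ? MEDIA_BUS_FMT_AHSV8888_1X32
			       : MEDIA_BUS_FMT_ARGB8888_1X32;
	}

	static u32 hsit_source_code(bool inverse)
	{
		/* The source pad always carries the converted representation. */
		return inverse ? MEDIA_BUS_FMT_ARGB8888_1X32
			       : MEDIA_BUS_FMT_AHSV8888_1X32;
	}
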
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_hsit.h b/drivers/media/platform/renesas/vsp1/vsp1_hsit.h
new file mode 100644
index 0000000000..a658b1aa49
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_hsit.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_hsit.h -- R-Car VSP1 Hue Saturation value (Inverse) Transform
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+#ifndef __VSP1_HSIT_H__
+#define __VSP1_HSIT_H__
+
+#include <media/media-entity.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1_entity.h"
+
+struct vsp1_device;
+
+#define HSIT_PAD_SINK 0
+#define HSIT_PAD_SOURCE 1
+
+struct vsp1_hsit {
+ struct vsp1_entity entity;
+ bool inverse;
+};
+
+static inline struct vsp1_hsit *to_hsit(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_hsit, entity.subdev);
+}
+
+struct vsp1_hsit *vsp1_hsit_create(struct vsp1_device *vsp1, bool inverse);
+
+#endif /* __VSP1_HSIT_H__ */
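
A minimal usage sketch for the single entry point declared above, following the same IS_ERR() convention the create function itself uses; the helper and its output pointers are illustrative, not part of the driver:

	/* Hypothetical probe-time helper creating both variants (sketch only). */
	static int example_create_hsit_pair(struct vsp1_device *vsp1,
					    struct vsp1_hsit **hst,
					    struct vsp1_hsit **hsi)
	{
		*hst = vsp1_hsit_create(vsp1, false);	/* RGB -> HSV */
		if (IS_ERR(*hst))
			return PTR_ERR(*hst);

		*hsi = vsp1_hsit_create(vsp1, true);	/* HSV -> RGB */
		if (IS_ERR(*hsi))
			return PTR_ERR(*hsi);

		return 0;
	}
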
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_lif.c b/drivers/media/platform/renesas/vsp1/vsp1_lif.c
new file mode 100644
index 0000000000..0ab2e0c704
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_lif.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_lif.c -- R-Car VSP1 LCD Controller Interface
+ *
+ * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_dl.h"
+#include "vsp1_lif.h"
+
+#define LIF_MIN_SIZE 2U
+#define LIF_MAX_SIZE 8190U
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline void vsp1_lif_write(struct vsp1_lif *lif,
+ struct vsp1_dl_body *dlb, u32 reg, u32 data)
+{
+ vsp1_dl_body_write(dlb, reg + lif->entity.index * VI6_LIF_OFFSET,
+ data);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static const unsigned int lif_codes[] = {
+ MEDIA_BUS_FMT_ARGB8888_1X32,
+ MEDIA_BUS_FMT_AYUV8_1X32,
+};
+
+static int lif_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ return vsp1_subdev_enum_mbus_code(subdev, sd_state, code, lif_codes,
+ ARRAY_SIZE(lif_codes));
+}
+
+static int lif_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ return vsp1_subdev_enum_frame_size(subdev, sd_state, fse,
+ LIF_MIN_SIZE,
+ LIF_MIN_SIZE, LIF_MAX_SIZE,
+ LIF_MAX_SIZE);
+}
+
+static int lif_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ return vsp1_subdev_set_pad_format(subdev, sd_state, fmt, lif_codes,
+ ARRAY_SIZE(lif_codes),
+ LIF_MIN_SIZE, LIF_MIN_SIZE,
+ LIF_MAX_SIZE, LIF_MAX_SIZE);
+}
+
+static const struct v4l2_subdev_pad_ops lif_pad_ops = {
+ .init_cfg = vsp1_entity_init_cfg,
+ .enum_mbus_code = lif_enum_mbus_code,
+ .enum_frame_size = lif_enum_frame_size,
+ .get_fmt = vsp1_subdev_get_pad_format,
+ .set_fmt = lif_set_format,
+};
+
+static const struct v4l2_subdev_ops lif_ops = {
+ .pad = &lif_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void lif_configure_stream(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb)
+{
+ const struct v4l2_mbus_framefmt *format;
+ struct vsp1_lif *lif = to_lif(&entity->subdev);
+ unsigned int hbth;
+ unsigned int obth;
+ unsigned int lbth;
+
+ format = vsp1_entity_get_pad_format(&lif->entity, lif->entity.config,
+ LIF_PAD_SOURCE);
+
+ switch (entity->vsp1->version & VI6_IP_VERSION_MODEL_MASK) {
+ case VI6_IP_VERSION_MODEL_VSPD_GEN2:
+ case VI6_IP_VERSION_MODEL_VSPD_V2H:
+ hbth = 1536;
+ obth = min(128U, (format->width + 1) / 2 * format->height - 4);
+ lbth = 1520;
+ break;
+
+ case VI6_IP_VERSION_MODEL_VSPDL_GEN3:
+ case VI6_IP_VERSION_MODEL_VSPD_V3:
+ case VI6_IP_VERSION_MODEL_VSPD_RZG2L:
+ hbth = 0;
+ obth = 1500;
+ lbth = 0;
+ break;
+
+ case VI6_IP_VERSION_MODEL_VSPD_GEN3:
+ case VI6_IP_VERSION_MODEL_VSPD_GEN4:
+ default:
+ hbth = 0;
+ obth = 3000;
+ lbth = 0;
+ break;
+ }
+
+ vsp1_lif_write(lif, dlb, VI6_LIF_CSBTH,
+ (hbth << VI6_LIF_CSBTH_HBTH_SHIFT) |
+ (lbth << VI6_LIF_CSBTH_LBTH_SHIFT));
+
+ vsp1_lif_write(lif, dlb, VI6_LIF_CTRL,
+ (obth << VI6_LIF_CTRL_OBTH_SHIFT) |
+ (format->code == 0 ? VI6_LIF_CTRL_CFMT : 0) |
+ VI6_LIF_CTRL_REQSEL | VI6_LIF_CTRL_LIF_EN);
+
+ /*
+ * On R-Car V3M and RZ/G2L the LIF0 buffer attribute register has to be
+ * set to a non-default value to guarantee proper operation (otherwise
+ * artifacts may appear on the output). The value required by the
+ * manual is not explained but is likely a buffer size or threshold.
+ */
+ if (vsp1_feature(entity->vsp1, VSP1_HAS_NON_ZERO_LBA))
+ vsp1_lif_write(lif, dlb, VI6_LIF_LBA,
+ VI6_LIF_LBA_LBA0 |
+ (1536 << VI6_LIF_LBA_LBA1_SHIFT));
+}
+
+static const struct vsp1_entity_operations lif_entity_ops = {
+ .configure_stream = lif_configure_stream,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+struct vsp1_lif *vsp1_lif_create(struct vsp1_device *vsp1, unsigned int index)
+{
+ struct vsp1_lif *lif;
+ int ret;
+
+ lif = devm_kzalloc(vsp1->dev, sizeof(*lif), GFP_KERNEL);
+ if (lif == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ lif->entity.ops = &lif_entity_ops;
+ lif->entity.type = VSP1_ENTITY_LIF;
+ lif->entity.index = index;
+
+ /*
+ * The LIF is never exposed to userspace, but media entity registration
+ * requires a function to be set. Use PROC_VIDEO_PIXEL_FORMATTER just to
+ * avoid triggering a WARN_ON(); the value won't be seen anywhere.
+ */
+ ret = vsp1_entity_init(vsp1, &lif->entity, "lif", 2, &lif_ops,
+ MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ return lif;
+}
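
On Gen2 and V2H hardware, lif_configure_stream() above derives the OBTH value from the frame size and caps it at 128. The same arithmetic evaluated outside the driver, with illustrative sizes, and assuming the frame is large enough that the subtraction cannot underflow, as it is in practice:

	#include <stdio.h>

	/* Same expression as the Gen2/V2H case in lif_configure_stream(). */
	static unsigned int gen2_obth(unsigned int width, unsigned int height)
	{
		unsigned int half_frame = (width + 1) / 2 * height;

		return half_frame - 4 < 128 ? half_frame - 4 : 128;
	}

	int main(void)
	{
		printf("%u\n", gen2_obth(1920, 1080));	/* large frame, capped at 128 */
		printf("%u\n", gen2_obth(16, 2));	/* small frame, 16 - 4 = 12 */
		return 0;
	}
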
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_lif.h b/drivers/media/platform/renesas/vsp1/vsp1_lif.h
new file mode 100644
index 0000000000..71a4eda9c2
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_lif.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_lif.h -- R-Car VSP1 LCD Controller Interface
+ *
+ * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+#ifndef __VSP1_LIF_H__
+#define __VSP1_LIF_H__
+
+#include <media/media-entity.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1_entity.h"
+
+struct vsp1_device;
+
+#define LIF_PAD_SINK 0
+#define LIF_PAD_SOURCE 1
+
+struct vsp1_lif {
+ struct vsp1_entity entity;
+};
+
+static inline struct vsp1_lif *to_lif(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_lif, entity.subdev);
+}
+
+struct vsp1_lif *vsp1_lif_create(struct vsp1_device *vsp1, unsigned int index);
+
+#endif /* __VSP1_LIF_H__ */
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_lut.c b/drivers/media/platform/renesas/vsp1/vsp1_lut.c
new file mode 100644
index 0000000000..ac6802a325
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_lut.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_lut.c -- R-Car VSP1 Look-Up Table
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_dl.h"
+#include "vsp1_lut.h"
+
+#define LUT_MIN_SIZE 4U
+#define LUT_MAX_SIZE 8190U
+
+#define LUT_SIZE 256
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline void vsp1_lut_write(struct vsp1_lut *lut,
+ struct vsp1_dl_body *dlb, u32 reg, u32 data)
+{
+ vsp1_dl_body_write(dlb, reg, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * Controls
+ */
+
+#define V4L2_CID_VSP1_LUT_TABLE (V4L2_CID_USER_BASE | 0x1001)
+
+static int lut_set_table(struct vsp1_lut *lut, struct v4l2_ctrl *ctrl)
+{
+ struct vsp1_dl_body *dlb;
+ unsigned int i;
+
+ dlb = vsp1_dl_body_get(lut->pool);
+ if (!dlb)
+ return -ENOMEM;
+
+ for (i = 0; i < LUT_SIZE; ++i)
+ vsp1_dl_body_write(dlb, VI6_LUT_TABLE + 4 * i,
+ ctrl->p_new.p_u32[i]);
+
+ spin_lock_irq(&lut->lock);
+ swap(lut->lut, dlb);
+ spin_unlock_irq(&lut->lock);
+
+ vsp1_dl_body_put(dlb);
+ return 0;
+}
+
+static int lut_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vsp1_lut *lut =
+ container_of(ctrl->handler, struct vsp1_lut, ctrls);
+
+ switch (ctrl->id) {
+ case V4L2_CID_VSP1_LUT_TABLE:
+ lut_set_table(lut, ctrl);
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops lut_ctrl_ops = {
+ .s_ctrl = lut_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config lut_table_control = {
+ .ops = &lut_ctrl_ops,
+ .id = V4L2_CID_VSP1_LUT_TABLE,
+ .name = "Look-Up Table",
+ .type = V4L2_CTRL_TYPE_U32,
+ .min = 0x00000000,
+ .max = 0x00ffffff,
+ .step = 1,
+ .def = 0,
+ .dims = { LUT_SIZE },
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+static const unsigned int lut_codes[] = {
+ MEDIA_BUS_FMT_ARGB8888_1X32,
+ MEDIA_BUS_FMT_AHSV8888_1X32,
+ MEDIA_BUS_FMT_AYUV8_1X32,
+};
+
+static int lut_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ return vsp1_subdev_enum_mbus_code(subdev, sd_state, code, lut_codes,
+ ARRAY_SIZE(lut_codes));
+}
+
+static int lut_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ return vsp1_subdev_enum_frame_size(subdev, sd_state, fse,
+ LUT_MIN_SIZE,
+ LUT_MIN_SIZE, LUT_MAX_SIZE,
+ LUT_MAX_SIZE);
+}
+
+static int lut_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ return vsp1_subdev_set_pad_format(subdev, sd_state, fmt, lut_codes,
+ ARRAY_SIZE(lut_codes),
+ LUT_MIN_SIZE, LUT_MIN_SIZE,
+ LUT_MAX_SIZE, LUT_MAX_SIZE);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static const struct v4l2_subdev_pad_ops lut_pad_ops = {
+ .init_cfg = vsp1_entity_init_cfg,
+ .enum_mbus_code = lut_enum_mbus_code,
+ .enum_frame_size = lut_enum_frame_size,
+ .get_fmt = vsp1_subdev_get_pad_format,
+ .set_fmt = lut_set_format,
+};
+
+static const struct v4l2_subdev_ops lut_ops = {
+ .pad = &lut_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void lut_configure_stream(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb)
+{
+ struct vsp1_lut *lut = to_lut(&entity->subdev);
+
+ vsp1_lut_write(lut, dlb, VI6_LUT_CTRL, VI6_LUT_CTRL_EN);
+}
+
+static void lut_configure_frame(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb)
+{
+ struct vsp1_lut *lut = to_lut(&entity->subdev);
+ struct vsp1_dl_body *lut_dlb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lut->lock, flags);
+ lut_dlb = lut->lut;
+ lut->lut = NULL;
+ spin_unlock_irqrestore(&lut->lock, flags);
+
+ if (lut_dlb) {
+ vsp1_dl_list_add_body(dl, lut_dlb);
+
+ /* Release our local reference. */
+ vsp1_dl_body_put(lut_dlb);
+ }
+}
+
+static void lut_destroy(struct vsp1_entity *entity)
+{
+ struct vsp1_lut *lut = to_lut(&entity->subdev);
+
+ vsp1_dl_body_pool_destroy(lut->pool);
+}
+
+static const struct vsp1_entity_operations lut_entity_ops = {
+ .configure_stream = lut_configure_stream,
+ .configure_frame = lut_configure_frame,
+ .destroy = lut_destroy,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+struct vsp1_lut *vsp1_lut_create(struct vsp1_device *vsp1)
+{
+ struct vsp1_lut *lut;
+ int ret;
+
+ lut = devm_kzalloc(vsp1->dev, sizeof(*lut), GFP_KERNEL);
+ if (lut == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&lut->lock);
+
+ lut->entity.ops = &lut_entity_ops;
+ lut->entity.type = VSP1_ENTITY_LUT;
+
+ ret = vsp1_entity_init(vsp1, &lut->entity, "lut", 2, &lut_ops,
+ MEDIA_ENT_F_PROC_VIDEO_LUT);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ /*
+ * Pre-allocate a body pool, with 3 bodies allowing a userspace update
+ * before the hardware has committed a previous set of tables, handling
+ * both the queued and pending dl entries.
+ */
+ lut->pool = vsp1_dl_body_pool_create(vsp1, 3, LUT_SIZE, 0);
+ if (!lut->pool)
+ return ERR_PTR(-ENOMEM);
+
+ /* Initialize the control handler. */
+ v4l2_ctrl_handler_init(&lut->ctrls, 1);
+ v4l2_ctrl_new_custom(&lut->ctrls, &lut_table_control, NULL);
+
+ lut->entity.subdev.ctrl_handler = &lut->ctrls;
+
+ if (lut->ctrls.error) {
+ dev_err(vsp1->dev, "lut: failed to initialize controls\n");
+ ret = lut->ctrls.error;
+ vsp1_entity_destroy(&lut->entity);
+ return ERR_PTR(ret);
+ }
+
+ v4l2_ctrl_handler_setup(&lut->ctrls);
+
+ return lut;
+}
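
The table is exposed to userspace as a 256-element U32 array control, so an application programs it with VIDIOC_S_EXT_CTRLS on the LUT subdevice node. A hedged userspace sketch, assuming an already opened file descriptor for that node and mirroring the driver-private control ID defined above:

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	/* Mirrors the driver-private control ID defined in vsp1_lut.c. */
	#define V4L2_CID_VSP1_LUT_TABLE	(V4L2_CID_USER_BASE | 0x1001)

	static int example_set_identity_lut(int fd)
	{
		struct v4l2_ext_controls ctrls;
		struct v4l2_ext_control ctrl;
		uint32_t table[256];
		unsigned int i;

		/* 0x00RRGGBB entries, here a simple identity mapping. */
		for (i = 0; i < 256; ++i)
			table[i] = (i << 16) | (i << 8) | i;

		memset(&ctrl, 0, sizeof(ctrl));
		ctrl.id = V4L2_CID_VSP1_LUT_TABLE;
		ctrl.size = sizeof(table);
		ctrl.p_u32 = table;

		memset(&ctrls, 0, sizeof(ctrls));
		ctrls.which = V4L2_CTRL_WHICH_CUR_VAL;
		ctrls.count = 1;
		ctrls.controls = &ctrl;

		return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
	}
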
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_lut.h b/drivers/media/platform/renesas/vsp1/vsp1_lut.h
new file mode 100644
index 0000000000..8cb0df1b7e
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_lut.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_lut.h -- R-Car VSP1 Look-Up Table
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+#ifndef __VSP1_LUT_H__
+#define __VSP1_LUT_H__
+
+#include <linux/spinlock.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1_entity.h"
+
+struct vsp1_device;
+
+#define LUT_PAD_SINK 0
+#define LUT_PAD_SOURCE 1
+
+struct vsp1_lut {
+ struct vsp1_entity entity;
+
+ struct v4l2_ctrl_handler ctrls;
+
+ spinlock_t lock;
+ struct vsp1_dl_body *lut;
+ struct vsp1_dl_body_pool *pool;
+};
+
+static inline struct vsp1_lut *to_lut(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_lut, entity.subdev);
+}
+
+struct vsp1_lut *vsp1_lut_create(struct vsp1_device *vsp1);
+
+#endif /* __VSP1_LUT_H__ */
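
The lock, lut and pool members above back the staged-update scheme used in vsp1_lut.c: the control handler pre-fills a display list body and swaps it in under the spinlock, and the next frame configuration claims it atomically. The same handoff pattern in isolation, as a generic kernel-style sketch rather than driver code:

	#include <linux/spinlock.h>

	/* Generic staged-update handoff (sketch only; types are placeholders). */
	struct staged_update {
		spinlock_t lock;
		void *pending;		/* item staged by the producer, if any */
	};

	static void stage(struct staged_update *s, void **itemp)
	{
		unsigned long flags;
		void *old;

		spin_lock_irqsave(&s->lock, flags);
		old = s->pending;
		s->pending = *itemp;
		spin_unlock_irqrestore(&s->lock, flags);

		*itemp = old;	/* caller releases the previous item, if any */
	}

	static void *claim(struct staged_update *s)
	{
		unsigned long flags;
		void *item;

		spin_lock_irqsave(&s->lock, flags);
		item = s->pending;
		s->pending = NULL;
		spin_unlock_irqrestore(&s->lock, flags);

		return item;
	}
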
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_pipe.c b/drivers/media/platform/renesas/vsp1/vsp1_pipe.c
new file mode 100644
index 0000000000..f8093ba953
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_pipe.c
@@ -0,0 +1,468 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_pipe.c -- R-Car VSP1 Pipeline
+ *
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_brx.h"
+#include "vsp1_dl.h"
+#include "vsp1_entity.h"
+#include "vsp1_hgo.h"
+#include "vsp1_hgt.h"
+#include "vsp1_pipe.h"
+#include "vsp1_rwpf.h"
+#include "vsp1_uds.h"
+
+/* -----------------------------------------------------------------------------
+ * Helper Functions
+ */
+
+static const struct vsp1_format_info vsp1_video_formats[] = {
+ { V4L2_PIX_FMT_RGB332, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_RGB_332, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 8, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_ARGB444, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_ARGB_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, true },
+ { V4L2_PIX_FMT_XRGB444, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_XRGB_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_RGBA444, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_RGBA_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, true },
+ { V4L2_PIX_FMT_RGBX444, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_RGBX_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_ABGR444, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_ABGR_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, true },
+ { V4L2_PIX_FMT_XBGR444, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_ABGR_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_BGRA444, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_BGRA_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, true },
+ { V4L2_PIX_FMT_BGRX444, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_BGRA_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_ARGB555, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_ARGB_1555, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, true },
+ { V4L2_PIX_FMT_XRGB555, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_XRGB_1555, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_RGBA555, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_RGBA_5551, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, true },
+ { V4L2_PIX_FMT_RGBX555, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_RGBX_5551, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_ABGR555, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_ABGR_1555, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, true },
+ { V4L2_PIX_FMT_XBGR555, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_ABGR_1555, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_BGRA555, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_BGRA_5551, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, true },
+ { V4L2_PIX_FMT_BGRX555, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_BGRA_5551, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_RGB565, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_RGB_565, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_BGR24, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_BGR_888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 24, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_RGB24, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_RGB_888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 24, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_ABGR32, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS,
+ 1, { 32, 0, 0 }, false, false, 1, 1, true },
+ { V4L2_PIX_FMT_XBGR32, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS,
+ 1, { 32, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_BGRA32, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_RGBA_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS,
+ 1, { 32, 0, 0 }, false, false, 1, 1, true },
+ { V4L2_PIX_FMT_BGRX32, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_RGBA_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS,
+ 1, { 32, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_RGBA32, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_RGBA_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 32, 0, 0 }, false, false, 1, 1, true },
+ { V4L2_PIX_FMT_RGBX32, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_RGBA_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 32, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_ARGB32, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 32, 0, 0 }, false, false, 1, 1, true },
+ { V4L2_PIX_FMT_XRGB32, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 32, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_HSV24, MEDIA_BUS_FMT_AHSV8888_1X32,
+ VI6_FMT_RGB_888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 24, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_HSV32, MEDIA_BUS_FMT_AHSV8888_1X32,
+ VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 32, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_RGBX1010102, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_RGB10_RGB10A2_A2RGB10,
+ VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS,
+ 1, { 32, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_RGBA1010102, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_RGB10_RGB10A2_A2RGB10,
+ VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS,
+ 1, { 32, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_ARGB2101010, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_RGB10_RGB10A2_A2RGB10,
+ VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS,
+ 1, { 32, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_UYVY, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 16, 0, 0 }, false, false, 2, 1, false },
+ { V4L2_PIX_FMT_VYUY, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 16, 0, 0 }, false, true, 2, 1, false },
+ { V4L2_PIX_FMT_YUYV, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 16, 0, 0 }, true, false, 2, 1, false },
+ { V4L2_PIX_FMT_YVYU, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 16, 0, 0 }, true, true, 2, 1, false },
+ { V4L2_PIX_FMT_NV12M, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_Y_UV_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 2, { 8, 16, 0 }, false, false, 2, 2, false },
+ { V4L2_PIX_FMT_NV21M, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_Y_UV_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 2, { 8, 16, 0 }, false, true, 2, 2, false },
+ { V4L2_PIX_FMT_NV16M, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_Y_UV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 2, { 8, 16, 0 }, false, false, 2, 1, false },
+ { V4L2_PIX_FMT_NV61M, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_Y_UV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 2, { 8, 16, 0 }, false, true, 2, 1, false },
+ { V4L2_PIX_FMT_YUV420M, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_Y_U_V_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 3, { 8, 8, 8 }, false, false, 2, 2, false },
+ { V4L2_PIX_FMT_YVU420M, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_Y_U_V_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 3, { 8, 8, 8 }, false, true, 2, 2, false },
+ { V4L2_PIX_FMT_YUV422M, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_Y_U_V_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 3, { 8, 8, 8 }, false, false, 2, 1, false },
+ { V4L2_PIX_FMT_YVU422M, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_Y_U_V_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 3, { 8, 8, 8 }, false, true, 2, 1, false },
+ { V4L2_PIX_FMT_YUV444M, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_Y_U_V_444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 3, { 8, 8, 8 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_YVU444M, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_Y_U_V_444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 3, { 8, 8, 8 }, false, true, 1, 1, false },
+ { V4L2_PIX_FMT_Y210, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS,
+ 1, { 32, 0, 0 }, false, false, 2, 1, false },
+ { V4L2_PIX_FMT_Y212, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS,
+ 1, { 32, 0, 0 }, false, false, 2, 1, false },
+};
+
+/**
+ * vsp1_get_format_info - Retrieve format information for a 4CC
+ * @vsp1: the VSP1 device
+ * @fourcc: the format 4CC
+ *
+ * Return a pointer to the format information structure corresponding to the
+ * given V4L2 format 4CC, or NULL if no corresponding format can be found.
+ */
+const struct vsp1_format_info *vsp1_get_format_info(struct vsp1_device *vsp1,
+ u32 fourcc)
+{
+ unsigned int i;
+
+ /* Special case, the VYUY and HSV formats are supported on Gen2 only. */
+ if (vsp1->info->gen != 2) {
+ switch (fourcc) {
+ case V4L2_PIX_FMT_VYUY:
+ case V4L2_PIX_FMT_HSV24:
+ case V4L2_PIX_FMT_HSV32:
+ return NULL;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(vsp1_video_formats); ++i) {
+ const struct vsp1_format_info *info = &vsp1_video_formats[i];
+
+ if (info->fourcc == fourcc)
+ return info;
+ }
+
+ return NULL;
+}
+
+/* -----------------------------------------------------------------------------
+ * Pipeline Management
+ */
+
+void vsp1_pipeline_reset(struct vsp1_pipeline *pipe)
+{
+ struct vsp1_entity *entity;
+ unsigned int i;
+
+ if (pipe->brx) {
+ struct vsp1_brx *brx = to_brx(&pipe->brx->subdev);
+
+ for (i = 0; i < ARRAY_SIZE(brx->inputs); ++i)
+ brx->inputs[i].rpf = NULL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pipe->inputs); ++i)
+ pipe->inputs[i] = NULL;
+
+ pipe->output = NULL;
+
+ list_for_each_entry(entity, &pipe->entities, list_pipe)
+ entity->pipe = NULL;
+
+ INIT_LIST_HEAD(&pipe->entities);
+ pipe->state = VSP1_PIPELINE_STOPPED;
+ pipe->buffers_ready = 0;
+ pipe->num_inputs = 0;
+ pipe->brx = NULL;
+ pipe->hgo = NULL;
+ pipe->hgt = NULL;
+ pipe->lif = NULL;
+ pipe->uds = NULL;
+}
+
+void vsp1_pipeline_init(struct vsp1_pipeline *pipe)
+{
+ mutex_init(&pipe->lock);
+ spin_lock_init(&pipe->irqlock);
+ init_waitqueue_head(&pipe->wq);
+ kref_init(&pipe->kref);
+
+ INIT_LIST_HEAD(&pipe->entities);
+ pipe->state = VSP1_PIPELINE_STOPPED;
+}
+
+/* Must be called with the pipe irqlock held. */
+void vsp1_pipeline_run(struct vsp1_pipeline *pipe)
+{
+ struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
+
+ if (pipe->state == VSP1_PIPELINE_STOPPED) {
+ vsp1_write(vsp1, VI6_CMD(pipe->output->entity.index),
+ VI6_CMD_STRCMD);
+ pipe->state = VSP1_PIPELINE_RUNNING;
+ }
+
+ pipe->buffers_ready = 0;
+}
+
+bool vsp1_pipeline_stopped(struct vsp1_pipeline *pipe)
+{
+ unsigned long flags;
+ bool stopped;
+
+ spin_lock_irqsave(&pipe->irqlock, flags);
+ stopped = pipe->state == VSP1_PIPELINE_STOPPED;
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
+
+ return stopped;
+}
+
+int vsp1_pipeline_stop(struct vsp1_pipeline *pipe)
+{
+ struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
+ struct vsp1_entity *entity;
+ unsigned long flags;
+ int ret;
+
+ if (pipe->lif) {
+ /*
+ * When using display lists in continuous frame mode the only
+ * way to stop the pipeline is to reset the hardware.
+ */
+ ret = vsp1_reset_wpf(vsp1, pipe->output->entity.index);
+ if (ret == 0) {
+ spin_lock_irqsave(&pipe->irqlock, flags);
+ pipe->state = VSP1_PIPELINE_STOPPED;
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
+ }
+ } else {
+ /* Otherwise just request a stop and wait. */
+ spin_lock_irqsave(&pipe->irqlock, flags);
+ if (pipe->state == VSP1_PIPELINE_RUNNING)
+ pipe->state = VSP1_PIPELINE_STOPPING;
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
+
+ ret = wait_event_timeout(pipe->wq, vsp1_pipeline_stopped(pipe),
+ msecs_to_jiffies(500));
+ ret = ret == 0 ? -ETIMEDOUT : 0;
+ }
+
+ list_for_each_entry(entity, &pipe->entities, list_pipe) {
+ if (entity->route && entity->route->reg)
+ vsp1_write(vsp1, entity->route->reg,
+ VI6_DPR_NODE_UNUSED);
+ }
+
+ if (pipe->hgo)
+ vsp1_write(vsp1, VI6_DPR_HGO_SMPPT,
+ (7 << VI6_DPR_SMPPT_TGW_SHIFT) |
+ (VI6_DPR_NODE_UNUSED << VI6_DPR_SMPPT_PT_SHIFT));
+
+ if (pipe->hgt)
+ vsp1_write(vsp1, VI6_DPR_HGT_SMPPT,
+ (7 << VI6_DPR_SMPPT_TGW_SHIFT) |
+ (VI6_DPR_NODE_UNUSED << VI6_DPR_SMPPT_PT_SHIFT));
+
+ v4l2_subdev_call(&pipe->output->entity.subdev, video, s_stream, 0);
+
+ return ret;
+}
+
+bool vsp1_pipeline_ready(struct vsp1_pipeline *pipe)
+{
+ unsigned int mask;
+
+ mask = ((1 << pipe->num_inputs) - 1) << 1;
+ if (!pipe->lif)
+ mask |= 1 << 0;
+
+ return pipe->buffers_ready == mask;
+}
+
+void vsp1_pipeline_frame_end(struct vsp1_pipeline *pipe)
+{
+ unsigned int flags;
+
+ if (pipe == NULL)
+ return;
+
+ /*
+ * If the DL commit raced with the frame end interrupt, the commit ends
+ * up being postponed by one frame. The returned flags tell whether the
+ * active frame was finished or postponed.
+ */
+ flags = vsp1_dlm_irq_frame_end(pipe->output->dlm);
+
+ if (pipe->hgo)
+ vsp1_hgo_frame_end(pipe->hgo);
+
+ if (pipe->hgt)
+ vsp1_hgt_frame_end(pipe->hgt);
+
+ /*
+ * Regardless of frame completion, we still need to call the pipe's
+ * frame_end handler to account for vblank events.
+ */
+ if (pipe->frame_end)
+ pipe->frame_end(pipe, flags);
+
+ pipe->sequence++;
+}
+
+/*
+ * Propagate the alpha value through the pipeline.
+ *
+ * As the UDS has restricted scaling capabilities when the alpha component needs
+ * to be scaled, we disable alpha scaling when the UDS input has a fixed alpha
+ * value. The UDS then outputs a fixed alpha value which needs to be programmed
+ * from the input RPF alpha.
+ */
+void vsp1_pipeline_propagate_alpha(struct vsp1_pipeline *pipe,
+ struct vsp1_dl_body *dlb, unsigned int alpha)
+{
+ if (!pipe->uds)
+ return;
+
+ /*
+ * The BRU and BRS background colors have a fixed alpha value of 255;
+ * the output alpha value is thus always equal to 255.
+ */
+ if (pipe->uds_input->type == VSP1_ENTITY_BRU ||
+ pipe->uds_input->type == VSP1_ENTITY_BRS)
+ alpha = 255;
+
+ vsp1_uds_set_alpha(pipe->uds, dlb, alpha);
+}
+
+/*
+ * Propagate the partition calculations through the pipeline
+ *
+ * Work backwards through the pipe, allowing each entity to update the partition
+ * parameters based on its configuration, and the entity connected to its
+ * source. Each entity must produce the partition required for the previous
+ * entity in the pipeline.
+ */
+void vsp1_pipeline_propagate_partition(struct vsp1_pipeline *pipe,
+ struct vsp1_partition *partition,
+ unsigned int index,
+ struct vsp1_partition_window *window)
+{
+ struct vsp1_entity *entity;
+
+ list_for_each_entry_reverse(entity, &pipe->entities, list_pipe) {
+ if (entity->ops->partition)
+ entity->ops->partition(entity, pipe, partition, index,
+ window);
+ }
+}
+
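
The readiness test in vsp1_pipeline_ready() above packs one bit per video node: bit 0 for the WPF, used only when the pipeline captures to memory (no LIF), and bits 1 to n for the RPF inputs. Restated for reference with example values:

	#include <stdbool.h>

	/* Restatement of the vsp1_pipeline_ready() mask; example values only. */
	static unsigned int ready_mask(unsigned int num_inputs, bool has_lif)
	{
		/* Bits 1..n: one per RPF input with a buffer available. */
		unsigned int mask = ((1u << num_inputs) - 1) << 1;

		/* Bit 0: the WPF, only when capturing to memory (no LIF). */
		if (!has_lif)
			mask |= 1u << 0;

		return mask;	/* ready_mask(2, false) == 0x7 */
	}
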
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_pipe.h b/drivers/media/platform/renesas/vsp1/vsp1_pipe.h
new file mode 100644
index 0000000000..674b5748d9
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_pipe.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_pipe.h -- R-Car VSP1 Pipeline
+ *
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+#ifndef __VSP1_PIPE_H__
+#define __VSP1_PIPE_H__
+
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+
+#include <media/media-entity.h>
+
+struct vsp1_dl_list;
+struct vsp1_rwpf;
+
+/*
+ * struct vsp1_format_info - VSP1 video format description
+ * @fourcc: V4L2 pixel format FCC identifier
+ * @mbus: media bus format code
+ * @hwfmt: VSP1 hardware format
+ * @swap: swap register control
+ * @planes: number of planes
+ * @bpp: bits per pixel
+ * @swap_yc: the Y and C components are swapped (Y comes before C)
+ * @swap_uv: the U and V components are swapped (V comes before U)
+ * @hsub: horizontal subsampling factor
+ * @vsub: vertical subsampling factor
+ * @alpha: has an alpha channel
+ */
+struct vsp1_format_info {
+ u32 fourcc;
+ unsigned int mbus;
+ unsigned int hwfmt;
+ unsigned int swap;
+ unsigned int planes;
+ unsigned int bpp[3];
+ bool swap_yc;
+ bool swap_uv;
+ unsigned int hsub;
+ unsigned int vsub;
+ bool alpha;
+};
+
+enum vsp1_pipeline_state {
+ VSP1_PIPELINE_STOPPED,
+ VSP1_PIPELINE_RUNNING,
+ VSP1_PIPELINE_STOPPING,
+};
+
+/*
+ * struct vsp1_partition_window - Partition window coordinates
+ * @left: horizontal coordinate of the partition start in pixels relative to the
+ * left edge of the image
+ * @width: partition width in pixels
+ */
+struct vsp1_partition_window {
+ unsigned int left;
+ unsigned int width;
+};
+
+/*
+ * struct vsp1_partition - A description of a slice for the partition algorithm
+ * @rpf: The RPF partition window configuration
+ * @uds_sink: The UDS input partition window configuration
+ * @uds_source: The UDS output partition window configuration
+ * @sru: The SRU partition window configuration
+ * @wpf: The WPF partition window configuration
+ */
+struct vsp1_partition {
+ struct vsp1_partition_window rpf;
+ struct vsp1_partition_window uds_sink;
+ struct vsp1_partition_window uds_source;
+ struct vsp1_partition_window sru;
+ struct vsp1_partition_window wpf;
+};
+
+/*
+ * struct vsp1_pipeline - A VSP1 hardware pipeline
+ * @pipe: the media pipeline
+ * @irqlock: protects the pipeline state
+ * @state: current state
+ * @wq: wait queue to wait for state change completion
+ * @frame_end: frame end interrupt handler
+ * @lock: protects the pipeline use count and stream count
+ * @kref: pipeline reference count
+ * @stream_count: number of streaming video nodes
+ * @buffers_ready: bitmask of RPFs and WPFs with at least one buffer available
+ * @sequence: frame sequence number
+ * @num_inputs: number of RPFs
+ * @inputs: array of RPFs in the pipeline (indexed by RPF index)
+ * @output: WPF at the output of the pipeline
+ * @brx: BRx entity, if present
+ * @hgo: HGO entity, if present
+ * @hgt: HGT entity, if present
+ * @lif: LIF entity, if present
+ * @uds: UDS entity, if present
+ * @uds_input: entity at the input of the UDS, if the UDS is present
+ * @entities: list of entities in the pipeline
+ * @stream_config: cached stream configuration for video pipelines
+ * @configured: when false the @stream_config shall be written to the hardware
+ * @interlaced: True when the pipeline is configured in interlaced mode
+ * @partitions: The number of partitions used to process one frame
+ * @partition: The current partition for configuration to process
+ * @part_table: The pre-calculated partitions used by the pipeline
+ * @underrun_count: number of underrun events reported for this pipeline
+ */
+struct vsp1_pipeline {
+ struct media_pipeline pipe;
+
+ spinlock_t irqlock;
+ enum vsp1_pipeline_state state;
+ wait_queue_head_t wq;
+
+ void (*frame_end)(struct vsp1_pipeline *pipe, unsigned int completion);
+
+ struct mutex lock;
+ struct kref kref;
+ unsigned int stream_count;
+ unsigned int buffers_ready;
+ unsigned int sequence;
+
+ unsigned int num_inputs;
+ struct vsp1_rwpf *inputs[VSP1_MAX_RPF];
+ struct vsp1_rwpf *output;
+ struct vsp1_entity *brx;
+ struct vsp1_entity *hgo;
+ struct vsp1_entity *hgt;
+ struct vsp1_entity *lif;
+ struct vsp1_entity *uds;
+ struct vsp1_entity *uds_input;
+
+ /*
+ * The order of this list must be identical to the order of the entities
+ * in the pipeline, as it is assumed by the partition algorithm that we
+ * can walk this list in sequence.
+ */
+ struct list_head entities;
+
+ struct vsp1_dl_body *stream_config;
+ bool configured;
+ bool interlaced;
+
+ unsigned int partitions;
+ struct vsp1_partition *partition;
+ struct vsp1_partition *part_table;
+
+ u32 underrun_count;
+};
+
+void vsp1_pipeline_reset(struct vsp1_pipeline *pipe);
+void vsp1_pipeline_init(struct vsp1_pipeline *pipe);
+
+void vsp1_pipeline_run(struct vsp1_pipeline *pipe);
+bool vsp1_pipeline_stopped(struct vsp1_pipeline *pipe);
+int vsp1_pipeline_stop(struct vsp1_pipeline *pipe);
+bool vsp1_pipeline_ready(struct vsp1_pipeline *pipe);
+
+void vsp1_pipeline_frame_end(struct vsp1_pipeline *pipe);
+
+void vsp1_pipeline_propagate_alpha(struct vsp1_pipeline *pipe,
+ struct vsp1_dl_body *dlb,
+ unsigned int alpha);
+
+void vsp1_pipeline_propagate_partition(struct vsp1_pipeline *pipe,
+ struct vsp1_partition *partition,
+ unsigned int index,
+ struct vsp1_partition_window *window);
+
+const struct vsp1_format_info *vsp1_get_format_info(struct vsp1_device *vsp1,
+ u32 fourcc);
+
+#endif /* __VSP1_PIPE_H__ */
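
As an example of reading the table behind vsp1_get_format_info(), the NV12M entry in vsp1_pipe.c describes a two-plane format with 8 and 16 bits per pixel and 2x2 chroma subsampling. An illustrative lookup, with the hypothetical caller assumed to have a struct vsp1_device at hand:

	/* Illustrative lookup; the values follow the NV12M table entry. */
	static int example_check_nv12m(struct vsp1_device *vsp1)
	{
		const struct vsp1_format_info *info;

		info = vsp1_get_format_info(vsp1, V4L2_PIX_FMT_NV12M);
		if (!info)
			return -EINVAL;

		/*
		 * info->planes == 2, info->bpp == { 8, 16, 0 },
		 * info->hsub == 2, info->vsub == 2, info->alpha == false.
		 */
		return 0;
	}
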
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_regs.h b/drivers/media/platform/renesas/vsp1/vsp1_regs.h
new file mode 100644
index 0000000000..7eca82e0ba
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_regs.h
@@ -0,0 +1,886 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_regs.h -- R-Car VSP1 Registers Definitions
+ *
+ * Copyright (C) 2013 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#ifndef __VSP1_REGS_H__
+#define __VSP1_REGS_H__
+
+/* -----------------------------------------------------------------------------
+ * General Control Registers
+ */
+
+#define VI6_CMD(n) (0x0000 + (n) * 4)
+#define VI6_CMD_UPDHDR BIT(4)
+#define VI6_CMD_STRCMD BIT(0)
+
+#define VI6_CLK_DCSWT 0x0018
+#define VI6_CLK_DCSWT_CSTPW_MASK (0xff << 8)
+#define VI6_CLK_DCSWT_CSTPW_SHIFT 8
+#define VI6_CLK_DCSWT_CSTRW_MASK (0xff << 0)
+#define VI6_CLK_DCSWT_CSTRW_SHIFT 0
+
+#define VI6_SRESET 0x0028
+#define VI6_SRESET_SRTS(n) BIT(n)
+
+#define VI6_STATUS 0x0038
+#define VI6_STATUS_FLD_STD(n) BIT((n) + 28)
+#define VI6_STATUS_SYS_ACT(n) BIT((n) + 8)
+
+#define VI6_WPF_IRQ_ENB(n) (0x0048 + (n) * 12)
+#define VI6_WPF_IRQ_ENB_UNDE BIT(16)
+#define VI6_WPF_IRQ_ENB_DFEE BIT(1)
+#define VI6_WPF_IRQ_ENB_FREE BIT(0)
+
+#define VI6_WPF_IRQ_STA(n) (0x004c + (n) * 12)
+#define VI6_WPF_IRQ_STA_UND BIT(16)
+#define VI6_WPF_IRQ_STA_DFE BIT(1)
+#define VI6_WPF_IRQ_STA_FRE BIT(0)
+
+#define VI6_DISP_IRQ_ENB(n) (0x0078 + (n) * 60)
+#define VI6_DISP_IRQ_ENB_DSTE BIT(8)
+#define VI6_DISP_IRQ_ENB_MAEE BIT(5)
+#define VI6_DISP_IRQ_ENB_LNEE(n) BIT(n)
+
+#define VI6_DISP_IRQ_STA(n) (0x007c + (n) * 60)
+#define VI6_DISP_IRQ_STA_DST BIT(8)
+#define VI6_DISP_IRQ_STA_MAE BIT(5)
+#define VI6_DISP_IRQ_STA_LNE(n) BIT(n)
+
+#define VI6_WPF_LINE_COUNT(n) (0x0084 + (n) * 4)
+#define VI6_WPF_LINE_COUNT_MASK (0x1fffff << 0)
+
+/* -----------------------------------------------------------------------------
+ * Display List Control Registers
+ */
+
+#define VI6_DL_CTRL 0x0100
+#define VI6_DL_CTRL_AR_WAIT_MASK (0xffff << 16)
+#define VI6_DL_CTRL_AR_WAIT_SHIFT 16
+#define VI6_DL_CTRL_DC2 BIT(12)
+#define VI6_DL_CTRL_DC1 BIT(8)
+#define VI6_DL_CTRL_DC0 BIT(4)
+#define VI6_DL_CTRL_CFM0 BIT(2)
+#define VI6_DL_CTRL_NH0 BIT(1)
+#define VI6_DL_CTRL_DLE BIT(0)
+
+#define VI6_DL_HDR_ADDR(n) (0x0104 + (n) * 4)
+
+#define VI6_DL_SWAP 0x0114
+#define VI6_DL_SWAP_LWS BIT(2)
+#define VI6_DL_SWAP_WDS BIT(1)
+#define VI6_DL_SWAP_BTS BIT(0)
+
+#define VI6_DL_EXT_CTRL(n) (0x011c + (n) * 36)
+#define VI6_DL_EXT_CTRL_NWE BIT(16)
+#define VI6_DL_EXT_CTRL_POLINT_MASK (0x3f << 8)
+#define VI6_DL_EXT_CTRL_POLINT_SHIFT 8
+#define VI6_DL_EXT_CTRL_DLPRI BIT(5)
+#define VI6_DL_EXT_CTRL_EXPRI BIT(4)
+#define VI6_DL_EXT_CTRL_EXT BIT(0)
+
+#define VI6_DL_EXT_AUTOFLD_INT BIT(0)
+
+#define VI6_DL_BODY_SIZE 0x0120
+#define VI6_DL_BODY_SIZE_UPD BIT(24)
+#define VI6_DL_BODY_SIZE_BS_MASK (0x1ffff << 0)
+#define VI6_DL_BODY_SIZE_BS_SHIFT 0
+
+/* -----------------------------------------------------------------------------
+ * RPF Control Registers
+ */
+
+#define VI6_RPF_OFFSET 0x100
+
+#define VI6_RPF_SRC_BSIZE 0x0300
+#define VI6_RPF_SRC_BSIZE_BHSIZE_MASK (0x1fff << 16)
+#define VI6_RPF_SRC_BSIZE_BHSIZE_SHIFT 16
+#define VI6_RPF_SRC_BSIZE_BVSIZE_MASK (0x1fff << 0)
+#define VI6_RPF_SRC_BSIZE_BVSIZE_SHIFT 0
+
+#define VI6_RPF_SRC_ESIZE 0x0304
+#define VI6_RPF_SRC_ESIZE_EHSIZE_MASK (0x1fff << 16)
+#define VI6_RPF_SRC_ESIZE_EHSIZE_SHIFT 16
+#define VI6_RPF_SRC_ESIZE_EVSIZE_MASK (0x1fff << 0)
+#define VI6_RPF_SRC_ESIZE_EVSIZE_SHIFT 0
+
+#define VI6_RPF_INFMT 0x0308
+#define VI6_RPF_INFMT_VIR BIT(28)
+#define VI6_RPF_INFMT_CIPM BIT(16)
+#define VI6_RPF_INFMT_SPYCS BIT(15)
+#define VI6_RPF_INFMT_SPUVS BIT(14)
+#define VI6_RPF_INFMT_CEXT_ZERO (0 << 12)
+#define VI6_RPF_INFMT_CEXT_EXT (1 << 12)
+#define VI6_RPF_INFMT_CEXT_ONE (2 << 12)
+#define VI6_RPF_INFMT_CEXT_MASK (3 << 12)
+#define VI6_RPF_INFMT_RDTM_BT601 (0 << 9)
+#define VI6_RPF_INFMT_RDTM_BT601_EXT (1 << 9)
+#define VI6_RPF_INFMT_RDTM_BT709 (2 << 9)
+#define VI6_RPF_INFMT_RDTM_BT709_EXT (3 << 9)
+#define VI6_RPF_INFMT_RDTM_MASK (7 << 9)
+#define VI6_RPF_INFMT_CSC BIT(8)
+#define VI6_RPF_INFMT_RDFMT_MASK (0x7f << 0)
+#define VI6_RPF_INFMT_RDFMT_SHIFT 0
+
+#define VI6_RPF_DSWAP 0x030c
+#define VI6_RPF_DSWAP_A_LLS BIT(11)
+#define VI6_RPF_DSWAP_A_LWS BIT(10)
+#define VI6_RPF_DSWAP_A_WDS BIT(9)
+#define VI6_RPF_DSWAP_A_BTS BIT(8)
+#define VI6_RPF_DSWAP_P_LLS BIT(3)
+#define VI6_RPF_DSWAP_P_LWS BIT(2)
+#define VI6_RPF_DSWAP_P_WDS BIT(1)
+#define VI6_RPF_DSWAP_P_BTS BIT(0)
+
+#define VI6_RPF_LOC 0x0310
+#define VI6_RPF_LOC_HCOORD_MASK (0x1fff << 16)
+#define VI6_RPF_LOC_HCOORD_SHIFT 16
+#define VI6_RPF_LOC_VCOORD_MASK (0x1fff << 0)
+#define VI6_RPF_LOC_VCOORD_SHIFT 0
+
+#define VI6_RPF_ALPH_SEL 0x0314
+#define VI6_RPF_ALPH_SEL_ASEL_PACKED (0 << 28)
+#define VI6_RPF_ALPH_SEL_ASEL_8B_PLANE (1 << 28)
+#define VI6_RPF_ALPH_SEL_ASEL_SELECT (2 << 28)
+#define VI6_RPF_ALPH_SEL_ASEL_1B_PLANE (3 << 28)
+#define VI6_RPF_ALPH_SEL_ASEL_FIXED (4 << 28)
+#define VI6_RPF_ALPH_SEL_ASEL_MASK (7 << 28)
+#define VI6_RPF_ALPH_SEL_ASEL_SHIFT 28
+#define VI6_RPF_ALPH_SEL_IROP_MASK (0xf << 24)
+#define VI6_RPF_ALPH_SEL_IROP_SHIFT 24
+#define VI6_RPF_ALPH_SEL_BSEL BIT(23)
+#define VI6_RPF_ALPH_SEL_AEXT_ZERO (0 << 18)
+#define VI6_RPF_ALPH_SEL_AEXT_EXT (1 << 18)
+#define VI6_RPF_ALPH_SEL_AEXT_ONE (2 << 18)
+#define VI6_RPF_ALPH_SEL_AEXT_MASK (3 << 18)
+#define VI6_RPF_ALPH_SEL_ALPHA1_MASK (0xff << 8)
+#define VI6_RPF_ALPH_SEL_ALPHA1_SHIFT 8
+#define VI6_RPF_ALPH_SEL_ALPHA0_MASK (0xff << 0)
+#define VI6_RPF_ALPH_SEL_ALPHA0_SHIFT 0
+
+#define VI6_RPF_VRTCOL_SET 0x0318
+#define VI6_RPF_VRTCOL_SET_LAYA_MASK (0xff << 24)
+#define VI6_RPF_VRTCOL_SET_LAYA_SHIFT 24
+#define VI6_RPF_VRTCOL_SET_LAYR_MASK (0xff << 16)
+#define VI6_RPF_VRTCOL_SET_LAYR_SHIFT 16
+#define VI6_RPF_VRTCOL_SET_LAYG_MASK (0xff << 8)
+#define VI6_RPF_VRTCOL_SET_LAYG_SHIFT 8
+#define VI6_RPF_VRTCOL_SET_LAYB_MASK (0xff << 0)
+#define VI6_RPF_VRTCOL_SET_LAYB_SHIFT 0
+
+#define VI6_RPF_MSK_CTRL 0x031c
+#define VI6_RPF_MSK_CTRL_MSK_EN BIT(24)
+#define VI6_RPF_MSK_CTRL_MGR_MASK (0xff << 16)
+#define VI6_RPF_MSK_CTRL_MGR_SHIFT 16
+#define VI6_RPF_MSK_CTRL_MGG_MASK (0xff << 8)
+#define VI6_RPF_MSK_CTRL_MGG_SHIFT 8
+#define VI6_RPF_MSK_CTRL_MGB_MASK (0xff << 0)
+#define VI6_RPF_MSK_CTRL_MGB_SHIFT 0
+
+#define VI6_RPF_MSK_SET0 0x0320
+#define VI6_RPF_MSK_SET1 0x0324
+#define VI6_RPF_MSK_SET_MSA_MASK (0xff << 24)
+#define VI6_RPF_MSK_SET_MSA_SHIFT 24
+#define VI6_RPF_MSK_SET_MSR_MASK (0xff << 16)
+#define VI6_RPF_MSK_SET_MSR_SHIFT 16
+#define VI6_RPF_MSK_SET_MSG_MASK (0xff << 8)
+#define VI6_RPF_MSK_SET_MSG_SHIFT 8
+#define VI6_RPF_MSK_SET_MSB_MASK (0xff << 0)
+#define VI6_RPF_MSK_SET_MSB_SHIFT 0
+
+#define VI6_RPF_CKEY_CTRL 0x0328
+#define VI6_RPF_CKEY_CTRL_CV BIT(4)
+#define VI6_RPF_CKEY_CTRL_SAPE1 BIT(1)
+#define VI6_RPF_CKEY_CTRL_SAPE0 BIT(0)
+
+#define VI6_RPF_CKEY_SET0 0x032c
+#define VI6_RPF_CKEY_SET1 0x0330
+#define VI6_RPF_CKEY_SET_AP_MASK (0xff << 24)
+#define VI6_RPF_CKEY_SET_AP_SHIFT 24
+#define VI6_RPF_CKEY_SET_R_MASK (0xff << 16)
+#define VI6_RPF_CKEY_SET_R_SHIFT 16
+#define VI6_RPF_CKEY_SET_GY_MASK (0xff << 8)
+#define VI6_RPF_CKEY_SET_GY_SHIFT 8
+#define VI6_RPF_CKEY_SET_B_MASK (0xff << 0)
+#define VI6_RPF_CKEY_SET_B_SHIFT 0
+
+#define VI6_RPF_SRCM_PSTRIDE 0x0334
+#define VI6_RPF_SRCM_PSTRIDE_Y_SHIFT 16
+#define VI6_RPF_SRCM_PSTRIDE_C_SHIFT 0
+
+#define VI6_RPF_SRCM_ASTRIDE 0x0338
+#define VI6_RPF_SRCM_PSTRIDE_A_SHIFT 0
+
+#define VI6_RPF_SRCM_ADDR_Y 0x033c
+#define VI6_RPF_SRCM_ADDR_C0 0x0340
+#define VI6_RPF_SRCM_ADDR_C1 0x0344
+#define VI6_RPF_SRCM_ADDR_AI 0x0348
+
+#define VI6_RPF_MULT_ALPHA 0x036c
+#define VI6_RPF_MULT_ALPHA_A_MMD_NONE (0 << 12)
+#define VI6_RPF_MULT_ALPHA_A_MMD_RATIO (1 << 12)
+#define VI6_RPF_MULT_ALPHA_P_MMD_NONE (0 << 8)
+#define VI6_RPF_MULT_ALPHA_P_MMD_RATIO (1 << 8)
+#define VI6_RPF_MULT_ALPHA_P_MMD_IMAGE (2 << 8)
+#define VI6_RPF_MULT_ALPHA_P_MMD_BOTH (3 << 8)
+#define VI6_RPF_MULT_ALPHA_RATIO_MASK (0xff << 0)
+#define VI6_RPF_MULT_ALPHA_RATIO_SHIFT 0
+
+#define VI6_RPF_EXT_INFMT0 0x0370
+#define VI6_RPF_EXT_INFMT0_F2B BIT(12)
+#define VI6_RPF_EXT_INFMT0_IPBD_Y_8 (0 << 8)
+#define VI6_RPF_EXT_INFMT0_IPBD_Y_10 (1 << 8)
+#define VI6_RPF_EXT_INFMT0_IPBD_Y_12 (2 << 8)
+#define VI6_RPF_EXT_INFMT0_IPBD_C_8 (0 << 4)
+#define VI6_RPF_EXT_INFMT0_IPBD_C_10 (1 << 4)
+#define VI6_RPF_EXT_INFMT0_IPBD_C_12 (2 << 4)
+#define VI6_RPF_EXT_INFMT0_BYPP_M1_RGB10 (3 << 0)
+
+#define VI6_RPF_EXT_INFMT1 0x0374
+#define VI6_RPF_EXT_INFMT1_PACK_CPOS(a, b, c, d) \
+ (((a) << 24) | ((b) << 16) | ((c) << 8) | ((d) << 0))
+
+#define VI6_RPF_EXT_INFMT2 0x0378
+#define VI6_RPF_EXT_INFMT2_PACK_CLEN(a, b, c, d) \
+ (((a) << 24) | ((b) << 16) | ((c) << 8) | ((d) << 0))
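+
+/*
+ * Illustrative note (not part of this patch): EXT_INFMT1 carries the bit
+ * positions of the packed components and EXT_INFMT2 their lengths. For
+ * instance, the RGBA 10-10-10-2 layout programmed later in this patch uses
+ * PACK_CPOS(0, 10, 20, 30) together with PACK_CLEN(10, 10, 10, 2).
+ */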
+
+#define VI6_RPF_BRDITH_CTRL 0x03e0
+#define VI6_RPF_BRDITH_CTRL_ODE BIT(8)
+#define VI6_RPF_BRDITH_CTRL_CBRM BIT(0)
+
+/* -----------------------------------------------------------------------------
+ * WPF Control Registers
+ */
+
+#define VI6_WPF_OFFSET 0x100
+
+#define VI6_WPF_SRCRPF 0x1000
+#define VI6_WPF_SRCRPF_VIRACT_DIS (0 << 28)
+#define VI6_WPF_SRCRPF_VIRACT_SUB (1 << 28)
+#define VI6_WPF_SRCRPF_VIRACT_MST (2 << 28)
+#define VI6_WPF_SRCRPF_VIRACT_MASK (3 << 28)
+#define VI6_WPF_SRCRPF_VIRACT2_DIS (0 << 24)
+#define VI6_WPF_SRCRPF_VIRACT2_SUB (1 << 24)
+#define VI6_WPF_SRCRPF_VIRACT2_MST (2 << 24)
+#define VI6_WPF_SRCRPF_VIRACT2_MASK (3 << 24)
+#define VI6_WPF_SRCRPF_RPF_ACT_DIS(n) (0 << ((n) * 2))
+#define VI6_WPF_SRCRPF_RPF_ACT_SUB(n) (1 << ((n) * 2))
+#define VI6_WPF_SRCRPF_RPF_ACT_MST(n) (2 << ((n) * 2))
+#define VI6_WPF_SRCRPF_RPF_ACT_MASK(n) (3 << ((n) * 2))
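+
+/*
+ * Illustrative sketch (not part of this patch): the WPF source mask is
+ * typically built by marking each connected RPF as master or sub-layer,
+ * e.g. for a hypothetical blend of RPF0 and RPF2 behind a BRx virtual
+ * master input:
+ *
+ *	u32 srcrpf = VI6_WPF_SRCRPF_VIRACT_MST
+ *		   | VI6_WPF_SRCRPF_RPF_ACT_SUB(0)
+ *		   | VI6_WPF_SRCRPF_RPF_ACT_SUB(2);
+ */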
+
+#define VI6_WPF_HSZCLIP 0x1004
+#define VI6_WPF_VSZCLIP 0x1008
+#define VI6_WPF_SZCLIP_EN BIT(28)
+#define VI6_WPF_SZCLIP_OFST_MASK (0xff << 16)
+#define VI6_WPF_SZCLIP_OFST_SHIFT 16
+#define VI6_WPF_SZCLIP_SIZE_MASK (0xfff << 0)
+#define VI6_WPF_SZCLIP_SIZE_SHIFT 0
+
+#define VI6_WPF_OUTFMT 0x100c
+#define VI6_WPF_OUTFMT_PDV_MASK (0xff << 24)
+#define VI6_WPF_OUTFMT_PDV_SHIFT 24
+#define VI6_WPF_OUTFMT_PXA BIT(23)
+#define VI6_WPF_OUTFMT_ROT BIT(18)
+#define VI6_WPF_OUTFMT_HFLP BIT(17)
+#define VI6_WPF_OUTFMT_FLP BIT(16)
+#define VI6_WPF_OUTFMT_SPYCS BIT(15)
+#define VI6_WPF_OUTFMT_SPUVS BIT(14)
+#define VI6_WPF_OUTFMT_DITH_DIS (0 << 12)
+#define VI6_WPF_OUTFMT_DITH_EN (3 << 12)
+#define VI6_WPF_OUTFMT_DITH_MASK (3 << 12)
+#define VI6_WPF_OUTFMT_WRTM_BT601 (0 << 9)
+#define VI6_WPF_OUTFMT_WRTM_BT601_EXT (1 << 9)
+#define VI6_WPF_OUTFMT_WRTM_BT709 (2 << 9)
+#define VI6_WPF_OUTFMT_WRTM_BT709_EXT (3 << 9)
+#define VI6_WPF_OUTFMT_WRTM_MASK (7 << 9)
+#define VI6_WPF_OUTFMT_CSC BIT(8)
+#define VI6_WPF_OUTFMT_WRFMT_MASK (0x7f << 0)
+#define VI6_WPF_OUTFMT_WRFMT_SHIFT 0
+
+#define VI6_WPF_DSWAP 0x1010
+#define VI6_WPF_DSWAP_P_LLS BIT(3)
+#define VI6_WPF_DSWAP_P_LWS BIT(2)
+#define VI6_WPF_DSWAP_P_WDS BIT(1)
+#define VI6_WPF_DSWAP_P_BTS BIT(0)
+
+#define VI6_WPF_RNDCTRL 0x1014
+#define VI6_WPF_RNDCTRL_CBRM BIT(28)
+#define VI6_WPF_RNDCTRL_ABRM_TRUNC (0 << 24)
+#define VI6_WPF_RNDCTRL_ABRM_ROUND (1 << 24)
+#define VI6_WPF_RNDCTRL_ABRM_THRESH (2 << 24)
+#define VI6_WPF_RNDCTRL_ABRM_MASK (3 << 24)
+#define VI6_WPF_RNDCTRL_ATHRESH_MASK (0xff << 16)
+#define VI6_WPF_RNDCTRL_ATHRESH_SHIFT 16
+#define VI6_WPF_RNDCTRL_CLMD_FULL (0 << 12)
+#define VI6_WPF_RNDCTRL_CLMD_CLIP (1 << 12)
+#define VI6_WPF_RNDCTRL_CLMD_EXT (2 << 12)
+#define VI6_WPF_RNDCTRL_CLMD_MASK (3 << 12)
+
+#define VI6_WPF_ROT_CTRL 0x1018
+#define VI6_WPF_ROT_CTRL_LN16 BIT(17)
+#define VI6_WPF_ROT_CTRL_LMEM_WD_MASK (0x1fff << 0)
+#define VI6_WPF_ROT_CTRL_LMEM_WD_SHIFT 0
+
+#define VI6_WPF_DSTM_STRIDE_Y 0x101c
+#define VI6_WPF_DSTM_STRIDE_C 0x1020
+#define VI6_WPF_DSTM_ADDR_Y 0x1024
+#define VI6_WPF_DSTM_ADDR_C0 0x1028
+#define VI6_WPF_DSTM_ADDR_C1 0x102c
+
+#define VI6_WPF_WRBCK_CTRL(n) (0x1034 + (n) * 0x100)
+#define VI6_WPF_WRBCK_CTRL_WBMD BIT(0)
+
+/* -----------------------------------------------------------------------------
+ * UIF Control Registers
+ */
+
+#define VI6_UIF_OFFSET 0x100
+
+#define VI6_UIF_DISCOM_DOCMCR 0x1c00
+#define VI6_UIF_DISCOM_DOCMCR_CMPRU BIT(16)
+#define VI6_UIF_DISCOM_DOCMCR_CMPR BIT(0)
+
+#define VI6_UIF_DISCOM_DOCMSTR 0x1c04
+#define VI6_UIF_DISCOM_DOCMSTR_CMPPRE BIT(1)
+#define VI6_UIF_DISCOM_DOCMSTR_CMPST BIT(0)
+
+#define VI6_UIF_DISCOM_DOCMCLSTR 0x1c08
+#define VI6_UIF_DISCOM_DOCMCLSTR_CMPCLPRE BIT(1)
+#define VI6_UIF_DISCOM_DOCMCLSTR_CMPCLST BIT(0)
+
+#define VI6_UIF_DISCOM_DOCMIENR 0x1c0c
+#define VI6_UIF_DISCOM_DOCMIENR_CMPPREIEN BIT(1)
+#define VI6_UIF_DISCOM_DOCMIENR_CMPIEN BIT(0)
+
+#define VI6_UIF_DISCOM_DOCMMDR 0x1c10
+#define VI6_UIF_DISCOM_DOCMMDR_INTHRH(n) ((n) << 16)
+
+#define VI6_UIF_DISCOM_DOCMPMR 0x1c14
+#define VI6_UIF_DISCOM_DOCMPMR_CMPDFF(n) ((n) << 17)
+#define VI6_UIF_DISCOM_DOCMPMR_CMPDFA(n) ((n) << 8)
+#define VI6_UIF_DISCOM_DOCMPMR_CMPDAUF BIT(7)
+#define VI6_UIF_DISCOM_DOCMPMR_SEL(n) ((n) << 0)
+
+#define VI6_UIF_DISCOM_DOCMECRCR 0x1c18
+#define VI6_UIF_DISCOM_DOCMCCRCR 0x1c1c
+#define VI6_UIF_DISCOM_DOCMSPXR 0x1c20
+#define VI6_UIF_DISCOM_DOCMSPYR 0x1c24
+#define VI6_UIF_DISCOM_DOCMSZXR 0x1c28
+#define VI6_UIF_DISCOM_DOCMSZYR 0x1c2c
+
+/* -----------------------------------------------------------------------------
+ * DPR Control Registers
+ */
+
+#define VI6_DPR_RPF_ROUTE(n) (0x2000 + (n) * 4)
+
+#define VI6_DPR_WPF_FPORCH(n) (0x2014 + (n) * 4)
+#define VI6_DPR_WPF_FPORCH_FP_WPFN (5 << 8)
+
+#define VI6_DPR_SRU_ROUTE 0x2024
+#define VI6_DPR_UDS_ROUTE(n) (0x2028 + (n) * 4)
+#define VI6_DPR_LUT_ROUTE 0x203c
+#define VI6_DPR_CLU_ROUTE 0x2040
+#define VI6_DPR_HST_ROUTE 0x2044
+#define VI6_DPR_HSI_ROUTE 0x2048
+#define VI6_DPR_BRU_ROUTE 0x204c
+#define VI6_DPR_ILV_BRS_ROUTE 0x2050
+#define VI6_DPR_ROUTE_BRSSEL BIT(28)
+#define VI6_DPR_ROUTE_FXA_MASK (0xff << 16)
+#define VI6_DPR_ROUTE_FXA_SHIFT 16
+#define VI6_DPR_ROUTE_FP_MASK (0x3f << 8)
+#define VI6_DPR_ROUTE_FP_SHIFT 8
+#define VI6_DPR_ROUTE_RT_MASK (0x3f << 0)
+#define VI6_DPR_ROUTE_RT_SHIFT 0
+
+#define VI6_DPR_HGO_SMPPT 0x2054
+#define VI6_DPR_HGT_SMPPT 0x2058
+#define VI6_DPR_SMPPT_TGW_MASK (7 << 8)
+#define VI6_DPR_SMPPT_TGW_SHIFT 8
+#define VI6_DPR_SMPPT_PT_MASK (0x3f << 0)
+#define VI6_DPR_SMPPT_PT_SHIFT 0
+
+#define VI6_DPR_UIF_ROUTE(n) (0x2074 + (n) * 4)
+
+#define VI6_DPR_NODE_RPF(n) (n)
+#define VI6_DPR_NODE_UIF(n) (12 + (n))
+#define VI6_DPR_NODE_SRU 16
+#define VI6_DPR_NODE_UDS(n) (17 + (n))
+#define VI6_DPR_NODE_LUT 22
+#define VI6_DPR_NODE_BRU_IN(n) (((n) <= 3) ? 23 + (n) : 49)
+#define VI6_DPR_NODE_BRU_OUT 27
+#define VI6_DPR_NODE_CLU 29
+#define VI6_DPR_NODE_HST 30
+#define VI6_DPR_NODE_HSI 31
+#define VI6_DPR_NODE_BRS_IN(n) (38 + (n))
+#define VI6_DPR_NODE_LIF 55 /* Gen2 only */
+#define VI6_DPR_NODE_WPF(n) (56 + (n))
+#define VI6_DPR_NODE_UNUSED 63
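+
+/*
+ * Illustrative sketch (not part of this patch): the DPR connects entities by
+ * writing the node index of the downstream entity into the route register of
+ * the upstream one, typically through a display list body. Routing RPF0 to
+ * the second BRU input while leaving RPF1 unconnected could look like:
+ *
+ *	vsp1_dl_body_write(dlb, VI6_DPR_RPF_ROUTE(0), VI6_DPR_NODE_BRU_IN(1));
+ *	vsp1_dl_body_write(dlb, VI6_DPR_RPF_ROUTE(1), VI6_DPR_NODE_UNUSED);
+ */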
+
+/* -----------------------------------------------------------------------------
+ * SRU Control Registers
+ */
+
+#define VI6_SRU_CTRL0 0x2200
+#define VI6_SRU_CTRL0_PARAM0_MASK (0x1ff << 16)
+#define VI6_SRU_CTRL0_PARAM0_SHIFT 16
+#define VI6_SRU_CTRL0_PARAM1_MASK (0x1f << 8)
+#define VI6_SRU_CTRL0_PARAM1_SHIFT 8
+#define VI6_SRU_CTRL0_MODE_UPSCALE (4 << 4)
+#define VI6_SRU_CTRL0_PARAM2 BIT(3)
+#define VI6_SRU_CTRL0_PARAM3 BIT(2)
+#define VI6_SRU_CTRL0_PARAM4 BIT(1)
+#define VI6_SRU_CTRL0_EN BIT(0)
+
+#define VI6_SRU_CTRL1 0x2204
+#define VI6_SRU_CTRL1_PARAM5 0x7ff
+
+#define VI6_SRU_CTRL2 0x2208
+#define VI6_SRU_CTRL2_PARAM6_SHIFT 16
+#define VI6_SRU_CTRL2_PARAM7_SHIFT 8
+#define VI6_SRU_CTRL2_PARAM8_SHIFT 0
+
+/* -----------------------------------------------------------------------------
+ * UDS Control Registers
+ */
+
+#define VI6_UDS_OFFSET 0x100
+
+#define VI6_UDS_CTRL 0x2300
+#define VI6_UDS_CTRL_AMD BIT(30)
+#define VI6_UDS_CTRL_FMD BIT(29)
+#define VI6_UDS_CTRL_BLADV BIT(28)
+#define VI6_UDS_CTRL_AON BIT(25)
+#define VI6_UDS_CTRL_ATHON BIT(24)
+#define VI6_UDS_CTRL_BC BIT(20)
+#define VI6_UDS_CTRL_NE_A BIT(19)
+#define VI6_UDS_CTRL_NE_RCR BIT(18)
+#define VI6_UDS_CTRL_NE_GY BIT(17)
+#define VI6_UDS_CTRL_NE_BCB BIT(16)
+#define VI6_UDS_CTRL_AMDSLH BIT(2)
+#define VI6_UDS_CTRL_TDIPC BIT(1)
+
+#define VI6_UDS_SCALE 0x2304
+#define VI6_UDS_SCALE_HMANT_MASK (0xf << 28)
+#define VI6_UDS_SCALE_HMANT_SHIFT 28
+#define VI6_UDS_SCALE_HFRAC_MASK (0xfff << 16)
+#define VI6_UDS_SCALE_HFRAC_SHIFT 16
+#define VI6_UDS_SCALE_VMANT_MASK (0xf << 12)
+#define VI6_UDS_SCALE_VMANT_SHIFT 12
+#define VI6_UDS_SCALE_VFRAC_MASK (0xfff << 0)
+#define VI6_UDS_SCALE_VFRAC_SHIFT 0
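+
+/*
+ * Illustrative note (not part of this patch): the scaling factors are 4.12
+ * fixed-point input/output ratios split into the mantissa and fraction
+ * fields above. A hypothetical 2:1 horizontal downscale (ratio 0x2000)
+ * would thus be encoded as:
+ *
+ *	u32 hscale = (0x2 << VI6_UDS_SCALE_HMANT_SHIFT)
+ *		   | (0x000 << VI6_UDS_SCALE_HFRAC_SHIFT);
+ */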
+
+#define VI6_UDS_ALPTH 0x2308
+#define VI6_UDS_ALPTH_TH1_MASK (0xff << 8)
+#define VI6_UDS_ALPTH_TH1_SHIFT 8
+#define VI6_UDS_ALPTH_TH0_MASK (0xff << 0)
+#define VI6_UDS_ALPTH_TH0_SHIFT 0
+
+#define VI6_UDS_ALPVAL 0x230c
+#define VI6_UDS_ALPVAL_VAL2_MASK (0xff << 16)
+#define VI6_UDS_ALPVAL_VAL2_SHIFT 16
+#define VI6_UDS_ALPVAL_VAL1_MASK (0xff << 8)
+#define VI6_UDS_ALPVAL_VAL1_SHIFT 8
+#define VI6_UDS_ALPVAL_VAL0_MASK (0xff << 0)
+#define VI6_UDS_ALPVAL_VAL0_SHIFT 0
+
+#define VI6_UDS_PASS_BWIDTH 0x2310
+#define VI6_UDS_PASS_BWIDTH_H_MASK (0x7f << 16)
+#define VI6_UDS_PASS_BWIDTH_H_SHIFT 16
+#define VI6_UDS_PASS_BWIDTH_V_MASK (0x7f << 0)
+#define VI6_UDS_PASS_BWIDTH_V_SHIFT 0
+
+#define VI6_UDS_HPHASE 0x2314
+#define VI6_UDS_HPHASE_HSTP_MASK (0xfff << 16)
+#define VI6_UDS_HPHASE_HSTP_SHIFT 16
+#define VI6_UDS_HPHASE_HEDP_MASK (0xfff << 0)
+#define VI6_UDS_HPHASE_HEDP_SHIFT 0
+
+#define VI6_UDS_IPC 0x2318
+#define VI6_UDS_IPC_FIELD BIT(27)
+#define VI6_UDS_IPC_VEDP_MASK (0xfff << 0)
+#define VI6_UDS_IPC_VEDP_SHIFT 0
+
+#define VI6_UDS_HSZCLIP 0x231c
+#define VI6_UDS_HSZCLIP_HCEN BIT(28)
+#define VI6_UDS_HSZCLIP_HCL_OFST_MASK (0xff << 16)
+#define VI6_UDS_HSZCLIP_HCL_OFST_SHIFT 16
+#define VI6_UDS_HSZCLIP_HCL_SIZE_MASK (0x1fff << 0)
+#define VI6_UDS_HSZCLIP_HCL_SIZE_SHIFT 0
+
+#define VI6_UDS_CLIP_SIZE 0x2324
+#define VI6_UDS_CLIP_SIZE_HSIZE_MASK (0x1fff << 16)
+#define VI6_UDS_CLIP_SIZE_HSIZE_SHIFT 16
+#define VI6_UDS_CLIP_SIZE_VSIZE_MASK (0x1fff << 0)
+#define VI6_UDS_CLIP_SIZE_VSIZE_SHIFT 0
+
+#define VI6_UDS_FILL_COLOR 0x2328
+#define VI6_UDS_FILL_COLOR_RFILC_MASK (0xff << 16)
+#define VI6_UDS_FILL_COLOR_RFILC_SHIFT 16
+#define VI6_UDS_FILL_COLOR_GFILC_MASK (0xff << 8)
+#define VI6_UDS_FILL_COLOR_GFILC_SHIFT 8
+#define VI6_UDS_FILL_COLOR_BFILC_MASK (0xff << 0)
+#define VI6_UDS_FILL_COLOR_BFILC_SHIFT 0
+
+/* -----------------------------------------------------------------------------
+ * LUT Control Registers
+ */
+
+#define VI6_LUT_CTRL 0x2800
+#define VI6_LUT_CTRL_EN BIT(0)
+
+/* -----------------------------------------------------------------------------
+ * CLU Control Registers
+ */
+
+#define VI6_CLU_CTRL 0x2900
+#define VI6_CLU_CTRL_AAI BIT(28)
+#define VI6_CLU_CTRL_MVS BIT(24)
+#define VI6_CLU_CTRL_AX1I_2D (3 << 14)
+#define VI6_CLU_CTRL_AX2I_2D (1 << 12)
+#define VI6_CLU_CTRL_OS0_2D (3 << 8)
+#define VI6_CLU_CTRL_OS1_2D (1 << 6)
+#define VI6_CLU_CTRL_OS2_2D (3 << 4)
+#define VI6_CLU_CTRL_M2D BIT(1)
+#define VI6_CLU_CTRL_EN BIT(0)
+
+/* -----------------------------------------------------------------------------
+ * HST Control Registers
+ */
+
+#define VI6_HST_CTRL 0x2a00
+#define VI6_HST_CTRL_EN BIT(0)
+
+/* -----------------------------------------------------------------------------
+ * HSI Control Registers
+ */
+
+#define VI6_HSI_CTRL 0x2b00
+#define VI6_HSI_CTRL_EN BIT(0)
+
+/* -----------------------------------------------------------------------------
+ * BRS and BRU Control Registers
+ */
+
+#define VI6_ROP_NOP 0
+#define VI6_ROP_AND 1
+#define VI6_ROP_AND_REV 2
+#define VI6_ROP_COPY 3
+#define VI6_ROP_AND_INV 4
+#define VI6_ROP_CLEAR 5
+#define VI6_ROP_XOR 6
+#define VI6_ROP_OR 7
+#define VI6_ROP_NOR 8
+#define VI6_ROP_EQUIV 9
+#define VI6_ROP_INVERT 10
+#define VI6_ROP_OR_REV 11
+#define VI6_ROP_COPY_INV 12
+#define VI6_ROP_OR_INV 13
+#define VI6_ROP_NAND 14
+#define VI6_ROP_SET 15
+
+#define VI6_BRU_BASE 0x2c00
+#define VI6_BRS_BASE 0x3900
+
+#define VI6_BRU_INCTRL 0x0000
+#define VI6_BRU_INCTRL_NRM BIT(28)
+#define VI6_BRU_INCTRL_DnON(n)		(1 << (16 + (n)))
+#define VI6_BRU_INCTRL_DITHn_OFF(n)	(0 << ((n) * 4))
+#define VI6_BRU_INCTRL_DITHn_18BPP(n)	(1 << ((n) * 4))
+#define VI6_BRU_INCTRL_DITHn_16BPP(n)	(2 << ((n) * 4))
+#define VI6_BRU_INCTRL_DITHn_15BPP(n)	(3 << ((n) * 4))
+#define VI6_BRU_INCTRL_DITHn_12BPP(n)	(4 << ((n) * 4))
+#define VI6_BRU_INCTRL_DITHn_8BPP(n)	(5 << ((n) * 4))
+#define VI6_BRU_INCTRL_DITHn_MASK(n)	(7 << ((n) * 4))
+#define VI6_BRU_INCTRL_DITHn_SHIFT(n)	((n) * 4)
+
+#define VI6_BRU_VIRRPF_SIZE 0x0004
+#define VI6_BRU_VIRRPF_SIZE_HSIZE_MASK (0x1fff << 16)
+#define VI6_BRU_VIRRPF_SIZE_HSIZE_SHIFT 16
+#define VI6_BRU_VIRRPF_SIZE_VSIZE_MASK (0x1fff << 0)
+#define VI6_BRU_VIRRPF_SIZE_VSIZE_SHIFT 0
+
+#define VI6_BRU_VIRRPF_LOC 0x0008
+#define VI6_BRU_VIRRPF_LOC_HCOORD_MASK (0x1fff << 16)
+#define VI6_BRU_VIRRPF_LOC_HCOORD_SHIFT 16
+#define VI6_BRU_VIRRPF_LOC_VCOORD_MASK (0x1fff << 0)
+#define VI6_BRU_VIRRPF_LOC_VCOORD_SHIFT 0
+
+#define VI6_BRU_VIRRPF_COL 0x000c
+#define VI6_BRU_VIRRPF_COL_A_MASK (0xff << 24)
+#define VI6_BRU_VIRRPF_COL_A_SHIFT 24
+#define VI6_BRU_VIRRPF_COL_RCR_MASK (0xff << 16)
+#define VI6_BRU_VIRRPF_COL_RCR_SHIFT 16
+#define VI6_BRU_VIRRPF_COL_GY_MASK (0xff << 8)
+#define VI6_BRU_VIRRPF_COL_GY_SHIFT 8
+#define VI6_BRU_VIRRPF_COL_BCB_MASK (0xff << 0)
+#define VI6_BRU_VIRRPF_COL_BCB_SHIFT 0
+
+#define VI6_BRU_CTRL(n) (0x0010 + (n) * 8 + ((n) <= 3 ? 0 : 4))
+#define VI6_BRU_CTRL_RBC BIT(31)
+#define VI6_BRU_CTRL_DSTSEL_BRUIN(n) (((n) <= 3 ? (n) : (n)+1) << 20)
+#define VI6_BRU_CTRL_DSTSEL_VRPF (4 << 20)
+#define VI6_BRU_CTRL_DSTSEL_MASK (7 << 20)
+#define VI6_BRU_CTRL_SRCSEL_BRUIN(n) (((n) <= 3 ? (n) : (n)+1) << 16)
+#define VI6_BRU_CTRL_SRCSEL_VRPF (4 << 16)
+#define VI6_BRU_CTRL_SRCSEL_MASK (7 << 16)
+#define VI6_BRU_CTRL_CROP(rop) ((rop) << 4)
+#define VI6_BRU_CTRL_CROP_MASK (0xf << 4)
+#define VI6_BRU_CTRL_AROP(rop) ((rop) << 0)
+#define VI6_BRU_CTRL_AROP_MASK (0xf << 0)
+
+#define VI6_BRU_BLD(n) (0x0014 + (n) * 8 + ((n) <= 3 ? 0 : 4))
+#define VI6_BRU_BLD_CBES BIT(31)
+#define VI6_BRU_BLD_CCMDX_DST_A (0 << 28)
+#define VI6_BRU_BLD_CCMDX_255_DST_A (1 << 28)
+#define VI6_BRU_BLD_CCMDX_SRC_A (2 << 28)
+#define VI6_BRU_BLD_CCMDX_255_SRC_A (3 << 28)
+#define VI6_BRU_BLD_CCMDX_COEFX (4 << 28)
+#define VI6_BRU_BLD_CCMDX_MASK (7 << 28)
+#define VI6_BRU_BLD_CCMDY_DST_A (0 << 24)
+#define VI6_BRU_BLD_CCMDY_255_DST_A (1 << 24)
+#define VI6_BRU_BLD_CCMDY_SRC_A (2 << 24)
+#define VI6_BRU_BLD_CCMDY_255_SRC_A (3 << 24)
+#define VI6_BRU_BLD_CCMDY_COEFY (4 << 24)
+#define VI6_BRU_BLD_CCMDY_MASK (7 << 24)
+#define VI6_BRU_BLD_CCMDY_SHIFT 24
+#define VI6_BRU_BLD_ABES BIT(23)
+#define VI6_BRU_BLD_ACMDX_DST_A (0 << 20)
+#define VI6_BRU_BLD_ACMDX_255_DST_A (1 << 20)
+#define VI6_BRU_BLD_ACMDX_SRC_A (2 << 20)
+#define VI6_BRU_BLD_ACMDX_255_SRC_A (3 << 20)
+#define VI6_BRU_BLD_ACMDX_COEFX (4 << 20)
+#define VI6_BRU_BLD_ACMDX_MASK (7 << 20)
+#define VI6_BRU_BLD_ACMDY_DST_A (0 << 16)
+#define VI6_BRU_BLD_ACMDY_255_DST_A (1 << 16)
+#define VI6_BRU_BLD_ACMDY_SRC_A (2 << 16)
+#define VI6_BRU_BLD_ACMDY_255_SRC_A (3 << 16)
+#define VI6_BRU_BLD_ACMDY_COEFY (4 << 16)
+#define VI6_BRU_BLD_ACMDY_MASK (7 << 16)
+#define VI6_BRU_BLD_COEFX_MASK (0xff << 8)
+#define VI6_BRU_BLD_COEFX_SHIFT 8
+#define VI6_BRU_BLD_COEFY_MASK (0xff << 0)
+#define VI6_BRU_BLD_COEFY_SHIFT 0
+
+#define VI6_BRU_ROP 0x0030 /* Only available on BRU */
+#define VI6_BRU_ROP_DSTSEL_BRUIN(n) (((n) <= 3 ? (n) : (n)+1) << 20)
+#define VI6_BRU_ROP_DSTSEL_VRPF (4 << 20)
+#define VI6_BRU_ROP_DSTSEL_MASK (7 << 20)
+#define VI6_BRU_ROP_CROP(rop) ((rop) << 4)
+#define VI6_BRU_ROP_CROP_MASK (0xf << 4)
+#define VI6_BRU_ROP_AROP(rop) ((rop) << 0)
+#define VI6_BRU_ROP_AROP_MASK (0xf << 0)
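+
+/*
+ * Note (illustrative only): the "(n) <= 3 ? 0 : 4" term in VI6_BRU_CTRL(n)
+ * and VI6_BRU_BLD(n) skips over the VI6_BRU_ROP register, which sits between
+ * the registers of inputs 3 and 4:
+ *
+ *	VI6_BRU_CTRL(3) = 0x0028, VI6_BRU_BLD(3) = 0x002c,
+ *	VI6_BRU_ROP     = 0x0030,
+ *	VI6_BRU_CTRL(4) = 0x0034, VI6_BRU_BLD(4) = 0x0038
+ */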
+
+/* -----------------------------------------------------------------------------
+ * HGO Control Registers
+ */
+
+#define VI6_HGO_OFFSET 0x3000
+#define VI6_HGO_OFFSET_HOFFSET_SHIFT 16
+#define VI6_HGO_OFFSET_VOFFSET_SHIFT 0
+#define VI6_HGO_SIZE 0x3004
+#define VI6_HGO_SIZE_HSIZE_SHIFT 16
+#define VI6_HGO_SIZE_VSIZE_SHIFT 0
+#define VI6_HGO_MODE 0x3008
+#define VI6_HGO_MODE_STEP BIT(10)
+#define VI6_HGO_MODE_MAXRGB BIT(7)
+#define VI6_HGO_MODE_OFSB_R BIT(6)
+#define VI6_HGO_MODE_OFSB_G BIT(5)
+#define VI6_HGO_MODE_OFSB_B BIT(4)
+#define VI6_HGO_MODE_HRATIO_SHIFT 2
+#define VI6_HGO_MODE_VRATIO_SHIFT 0
+#define VI6_HGO_LB_TH 0x300c
+#define VI6_HGO_LBn_H(n) (0x3010 + (n) * 8)
+#define VI6_HGO_LBn_V(n) (0x3014 + (n) * 8)
+#define VI6_HGO_R_HISTO(n) (0x3030 + (n) * 4)
+#define VI6_HGO_R_MAXMIN 0x3130
+#define VI6_HGO_R_SUM 0x3134
+#define VI6_HGO_R_LB_DET 0x3138
+#define VI6_HGO_G_HISTO(n) (0x3140 + (n) * 4)
+#define VI6_HGO_G_MAXMIN 0x3240
+#define VI6_HGO_G_SUM 0x3244
+#define VI6_HGO_G_LB_DET 0x3248
+#define VI6_HGO_B_HISTO(n) (0x3250 + (n) * 4)
+#define VI6_HGO_B_MAXMIN 0x3350
+#define VI6_HGO_B_SUM 0x3354
+#define VI6_HGO_B_LB_DET 0x3358
+#define VI6_HGO_EXT_HIST_ADDR 0x335c
+#define VI6_HGO_EXT_HIST_DATA 0x3360
+#define VI6_HGO_REGRST 0x33fc
+#define VI6_HGO_REGRST_RCLEA BIT(0)
+
+/* -----------------------------------------------------------------------------
+ * HGT Control Registers
+ */
+
+#define VI6_HGT_OFFSET 0x3400
+#define VI6_HGT_OFFSET_HOFFSET_SHIFT 16
+#define VI6_HGT_OFFSET_VOFFSET_SHIFT 0
+#define VI6_HGT_SIZE 0x3404
+#define VI6_HGT_SIZE_HSIZE_SHIFT 16
+#define VI6_HGT_SIZE_VSIZE_SHIFT 0
+#define VI6_HGT_MODE 0x3408
+#define VI6_HGT_MODE_HRATIO_SHIFT 2
+#define VI6_HGT_MODE_VRATIO_SHIFT 0
+#define VI6_HGT_HUE_AREA(n) (0x340c + (n) * 4)
+#define VI6_HGT_HUE_AREA_LOWER_SHIFT 16
+#define VI6_HGT_HUE_AREA_UPPER_SHIFT 0
+#define VI6_HGT_LB_TH 0x3424
+#define VI6_HGT_LBn_H(n) (0x3428 + (n) * 8)
+#define VI6_HGT_LBn_V(n) (0x342c + (n) * 8)
+#define VI6_HGT_HISTO(m, n) (0x3450 + (m) * 128 + (n) * 4)
+#define VI6_HGT_MAXMIN 0x3750
+#define VI6_HGT_SUM 0x3754
+#define VI6_HGT_LB_DET 0x3758
+#define VI6_HGT_REGRST 0x37fc
+#define VI6_HGT_REGRST_RCLEA BIT(0)
+
+/* -----------------------------------------------------------------------------
+ * LIF Control Registers
+ */
+
+#define VI6_LIF_OFFSET (-0x100)
+
+#define VI6_LIF_CTRL 0x3b00
+#define VI6_LIF_CTRL_OBTH_MASK (0x7ff << 16)
+#define VI6_LIF_CTRL_OBTH_SHIFT 16
+#define VI6_LIF_CTRL_CFMT BIT(4)
+#define VI6_LIF_CTRL_REQSEL BIT(1)
+#define VI6_LIF_CTRL_LIF_EN BIT(0)
+
+#define VI6_LIF_CSBTH 0x3b04
+#define VI6_LIF_CSBTH_HBTH_MASK (0x7ff << 16)
+#define VI6_LIF_CSBTH_HBTH_SHIFT 16
+#define VI6_LIF_CSBTH_LBTH_MASK (0x7ff << 0)
+#define VI6_LIF_CSBTH_LBTH_SHIFT 0
+
+#define VI6_LIF_LBA 0x3b0c
+#define VI6_LIF_LBA_LBA0 BIT(31)
+#define VI6_LIF_LBA_LBA1_MASK (0xfff << 16)
+#define VI6_LIF_LBA_LBA1_SHIFT 16
+
+/* -----------------------------------------------------------------------------
+ * Security Control Registers
+ */
+
+#define VI6_SECURITY_CTRL0 0x3d00
+#define VI6_SECURITY_CTRL1 0x3d04
+
+/* -----------------------------------------------------------------------------
+ * IP Version Registers
+ */
+
+#define VI6_IP_VERSION 0x3f00
+#define VI6_IP_VERSION_MASK (0xffff << 0)
+#define VI6_IP_VERSION_MODEL_MASK (0xff << 8)
+#define VI6_IP_VERSION_MODEL_VSPS_H2 (0x09 << 8)
+#define VI6_IP_VERSION_MODEL_VSPR_H2 (0x0a << 8)
+#define VI6_IP_VERSION_MODEL_VSPD_GEN2 (0x0b << 8)
+#define VI6_IP_VERSION_MODEL_VSPS_M2 (0x0c << 8)
+#define VI6_IP_VERSION_MODEL_VSPS_V2H (0x12 << 8)
+#define VI6_IP_VERSION_MODEL_VSPD_V2H (0x13 << 8)
+#define VI6_IP_VERSION_MODEL_VSPI_GEN3 (0x14 << 8)
+#define VI6_IP_VERSION_MODEL_VSPBD_GEN3 (0x15 << 8)
+#define VI6_IP_VERSION_MODEL_VSPBC_GEN3 (0x16 << 8)
+#define VI6_IP_VERSION_MODEL_VSPD_GEN3 (0x17 << 8)
+#define VI6_IP_VERSION_MODEL_VSPD_V3 (0x18 << 8)
+#define VI6_IP_VERSION_MODEL_VSPDL_GEN3 (0x19 << 8)
+#define VI6_IP_VERSION_MODEL_VSPBS_GEN3 (0x1a << 8)
+#define VI6_IP_VERSION_MODEL_VSPD_GEN4 (0x1c << 8)
+/* RZ/G2L SoCs have no version register, so use 0x80 as the model version */
+#define VI6_IP_VERSION_MODEL_VSPD_RZG2L (0x80 << 8)
+
+#define VI6_IP_VERSION_SOC_MASK (0xff << 0)
+#define VI6_IP_VERSION_SOC_H2 (0x01 << 0)
+#define VI6_IP_VERSION_SOC_V2H (0x01 << 0)
+#define VI6_IP_VERSION_SOC_V3M (0x01 << 0)
+#define VI6_IP_VERSION_SOC_M2 (0x02 << 0)
+#define VI6_IP_VERSION_SOC_M3W (0x02 << 0)
+#define VI6_IP_VERSION_SOC_V3H (0x02 << 0)
+#define VI6_IP_VERSION_SOC_H3 (0x03 << 0)
+#define VI6_IP_VERSION_SOC_D3 (0x04 << 0)
+#define VI6_IP_VERSION_SOC_M3N (0x04 << 0)
+#define VI6_IP_VERSION_SOC_E3 (0x04 << 0)
+#define VI6_IP_VERSION_SOC_V3U (0x05 << 0)
+#define VI6_IP_VERSION_SOC_V4H (0x06 << 0)
+/* RZ/G2L SoCs have no version register, so use 0x80 for SoC identification */
+#define VI6_IP_VERSION_SOC_RZG2L (0x80 << 0)
+
+#define VI6_IP_VERSION_VSP_SW (0xfffe << 16) /* SW VSP version */
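+
+/*
+ * Illustrative sketch (not part of this patch): the model and SoC fields are
+ * decoded independently from the version register, e.g.:
+ *
+ *	u32 version = vsp1_read(vsp1, VI6_IP_VERSION);
+ *	bool is_vspd_gen3 = (version & VI6_IP_VERSION_MODEL_MASK)
+ *			  == VI6_IP_VERSION_MODEL_VSPD_GEN3;
+ *
+ * vsp1_read() is assumed here for illustration; only the register layout is
+ * taken from this header.
+ */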
+
+/* -----------------------------------------------------------------------------
+ * RPF CLUT Registers
+ */
+
+#define VI6_CLUT_TABLE 0x4000
+
+/* -----------------------------------------------------------------------------
+ * 1D LUT Registers
+ */
+
+#define VI6_LUT_TABLE 0x7000
+
+/* -----------------------------------------------------------------------------
+ * 3D LUT Registers
+ */
+
+#define VI6_CLU_ADDR 0x7400
+#define VI6_CLU_DATA 0x7404
+
+/* -----------------------------------------------------------------------------
+ * Formats
+ */
+
+#define VI6_FMT_RGB_332 0x00
+#define VI6_FMT_XRGB_4444 0x01
+#define VI6_FMT_RGBX_4444 0x02
+#define VI6_FMT_XRGB_1555 0x04
+#define VI6_FMT_RGBX_5551 0x05
+#define VI6_FMT_RGB_565 0x06
+#define VI6_FMT_AXRGB_86666 0x07
+#define VI6_FMT_RGBXA_66668 0x08
+#define VI6_FMT_XRGBA_66668 0x09
+#define VI6_FMT_ARGBX_86666 0x0a
+#define VI6_FMT_AXRXGXB_8262626 0x0b
+#define VI6_FMT_XRXGXBA_2626268 0x0c
+#define VI6_FMT_ARXGXBX_8626262 0x0d
+#define VI6_FMT_RXGXBXA_6262628 0x0e
+#define VI6_FMT_XRGB_6666 0x0f
+#define VI6_FMT_RGBX_6666 0x10
+#define VI6_FMT_XRXGXB_262626 0x11
+#define VI6_FMT_RXGXBX_626262 0x12
+#define VI6_FMT_ARGB_8888 0x13
+#define VI6_FMT_RGBA_8888 0x14
+#define VI6_FMT_RGB_888 0x15
+#define VI6_FMT_XRGXGB_763763 0x16
+#define VI6_FMT_XXRGB_86666 0x17
+#define VI6_FMT_BGR_888 0x18
+#define VI6_FMT_ARGB_4444 0x19
+#define VI6_FMT_RGBA_4444 0x1a
+#define VI6_FMT_ARGB_1555 0x1b
+#define VI6_FMT_RGBA_5551 0x1c
+#define VI6_FMT_ABGR_4444 0x1d
+#define VI6_FMT_BGRA_4444 0x1e
+#define VI6_FMT_ABGR_1555 0x1f
+#define VI6_FMT_BGRA_5551 0x20
+#define VI6_FMT_XBXGXR_262626 0x21
+#define VI6_FMT_ABGR_8888 0x22
+#define VI6_FMT_XXRGB_88565 0x23
+#define VI6_FMT_RGB10_RGB10A2_A2RGB10 0x30
+
+#define VI6_FMT_Y_UV_444 0x40
+#define VI6_FMT_Y_UV_422 0x41
+#define VI6_FMT_Y_UV_420 0x42
+#define VI6_FMT_YUV_444 0x46
+#define VI6_FMT_YUYV_422 0x47
+#define VI6_FMT_YYUV_422 0x48
+#define VI6_FMT_YUV_420 0x49
+#define VI6_FMT_Y_U_V_444 0x4a
+#define VI6_FMT_Y_U_V_422 0x4b
+#define VI6_FMT_Y_U_V_420 0x4c
+
+#endif /* __VSP1_REGS_H__ */
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_rpf.c b/drivers/media/platform/renesas/vsp1/vsp1_rpf.c
new file mode 100644
index 0000000000..3b17f5fa40
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_rpf.c
@@ -0,0 +1,434 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_rpf.c -- R-Car VSP1 Read Pixel Formatter
+ *
+ * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/device.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_dl.h"
+#include "vsp1_pipe.h"
+#include "vsp1_rwpf.h"
+#include "vsp1_video.h"
+
+#define RPF_MAX_WIDTH 8190
+#define RPF_MAX_HEIGHT 8190
+
+/* Body of the pre command used with extended display lists. */
+struct vsp1_extcmd_auto_fld_body {
+ u32 top_y0;
+ u32 bottom_y0;
+ u32 top_c0;
+ u32 bottom_c0;
+ u32 top_c1;
+ u32 bottom_c1;
+ u32 reserved0;
+ u32 reserved1;
+} __packed;
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline void vsp1_rpf_write(struct vsp1_rwpf *rpf,
+ struct vsp1_dl_body *dlb, u32 reg, u32 data)
+{
+ vsp1_dl_body_write(dlb, reg + rpf->entity.index * VI6_RPF_OFFSET,
+ data);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static const struct v4l2_subdev_ops rpf_ops = {
+ .pad = &vsp1_rwpf_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void rpf_configure_stream(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb)
+{
+ struct vsp1_rwpf *rpf = to_rwpf(&entity->subdev);
+ const struct vsp1_format_info *fmtinfo = rpf->fmtinfo;
+ const struct v4l2_pix_format_mplane *format = &rpf->format;
+ const struct v4l2_mbus_framefmt *source_format;
+ const struct v4l2_mbus_framefmt *sink_format;
+ unsigned int left = 0;
+ unsigned int top = 0;
+ u32 pstride;
+ u32 infmt;
+
+ /* Stride */
+ pstride = format->plane_fmt[0].bytesperline
+ << VI6_RPF_SRCM_PSTRIDE_Y_SHIFT;
+ if (format->num_planes > 1)
+ pstride |= format->plane_fmt[1].bytesperline
+ << VI6_RPF_SRCM_PSTRIDE_C_SHIFT;
+
+ /*
+ * pstride has both STRIDE_Y and STRIDE_C, but multiplying the whole
+ * of pstride by 2 is conveniently OK here as we are multiplying both
+ * values.
+ */
+ if (pipe->interlaced)
+ pstride *= 2;
+
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_SRCM_PSTRIDE, pstride);
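+
+	/*
+	 * Worked example (illustrative only): with 1024-byte Y and C strides,
+	 * pstride is 0x04000400; doubling it for interlaced scan yields
+	 * 0x08000800, i.e. 2048 bytes in both fields, since no carry crosses
+	 * the 16-bit field boundary for sane stride values.
+	 */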
+
+ /* Format */
+ sink_format = vsp1_entity_get_pad_format(&rpf->entity,
+ rpf->entity.config,
+ RWPF_PAD_SINK);
+ source_format = vsp1_entity_get_pad_format(&rpf->entity,
+ rpf->entity.config,
+ RWPF_PAD_SOURCE);
+
+ infmt = VI6_RPF_INFMT_CIPM
+ | (fmtinfo->hwfmt << VI6_RPF_INFMT_RDFMT_SHIFT);
+
+ if (fmtinfo->swap_yc)
+ infmt |= VI6_RPF_INFMT_SPYCS;
+ if (fmtinfo->swap_uv)
+ infmt |= VI6_RPF_INFMT_SPUVS;
+
+ if (sink_format->code != source_format->code)
+ infmt |= VI6_RPF_INFMT_CSC;
+
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_INFMT, infmt);
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_DSWAP, fmtinfo->swap);
+
+ if (entity->vsp1->info->gen == 4) {
+ u32 ext_infmt0;
+ u32 ext_infmt1;
+ u32 ext_infmt2;
+
+ switch (fmtinfo->fourcc) {
+ case V4L2_PIX_FMT_RGBX1010102:
+ ext_infmt0 = VI6_RPF_EXT_INFMT0_BYPP_M1_RGB10;
+ ext_infmt1 = VI6_RPF_EXT_INFMT1_PACK_CPOS(0, 10, 20, 0);
+ ext_infmt2 = VI6_RPF_EXT_INFMT2_PACK_CLEN(10, 10, 10, 0);
+ break;
+
+ case V4L2_PIX_FMT_RGBA1010102:
+ ext_infmt0 = VI6_RPF_EXT_INFMT0_BYPP_M1_RGB10;
+ ext_infmt1 = VI6_RPF_EXT_INFMT1_PACK_CPOS(0, 10, 20, 30);
+ ext_infmt2 = VI6_RPF_EXT_INFMT2_PACK_CLEN(10, 10, 10, 2);
+ break;
+
+ case V4L2_PIX_FMT_ARGB2101010:
+ ext_infmt0 = VI6_RPF_EXT_INFMT0_BYPP_M1_RGB10;
+ ext_infmt1 = VI6_RPF_EXT_INFMT1_PACK_CPOS(2, 12, 22, 0);
+ ext_infmt2 = VI6_RPF_EXT_INFMT2_PACK_CLEN(10, 10, 10, 2);
+ break;
+
+ case V4L2_PIX_FMT_Y210:
+ ext_infmt0 = VI6_RPF_EXT_INFMT0_F2B |
+ VI6_RPF_EXT_INFMT0_IPBD_Y_10 |
+ VI6_RPF_EXT_INFMT0_IPBD_C_10;
+ ext_infmt1 = 0x0;
+ ext_infmt2 = 0x0;
+ break;
+
+ case V4L2_PIX_FMT_Y212:
+ ext_infmt0 = VI6_RPF_EXT_INFMT0_F2B |
+ VI6_RPF_EXT_INFMT0_IPBD_Y_12 |
+ VI6_RPF_EXT_INFMT0_IPBD_C_12;
+ ext_infmt1 = 0x0;
+ ext_infmt2 = 0x0;
+ break;
+
+ default:
+ ext_infmt0 = 0;
+ ext_infmt1 = 0;
+ ext_infmt2 = 0;
+ break;
+ }
+
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_EXT_INFMT0, ext_infmt0);
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_EXT_INFMT1, ext_infmt1);
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_EXT_INFMT2, ext_infmt2);
+ }
+
+ /* Output location. */
+ if (pipe->brx) {
+ const struct v4l2_rect *compose;
+
+ compose = vsp1_entity_get_pad_selection(pipe->brx,
+ pipe->brx->config,
+ rpf->brx_input,
+ V4L2_SEL_TGT_COMPOSE);
+ left = compose->left;
+ top = compose->top;
+ }
+
+ if (pipe->interlaced)
+ top /= 2;
+
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_LOC,
+ (left << VI6_RPF_LOC_HCOORD_SHIFT) |
+ (top << VI6_RPF_LOC_VCOORD_SHIFT));
+
+ /*
+ * On Gen2 use the alpha channel (extended to 8 bits) when available or
+ * a fixed alpha value set through the V4L2_CID_ALPHA_COMPONENT control
+ * otherwise.
+ *
+ * The Gen3+ RPF has extended alpha capability and can both multiply the
+ * alpha channel by a fixed global alpha value, and multiply the pixel
+ * components to convert the input to premultiplied alpha.
+ *
+ * As alpha premultiplication is available in the BRx for both Gen2 and
+ * Gen3+ we handle it there and use the Gen3 alpha multiplier for global
+ * alpha multiplication only. This however prevents conversion to
+ * premultiplied alpha if no BRx is present in the pipeline. If that use
+ * case turns out to be useful we will revisit the implementation (for
+ * Gen3 only).
+ *
+ * We enable alpha multiplication on Gen3+ using the fixed alpha value
+ * set through the V4L2_CID_ALPHA_COMPONENT control when the input
+ * contains an alpha channel. On Gen2 the global alpha is ignored in
+ * that case.
+ *
+ * In all cases, disable color keying.
+ */
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_ALPH_SEL, VI6_RPF_ALPH_SEL_AEXT_EXT |
+ (fmtinfo->alpha ? VI6_RPF_ALPH_SEL_ASEL_PACKED
+ : VI6_RPF_ALPH_SEL_ASEL_FIXED));
+
+ if (entity->vsp1->info->gen >= 3) {
+ u32 mult;
+
+ if (fmtinfo->alpha) {
+ /*
+ * When the input contains an alpha channel enable the
+ * alpha multiplier. If the input is premultiplied we
+ * need to multiply both the alpha channel and the pixel
+ * components by the global alpha value to keep them
+ * premultiplied. Otherwise multiply the alpha channel
+ * only.
+ */
+ bool premultiplied = format->flags
+ & V4L2_PIX_FMT_FLAG_PREMUL_ALPHA;
+
+ mult = VI6_RPF_MULT_ALPHA_A_MMD_RATIO
+ | (premultiplied ?
+ VI6_RPF_MULT_ALPHA_P_MMD_RATIO :
+ VI6_RPF_MULT_ALPHA_P_MMD_NONE);
+ } else {
+ /*
+ * When the input doesn't contain an alpha channel the
+ * global alpha value is applied in the unpacking unit,
+ * the alpha multiplier isn't needed and must be
+ * disabled.
+ */
+ mult = VI6_RPF_MULT_ALPHA_A_MMD_NONE
+ | VI6_RPF_MULT_ALPHA_P_MMD_NONE;
+ }
+
+ rpf->mult_alpha = mult;
+ }
+
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_MSK_CTRL, 0);
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_CKEY_CTRL, 0);
+}
+
+static void vsp1_rpf_configure_autofld(struct vsp1_rwpf *rpf,
+ struct vsp1_dl_list *dl)
+{
+ const struct v4l2_pix_format_mplane *format = &rpf->format;
+ struct vsp1_dl_ext_cmd *cmd;
+ struct vsp1_extcmd_auto_fld_body *auto_fld;
+ u32 offset_y, offset_c;
+
+ cmd = vsp1_dl_get_pre_cmd(dl);
+ if (WARN_ONCE(!cmd, "Failed to obtain an autofld cmd"))
+ return;
+
+ /* Re-index our auto_fld to match the current RPF. */
+ auto_fld = cmd->data;
+ auto_fld = &auto_fld[rpf->entity.index];
+
+ auto_fld->top_y0 = rpf->mem.addr[0];
+ auto_fld->top_c0 = rpf->mem.addr[1];
+ auto_fld->top_c1 = rpf->mem.addr[2];
+
+ offset_y = format->plane_fmt[0].bytesperline;
+ offset_c = format->plane_fmt[1].bytesperline;
+
+ auto_fld->bottom_y0 = rpf->mem.addr[0] + offset_y;
+ auto_fld->bottom_c0 = rpf->mem.addr[1] + offset_c;
+ auto_fld->bottom_c1 = rpf->mem.addr[2] + offset_c;
+
+ cmd->flags |= VI6_DL_EXT_AUTOFLD_INT | BIT(16 + rpf->entity.index);
+}
+
+static void rpf_configure_frame(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb)
+{
+ struct vsp1_rwpf *rpf = to_rwpf(&entity->subdev);
+
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_VRTCOL_SET,
+ rpf->alpha << VI6_RPF_VRTCOL_SET_LAYA_SHIFT);
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_MULT_ALPHA, rpf->mult_alpha |
+ (rpf->alpha << VI6_RPF_MULT_ALPHA_RATIO_SHIFT));
+
+ vsp1_pipeline_propagate_alpha(pipe, dlb, rpf->alpha);
+}
+
+static void rpf_configure_partition(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb)
+{
+ struct vsp1_rwpf *rpf = to_rwpf(&entity->subdev);
+ struct vsp1_rwpf_memory mem = rpf->mem;
+ struct vsp1_device *vsp1 = rpf->entity.vsp1;
+ const struct vsp1_format_info *fmtinfo = rpf->fmtinfo;
+ const struct v4l2_pix_format_mplane *format = &rpf->format;
+ struct v4l2_rect crop;
+
+ /*
+ * Source size and crop offsets.
+ *
+ * The crop offsets correspond to the location of the crop
+ * rectangle top left corner in the plane buffer. Only two
+ * offsets are needed, as planes 2 and 3 always have identical
+ * strides.
+ */
+ crop = *vsp1_rwpf_get_crop(rpf, rpf->entity.config);
+
+ /*
+ * Partition Algorithm Control
+ *
+ * The partition algorithm can split this frame into multiple
+ * slices. We must scale our partition window based on the pipe
+ * configuration to match the destination partition window.
+ * To achieve this, we adjust our crop to provide a 'sub-crop'
+ * matching the expected partition window. Only 'left' and
+ * 'width' need to be adjusted.
+ */
+ if (pipe->partitions > 1) {
+ crop.width = pipe->partition->rpf.width;
+ crop.left += pipe->partition->rpf.left;
+ }
+
+ if (pipe->interlaced) {
+ crop.height = round_down(crop.height / 2, fmtinfo->vsub);
+ crop.top = round_down(crop.top / 2, fmtinfo->vsub);
+ }
+
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_SRC_BSIZE,
+ (crop.width << VI6_RPF_SRC_BSIZE_BHSIZE_SHIFT) |
+ (crop.height << VI6_RPF_SRC_BSIZE_BVSIZE_SHIFT));
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_SRC_ESIZE,
+ (crop.width << VI6_RPF_SRC_ESIZE_EHSIZE_SHIFT) |
+ (crop.height << VI6_RPF_SRC_ESIZE_EVSIZE_SHIFT));
+
+ mem.addr[0] += crop.top * format->plane_fmt[0].bytesperline
+ + crop.left * fmtinfo->bpp[0] / 8;
+
+ if (format->num_planes > 1) {
+ unsigned int bpl = format->plane_fmt[1].bytesperline;
+ unsigned int offset;
+
+ offset = crop.top / fmtinfo->vsub * bpl
+ + crop.left / fmtinfo->hsub * fmtinfo->bpp[1] / 8;
+ mem.addr[1] += offset;
+ mem.addr[2] += offset;
+ }
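+
+	/*
+	 * Worked example (illustrative only): for a hypothetical NV12 buffer
+	 * with 1024-byte luma and chroma strides (hsub = vsub = 2,
+	 * bpp = { 8, 16 }) cropped at (64, 32), the luma plane advances by
+	 * 32 * 1024 + 64 * 8 / 8 = 32832 bytes and the chroma plane by
+	 * 32 / 2 * 1024 + 64 / 2 * 16 / 8 = 16448 bytes.
+	 */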
+
+ /*
+ * On Gen3+ hardware the SPUVS bit has no effect on 3-planar
+ * formats. Swap the U and V planes manually in that case.
+ */
+ if (vsp1->info->gen >= 3 && format->num_planes == 3 &&
+ fmtinfo->swap_uv)
+ swap(mem.addr[1], mem.addr[2]);
+
+ /*
+ * Interlaced pipelines will use the extended pre-cmd to process
+ * SRCM_ADDR_{Y,C0,C1}.
+ */
+ if (pipe->interlaced) {
+ vsp1_rpf_configure_autofld(rpf, dl);
+ } else {
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_SRCM_ADDR_Y, mem.addr[0]);
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_SRCM_ADDR_C0, mem.addr[1]);
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_SRCM_ADDR_C1, mem.addr[2]);
+ }
+}
+
+static void rpf_partition(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_partition *partition,
+ unsigned int partition_idx,
+ struct vsp1_partition_window *window)
+{
+ partition->rpf = *window;
+}
+
+static const struct vsp1_entity_operations rpf_entity_ops = {
+ .configure_stream = rpf_configure_stream,
+ .configure_frame = rpf_configure_frame,
+ .configure_partition = rpf_configure_partition,
+ .partition = rpf_partition,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index)
+{
+ struct vsp1_rwpf *rpf;
+ char name[6];
+ int ret;
+
+ rpf = devm_kzalloc(vsp1->dev, sizeof(*rpf), GFP_KERNEL);
+ if (rpf == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ rpf->max_width = RPF_MAX_WIDTH;
+ rpf->max_height = RPF_MAX_HEIGHT;
+
+ rpf->entity.ops = &rpf_entity_ops;
+ rpf->entity.type = VSP1_ENTITY_RPF;
+ rpf->entity.index = index;
+
+ sprintf(name, "rpf.%u", index);
+ ret = vsp1_entity_init(vsp1, &rpf->entity, name, 2, &rpf_ops,
+ MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ /* Initialize the control handler. */
+ ret = vsp1_rwpf_init_ctrls(rpf, 0);
+ if (ret < 0) {
+ dev_err(vsp1->dev, "rpf%u: failed to initialize controls\n",
+ index);
+ goto error;
+ }
+
+ v4l2_ctrl_handler_setup(&rpf->ctrls);
+
+ return rpf;
+
+error:
+ vsp1_entity_destroy(&rpf->entity);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_rwpf.c b/drivers/media/platform/renesas/vsp1/vsp1_rwpf.c
new file mode 100644
index 0000000000..22a82d2181
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_rwpf.c
@@ -0,0 +1,287 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_rwpf.c -- R-Car VSP1 Read and Write Pixel Formatters
+ *
+ * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_rwpf.h"
+#include "vsp1_video.h"
+
+#define RWPF_MIN_WIDTH 1
+#define RWPF_MIN_HEIGHT 1
+
+struct v4l2_rect *vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf,
+ struct v4l2_subdev_state *sd_state)
+{
+ return v4l2_subdev_get_try_crop(&rwpf->entity.subdev, sd_state,
+ RWPF_PAD_SINK);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+static int vsp1_rwpf_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ static const unsigned int codes[] = {
+ MEDIA_BUS_FMT_ARGB8888_1X32,
+ MEDIA_BUS_FMT_AHSV8888_1X32,
+ MEDIA_BUS_FMT_AYUV8_1X32,
+ };
+
+ if (code->index >= ARRAY_SIZE(codes))
+ return -EINVAL;
+
+ code->code = codes[code->index];
+
+ return 0;
+}
+
+static int vsp1_rwpf_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct vsp1_rwpf *rwpf = to_rwpf(subdev);
+
+ return vsp1_subdev_enum_frame_size(subdev, sd_state, fse,
+ RWPF_MIN_WIDTH,
+ RWPF_MIN_HEIGHT, rwpf->max_width,
+ rwpf->max_height);
+}
+
+static int vsp1_rwpf_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vsp1_rwpf *rwpf = to_rwpf(subdev);
+ struct v4l2_subdev_state *config;
+ struct v4l2_mbus_framefmt *format;
+ int ret = 0;
+
+ mutex_lock(&rwpf->entity.lock);
+
+ config = vsp1_entity_get_pad_config(&rwpf->entity, sd_state,
+ fmt->which);
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* Default to YUV if the requested format is not supported. */
+ if (fmt->format.code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
+ fmt->format.code != MEDIA_BUS_FMT_AHSV8888_1X32 &&
+ fmt->format.code != MEDIA_BUS_FMT_AYUV8_1X32)
+ fmt->format.code = MEDIA_BUS_FMT_AYUV8_1X32;
+
+ format = vsp1_entity_get_pad_format(&rwpf->entity, config, fmt->pad);
+
+ if (fmt->pad == RWPF_PAD_SOURCE) {
+ /*
+		 * The RWPF performs format conversion but can't scale; only the
+ * format code can be changed on the source pad.
+ */
+ format->code = fmt->format.code;
+ fmt->format = *format;
+ goto done;
+ }
+
+ format->code = fmt->format.code;
+ format->width = clamp_t(unsigned int, fmt->format.width,
+ RWPF_MIN_WIDTH, rwpf->max_width);
+ format->height = clamp_t(unsigned int, fmt->format.height,
+ RWPF_MIN_HEIGHT, rwpf->max_height);
+ format->field = V4L2_FIELD_NONE;
+ format->colorspace = V4L2_COLORSPACE_SRGB;
+
+ fmt->format = *format;
+
+ if (rwpf->entity.type == VSP1_ENTITY_RPF) {
+ struct v4l2_rect *crop;
+
+ /* Update the sink crop rectangle. */
+ crop = vsp1_rwpf_get_crop(rwpf, config);
+ crop->left = 0;
+ crop->top = 0;
+ crop->width = fmt->format.width;
+ crop->height = fmt->format.height;
+ }
+
+ /* Propagate the format to the source pad. */
+ format = vsp1_entity_get_pad_format(&rwpf->entity, config,
+ RWPF_PAD_SOURCE);
+ *format = fmt->format;
+
+ if (rwpf->flip.rotate) {
+ format->width = fmt->format.height;
+ format->height = fmt->format.width;
+ }
+
+done:
+ mutex_unlock(&rwpf->entity.lock);
+ return ret;
+}
+
+static int vsp1_rwpf_get_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct vsp1_rwpf *rwpf = to_rwpf(subdev);
+ struct v4l2_subdev_state *config;
+ struct v4l2_mbus_framefmt *format;
+ int ret = 0;
+
+ /*
+ * Cropping is only supported on the RPF and is implemented on the sink
+ * pad.
+ */
+ if (rwpf->entity.type == VSP1_ENTITY_WPF || sel->pad != RWPF_PAD_SINK)
+ return -EINVAL;
+
+ mutex_lock(&rwpf->entity.lock);
+
+ config = vsp1_entity_get_pad_config(&rwpf->entity, sd_state,
+ sel->which);
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *vsp1_rwpf_get_crop(rwpf, config);
+ break;
+
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ format = vsp1_entity_get_pad_format(&rwpf->entity, config,
+ RWPF_PAD_SINK);
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = format->width;
+ sel->r.height = format->height;
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+done:
+ mutex_unlock(&rwpf->entity.lock);
+ return ret;
+}
+
+static int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct vsp1_rwpf *rwpf = to_rwpf(subdev);
+ struct v4l2_subdev_state *config;
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *crop;
+ int ret = 0;
+
+ /*
+ * Cropping is only supported on the RPF and is implemented on the sink
+ * pad.
+ */
+ if (rwpf->entity.type == VSP1_ENTITY_WPF || sel->pad != RWPF_PAD_SINK)
+ return -EINVAL;
+
+ if (sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ mutex_lock(&rwpf->entity.lock);
+
+ config = vsp1_entity_get_pad_config(&rwpf->entity, sd_state,
+ sel->which);
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* Make sure the crop rectangle is entirely contained in the image. */
+ format = vsp1_entity_get_pad_format(&rwpf->entity, config,
+ RWPF_PAD_SINK);
+
+ /*
+ * Restrict the crop rectangle coordinates to multiples of 2 to avoid
+ * shifting the color plane.
+ */
+ if (format->code == MEDIA_BUS_FMT_AYUV8_1X32) {
+ sel->r.left = ALIGN(sel->r.left, 2);
+ sel->r.top = ALIGN(sel->r.top, 2);
+ sel->r.width = round_down(sel->r.width, 2);
+ sel->r.height = round_down(sel->r.height, 2);
+ }
+
+ sel->r.left = min_t(unsigned int, sel->r.left, format->width - 2);
+ sel->r.top = min_t(unsigned int, sel->r.top, format->height - 2);
+ sel->r.width = min_t(unsigned int, sel->r.width,
+ format->width - sel->r.left);
+ sel->r.height = min_t(unsigned int, sel->r.height,
+ format->height - sel->r.top);
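+
+	/*
+	 * For instance (illustrative only), a requested AYUV crop of
+	 * (3, 5)/101x99 on a 640x480 sink format is adjusted to
+	 * (4, 6)/100x98 by the alignment and clamping above.
+	 */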
+
+ crop = vsp1_rwpf_get_crop(rwpf, config);
+ *crop = sel->r;
+
+ /* Propagate the format to the source pad. */
+ format = vsp1_entity_get_pad_format(&rwpf->entity, config,
+ RWPF_PAD_SOURCE);
+ format->width = crop->width;
+ format->height = crop->height;
+
+done:
+ mutex_unlock(&rwpf->entity.lock);
+ return ret;
+}
+
+const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops = {
+ .init_cfg = vsp1_entity_init_cfg,
+ .enum_mbus_code = vsp1_rwpf_enum_mbus_code,
+ .enum_frame_size = vsp1_rwpf_enum_frame_size,
+ .get_fmt = vsp1_subdev_get_pad_format,
+ .set_fmt = vsp1_rwpf_set_format,
+ .get_selection = vsp1_rwpf_get_selection,
+ .set_selection = vsp1_rwpf_set_selection,
+};
+
+/* -----------------------------------------------------------------------------
+ * Controls
+ */
+
+static int vsp1_rwpf_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vsp1_rwpf *rwpf =
+ container_of(ctrl->handler, struct vsp1_rwpf, ctrls);
+
+ switch (ctrl->id) {
+ case V4L2_CID_ALPHA_COMPONENT:
+ rwpf->alpha = ctrl->val;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vsp1_rwpf_ctrl_ops = {
+ .s_ctrl = vsp1_rwpf_s_ctrl,
+};
+
+int vsp1_rwpf_init_ctrls(struct vsp1_rwpf *rwpf, unsigned int ncontrols)
+{
+ v4l2_ctrl_handler_init(&rwpf->ctrls, ncontrols + 1);
+ v4l2_ctrl_new_std(&rwpf->ctrls, &vsp1_rwpf_ctrl_ops,
+ V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 255);
+
+ rwpf->entity.subdev.ctrl_handler = &rwpf->ctrls;
+
+ return rwpf->ctrls.error;
+}
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_rwpf.h b/drivers/media/platform/renesas/vsp1/vsp1_rwpf.h
new file mode 100644
index 0000000000..eac5c04c22
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_rwpf.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_rwpf.h -- R-Car VSP1 Read and Write Pixel Formatters
+ *
+ * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+#ifndef __VSP1_RWPF_H__
+#define __VSP1_RWPF_H__
+
+#include <linux/spinlock.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_entity.h"
+
+#define RWPF_PAD_SINK 0
+#define RWPF_PAD_SOURCE 1
+
+struct v4l2_ctrl;
+struct vsp1_dl_manager;
+struct vsp1_rwpf;
+struct vsp1_video;
+
+struct vsp1_rwpf_memory {
+ dma_addr_t addr[3];
+};
+
+struct vsp1_rwpf {
+ struct vsp1_entity entity;
+ struct v4l2_ctrl_handler ctrls;
+
+ struct vsp1_video *video;
+
+ unsigned int max_width;
+ unsigned int max_height;
+
+ struct v4l2_pix_format_mplane format;
+ const struct vsp1_format_info *fmtinfo;
+ unsigned int brx_input;
+
+ unsigned int alpha;
+
+ u32 mult_alpha;
+ u32 outfmt;
+
+ struct {
+ spinlock_t lock;
+ struct {
+ struct v4l2_ctrl *vflip;
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *rotate;
+ } ctrls;
+ unsigned int pending;
+ unsigned int active;
+ bool rotate;
+ } flip;
+
+ struct vsp1_rwpf_memory mem;
+ bool writeback;
+
+ struct vsp1_dl_manager *dlm;
+};
+
+static inline struct vsp1_rwpf *to_rwpf(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_rwpf, entity.subdev);
+}
+
+static inline struct vsp1_rwpf *entity_to_rwpf(struct vsp1_entity *entity)
+{
+ return container_of(entity, struct vsp1_rwpf, entity);
+}
+
+struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index);
+struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index);
+
+int vsp1_rwpf_init_ctrls(struct vsp1_rwpf *rwpf, unsigned int ncontrols);
+
+extern const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops;
+
+struct v4l2_rect *vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf,
+ struct v4l2_subdev_state *sd_state);
+
+#endif /* __VSP1_RWPF_H__ */
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_sru.c b/drivers/media/platform/renesas/vsp1/vsp1_sru.c
new file mode 100644
index 0000000000..b614a2aea4
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_sru.c
@@ -0,0 +1,396 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_sru.c -- R-Car VSP1 Super Resolution Unit
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_dl.h"
+#include "vsp1_pipe.h"
+#include "vsp1_sru.h"
+
+#define SRU_MIN_SIZE 4U
+#define SRU_MAX_SIZE 8190U
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline void vsp1_sru_write(struct vsp1_sru *sru,
+ struct vsp1_dl_body *dlb, u32 reg, u32 data)
+{
+ vsp1_dl_body_write(dlb, reg, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * Controls
+ */
+
+#define V4L2_CID_VSP1_SRU_INTENSITY (V4L2_CID_USER_BASE | 0x1001)
+
+struct vsp1_sru_param {
+ u32 ctrl0;
+ u32 ctrl2;
+};
+
+#define VI6_SRU_CTRL0_PARAMS(p0, p1) \
+ (((p0) << VI6_SRU_CTRL0_PARAM0_SHIFT) | \
+ ((p1) << VI6_SRU_CTRL0_PARAM1_SHIFT))
+
+#define VI6_SRU_CTRL2_PARAMS(p6, p7, p8) \
+ (((p6) << VI6_SRU_CTRL2_PARAM6_SHIFT) | \
+ ((p7) << VI6_SRU_CTRL2_PARAM7_SHIFT) | \
+ ((p8) << VI6_SRU_CTRL2_PARAM8_SHIFT))
+
+static const struct vsp1_sru_param vsp1_sru_params[] = {
+ {
+ .ctrl0 = VI6_SRU_CTRL0_PARAMS(256, 4) | VI6_SRU_CTRL0_EN,
+ .ctrl2 = VI6_SRU_CTRL2_PARAMS(24, 40, 255),
+ }, {
+ .ctrl0 = VI6_SRU_CTRL0_PARAMS(256, 4) | VI6_SRU_CTRL0_EN,
+ .ctrl2 = VI6_SRU_CTRL2_PARAMS(8, 16, 255),
+ }, {
+ .ctrl0 = VI6_SRU_CTRL0_PARAMS(384, 5) | VI6_SRU_CTRL0_EN,
+ .ctrl2 = VI6_SRU_CTRL2_PARAMS(36, 60, 255),
+ }, {
+ .ctrl0 = VI6_SRU_CTRL0_PARAMS(384, 5) | VI6_SRU_CTRL0_EN,
+ .ctrl2 = VI6_SRU_CTRL2_PARAMS(12, 27, 255),
+ }, {
+ .ctrl0 = VI6_SRU_CTRL0_PARAMS(511, 6) | VI6_SRU_CTRL0_EN,
+ .ctrl2 = VI6_SRU_CTRL2_PARAMS(48, 80, 255),
+ }, {
+ .ctrl0 = VI6_SRU_CTRL0_PARAMS(511, 6) | VI6_SRU_CTRL0_EN,
+ .ctrl2 = VI6_SRU_CTRL2_PARAMS(16, 36, 255),
+ },
+};
+
+static int sru_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vsp1_sru *sru =
+ container_of(ctrl->handler, struct vsp1_sru, ctrls);
+
+ switch (ctrl->id) {
+ case V4L2_CID_VSP1_SRU_INTENSITY:
+ sru->intensity = ctrl->val;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops sru_ctrl_ops = {
+ .s_ctrl = sru_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config sru_intensity_control = {
+ .ops = &sru_ctrl_ops,
+ .id = V4L2_CID_VSP1_SRU_INTENSITY,
+ .name = "Intensity",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 1,
+ .max = 6,
+ .def = 1,
+ .step = 1,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static int sru_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ static const unsigned int codes[] = {
+ MEDIA_BUS_FMT_ARGB8888_1X32,
+ MEDIA_BUS_FMT_AYUV8_1X32,
+ };
+
+ return vsp1_subdev_enum_mbus_code(subdev, sd_state, code, codes,
+ ARRAY_SIZE(codes));
+}
+
+static int sru_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct vsp1_sru *sru = to_sru(subdev);
+ struct v4l2_subdev_state *config;
+ struct v4l2_mbus_framefmt *format;
+ int ret = 0;
+
+ config = vsp1_entity_get_pad_config(&sru->entity, sd_state,
+ fse->which);
+ if (!config)
+ return -EINVAL;
+
+ format = vsp1_entity_get_pad_format(&sru->entity, config, SRU_PAD_SINK);
+
+ mutex_lock(&sru->entity.lock);
+
+ if (fse->index || fse->code != format->code) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (fse->pad == SRU_PAD_SINK) {
+ fse->min_width = SRU_MIN_SIZE;
+ fse->max_width = SRU_MAX_SIZE;
+ fse->min_height = SRU_MIN_SIZE;
+ fse->max_height = SRU_MAX_SIZE;
+ } else {
+ fse->min_width = format->width;
+ fse->min_height = format->height;
+ if (format->width <= SRU_MAX_SIZE / 2 &&
+ format->height <= SRU_MAX_SIZE / 2) {
+ fse->max_width = format->width * 2;
+ fse->max_height = format->height * 2;
+ } else {
+ fse->max_width = format->width;
+ fse->max_height = format->height;
+ }
+ }
+
+done:
+ mutex_unlock(&sru->entity.lock);
+ return ret;
+}
+
+static void sru_try_format(struct vsp1_sru *sru,
+ struct v4l2_subdev_state *sd_state,
+ unsigned int pad, struct v4l2_mbus_framefmt *fmt)
+{
+ struct v4l2_mbus_framefmt *format;
+ unsigned int input_area;
+ unsigned int output_area;
+
+ switch (pad) {
+ case SRU_PAD_SINK:
+ /* Default to YUV if the requested format is not supported. */
+ if (fmt->code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
+ fmt->code != MEDIA_BUS_FMT_AYUV8_1X32)
+ fmt->code = MEDIA_BUS_FMT_AYUV8_1X32;
+
+ fmt->width = clamp(fmt->width, SRU_MIN_SIZE, SRU_MAX_SIZE);
+ fmt->height = clamp(fmt->height, SRU_MIN_SIZE, SRU_MAX_SIZE);
+ break;
+
+ case SRU_PAD_SOURCE:
+ /* The SRU can't perform format conversion. */
+ format = vsp1_entity_get_pad_format(&sru->entity, sd_state,
+ SRU_PAD_SINK);
+ fmt->code = format->code;
+
+ /*
+		 * We can upscale by 2 in both directions, but not
+		 * independently. Compare the input and output rectangle
+		 * areas (avoiding integer overflows on the output): if the
+		 * requested output area is larger than 1.5^2 times the
+		 * input area, upscale by two, otherwise don't scale.
+ */
+ input_area = format->width * format->height;
+ output_area = min(fmt->width, SRU_MAX_SIZE)
+ * min(fmt->height, SRU_MAX_SIZE);
+
+ if (fmt->width <= SRU_MAX_SIZE / 2 &&
+ fmt->height <= SRU_MAX_SIZE / 2 &&
+ output_area > input_area * 9 / 4) {
+ fmt->width = format->width * 2;
+ fmt->height = format->height * 2;
+ } else {
+ fmt->width = format->width;
+ fmt->height = format->height;
+ }
+ break;
+ }
+
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+}
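+
+/*
+ * Worked example for the area check above (illustrative only): with a
+ * 640x480 sink format (input area 307200) and a requested 1280x720 source
+ * format (output area 921600), the output area exceeds 307200 * 9 / 4 =
+ * 691200, so the source format is adjusted to 1280x960, a 2x upscale in
+ * both directions.
+ */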
+
+static int sru_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vsp1_sru *sru = to_sru(subdev);
+ struct v4l2_subdev_state *config;
+ struct v4l2_mbus_framefmt *format;
+ int ret = 0;
+
+ mutex_lock(&sru->entity.lock);
+
+ config = vsp1_entity_get_pad_config(&sru->entity, sd_state,
+ fmt->which);
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ sru_try_format(sru, config, fmt->pad, &fmt->format);
+
+ format = vsp1_entity_get_pad_format(&sru->entity, config, fmt->pad);
+ *format = fmt->format;
+
+ if (fmt->pad == SRU_PAD_SINK) {
+ /* Propagate the format to the source pad. */
+ format = vsp1_entity_get_pad_format(&sru->entity, config,
+ SRU_PAD_SOURCE);
+ *format = fmt->format;
+
+ sru_try_format(sru, config, SRU_PAD_SOURCE, format);
+ }
+
+done:
+ mutex_unlock(&sru->entity.lock);
+ return ret;
+}
+
+static const struct v4l2_subdev_pad_ops sru_pad_ops = {
+ .init_cfg = vsp1_entity_init_cfg,
+ .enum_mbus_code = sru_enum_mbus_code,
+ .enum_frame_size = sru_enum_frame_size,
+ .get_fmt = vsp1_subdev_get_pad_format,
+ .set_fmt = sru_set_format,
+};
+
+static const struct v4l2_subdev_ops sru_ops = {
+ .pad = &sru_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void sru_configure_stream(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb)
+{
+ const struct vsp1_sru_param *param;
+ struct vsp1_sru *sru = to_sru(&entity->subdev);
+ struct v4l2_mbus_framefmt *input;
+ struct v4l2_mbus_framefmt *output;
+ u32 ctrl0;
+
+ input = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config,
+ SRU_PAD_SINK);
+ output = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config,
+ SRU_PAD_SOURCE);
+
+ if (input->code == MEDIA_BUS_FMT_ARGB8888_1X32)
+ ctrl0 = VI6_SRU_CTRL0_PARAM2 | VI6_SRU_CTRL0_PARAM3
+ | VI6_SRU_CTRL0_PARAM4;
+ else
+ ctrl0 = VI6_SRU_CTRL0_PARAM3;
+
+ if (input->width != output->width)
+ ctrl0 |= VI6_SRU_CTRL0_MODE_UPSCALE;
+
+ param = &vsp1_sru_params[sru->intensity - 1];
+
+ ctrl0 |= param->ctrl0;
+
+ vsp1_sru_write(sru, dlb, VI6_SRU_CTRL0, ctrl0);
+ vsp1_sru_write(sru, dlb, VI6_SRU_CTRL1, VI6_SRU_CTRL1_PARAM5);
+ vsp1_sru_write(sru, dlb, VI6_SRU_CTRL2, param->ctrl2);
+}
+
+static unsigned int sru_max_width(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe)
+{
+ struct vsp1_sru *sru = to_sru(&entity->subdev);
+ struct v4l2_mbus_framefmt *input;
+ struct v4l2_mbus_framefmt *output;
+
+ input = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config,
+ SRU_PAD_SINK);
+ output = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config,
+ SRU_PAD_SOURCE);
+
+ /*
+ * The maximum input width of the SRU is 288 input pixels, but 32
+ * pixels are reserved to support overlapping partition windows when
+ * scaling, leaving 256 usable input pixels. The limit returned here is
+ * expressed in output pixels, hence 512 when the x2 up-scaler is
+ * enabled and 256 otherwise.
+ */
+ if (input->width != output->width)
+ return 512;
+ else
+ return 256;
+}
+
+static void sru_partition(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_partition *partition,
+ unsigned int partition_idx,
+ struct vsp1_partition_window *window)
+{
+ struct vsp1_sru *sru = to_sru(&entity->subdev);
+ struct v4l2_mbus_framefmt *input;
+ struct v4l2_mbus_framefmt *output;
+
+ input = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config,
+ SRU_PAD_SINK);
+ output = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config,
+ SRU_PAD_SOURCE);
+
+ /* Adapt if SRUx2 is enabled. */
+ if (input->width != output->width) {
+ window->width /= 2;
+ window->left /= 2;
+ }
+
+ partition->sru = *window;
+}
+
+static const struct vsp1_entity_operations sru_entity_ops = {
+ .configure_stream = sru_configure_stream,
+ .max_width = sru_max_width,
+ .partition = sru_partition,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+struct vsp1_sru *vsp1_sru_create(struct vsp1_device *vsp1)
+{
+ struct vsp1_sru *sru;
+ int ret;
+
+ sru = devm_kzalloc(vsp1->dev, sizeof(*sru), GFP_KERNEL);
+ if (sru == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ sru->entity.ops = &sru_entity_ops;
+ sru->entity.type = VSP1_ENTITY_SRU;
+
+ ret = vsp1_entity_init(vsp1, &sru->entity, "sru", 2, &sru_ops,
+ MEDIA_ENT_F_PROC_VIDEO_SCALER);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ /* Initialize the control handler. */
+ v4l2_ctrl_handler_init(&sru->ctrls, 1);
+ v4l2_ctrl_new_custom(&sru->ctrls, &sru_intensity_control, NULL);
+
+ sru->intensity = 1;
+
+ sru->entity.subdev.ctrl_handler = &sru->ctrls;
+
+ if (sru->ctrls.error) {
+ dev_err(vsp1->dev, "sru: failed to initialize controls\n");
+ ret = sru->ctrls.error;
+ vsp1_entity_destroy(&sru->entity);
+ return ERR_PTR(ret);
+ }
+
+ return sru;
+}
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_sru.h b/drivers/media/platform/renesas/vsp1/vsp1_sru.h
new file mode 100644
index 0000000000..ddb00eadd1
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_sru.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_sru.h -- R-Car VSP1 Super Resolution Unit
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+#ifndef __VSP1_SRU_H__
+#define __VSP1_SRU_H__
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1_entity.h"
+
+struct vsp1_device;
+
+#define SRU_PAD_SINK 0
+#define SRU_PAD_SOURCE 1
+
+struct vsp1_sru {
+ struct vsp1_entity entity;
+
+ struct v4l2_ctrl_handler ctrls;
+
+ unsigned int intensity;
+};
+
+static inline struct vsp1_sru *to_sru(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_sru, entity.subdev);
+}
+
+struct vsp1_sru *vsp1_sru_create(struct vsp1_device *vsp1);
+
+#endif /* __VSP1_SRU_H__ */
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_uds.c b/drivers/media/platform/renesas/vsp1/vsp1_uds.c
new file mode 100644
index 0000000000..1c290cda00
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_uds.c
@@ -0,0 +1,429 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_uds.c -- R-Car VSP1 Up and Down Scaler
+ *
+ * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_dl.h"
+#include "vsp1_pipe.h"
+#include "vsp1_uds.h"
+
+#define UDS_MIN_SIZE 4U
+#define UDS_MAX_SIZE 8190U
+
+#define UDS_MIN_FACTOR 0x0100
+#define UDS_MAX_FACTOR 0xffff
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline void vsp1_uds_write(struct vsp1_uds *uds,
+ struct vsp1_dl_body *dlb, u32 reg, u32 data)
+{
+ vsp1_dl_body_write(dlb, reg + uds->entity.index * VI6_UDS_OFFSET, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * Scaling Computation
+ */
+
+void vsp1_uds_set_alpha(struct vsp1_entity *entity, struct vsp1_dl_body *dlb,
+ unsigned int alpha)
+{
+ struct vsp1_uds *uds = to_uds(&entity->subdev);
+
+ vsp1_uds_write(uds, dlb, VI6_UDS_ALPVAL,
+ alpha << VI6_UDS_ALPVAL_VAL0_SHIFT);
+}
+
+/*
+ * uds_output_size - Return the output size for an input size and scaling ratio
+ * @input: input size in pixels
+ * @ratio: scaling ratio in U4.12 fixed-point format
+ */
+static unsigned int uds_output_size(unsigned int input, unsigned int ratio)
+{
+ if (ratio > 4096) {
+ /* Down-scaling */
+ unsigned int mp;
+
+ mp = ratio / 4096;
+ mp = mp < 4 ? 1 : (mp < 8 ? 2 : 4);
+
+ return (input - 1) / mp * mp * 4096 / ratio + 1;
+ } else {
+ /* Up-scaling */
+ return (input - 1) * 4096 / ratio + 1;
+ }
+}
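+
+ /*
+ * Worked example (illustrative numbers): the ratio is expressed in
+ * U4.12, so 4096 stands for 1.0. With input = 1024 and ratio = 0x2000
+ * (down-scaling by two), mp evaluates to 1 and the result is
+ * 1023 * 4096 / 8192 + 1 = 512. With input = 512 and ratio = 0x0800
+ * (up-scaling by two), the result is 511 * 4096 / 2048 + 1 = 1023.
+ */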
+
+/*
+ * uds_output_limits - Return the min and max output sizes for an input size
+ * @input: input size in pixels
+ * @minimum: minimum output size (returned)
+ * @maximum: maximum output size (returned)
+ */
+static void uds_output_limits(unsigned int input,
+ unsigned int *minimum, unsigned int *maximum)
+{
+ *minimum = max(uds_output_size(input, UDS_MAX_FACTOR), UDS_MIN_SIZE);
+ *maximum = min(uds_output_size(input, UDS_MIN_FACTOR), UDS_MAX_SIZE);
+}
+
+/*
+ * uds_passband_width - Return the passband filter width for a scaling ratio
+ * @ratio: scaling ratio in U4.12 fixed-point format
+ */
+static unsigned int uds_passband_width(unsigned int ratio)
+{
+ if (ratio >= 4096) {
+ /* Down-scaling */
+ unsigned int mp;
+
+ mp = ratio / 4096;
+ mp = mp < 4 ? 1 : (mp < 8 ? 2 : 4);
+
+ return 64 * 4096 * mp / ratio;
+ } else {
+ /* Up-scaling */
+ return 64;
+ }
+}
+
+static unsigned int uds_compute_ratio(unsigned int input, unsigned int output)
+{
+ /* TODO: This is an approximation that will need to be refined. */
+ return (input - 1) * 4096 / (output - 1);
+}
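+
+ /*
+ * Worked example (illustrative numbers): scaling 1024 input pixels down
+ * to 512 output pixels gives (1023 * 4096) / 511 = 8200, i.e. roughly
+ * 2.002 in U4.12 rather than an exact 0x2000, which is the
+ * approximation noted in the TODO above.
+ */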
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+static int uds_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ static const unsigned int codes[] = {
+ MEDIA_BUS_FMT_ARGB8888_1X32,
+ MEDIA_BUS_FMT_AYUV8_1X32,
+ };
+
+ return vsp1_subdev_enum_mbus_code(subdev, sd_state, code, codes,
+ ARRAY_SIZE(codes));
+}
+
+static int uds_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct vsp1_uds *uds = to_uds(subdev);
+ struct v4l2_subdev_state *config;
+ struct v4l2_mbus_framefmt *format;
+ int ret = 0;
+
+ config = vsp1_entity_get_pad_config(&uds->entity, sd_state,
+ fse->which);
+ if (!config)
+ return -EINVAL;
+
+ format = vsp1_entity_get_pad_format(&uds->entity, config,
+ UDS_PAD_SINK);
+
+ mutex_lock(&uds->entity.lock);
+
+ if (fse->index || fse->code != format->code) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (fse->pad == UDS_PAD_SINK) {
+ fse->min_width = UDS_MIN_SIZE;
+ fse->max_width = UDS_MAX_SIZE;
+ fse->min_height = UDS_MIN_SIZE;
+ fse->max_height = UDS_MAX_SIZE;
+ } else {
+ uds_output_limits(format->width, &fse->min_width,
+ &fse->max_width);
+ uds_output_limits(format->height, &fse->min_height,
+ &fse->max_height);
+ }
+
+done:
+ mutex_unlock(&uds->entity.lock);
+ return ret;
+}
+
+static void uds_try_format(struct vsp1_uds *uds,
+ struct v4l2_subdev_state *sd_state,
+ unsigned int pad, struct v4l2_mbus_framefmt *fmt)
+{
+ struct v4l2_mbus_framefmt *format;
+ unsigned int minimum;
+ unsigned int maximum;
+
+ switch (pad) {
+ case UDS_PAD_SINK:
+ /* Default to YUV if the requested format is not supported. */
+ if (fmt->code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
+ fmt->code != MEDIA_BUS_FMT_AYUV8_1X32)
+ fmt->code = MEDIA_BUS_FMT_AYUV8_1X32;
+
+ fmt->width = clamp(fmt->width, UDS_MIN_SIZE, UDS_MAX_SIZE);
+ fmt->height = clamp(fmt->height, UDS_MIN_SIZE, UDS_MAX_SIZE);
+ break;
+
+ case UDS_PAD_SOURCE:
+ /* The UDS scales but can't perform format conversion. */
+ format = vsp1_entity_get_pad_format(&uds->entity, sd_state,
+ UDS_PAD_SINK);
+ fmt->code = format->code;
+
+ uds_output_limits(format->width, &minimum, &maximum);
+ fmt->width = clamp(fmt->width, minimum, maximum);
+ uds_output_limits(format->height, &minimum, &maximum);
+ fmt->height = clamp(fmt->height, minimum, maximum);
+ break;
+ }
+
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+}
+
+static int uds_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vsp1_uds *uds = to_uds(subdev);
+ struct v4l2_subdev_state *config;
+ struct v4l2_mbus_framefmt *format;
+ int ret = 0;
+
+ mutex_lock(&uds->entity.lock);
+
+ config = vsp1_entity_get_pad_config(&uds->entity, sd_state,
+ fmt->which);
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ uds_try_format(uds, config, fmt->pad, &fmt->format);
+
+ format = vsp1_entity_get_pad_format(&uds->entity, config, fmt->pad);
+ *format = fmt->format;
+
+ if (fmt->pad == UDS_PAD_SINK) {
+ /* Propagate the format to the source pad. */
+ format = vsp1_entity_get_pad_format(&uds->entity, config,
+ UDS_PAD_SOURCE);
+ *format = fmt->format;
+
+ uds_try_format(uds, config, UDS_PAD_SOURCE, format);
+ }
+
+done:
+ mutex_unlock(&uds->entity.lock);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static const struct v4l2_subdev_pad_ops uds_pad_ops = {
+ .init_cfg = vsp1_entity_init_cfg,
+ .enum_mbus_code = uds_enum_mbus_code,
+ .enum_frame_size = uds_enum_frame_size,
+ .get_fmt = vsp1_subdev_get_pad_format,
+ .set_fmt = uds_set_format,
+};
+
+static const struct v4l2_subdev_ops uds_ops = {
+ .pad = &uds_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void uds_configure_stream(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb)
+{
+ struct vsp1_uds *uds = to_uds(&entity->subdev);
+ const struct v4l2_mbus_framefmt *output;
+ const struct v4l2_mbus_framefmt *input;
+ unsigned int hscale;
+ unsigned int vscale;
+ bool multitap;
+
+ input = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config,
+ UDS_PAD_SINK);
+ output = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config,
+ UDS_PAD_SOURCE);
+
+ hscale = uds_compute_ratio(input->width, output->width);
+ vscale = uds_compute_ratio(input->height, output->height);
+
+ dev_dbg(uds->entity.vsp1->dev, "hscale %u vscale %u\n", hscale, vscale);
+
+ /*
+ * Multi-tap scaling can't be enabled along with alpha scaling when
+ * scaling down with a factor lower than or equal to 1/2 in either
+ * direction.
+ */
+ if (uds->scale_alpha && (hscale >= 8192 || vscale >= 8192))
+ multitap = false;
+ else
+ multitap = true;
+
+ vsp1_uds_write(uds, dlb, VI6_UDS_CTRL,
+ (uds->scale_alpha ? VI6_UDS_CTRL_AON : 0) |
+ (multitap ? VI6_UDS_CTRL_BC : 0));
+
+ vsp1_uds_write(uds, dlb, VI6_UDS_PASS_BWIDTH,
+ (uds_passband_width(hscale)
+ << VI6_UDS_PASS_BWIDTH_H_SHIFT) |
+ (uds_passband_width(vscale)
+ << VI6_UDS_PASS_BWIDTH_V_SHIFT));
+
+ /* Set the scaling ratios. */
+ vsp1_uds_write(uds, dlb, VI6_UDS_SCALE,
+ (hscale << VI6_UDS_SCALE_HFRAC_SHIFT) |
+ (vscale << VI6_UDS_SCALE_VFRAC_SHIFT));
+}
+
+static void uds_configure_partition(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb)
+{
+ struct vsp1_uds *uds = to_uds(&entity->subdev);
+ struct vsp1_partition *partition = pipe->partition;
+ const struct v4l2_mbus_framefmt *output;
+
+ output = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config,
+ UDS_PAD_SOURCE);
+
+ /* Input size clipping. */
+ vsp1_uds_write(uds, dlb, VI6_UDS_HSZCLIP, VI6_UDS_HSZCLIP_HCEN |
+ (0 << VI6_UDS_HSZCLIP_HCL_OFST_SHIFT) |
+ (partition->uds_sink.width
+ << VI6_UDS_HSZCLIP_HCL_SIZE_SHIFT));
+
+ /* Output size clipping. */
+ vsp1_uds_write(uds, dlb, VI6_UDS_CLIP_SIZE,
+ (partition->uds_source.width
+ << VI6_UDS_CLIP_SIZE_HSIZE_SHIFT) |
+ (output->height
+ << VI6_UDS_CLIP_SIZE_VSIZE_SHIFT));
+}
+
+static unsigned int uds_max_width(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe)
+{
+ struct vsp1_uds *uds = to_uds(&entity->subdev);
+ const struct v4l2_mbus_framefmt *output;
+ const struct v4l2_mbus_framefmt *input;
+ unsigned int hscale;
+
+ input = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config,
+ UDS_PAD_SINK);
+ output = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config,
+ UDS_PAD_SOURCE);
+ hscale = output->width / input->width;
+
+ /*
+ * The maximum width of the UDS is 304 pixels. These are input pixels
+ * in the event of up-scaling, and output pixels in the event of
+ * downscaling.
+ *
+ * To support overlapping partition windows we clamp at units of 256 and
+ * the remaining pixels are reserved.
+ */
+ if (hscale <= 2)
+ return 256;
+ else if (hscale <= 4)
+ return 512;
+ else if (hscale <= 8)
+ return 1024;
+ else
+ return 2048;
+}
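+
+ /*
+ * Worked example (illustrative numbers): with a 1280-pixel input
+ * up-scaled to a 3840-pixel output, hscale is 3 and the limit is 512
+ * output pixels per partition, i.e. roughly 171 input pixels, which
+ * stays comfortably within the 304-pixel input limit once the overlap
+ * pixels are reserved.
+ */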
+
+/* -----------------------------------------------------------------------------
+ * Partition Algorithm Support
+ */
+
+static void uds_partition(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_partition *partition,
+ unsigned int partition_idx,
+ struct vsp1_partition_window *window)
+{
+ struct vsp1_uds *uds = to_uds(&entity->subdev);
+ const struct v4l2_mbus_framefmt *output;
+ const struct v4l2_mbus_framefmt *input;
+
+ /* Initialise the partition state. */
+ partition->uds_sink = *window;
+ partition->uds_source = *window;
+
+ input = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config,
+ UDS_PAD_SINK);
+ output = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config,
+ UDS_PAD_SOURCE);
+
+ partition->uds_sink.width = window->width * input->width
+ / output->width;
+ partition->uds_sink.left = window->left * input->width
+ / output->width;
+
+ *window = partition->uds_sink;
+}
+
+static const struct vsp1_entity_operations uds_entity_ops = {
+ .configure_stream = uds_configure_stream,
+ .configure_partition = uds_configure_partition,
+ .max_width = uds_max_width,
+ .partition = uds_partition,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+struct vsp1_uds *vsp1_uds_create(struct vsp1_device *vsp1, unsigned int index)
+{
+ struct vsp1_uds *uds;
+ char name[6];
+ int ret;
+
+ uds = devm_kzalloc(vsp1->dev, sizeof(*uds), GFP_KERNEL);
+ if (uds == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ uds->entity.ops = &uds_entity_ops;
+ uds->entity.type = VSP1_ENTITY_UDS;
+ uds->entity.index = index;
+
+ sprintf(name, "uds.%u", index);
+ ret = vsp1_entity_init(vsp1, &uds->entity, name, 2, &uds_ops,
+ MEDIA_ENT_F_PROC_VIDEO_SCALER);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ return uds;
+}
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_uds.h b/drivers/media/platform/renesas/vsp1/vsp1_uds.h
new file mode 100644
index 0000000000..c34f95a666
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_uds.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_uds.h -- R-Car VSP1 Up and Down Scaler
+ *
+ * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+#ifndef __VSP1_UDS_H__
+#define __VSP1_UDS_H__
+
+#include <media/media-entity.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1_entity.h"
+
+struct vsp1_device;
+
+#define UDS_PAD_SINK 0
+#define UDS_PAD_SOURCE 1
+
+struct vsp1_uds {
+ struct vsp1_entity entity;
+ bool scale_alpha;
+};
+
+static inline struct vsp1_uds *to_uds(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_uds, entity.subdev);
+}
+
+struct vsp1_uds *vsp1_uds_create(struct vsp1_device *vsp1, unsigned int index);
+
+void vsp1_uds_set_alpha(struct vsp1_entity *uds, struct vsp1_dl_body *dlb,
+ unsigned int alpha);
+
+#endif /* __VSP1_UDS_H__ */
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_uif.c b/drivers/media/platform/renesas/vsp1/vsp1_uif.c
new file mode 100644
index 0000000000..83d7f17df8
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_uif.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_uif.c -- R-Car VSP1 User Logic Interface
+ *
+ * Copyright (C) 2017-2018 Laurent Pinchart
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/sys_soc.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_dl.h"
+#include "vsp1_entity.h"
+#include "vsp1_uif.h"
+
+#define UIF_MIN_SIZE 4U
+#define UIF_MAX_SIZE 8190U
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline u32 vsp1_uif_read(struct vsp1_uif *uif, u32 reg)
+{
+ return vsp1_read(uif->entity.vsp1,
+ uif->entity.index * VI6_UIF_OFFSET + reg);
+}
+
+static inline void vsp1_uif_write(struct vsp1_uif *uif,
+ struct vsp1_dl_body *dlb, u32 reg, u32 data)
+{
+ vsp1_dl_body_write(dlb, reg + uif->entity.index * VI6_UIF_OFFSET, data);
+}
+
+u32 vsp1_uif_get_crc(struct vsp1_uif *uif)
+{
+ return vsp1_uif_read(uif, VI6_UIF_DISCOM_DOCMCCRCR);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+static const unsigned int uif_codes[] = {
+ MEDIA_BUS_FMT_ARGB8888_1X32,
+ MEDIA_BUS_FMT_AHSV8888_1X32,
+ MEDIA_BUS_FMT_AYUV8_1X32,
+};
+
+static int uif_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ return vsp1_subdev_enum_mbus_code(subdev, sd_state, code, uif_codes,
+ ARRAY_SIZE(uif_codes));
+}
+
+static int uif_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ return vsp1_subdev_enum_frame_size(subdev, sd_state, fse,
+ UIF_MIN_SIZE,
+ UIF_MIN_SIZE, UIF_MAX_SIZE,
+ UIF_MAX_SIZE);
+}
+
+static int uif_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ return vsp1_subdev_set_pad_format(subdev, sd_state, fmt, uif_codes,
+ ARRAY_SIZE(uif_codes),
+ UIF_MIN_SIZE, UIF_MIN_SIZE,
+ UIF_MAX_SIZE, UIF_MAX_SIZE);
+}
+
+static int uif_get_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct vsp1_uif *uif = to_uif(subdev);
+ struct v4l2_subdev_state *config;
+ struct v4l2_mbus_framefmt *format;
+ int ret = 0;
+
+ if (sel->pad != UIF_PAD_SINK)
+ return -EINVAL;
+
+ mutex_lock(&uif->entity.lock);
+
+ config = vsp1_entity_get_pad_config(&uif->entity, sd_state,
+ sel->which);
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ format = vsp1_entity_get_pad_format(&uif->entity, config,
+ UIF_PAD_SINK);
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = format->width;
+ sel->r.height = format->height;
+ break;
+
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *vsp1_entity_get_pad_selection(&uif->entity, config,
+ sel->pad, sel->target);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+done:
+ mutex_unlock(&uif->entity.lock);
+ return ret;
+}
+
+static int uif_set_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct vsp1_uif *uif = to_uif(subdev);
+ struct v4l2_subdev_state *config;
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *selection;
+ int ret = 0;
+
+ if (sel->pad != UIF_PAD_SINK ||
+ sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ mutex_lock(&uif->entity.lock);
+
+ config = vsp1_entity_get_pad_config(&uif->entity, sd_state,
+ sel->which);
+ if (!config) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* The crop rectangle must be inside the input frame. */
+ format = vsp1_entity_get_pad_format(&uif->entity, config, UIF_PAD_SINK);
+
+ sel->r.left = clamp_t(unsigned int, sel->r.left, 0, format->width - 1);
+ sel->r.top = clamp_t(unsigned int, sel->r.top, 0, format->height - 1);
+ sel->r.width = clamp_t(unsigned int, sel->r.width, UIF_MIN_SIZE,
+ format->width - sel->r.left);
+ sel->r.height = clamp_t(unsigned int, sel->r.height, UIF_MIN_SIZE,
+ format->height - sel->r.top);
+
+ /* Store the crop rectangle. */
+ selection = vsp1_entity_get_pad_selection(&uif->entity, config,
+ sel->pad, V4L2_SEL_TGT_CROP);
+ *selection = sel->r;
+
+done:
+ mutex_unlock(&uif->entity.lock);
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static const struct v4l2_subdev_pad_ops uif_pad_ops = {
+ .init_cfg = vsp1_entity_init_cfg,
+ .enum_mbus_code = uif_enum_mbus_code,
+ .enum_frame_size = uif_enum_frame_size,
+ .get_fmt = vsp1_subdev_get_pad_format,
+ .set_fmt = uif_set_format,
+ .get_selection = uif_get_selection,
+ .set_selection = uif_set_selection,
+};
+
+static const struct v4l2_subdev_ops uif_ops = {
+ .pad = &uif_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void uif_configure_stream(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb)
+{
+ struct vsp1_uif *uif = to_uif(&entity->subdev);
+ const struct v4l2_rect *crop;
+ unsigned int left;
+ unsigned int width;
+
+ vsp1_uif_write(uif, dlb, VI6_UIF_DISCOM_DOCMPMR,
+ VI6_UIF_DISCOM_DOCMPMR_SEL(9));
+
+ crop = vsp1_entity_get_pad_selection(entity, entity->config,
+ UIF_PAD_SINK, V4L2_SEL_TGT_CROP);
+
+ left = crop->left;
+ width = crop->width;
+
+ /* On M3-W the horizontal coordinates are twice the register value. */
+ if (uif->m3w_quirk) {
+ left /= 2;
+ width /= 2;
+ }
+
+ vsp1_uif_write(uif, dlb, VI6_UIF_DISCOM_DOCMSPXR, left);
+ vsp1_uif_write(uif, dlb, VI6_UIF_DISCOM_DOCMSPYR, crop->top);
+ vsp1_uif_write(uif, dlb, VI6_UIF_DISCOM_DOCMSZXR, width);
+ vsp1_uif_write(uif, dlb, VI6_UIF_DISCOM_DOCMSZYR, crop->height);
+
+ vsp1_uif_write(uif, dlb, VI6_UIF_DISCOM_DOCMCR,
+ VI6_UIF_DISCOM_DOCMCR_CMPR);
+}
+
+static const struct vsp1_entity_operations uif_entity_ops = {
+ .configure_stream = uif_configure_stream,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+static const struct soc_device_attribute vsp1_r8a7796[] = {
+ { .soc_id = "r8a7796" },
+ { /* sentinel */ }
+};
+
+struct vsp1_uif *vsp1_uif_create(struct vsp1_device *vsp1, unsigned int index)
+{
+ struct vsp1_uif *uif;
+ char name[6];
+ int ret;
+
+ uif = devm_kzalloc(vsp1->dev, sizeof(*uif), GFP_KERNEL);
+ if (!uif)
+ return ERR_PTR(-ENOMEM);
+
+ if (soc_device_match(vsp1_r8a7796))
+ uif->m3w_quirk = true;
+
+ uif->entity.ops = &uif_entity_ops;
+ uif->entity.type = VSP1_ENTITY_UIF;
+ uif->entity.index = index;
+
+ /* The datasheet names the two UIF instances UIF4 and UIF5. */
+ sprintf(name, "uif.%u", index + 4);
+ ret = vsp1_entity_init(vsp1, &uif->entity, name, 2, &uif_ops,
+ MEDIA_ENT_F_PROC_VIDEO_STATISTICS);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ return uif;
+}
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_uif.h b/drivers/media/platform/renesas/vsp1/vsp1_uif.h
new file mode 100644
index 0000000000..c71ab5f6a6
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_uif.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_uif.h -- R-Car VSP1 User Logic Interface
+ *
+ * Copyright (C) 2017-2018 Laurent Pinchart
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+#ifndef __VSP1_UIF_H__
+#define __VSP1_UIF_H__
+
+#include "vsp1_entity.h"
+
+struct vsp1_device;
+
+#define UIF_PAD_SINK 0
+#define UIF_PAD_SOURCE 1
+
+struct vsp1_uif {
+ struct vsp1_entity entity;
+ bool m3w_quirk;
+};
+
+static inline struct vsp1_uif *to_uif(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_uif, entity.subdev);
+}
+
+struct vsp1_uif *vsp1_uif_create(struct vsp1_device *vsp1, unsigned int index);
+u32 vsp1_uif_get_crc(struct vsp1_uif *uif);
+
+#endif /* __VSP1_UIF_H__ */
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_video.c b/drivers/media/platform/renesas/vsp1/vsp1_video.c
new file mode 100644
index 0000000000..e9d5027761
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_video.c
@@ -0,0 +1,1327 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_video.c -- R-Car VSP1 Video Node
+ *
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/v4l2-mediabus.h>
+#include <linux/videodev2.h>
+#include <linux/wait.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "vsp1.h"
+#include "vsp1_brx.h"
+#include "vsp1_dl.h"
+#include "vsp1_entity.h"
+#include "vsp1_hgo.h"
+#include "vsp1_hgt.h"
+#include "vsp1_pipe.h"
+#include "vsp1_rwpf.h"
+#include "vsp1_uds.h"
+#include "vsp1_video.h"
+
+#define VSP1_VIDEO_DEF_FORMAT V4L2_PIX_FMT_YUYV
+#define VSP1_VIDEO_DEF_WIDTH 1024
+#define VSP1_VIDEO_DEF_HEIGHT 768
+
+#define VSP1_VIDEO_MAX_WIDTH 8190U
+#define VSP1_VIDEO_MAX_HEIGHT 8190U
+
+/* -----------------------------------------------------------------------------
+ * Helper functions
+ */
+
+static struct v4l2_subdev *
+vsp1_video_remote_subdev(struct media_pad *local, u32 *pad)
+{
+ struct media_pad *remote;
+
+ remote = media_pad_remote_pad_first(local);
+ if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
+ return NULL;
+
+ if (pad)
+ *pad = remote->index;
+
+ return media_entity_to_v4l2_subdev(remote->entity);
+}
+
+static int vsp1_video_verify_format(struct vsp1_video *video)
+{
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ struct v4l2_subdev *subdev;
+ int ret;
+
+ subdev = vsp1_video_remote_subdev(&video->pad, &fmt.pad);
+ if (subdev == NULL)
+ return -EINVAL;
+
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
+ if (ret < 0)
+ return ret == -ENOIOCTLCMD ? -EINVAL : ret;
+
+ if (video->rwpf->fmtinfo->mbus != fmt.format.code ||
+ video->rwpf->format.height != fmt.format.height ||
+ video->rwpf->format.width != fmt.format.width)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __vsp1_video_try_format(struct vsp1_video *video,
+ struct v4l2_pix_format_mplane *pix,
+ const struct vsp1_format_info **fmtinfo)
+{
+ static const u32 xrgb_formats[][2] = {
+ { V4L2_PIX_FMT_RGB444, V4L2_PIX_FMT_XRGB444 },
+ { V4L2_PIX_FMT_RGB555, V4L2_PIX_FMT_XRGB555 },
+ { V4L2_PIX_FMT_BGR32, V4L2_PIX_FMT_XBGR32 },
+ { V4L2_PIX_FMT_RGB32, V4L2_PIX_FMT_XRGB32 },
+ };
+
+ const struct vsp1_format_info *info;
+ unsigned int width = pix->width;
+ unsigned int height = pix->height;
+ unsigned int i;
+
+ /*
+ * Backward compatibility: replace deprecated RGB formats by their XRGB
+ * equivalent. This selects the format older userspace applications want
+ * while still exposing the new format.
+ */
+ for (i = 0; i < ARRAY_SIZE(xrgb_formats); ++i) {
+ if (xrgb_formats[i][0] == pix->pixelformat) {
+ pix->pixelformat = xrgb_formats[i][1];
+ break;
+ }
+ }
+
+ /*
+ * Retrieve format information and select the default format if the
+ * requested format isn't supported.
+ */
+ info = vsp1_get_format_info(video->vsp1, pix->pixelformat);
+ if (info == NULL)
+ info = vsp1_get_format_info(video->vsp1, VSP1_VIDEO_DEF_FORMAT);
+
+ pix->pixelformat = info->fourcc;
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ pix->field = V4L2_FIELD_NONE;
+
+ if (info->fourcc == V4L2_PIX_FMT_HSV24 ||
+ info->fourcc == V4L2_PIX_FMT_HSV32)
+ pix->hsv_enc = V4L2_HSV_ENC_256;
+
+ memset(pix->reserved, 0, sizeof(pix->reserved));
+
+ /* Align the width and height for YUV 4:2:2 and 4:2:0 formats. */
+ width = round_down(width, info->hsub);
+ height = round_down(height, info->vsub);
+
+ /* Clamp the width and height. */
+ pix->width = clamp(width, info->hsub, VSP1_VIDEO_MAX_WIDTH);
+ pix->height = clamp(height, info->vsub, VSP1_VIDEO_MAX_HEIGHT);
+
+ /*
+ * Compute and clamp the stride and image size. While not documented in
+ * the datasheet, strides not aligned to a multiple of 128 bytes result
+ * in image corruption.
+ */
+ for (i = 0; i < min(info->planes, 2U); ++i) {
+ unsigned int hsub = i > 0 ? info->hsub : 1;
+ unsigned int vsub = i > 0 ? info->vsub : 1;
+ unsigned int align = 128;
+ unsigned int bpl;
+
+ bpl = clamp_t(unsigned int, pix->plane_fmt[i].bytesperline,
+ pix->width / hsub * info->bpp[i] / 8,
+ round_down(65535U, align));
+
+ pix->plane_fmt[i].bytesperline = round_up(bpl, align);
+ pix->plane_fmt[i].sizeimage = pix->plane_fmt[i].bytesperline
+ * pix->height / vsub;
+ }
+
+ if (info->planes == 3) {
+ /* The second and third planes must have the same stride. */
+ pix->plane_fmt[2].bytesperline = pix->plane_fmt[1].bytesperline;
+ pix->plane_fmt[2].sizeimage = pix->plane_fmt[1].sizeimage;
+ }
+
+ pix->num_planes = info->planes;
+
+ if (fmtinfo)
+ *fmtinfo = info;
+
+ return 0;
+}
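+
+ /*
+ * Worked example (illustrative, assuming the usual single-plane 16-bpp
+ * YUYV format-info entry with 2x1 subsampling): a requested 1931x1080
+ * buffer is first rounded down to 1930x1080; the minimum stride is
+ * 1930 * 16 / 8 = 3860 bytes, which rounds up to 3968 (the next
+ * multiple of 128), giving a plane size of 3968 * 1080 = 4285440 bytes.
+ */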
+
+/* -----------------------------------------------------------------------------
+ * VSP1 Partition Algorithm support
+ */
+
+/**
+ * vsp1_video_calculate_partition - Calculate the active partition output window
+ *
+ * @pipe: the pipeline
+ * @partition: partition that will hold the calculated values
+ * @div_size: pre-determined maximum partition division size
+ * @index: partition index
+ */
+static void vsp1_video_calculate_partition(struct vsp1_pipeline *pipe,
+ struct vsp1_partition *partition,
+ unsigned int div_size,
+ unsigned int index)
+{
+ const struct v4l2_mbus_framefmt *format;
+ struct vsp1_partition_window window;
+ unsigned int modulus;
+
+ /*
+ * Partitions are computed on the size before rotation, use the format
+ * at the WPF sink.
+ */
+ format = vsp1_entity_get_pad_format(&pipe->output->entity,
+ pipe->output->entity.config,
+ RWPF_PAD_SINK);
+
+ /* A single partition simply processes the output size in full. */
+ if (pipe->partitions <= 1) {
+ window.left = 0;
+ window.width = format->width;
+
+ vsp1_pipeline_propagate_partition(pipe, partition, index,
+ &window);
+ return;
+ }
+
+ /* Initialise the partition with sane starting conditions. */
+ window.left = index * div_size;
+ window.width = div_size;
+
+ modulus = format->width % div_size;
+
+ /*
+ * We need to prevent the last partition from being smaller than the
+ * *minimum* width of the hardware capabilities.
+ *
+ * If the modulus is less than half of the partition size, the
+ * penultimate partition is reduced to half of the partition size and
+ * that half is merged with the modulus into the final partition:
+ * |1234|1234|1234|12|341| instead of |1234|1234|1234|1234|1|.
+ */
+ if (modulus) {
+ /*
+ * pipe->partitions is 1 based, whilst index is a 0 based index.
+ * Normalise this locally.
+ */
+ unsigned int partitions = pipe->partitions - 1;
+
+ if (modulus < div_size / 2) {
+ if (index == partitions - 1) {
+ /* Halve the penultimate partition. */
+ window.width = div_size / 2;
+ } else if (index == partitions) {
+ /* Increase the final partition. */
+ window.width = (div_size / 2) + modulus;
+ window.left -= div_size / 2;
+ }
+ } else if (index == partitions) {
+ window.width = modulus;
+ }
+ }
+
+ vsp1_pipeline_propagate_partition(pipe, partition, index, &window);
+}
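+
+ /*
+ * Worked example (illustrative numbers): with an 800-pixel output and
+ * div_size = 256 the pipeline uses four partitions and the modulus is
+ * 800 % 256 = 32, less than 128. Partition 2 (the penultimate) is
+ * therefore halved to 128 pixels at left = 512, and partition 3 grows
+ * to 128 + 32 = 160 pixels with its left edge moved back to 640, so the
+ * windows cover 0-255, 256-511, 512-639 and 640-799 without any
+ * partition narrower than 128 pixels.
+ */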
+
+static int vsp1_video_pipeline_setup_partitions(struct vsp1_pipeline *pipe)
+{
+ struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
+ const struct v4l2_mbus_framefmt *format;
+ struct vsp1_entity *entity;
+ unsigned int div_size;
+ unsigned int i;
+
+ /*
+ * Partitions are computed on the size before rotation, use the format
+ * at the WPF sink.
+ */
+ format = vsp1_entity_get_pad_format(&pipe->output->entity,
+ pipe->output->entity.config,
+ RWPF_PAD_SINK);
+ div_size = format->width;
+
+ /*
+ * Only Gen3+ hardware requires image partitioning, Gen2 will operate
+ * with a single partition that covers the whole output.
+ */
+ if (vsp1->info->gen >= 3) {
+ list_for_each_entry(entity, &pipe->entities, list_pipe) {
+ unsigned int entity_max;
+
+ if (!entity->ops->max_width)
+ continue;
+
+ entity_max = entity->ops->max_width(entity, pipe);
+ if (entity_max)
+ div_size = min(div_size, entity_max);
+ }
+ }
+
+ pipe->partitions = DIV_ROUND_UP(format->width, div_size);
+ pipe->part_table = kcalloc(pipe->partitions, sizeof(*pipe->part_table),
+ GFP_KERNEL);
+ if (!pipe->part_table)
+ return -ENOMEM;
+
+ for (i = 0; i < pipe->partitions; ++i)
+ vsp1_video_calculate_partition(pipe, &pipe->part_table[i],
+ div_size, i);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Pipeline Management
+ */
+
+/*
+ * vsp1_video_complete_buffer - Complete the current buffer
+ * @video: the video node
+ *
+ * This function completes the current buffer by filling its sequence number,
+ * time stamp and payload size, and hands it back to the vb2 core.
+ *
+ * Return the next queued buffer or NULL if the queue is empty.
+ */
+static struct vsp1_vb2_buffer *
+vsp1_video_complete_buffer(struct vsp1_video *video)
+{
+ struct vsp1_pipeline *pipe = video->rwpf->entity.pipe;
+ struct vsp1_vb2_buffer *next = NULL;
+ struct vsp1_vb2_buffer *done;
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&video->irqlock, flags);
+
+ if (list_empty(&video->irqqueue)) {
+ spin_unlock_irqrestore(&video->irqlock, flags);
+ return NULL;
+ }
+
+ done = list_first_entry(&video->irqqueue,
+ struct vsp1_vb2_buffer, queue);
+
+ list_del(&done->queue);
+
+ if (!list_empty(&video->irqqueue))
+ next = list_first_entry(&video->irqqueue,
+ struct vsp1_vb2_buffer, queue);
+
+ spin_unlock_irqrestore(&video->irqlock, flags);
+
+ done->buf.sequence = pipe->sequence;
+ done->buf.vb2_buf.timestamp = ktime_get_ns();
+ for (i = 0; i < done->buf.vb2_buf.num_planes; ++i)
+ vb2_set_plane_payload(&done->buf.vb2_buf, i,
+ vb2_plane_size(&done->buf.vb2_buf, i));
+ vb2_buffer_done(&done->buf.vb2_buf, VB2_BUF_STATE_DONE);
+
+ return next;
+}
+
+static void vsp1_video_frame_end(struct vsp1_pipeline *pipe,
+ struct vsp1_rwpf *rwpf)
+{
+ struct vsp1_video *video = rwpf->video;
+ struct vsp1_vb2_buffer *buf;
+
+ buf = vsp1_video_complete_buffer(video);
+ if (buf == NULL)
+ return;
+
+ video->rwpf->mem = buf->mem;
+ pipe->buffers_ready |= 1 << video->pipe_index;
+}
+
+static void vsp1_video_pipeline_run_partition(struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ unsigned int partition)
+{
+ struct vsp1_dl_body *dlb = vsp1_dl_list_get_body0(dl);
+ struct vsp1_entity *entity;
+
+ pipe->partition = &pipe->part_table[partition];
+
+ list_for_each_entry(entity, &pipe->entities, list_pipe)
+ vsp1_entity_configure_partition(entity, pipe, dl, dlb);
+}
+
+static void vsp1_video_pipeline_run(struct vsp1_pipeline *pipe)
+{
+ struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
+ struct vsp1_entity *entity;
+ struct vsp1_dl_body *dlb;
+ struct vsp1_dl_list *dl;
+ unsigned int partition;
+
+ dl = vsp1_dl_list_get(pipe->output->dlm);
+
+ /*
+ * If the VSP hardware isn't configured yet (which occurs either when
+ * processing the first frame or after a system suspend/resume), add the
+ * cached stream configuration to the display list to perform a full
+ * initialisation.
+ */
+ if (!pipe->configured)
+ vsp1_dl_list_add_body(dl, pipe->stream_config);
+
+ dlb = vsp1_dl_list_get_body0(dl);
+
+ list_for_each_entry(entity, &pipe->entities, list_pipe)
+ vsp1_entity_configure_frame(entity, pipe, dl, dlb);
+
+ /* Run the first partition. */
+ vsp1_video_pipeline_run_partition(pipe, dl, 0);
+
+ /* Process consecutive partitions as necessary. */
+ for (partition = 1; partition < pipe->partitions; ++partition) {
+ struct vsp1_dl_list *dl_next;
+
+ dl_next = vsp1_dl_list_get(pipe->output->dlm);
+
+ /*
+ * An incomplete chain will still function, but output only
+ * the partitions that had a dl available. The frame end
+ * interrupt will be marked on the last dl in the chain.
+ */
+ if (!dl_next) {
+ dev_err(vsp1->dev, "Failed to obtain a dl list. Frame will be incomplete\n");
+ break;
+ }
+
+ vsp1_video_pipeline_run_partition(pipe, dl_next, partition);
+ vsp1_dl_list_add_chain(dl, dl_next);
+ }
+
+ /* Complete, and commit the head display list. */
+ vsp1_dl_list_commit(dl, 0);
+ pipe->configured = true;
+
+ vsp1_pipeline_run(pipe);
+}
+
+static void vsp1_video_pipeline_frame_end(struct vsp1_pipeline *pipe,
+ unsigned int completion)
+{
+ struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
+ enum vsp1_pipeline_state state;
+ unsigned long flags;
+ unsigned int i;
+
+ /* M2M Pipelines should never call here with an incomplete frame. */
+ WARN_ON_ONCE(!(completion & VSP1_DL_FRAME_END_COMPLETED));
+
+ spin_lock_irqsave(&pipe->irqlock, flags);
+
+ /* Complete buffers on all video nodes. */
+ for (i = 0; i < vsp1->info->rpf_count; ++i) {
+ if (!pipe->inputs[i])
+ continue;
+
+ vsp1_video_frame_end(pipe, pipe->inputs[i]);
+ }
+
+ vsp1_video_frame_end(pipe, pipe->output);
+
+ state = pipe->state;
+ pipe->state = VSP1_PIPELINE_STOPPED;
+
+ /*
+ * If a stop has been requested, mark the pipeline as stopped and
+ * return. Otherwise restart the pipeline if ready.
+ */
+ if (state == VSP1_PIPELINE_STOPPING)
+ wake_up(&pipe->wq);
+ else if (vsp1_pipeline_ready(pipe))
+ vsp1_video_pipeline_run(pipe);
+
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
+}
+
+static int vsp1_video_pipeline_build_branch(struct vsp1_pipeline *pipe,
+ struct vsp1_rwpf *input,
+ struct vsp1_rwpf *output)
+{
+ struct media_entity_enum ent_enum;
+ struct vsp1_entity *entity;
+ struct media_pad *pad;
+ struct vsp1_brx *brx = NULL;
+ int ret;
+
+ ret = media_entity_enum_init(&ent_enum, &input->entity.vsp1->media_dev);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The main data path doesn't include the HGO or HGT; use
+ * vsp1_entity_remote_pad() to traverse the graph.
+ */
+
+ pad = vsp1_entity_remote_pad(&input->entity.pads[RWPF_PAD_SOURCE]);
+
+ while (1) {
+ if (pad == NULL) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ /* We've reached a video node, which shouldn't have happened. */
+ if (!is_media_entity_v4l2_subdev(pad->entity)) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ entity = to_vsp1_entity(
+ media_entity_to_v4l2_subdev(pad->entity));
+
+ /*
+ * A BRU or BRS is present in the pipeline; store its input pad
+ * number in the input RPF for use when configuring the RPF.
+ */
+ if (entity->type == VSP1_ENTITY_BRU ||
+ entity->type == VSP1_ENTITY_BRS) {
+ /* BRU and BRS can't be chained. */
+ if (brx) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ brx = to_brx(&entity->subdev);
+ brx->inputs[pad->index].rpf = input;
+ input->brx_input = pad->index;
+ }
+
+ /* We've reached the WPF, we're done. */
+ if (entity->type == VSP1_ENTITY_WPF)
+ break;
+
+ /* Ensure the branch has no loop. */
+ if (media_entity_enum_test_and_set(&ent_enum,
+ &entity->subdev.entity)) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ /* UDS can't be chained. */
+ if (entity->type == VSP1_ENTITY_UDS) {
+ if (pipe->uds) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ pipe->uds = entity;
+ pipe->uds_input = brx ? &brx->entity : &input->entity;
+ }
+
+ /* Follow the source link, ignoring any HGO or HGT. */
+ pad = &entity->pads[entity->source_pad];
+ pad = vsp1_entity_remote_pad(pad);
+ }
+
+ /* The last entity must be the output WPF. */
+ if (entity != &output->entity)
+ ret = -EPIPE;
+
+out:
+ media_entity_enum_cleanup(&ent_enum);
+
+ return ret;
+}
+
+static int vsp1_video_pipeline_build(struct vsp1_pipeline *pipe,
+ struct vsp1_video *video)
+{
+ struct media_graph graph;
+ struct media_entity *entity = &video->video.entity;
+ struct media_device *mdev = entity->graph_obj.mdev;
+ unsigned int i;
+ int ret;
+
+ /* Walk the graph to locate the entities and video nodes. */
+ ret = media_graph_walk_init(&graph, mdev);
+ if (ret)
+ return ret;
+
+ media_graph_walk_start(&graph, entity);
+
+ while ((entity = media_graph_walk_next(&graph))) {
+ struct v4l2_subdev *subdev;
+ struct vsp1_rwpf *rwpf;
+ struct vsp1_entity *e;
+
+ if (!is_media_entity_v4l2_subdev(entity))
+ continue;
+
+ subdev = media_entity_to_v4l2_subdev(entity);
+ e = to_vsp1_entity(subdev);
+ list_add_tail(&e->list_pipe, &pipe->entities);
+ e->pipe = pipe;
+
+ switch (e->type) {
+ case VSP1_ENTITY_RPF:
+ rwpf = to_rwpf(subdev);
+ pipe->inputs[rwpf->entity.index] = rwpf;
+ rwpf->video->pipe_index = ++pipe->num_inputs;
+ break;
+
+ case VSP1_ENTITY_WPF:
+ rwpf = to_rwpf(subdev);
+ pipe->output = rwpf;
+ rwpf->video->pipe_index = 0;
+ break;
+
+ case VSP1_ENTITY_LIF:
+ pipe->lif = e;
+ break;
+
+ case VSP1_ENTITY_BRU:
+ case VSP1_ENTITY_BRS:
+ pipe->brx = e;
+ break;
+
+ case VSP1_ENTITY_HGO:
+ pipe->hgo = e;
+ break;
+
+ case VSP1_ENTITY_HGT:
+ pipe->hgt = e;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ media_graph_walk_cleanup(&graph);
+
+ /* We need one output and at least one input. */
+ if (pipe->num_inputs == 0 || !pipe->output)
+ return -EPIPE;
+
+ /*
+ * Follow links downstream for each input and make sure the graph
+ * contains no loop and that all branches end at the output WPF.
+ */
+ for (i = 0; i < video->vsp1->info->rpf_count; ++i) {
+ if (!pipe->inputs[i])
+ continue;
+
+ ret = vsp1_video_pipeline_build_branch(pipe, pipe->inputs[i],
+ pipe->output);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vsp1_video_pipeline_init(struct vsp1_pipeline *pipe,
+ struct vsp1_video *video)
+{
+ vsp1_pipeline_init(pipe);
+
+ pipe->frame_end = vsp1_video_pipeline_frame_end;
+
+ return vsp1_video_pipeline_build(pipe, video);
+}
+
+static struct vsp1_pipeline *vsp1_video_pipeline_get(struct vsp1_video *video)
+{
+ struct vsp1_pipeline *pipe;
+ int ret;
+
+ /*
+ * Get a pipeline object for the video node. If a pipeline has already
+ * been allocated, just increment its reference count and return it.
+ * Otherwise allocate a new pipeline and initialize it; it will be freed
+ * when the last reference is released.
+ */
+ if (!video->rwpf->entity.pipe) {
+ pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
+ if (!pipe)
+ return ERR_PTR(-ENOMEM);
+
+ ret = vsp1_video_pipeline_init(pipe, video);
+ if (ret < 0) {
+ vsp1_pipeline_reset(pipe);
+ kfree(pipe);
+ return ERR_PTR(ret);
+ }
+ } else {
+ pipe = video->rwpf->entity.pipe;
+ kref_get(&pipe->kref);
+ }
+
+ return pipe;
+}
+
+static void vsp1_video_pipeline_release(struct kref *kref)
+{
+ struct vsp1_pipeline *pipe = container_of(kref, typeof(*pipe), kref);
+
+ vsp1_pipeline_reset(pipe);
+ kfree(pipe);
+}
+
+static void vsp1_video_pipeline_put(struct vsp1_pipeline *pipe)
+{
+ struct media_device *mdev = &pipe->output->entity.vsp1->media_dev;
+
+ mutex_lock(&mdev->graph_mutex);
+ kref_put(&pipe->kref, vsp1_video_pipeline_release);
+ mutex_unlock(&mdev->graph_mutex);
+}
+
+/* -----------------------------------------------------------------------------
+ * videobuf2 Queue Operations
+ */
+
+static int
+vsp1_video_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct vsp1_video *video = vb2_get_drv_priv(vq);
+ const struct v4l2_pix_format_mplane *format = &video->rwpf->format;
+ unsigned int i;
+
+ if (*nplanes) {
+ if (*nplanes != format->num_planes)
+ return -EINVAL;
+
+ for (i = 0; i < *nplanes; i++)
+ if (sizes[i] < format->plane_fmt[i].sizeimage)
+ return -EINVAL;
+ return 0;
+ }
+
+ *nplanes = format->num_planes;
+
+ for (i = 0; i < format->num_planes; ++i)
+ sizes[i] = format->plane_fmt[i].sizeimage;
+
+ return 0;
+}
+
+static int vsp1_video_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue);
+ struct vsp1_vb2_buffer *buf = to_vsp1_vb2_buffer(vbuf);
+ const struct v4l2_pix_format_mplane *format = &video->rwpf->format;
+ unsigned int i;
+
+ if (vb->num_planes < format->num_planes)
+ return -EINVAL;
+
+ for (i = 0; i < vb->num_planes; ++i) {
+ buf->mem.addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
+
+ if (vb2_plane_size(vb, i) < format->plane_fmt[i].sizeimage)
+ return -EINVAL;
+ }
+
+ for ( ; i < 3; ++i)
+ buf->mem.addr[i] = 0;
+
+ return 0;
+}
+
+static void vsp1_video_buffer_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue);
+ struct vsp1_pipeline *pipe = video->rwpf->entity.pipe;
+ struct vsp1_vb2_buffer *buf = to_vsp1_vb2_buffer(vbuf);
+ unsigned long flags;
+ bool empty;
+
+ spin_lock_irqsave(&video->irqlock, flags);
+ empty = list_empty(&video->irqqueue);
+ list_add_tail(&buf->queue, &video->irqqueue);
+ spin_unlock_irqrestore(&video->irqlock, flags);
+
+ if (!empty)
+ return;
+
+ spin_lock_irqsave(&pipe->irqlock, flags);
+
+ video->rwpf->mem = buf->mem;
+ pipe->buffers_ready |= 1 << video->pipe_index;
+
+ if (vb2_start_streaming_called(&video->queue) &&
+ vsp1_pipeline_ready(pipe))
+ vsp1_video_pipeline_run(pipe);
+
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
+}
+
+static int vsp1_video_setup_pipeline(struct vsp1_pipeline *pipe)
+{
+ struct vsp1_entity *entity;
+ int ret;
+
+ /* Determine this pipeline's sizes for image partitioning support. */
+ ret = vsp1_video_pipeline_setup_partitions(pipe);
+ if (ret < 0)
+ return ret;
+
+ if (pipe->uds) {
+ struct vsp1_uds *uds = to_uds(&pipe->uds->subdev);
+
+ /*
+ * If a BRU or BRS is present in the pipeline before the UDS,
+ * the alpha component doesn't need to be scaled as the BRU and
+ * BRS output alpha value is fixed to 255. Otherwise we need to
+ * scale the alpha component only when available at the input
+ * RPF.
+ */
+ if (pipe->uds_input->type == VSP1_ENTITY_BRU ||
+ pipe->uds_input->type == VSP1_ENTITY_BRS) {
+ uds->scale_alpha = false;
+ } else {
+ struct vsp1_rwpf *rpf =
+ to_rwpf(&pipe->uds_input->subdev);
+
+ uds->scale_alpha = rpf->fmtinfo->alpha;
+ }
+ }
+
+ /*
+ * Compute and cache the stream configuration into a body. The cached
+ * body will be added to the display list by vsp1_video_pipeline_run()
+ * whenever the pipeline needs to be fully reconfigured.
+ */
+ pipe->stream_config = vsp1_dlm_dl_body_get(pipe->output->dlm);
+ if (!pipe->stream_config)
+ return -ENOMEM;
+
+ list_for_each_entry(entity, &pipe->entities, list_pipe) {
+ vsp1_entity_route_setup(entity, pipe, pipe->stream_config);
+ vsp1_entity_configure_stream(entity, pipe, NULL,
+ pipe->stream_config);
+ }
+
+ return 0;
+}
+
+static void vsp1_video_release_buffers(struct vsp1_video *video)
+{
+ struct vsp1_vb2_buffer *buffer;
+ unsigned long flags;
+
+ /* Remove all buffers from the IRQ queue. */
+ spin_lock_irqsave(&video->irqlock, flags);
+ list_for_each_entry(buffer, &video->irqqueue, queue)
+ vb2_buffer_done(&buffer->buf.vb2_buf, VB2_BUF_STATE_ERROR);
+ INIT_LIST_HEAD(&video->irqqueue);
+ spin_unlock_irqrestore(&video->irqlock, flags);
+}
+
+static void vsp1_video_cleanup_pipeline(struct vsp1_pipeline *pipe)
+{
+ lockdep_assert_held(&pipe->lock);
+
+ /* Release any cached configuration from our output video. */
+ vsp1_dl_body_put(pipe->stream_config);
+ pipe->stream_config = NULL;
+ pipe->configured = false;
+
+ /* Release our partition table allocation. */
+ kfree(pipe->part_table);
+ pipe->part_table = NULL;
+}
+
+static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct vsp1_video *video = vb2_get_drv_priv(vq);
+ struct vsp1_pipeline *pipe = video->rwpf->entity.pipe;
+ bool start_pipeline = false;
+ unsigned long flags;
+ int ret;
+
+ mutex_lock(&pipe->lock);
+ if (pipe->stream_count == pipe->num_inputs) {
+ ret = vsp1_video_setup_pipeline(pipe);
+ if (ret < 0) {
+ vsp1_video_release_buffers(video);
+ vsp1_video_cleanup_pipeline(pipe);
+ mutex_unlock(&pipe->lock);
+ return ret;
+ }
+
+ start_pipeline = true;
+ }
+
+ pipe->stream_count++;
+ mutex_unlock(&pipe->lock);
+
+ /*
+ * vsp1_pipeline_ready() is not sufficient to establish that all streams
+ * are prepared and the pipeline is configured, as multiple streams
+ * can race through streamon with buffers already queued. Therefore we
+ * don't even attempt to start the pipeline until the last stream has
+ * called through here.
+ */
+ if (!start_pipeline)
+ return 0;
+
+ spin_lock_irqsave(&pipe->irqlock, flags);
+ if (vsp1_pipeline_ready(pipe))
+ vsp1_video_pipeline_run(pipe);
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
+
+ return 0;
+}
+
+static void vsp1_video_stop_streaming(struct vb2_queue *vq)
+{
+ struct vsp1_video *video = vb2_get_drv_priv(vq);
+ struct vsp1_pipeline *pipe = video->rwpf->entity.pipe;
+ unsigned long flags;
+ int ret;
+
+ /*
+ * Clear the buffers ready flag to make sure the device won't be started
+ * by a QBUF on the video node on the other side of the pipeline.
+ */
+ spin_lock_irqsave(&video->irqlock, flags);
+ pipe->buffers_ready &= ~(1 << video->pipe_index);
+ spin_unlock_irqrestore(&video->irqlock, flags);
+
+ mutex_lock(&pipe->lock);
+ if (--pipe->stream_count == pipe->num_inputs) {
+ /* Stop the pipeline. */
+ ret = vsp1_pipeline_stop(pipe);
+ if (ret == -ETIMEDOUT)
+ dev_err(video->vsp1->dev, "pipeline stop timeout\n");
+
+ vsp1_video_cleanup_pipeline(pipe);
+ }
+ mutex_unlock(&pipe->lock);
+
+ video_device_pipeline_stop(&video->video);
+ vsp1_video_release_buffers(video);
+ vsp1_video_pipeline_put(pipe);
+}
+
+static const struct vb2_ops vsp1_video_queue_qops = {
+ .queue_setup = vsp1_video_queue_setup,
+ .buf_prepare = vsp1_video_buffer_prepare,
+ .buf_queue = vsp1_video_buffer_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = vsp1_video_start_streaming,
+ .stop_streaming = vsp1_video_stop_streaming,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 ioctls
+ */
+
+static int
+vsp1_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct vsp1_video *video = to_vsp1_video(vfh->vdev);
+
+ cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
+ | V4L2_CAP_VIDEO_CAPTURE_MPLANE
+ | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
+
+ strscpy(cap->driver, "vsp1", sizeof(cap->driver));
+ strscpy(cap->card, video->video.name, sizeof(cap->card));
+
+ return 0;
+}
+
+static int
+vsp1_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct vsp1_video *video = to_vsp1_video(vfh->vdev);
+
+ if (format->type != video->queue.type)
+ return -EINVAL;
+
+ mutex_lock(&video->lock);
+ format->fmt.pix_mp = video->rwpf->format;
+ mutex_unlock(&video->lock);
+
+ return 0;
+}
+
+static int
+vsp1_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct vsp1_video *video = to_vsp1_video(vfh->vdev);
+
+ if (format->type != video->queue.type)
+ return -EINVAL;
+
+ return __vsp1_video_try_format(video, &format->fmt.pix_mp, NULL);
+}
+
+static int
+vsp1_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct vsp1_video *video = to_vsp1_video(vfh->vdev);
+ const struct vsp1_format_info *info;
+ int ret;
+
+ if (format->type != video->queue.type)
+ return -EINVAL;
+
+ ret = __vsp1_video_try_format(video, &format->fmt.pix_mp, &info);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&video->lock);
+
+ if (vb2_is_busy(&video->queue)) {
+ ret = -EBUSY;
+ goto done;
+ }
+
+ video->rwpf->format = format->fmt.pix_mp;
+ video->rwpf->fmtinfo = info;
+
+done:
+ mutex_unlock(&video->lock);
+ return ret;
+}
+
+static int
+vsp1_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct vsp1_video *video = to_vsp1_video(vfh->vdev);
+ struct media_device *mdev = &video->vsp1->media_dev;
+ struct vsp1_pipeline *pipe;
+ int ret;
+
+ if (vb2_queue_is_busy(&video->queue, file))
+ return -EBUSY;
+
+ /*
+ * Get a pipeline for the video node and start streaming on it. No link
+ * touching an entity in the pipeline can be activated or deactivated
+ * once streaming is started.
+ */
+ mutex_lock(&mdev->graph_mutex);
+
+ pipe = vsp1_video_pipeline_get(video);
+ if (IS_ERR(pipe)) {
+ mutex_unlock(&mdev->graph_mutex);
+ return PTR_ERR(pipe);
+ }
+
+ ret = __video_device_pipeline_start(&video->video, &pipe->pipe);
+ if (ret < 0) {
+ mutex_unlock(&mdev->graph_mutex);
+ goto err_pipe;
+ }
+
+ mutex_unlock(&mdev->graph_mutex);
+
+ /*
+ * Verify that the configured format matches the output of the connected
+ * subdev.
+ */
+ ret = vsp1_video_verify_format(video);
+ if (ret < 0)
+ goto err_stop;
+
+ /* Start the queue. */
+ ret = vb2_streamon(&video->queue, type);
+ if (ret < 0)
+ goto err_stop;
+
+ return 0;
+
+err_stop:
+ video_device_pipeline_stop(&video->video);
+err_pipe:
+ vsp1_video_pipeline_put(pipe);
+ return ret;
+}
+
+static const struct v4l2_ioctl_ops vsp1_video_ioctl_ops = {
+ .vidioc_querycap = vsp1_video_querycap,
+ .vidioc_g_fmt_vid_cap_mplane = vsp1_video_get_format,
+ .vidioc_s_fmt_vid_cap_mplane = vsp1_video_set_format,
+ .vidioc_try_fmt_vid_cap_mplane = vsp1_video_try_format,
+ .vidioc_g_fmt_vid_out_mplane = vsp1_video_get_format,
+ .vidioc_s_fmt_vid_out_mplane = vsp1_video_set_format,
+ .vidioc_try_fmt_vid_out_mplane = vsp1_video_try_format,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vsp1_video_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 File Operations
+ */
+
+static int vsp1_video_open(struct file *file)
+{
+ struct vsp1_video *video = video_drvdata(file);
+ struct v4l2_fh *vfh;
+ int ret = 0;
+
+ vfh = kzalloc(sizeof(*vfh), GFP_KERNEL);
+ if (vfh == NULL)
+ return -ENOMEM;
+
+ v4l2_fh_init(vfh, &video->video);
+ v4l2_fh_add(vfh);
+
+ file->private_data = vfh;
+
+ ret = vsp1_device_get(video->vsp1);
+ if (ret < 0) {
+ v4l2_fh_del(vfh);
+ v4l2_fh_exit(vfh);
+ kfree(vfh);
+ }
+
+ return ret;
+}
+
+static int vsp1_video_release(struct file *file)
+{
+ struct vsp1_video *video = video_drvdata(file);
+
+ vb2_fop_release(file);
+
+ vsp1_device_put(video->vsp1);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations vsp1_video_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = video_ioctl2,
+ .open = vsp1_video_open,
+ .release = vsp1_video_release,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+};
+
+/* -----------------------------------------------------------------------------
+ * Suspend and Resume
+ */
+
+void vsp1_video_suspend(struct vsp1_device *vsp1)
+{
+ unsigned long flags;
+ unsigned int i;
+ int ret;
+
+ /*
+ * To avoid increasing the system suspend time needlessly, loop over the
+ * pipelines twice, first to set them all to the stopping state, and
+ * then to wait for the stop to complete.
+ */
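+	/*
+	 * Because all pipelines are put into the stopping state first, the
+	 * total wait in the second loop is bounded by the slowest pipeline's
+	 * stop time rather than by the sum of all stop times.
+	 */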
+ for (i = 0; i < vsp1->info->wpf_count; ++i) {
+ struct vsp1_rwpf *wpf = vsp1->wpf[i];
+ struct vsp1_pipeline *pipe;
+
+ if (wpf == NULL)
+ continue;
+
+ pipe = wpf->entity.pipe;
+ if (pipe == NULL)
+ continue;
+
+ spin_lock_irqsave(&pipe->irqlock, flags);
+ if (pipe->state == VSP1_PIPELINE_RUNNING)
+ pipe->state = VSP1_PIPELINE_STOPPING;
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
+ }
+
+ for (i = 0; i < vsp1->info->wpf_count; ++i) {
+ struct vsp1_rwpf *wpf = vsp1->wpf[i];
+ struct vsp1_pipeline *pipe;
+
+ if (wpf == NULL)
+ continue;
+
+ pipe = wpf->entity.pipe;
+ if (pipe == NULL)
+ continue;
+
+ ret = wait_event_timeout(pipe->wq, vsp1_pipeline_stopped(pipe),
+ msecs_to_jiffies(500));
+ if (ret == 0)
+ dev_warn(vsp1->dev, "pipeline %u stop timeout\n",
+ wpf->entity.index);
+ }
+}
+
+void vsp1_video_resume(struct vsp1_device *vsp1)
+{
+ unsigned long flags;
+ unsigned int i;
+
+ /* Resume all running pipelines. */
+ for (i = 0; i < vsp1->info->wpf_count; ++i) {
+ struct vsp1_rwpf *wpf = vsp1->wpf[i];
+ struct vsp1_pipeline *pipe;
+
+ if (wpf == NULL)
+ continue;
+
+ pipe = wpf->entity.pipe;
+ if (pipe == NULL)
+ continue;
+
+ /*
+ * The hardware may have been reset during a suspend and will
+ * need a full reconfiguration.
+ */
+ pipe->configured = false;
+
+ spin_lock_irqsave(&pipe->irqlock, flags);
+ if (vsp1_pipeline_ready(pipe))
+ vsp1_video_pipeline_run(pipe);
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+struct vsp1_video *vsp1_video_create(struct vsp1_device *vsp1,
+ struct vsp1_rwpf *rwpf)
+{
+ struct vsp1_video *video;
+ const char *direction;
+ int ret;
+
+ video = devm_kzalloc(vsp1->dev, sizeof(*video), GFP_KERNEL);
+ if (!video)
+ return ERR_PTR(-ENOMEM);
+
+ rwpf->video = video;
+
+ video->vsp1 = vsp1;
+ video->rwpf = rwpf;
+
+ if (rwpf->entity.type == VSP1_ENTITY_RPF) {
+ direction = "input";
+ video->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ video->pad.flags = MEDIA_PAD_FL_SOURCE;
+ video->video.vfl_dir = VFL_DIR_TX;
+ video->video.device_caps = V4L2_CAP_VIDEO_OUTPUT_MPLANE |
+ V4L2_CAP_STREAMING;
+ } else {
+ direction = "output";
+ video->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ video->pad.flags = MEDIA_PAD_FL_SINK;
+ video->video.vfl_dir = VFL_DIR_RX;
+ video->video.device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+ V4L2_CAP_STREAMING;
+ }
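+	/*
+	 * An RPF reads frames from memory, so its video node is a V4L2 output
+	 * (mem-to-device) node; a WPF writes frames to memory, so its video
+	 * node is a capture node.
+	 */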
+
+ mutex_init(&video->lock);
+ spin_lock_init(&video->irqlock);
+ INIT_LIST_HEAD(&video->irqqueue);
+
+ /* Initialize the media entity... */
+ ret = media_entity_pads_init(&video->video.entity, 1, &video->pad);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ /* ... and the format ... */
+ rwpf->format.pixelformat = VSP1_VIDEO_DEF_FORMAT;
+ rwpf->format.width = VSP1_VIDEO_DEF_WIDTH;
+ rwpf->format.height = VSP1_VIDEO_DEF_HEIGHT;
+ __vsp1_video_try_format(video, &rwpf->format, &rwpf->fmtinfo);
+
+	/* ... and the video node ... */
+ video->video.v4l2_dev = &video->vsp1->v4l2_dev;
+ video->video.fops = &vsp1_video_fops;
+ snprintf(video->video.name, sizeof(video->video.name), "%s %s",
+ rwpf->entity.subdev.name, direction);
+ video->video.vfl_type = VFL_TYPE_VIDEO;
+ video->video.release = video_device_release_empty;
+ video->video.ioctl_ops = &vsp1_video_ioctl_ops;
+
+ video_set_drvdata(&video->video, video);
+
+ video->queue.type = video->type;
+ video->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ video->queue.lock = &video->lock;
+ video->queue.drv_priv = video;
+ video->queue.buf_struct_size = sizeof(struct vsp1_vb2_buffer);
+ video->queue.ops = &vsp1_video_queue_qops;
+ video->queue.mem_ops = &vb2_dma_contig_memops;
+ video->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ video->queue.dev = video->vsp1->bus_master;
+ ret = vb2_queue_init(&video->queue);
+ if (ret < 0) {
+ dev_err(video->vsp1->dev, "failed to initialize vb2 queue\n");
+ goto error;
+ }
+
+ /* ... and register the video device. */
+ video->video.queue = &video->queue;
+ ret = video_register_device(&video->video, VFL_TYPE_VIDEO, -1);
+ if (ret < 0) {
+ dev_err(video->vsp1->dev, "failed to register video device\n");
+ goto error;
+ }
+
+ return video;
+
+error:
+ vsp1_video_cleanup(video);
+ return ERR_PTR(ret);
+}
+
+void vsp1_video_cleanup(struct vsp1_video *video)
+{
+ if (video_is_registered(&video->video))
+ video_unregister_device(&video->video);
+
+ media_entity_cleanup(&video->video.entity);
+}
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_video.h b/drivers/media/platform/renesas/vsp1/vsp1_video.h
new file mode 100644
index 0000000000..f3cf5e2fdf
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_video.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * vsp1_video.h -- R-Car VSP1 Video Node
+ *
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+#ifndef __VSP1_VIDEO_H__
+#define __VSP1_VIDEO_H__
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+#include <media/videobuf2-v4l2.h>
+
+#include "vsp1_rwpf.h"
+
+struct vsp1_vb2_buffer {
+ struct vb2_v4l2_buffer buf;
+ struct list_head queue;
+ struct vsp1_rwpf_memory mem;
+};
+
+static inline struct vsp1_vb2_buffer *
+to_vsp1_vb2_buffer(struct vb2_v4l2_buffer *vbuf)
+{
+ return container_of(vbuf, struct vsp1_vb2_buffer, buf);
+}
+
+struct vsp1_video {
+ struct list_head list;
+ struct vsp1_device *vsp1;
+ struct vsp1_rwpf *rwpf;
+
+ struct video_device video;
+ enum v4l2_buf_type type;
+ struct media_pad pad;
+
+ struct mutex lock;
+
+ unsigned int pipe_index;
+
+ struct vb2_queue queue;
+ spinlock_t irqlock;
+ struct list_head irqqueue;
+};
+
+static inline struct vsp1_video *to_vsp1_video(struct video_device *vdev)
+{
+ return container_of(vdev, struct vsp1_video, video);
+}
+
+void vsp1_video_suspend(struct vsp1_device *vsp1);
+void vsp1_video_resume(struct vsp1_device *vsp1);
+
+struct vsp1_video *vsp1_video_create(struct vsp1_device *vsp1,
+ struct vsp1_rwpf *rwpf);
+void vsp1_video_cleanup(struct vsp1_video *video);
+
+#endif /* __VSP1_VIDEO_H__ */
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_wpf.c b/drivers/media/platform/renesas/vsp1/vsp1_wpf.c
new file mode 100644
index 0000000000..d0074ca009
--- /dev/null
+++ b/drivers/media/platform/renesas/vsp1/vsp1_wpf.c
@@ -0,0 +1,613 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * vsp1_wpf.c -- R-Car VSP1 Write Pixel Formatter
+ *
+ * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ */
+
+#include <linux/device.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_dl.h"
+#include "vsp1_pipe.h"
+#include "vsp1_rwpf.h"
+#include "vsp1_video.h"
+
+#define WPF_GEN2_MAX_WIDTH 2048U
+#define WPF_GEN2_MAX_HEIGHT 2048U
+#define WPF_GEN3_MAX_WIDTH 8190U
+#define WPF_GEN3_MAX_HEIGHT 8190U
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
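+/*
+ * WPF instance registers are banked at a fixed per-instance stride
+ * (VI6_WPF_OFFSET); vsp1_wpf_write() adds the instance offset and queues the
+ * register write through the display list body.
+ */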
+static inline void vsp1_wpf_write(struct vsp1_rwpf *wpf,
+ struct vsp1_dl_body *dlb, u32 reg, u32 data)
+{
+ vsp1_dl_body_write(dlb, reg + wpf->entity.index * VI6_WPF_OFFSET, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * Controls
+ */
+
+enum wpf_flip_ctrl {
+ WPF_CTRL_VFLIP = 0,
+ WPF_CTRL_HFLIP = 1,
+};
+
+static int vsp1_wpf_set_rotation(struct vsp1_rwpf *wpf, unsigned int rotation)
+{
+ struct vsp1_video *video = wpf->video;
+ struct v4l2_mbus_framefmt *sink_format;
+ struct v4l2_mbus_framefmt *source_format;
+ bool rotate;
+ int ret = 0;
+
+ /*
+	 * Only consider the 0°/180° from/to 90°/270° modifications; the rest
+ * is taken care of by the flipping configuration.
+ */
+ rotate = rotation == 90 || rotation == 270;
+ if (rotate == wpf->flip.rotate)
+ return 0;
+
+ /* Changing rotation isn't allowed when buffers are allocated. */
+ mutex_lock(&video->lock);
+
+ if (vb2_is_busy(&video->queue)) {
+ ret = -EBUSY;
+ goto done;
+ }
+
+ sink_format = vsp1_entity_get_pad_format(&wpf->entity,
+ wpf->entity.config,
+ RWPF_PAD_SINK);
+ source_format = vsp1_entity_get_pad_format(&wpf->entity,
+ wpf->entity.config,
+ RWPF_PAD_SOURCE);
+
+ mutex_lock(&wpf->entity.lock);
+
+ if (rotate) {
+ source_format->width = sink_format->height;
+ source_format->height = sink_format->width;
+ } else {
+ source_format->width = sink_format->width;
+ source_format->height = sink_format->height;
+ }
+
+ wpf->flip.rotate = rotate;
+
+ mutex_unlock(&wpf->entity.lock);
+
+done:
+ mutex_unlock(&video->lock);
+ return ret;
+}
+
+static int vsp1_wpf_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vsp1_rwpf *wpf =
+ container_of(ctrl->handler, struct vsp1_rwpf, ctrls);
+ unsigned int rotation;
+ u32 flip = 0;
+ int ret;
+
+ /* Update the rotation. */
+ rotation = wpf->flip.ctrls.rotate ? wpf->flip.ctrls.rotate->val : 0;
+ ret = vsp1_wpf_set_rotation(wpf, rotation);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Compute the flip value resulting from all three controls, with
+ * rotation by 180° flipping the image in both directions. Store the
+ * result in the pending flip field for the next frame that will be
+ * processed.
+ */
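+	/*
+	 * For instance, HFLIP=1, VFLIP=0 and ROTATE=270 first set the HFLIP
+	 * bit; the 180° component of the rotation then toggles both bits,
+	 * leaving only the VFLIP bit set in the pending value.
+	 */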
+ if (wpf->flip.ctrls.vflip->val)
+ flip |= BIT(WPF_CTRL_VFLIP);
+
+ if (wpf->flip.ctrls.hflip && wpf->flip.ctrls.hflip->val)
+ flip |= BIT(WPF_CTRL_HFLIP);
+
+ if (rotation == 180 || rotation == 270)
+ flip ^= BIT(WPF_CTRL_VFLIP) | BIT(WPF_CTRL_HFLIP);
+
+ spin_lock_irq(&wpf->flip.lock);
+ wpf->flip.pending = flip;
+ spin_unlock_irq(&wpf->flip.lock);
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vsp1_wpf_ctrl_ops = {
+ .s_ctrl = vsp1_wpf_s_ctrl,
+};
+
+static int wpf_init_controls(struct vsp1_rwpf *wpf)
+{
+ struct vsp1_device *vsp1 = wpf->entity.vsp1;
+ unsigned int num_flip_ctrls;
+
+ spin_lock_init(&wpf->flip.lock);
+
+ if (wpf->entity.index != 0) {
+ /* Only WPF0 supports flipping. */
+ num_flip_ctrls = 0;
+ } else if (vsp1_feature(vsp1, VSP1_HAS_WPF_HFLIP)) {
+ /*
+ * When horizontal flip is supported the WPF implements three
+ * controls (horizontal flip, vertical flip and rotation).
+ */
+ num_flip_ctrls = 3;
+ } else if (vsp1_feature(vsp1, VSP1_HAS_WPF_VFLIP)) {
+ /*
+ * When only vertical flip is supported the WPF implements a
+ * single control (vertical flip).
+ */
+ num_flip_ctrls = 1;
+ } else {
+ /* Otherwise flipping is not supported. */
+ num_flip_ctrls = 0;
+ }
+
+ vsp1_rwpf_init_ctrls(wpf, num_flip_ctrls);
+
+ if (num_flip_ctrls >= 1) {
+ wpf->flip.ctrls.vflip =
+ v4l2_ctrl_new_std(&wpf->ctrls, &vsp1_wpf_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ }
+
+ if (num_flip_ctrls == 3) {
+ wpf->flip.ctrls.hflip =
+ v4l2_ctrl_new_std(&wpf->ctrls, &vsp1_wpf_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ wpf->flip.ctrls.rotate =
+ v4l2_ctrl_new_std(&wpf->ctrls, &vsp1_wpf_ctrl_ops,
+ V4L2_CID_ROTATE, 0, 270, 90, 0);
+ v4l2_ctrl_cluster(3, &wpf->flip.ctrls.vflip);
+ }
+
+ if (wpf->ctrls.error) {
+ dev_err(vsp1->dev, "wpf%u: failed to initialize controls\n",
+ wpf->entity.index);
+ return wpf->ctrls.error;
+ }
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Core Operations
+ */
+
+static int wpf_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct vsp1_rwpf *wpf = to_rwpf(subdev);
+ struct vsp1_device *vsp1 = wpf->entity.vsp1;
+
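+	/*
+	 * Starting the stream requires no direct register access here; the
+	 * WPF is programmed through the display list by the configure
+	 * operations.
+	 */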
+ if (enable)
+ return 0;
+
+ /*
+ * Write to registers directly when stopping the stream as there will be
+ * no pipeline run to apply the display list.
+ */
+ vsp1_write(vsp1, VI6_WPF_IRQ_ENB(wpf->entity.index), 0);
+ vsp1_write(vsp1, wpf->entity.index * VI6_WPF_OFFSET +
+ VI6_WPF_SRCRPF, 0);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static const struct v4l2_subdev_video_ops wpf_video_ops = {
+ .s_stream = wpf_s_stream,
+};
+
+static const struct v4l2_subdev_ops wpf_ops = {
+ .video = &wpf_video_ops,
+ .pad = &vsp1_rwpf_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void vsp1_wpf_destroy(struct vsp1_entity *entity)
+{
+ struct vsp1_rwpf *wpf = entity_to_rwpf(entity);
+
+ vsp1_dlm_destroy(wpf->dlm);
+}
+
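+/*
+ * Writeback lasts for a single frame only: chain an extra display list that
+ * clears VI6_WPF_WRBCK_CTRL so the hardware disables writeback again once the
+ * current frame has been written out.
+ */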
+static int wpf_configure_writeback_chain(struct vsp1_rwpf *wpf,
+ struct vsp1_dl_list *dl)
+{
+ unsigned int index = wpf->entity.index;
+ struct vsp1_dl_list *dl_next;
+ struct vsp1_dl_body *dlb;
+
+ dl_next = vsp1_dl_list_get(wpf->dlm);
+ if (!dl_next) {
+ dev_err(wpf->entity.vsp1->dev,
+ "Failed to obtain a dl list, disabling writeback\n");
+ return -ENOMEM;
+ }
+
+ dlb = vsp1_dl_list_get_body0(dl_next);
+ vsp1_dl_body_write(dlb, VI6_WPF_WRBCK_CTRL(index), 0);
+ vsp1_dl_list_add_chain(dl, dl_next);
+
+ return 0;
+}
+
+static void wpf_configure_stream(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb)
+{
+ struct vsp1_rwpf *wpf = to_rwpf(&entity->subdev);
+ struct vsp1_device *vsp1 = wpf->entity.vsp1;
+ const struct v4l2_mbus_framefmt *source_format;
+ const struct v4l2_mbus_framefmt *sink_format;
+ unsigned int index = wpf->entity.index;
+ unsigned int i;
+ u32 outfmt = 0;
+ u32 srcrpf = 0;
+ int ret;
+
+ sink_format = vsp1_entity_get_pad_format(&wpf->entity,
+ wpf->entity.config,
+ RWPF_PAD_SINK);
+ source_format = vsp1_entity_get_pad_format(&wpf->entity,
+ wpf->entity.config,
+ RWPF_PAD_SOURCE);
+
+	/*
+	 * Format. The destination memory format and strides only matter when
+	 * the WPF writes to memory, i.e. for memory pipelines or when
+	 * writeback is enabled on a display pipeline.
+	 */
+ if (!pipe->lif || wpf->writeback) {
+ const struct v4l2_pix_format_mplane *format = &wpf->format;
+ const struct vsp1_format_info *fmtinfo = wpf->fmtinfo;
+
+ outfmt = fmtinfo->hwfmt << VI6_WPF_OUTFMT_WRFMT_SHIFT;
+
+ if (wpf->flip.rotate)
+ outfmt |= VI6_WPF_OUTFMT_ROT;
+
+ if (fmtinfo->alpha)
+ outfmt |= VI6_WPF_OUTFMT_PXA;
+ if (fmtinfo->swap_yc)
+ outfmt |= VI6_WPF_OUTFMT_SPYCS;
+ if (fmtinfo->swap_uv)
+ outfmt |= VI6_WPF_OUTFMT_SPUVS;
+
+ /* Destination stride and byte swapping. */
+ vsp1_wpf_write(wpf, dlb, VI6_WPF_DSTM_STRIDE_Y,
+ format->plane_fmt[0].bytesperline);
+ if (format->num_planes > 1)
+ vsp1_wpf_write(wpf, dlb, VI6_WPF_DSTM_STRIDE_C,
+ format->plane_fmt[1].bytesperline);
+
+ vsp1_wpf_write(wpf, dlb, VI6_WPF_DSWAP, fmtinfo->swap);
+
+ if (vsp1_feature(vsp1, VSP1_HAS_WPF_HFLIP) && index == 0)
+ vsp1_wpf_write(wpf, dlb, VI6_WPF_ROT_CTRL,
+ VI6_WPF_ROT_CTRL_LN16 |
+ (256 << VI6_WPF_ROT_CTRL_LMEM_WD_SHIFT));
+ }
+
+ if (sink_format->code != source_format->code)
+ outfmt |= VI6_WPF_OUTFMT_CSC;
+
+ wpf->outfmt = outfmt;
+
+ vsp1_dl_body_write(dlb, VI6_DPR_WPF_FPORCH(index),
+ VI6_DPR_WPF_FPORCH_FP_WPFN);
+
+ /*
+ * Sources. If the pipeline has a single input and BRx is not used,
+ * configure it as the master layer. Otherwise configure all
+ * inputs as sub-layers and select the virtual RPF as the master
+ * layer.
+ */
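+	/*
+	 * For example, a single RPF connected directly to the WPF becomes the
+	 * master layer, while two RPFs blended through a BRU are both
+	 * configured as sub-layers with the virtual RPF acting as the master
+	 * layer.
+	 */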
+ for (i = 0; i < vsp1->info->rpf_count; ++i) {
+ struct vsp1_rwpf *input = pipe->inputs[i];
+
+ if (!input)
+ continue;
+
+ srcrpf |= (!pipe->brx && pipe->num_inputs == 1)
+ ? VI6_WPF_SRCRPF_RPF_ACT_MST(input->entity.index)
+ : VI6_WPF_SRCRPF_RPF_ACT_SUB(input->entity.index);
+ }
+
+ if (pipe->brx)
+ srcrpf |= pipe->brx->type == VSP1_ENTITY_BRU
+ ? VI6_WPF_SRCRPF_VIRACT_MST
+ : VI6_WPF_SRCRPF_VIRACT2_MST;
+
+ vsp1_wpf_write(wpf, dlb, VI6_WPF_SRCRPF, srcrpf);
+
+ /* Enable interrupts. */
+ vsp1_dl_body_write(dlb, VI6_WPF_IRQ_STA(index), 0);
+ vsp1_dl_body_write(dlb, VI6_WPF_IRQ_ENB(index),
+ VI6_WPF_IRQ_ENB_DFEE);
+
+ /*
+ * Configure writeback for display pipelines (the wpf writeback flag is
+ * never set for memory-to-memory pipelines). Start by adding a chained
+	 * display list to disable writeback after a single frame, and proceed
+	 * to enable writeback. If the display list allocation fails, don't
+ * enable writeback as we wouldn't be able to safely disable it,
+ * resulting in possible memory corruption.
+ */
+ if (wpf->writeback) {
+ ret = wpf_configure_writeback_chain(wpf, dl);
+ if (ret < 0)
+ wpf->writeback = false;
+ }
+
+ vsp1_dl_body_write(dlb, VI6_WPF_WRBCK_CTRL(index),
+ wpf->writeback ? VI6_WPF_WRBCK_CTRL_WBMD : 0);
+}
+
+static void wpf_configure_frame(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb)
+{
+ const unsigned int mask = BIT(WPF_CTRL_VFLIP)
+ | BIT(WPF_CTRL_HFLIP);
+ struct vsp1_rwpf *wpf = to_rwpf(&entity->subdev);
+ unsigned long flags;
+ u32 outfmt;
+
+ spin_lock_irqsave(&wpf->flip.lock, flags);
+ wpf->flip.active = (wpf->flip.active & ~mask)
+ | (wpf->flip.pending & mask);
+ spin_unlock_irqrestore(&wpf->flip.lock, flags);
+
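+	/*
+	 * The flip bits are latched on a per-frame basis; rotation was already
+	 * applied to wpf->outfmt in wpf_configure_stream() through
+	 * VI6_WPF_OUTFMT_ROT.
+	 */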
+ outfmt = (wpf->alpha << VI6_WPF_OUTFMT_PDV_SHIFT) | wpf->outfmt;
+
+ if (wpf->flip.active & BIT(WPF_CTRL_VFLIP))
+ outfmt |= VI6_WPF_OUTFMT_FLP;
+ if (wpf->flip.active & BIT(WPF_CTRL_HFLIP))
+ outfmt |= VI6_WPF_OUTFMT_HFLP;
+
+ vsp1_wpf_write(wpf, dlb, VI6_WPF_OUTFMT, outfmt);
+}
+
+static void wpf_configure_partition(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb)
+{
+ struct vsp1_rwpf *wpf = to_rwpf(&entity->subdev);
+ struct vsp1_device *vsp1 = wpf->entity.vsp1;
+ struct vsp1_rwpf_memory mem = wpf->mem;
+ const struct v4l2_mbus_framefmt *sink_format;
+ const struct v4l2_pix_format_mplane *format = &wpf->format;
+ const struct vsp1_format_info *fmtinfo = wpf->fmtinfo;
+ unsigned int width;
+ unsigned int height;
+ unsigned int left;
+ unsigned int offset;
+ unsigned int flip;
+ unsigned int i;
+
+ sink_format = vsp1_entity_get_pad_format(&wpf->entity,
+ wpf->entity.config,
+ RWPF_PAD_SINK);
+ width = sink_format->width;
+ height = sink_format->height;
+ left = 0;
+
+ /*
+ * Cropping. The partition algorithm can split the image into
+ * multiple slices.
+ */
+ if (pipe->partitions > 1) {
+ width = pipe->partition->wpf.width;
+ left = pipe->partition->wpf.left;
+ }
+
+ vsp1_wpf_write(wpf, dlb, VI6_WPF_HSZCLIP, VI6_WPF_SZCLIP_EN |
+ (0 << VI6_WPF_SZCLIP_OFST_SHIFT) |
+ (width << VI6_WPF_SZCLIP_SIZE_SHIFT));
+ vsp1_wpf_write(wpf, dlb, VI6_WPF_VSZCLIP, VI6_WPF_SZCLIP_EN |
+ (0 << VI6_WPF_SZCLIP_OFST_SHIFT) |
+ (height << VI6_WPF_SZCLIP_SIZE_SHIFT));
+
+ /*
+ * For display pipelines without writeback enabled there's no memory
+	 * address to configure; return now.
+ */
+ if (pipe->lif && !wpf->writeback)
+ return;
+
+ /*
+	 * Update the memory offsets based on the flipping configuration.
+ * The destination addresses point to the locations where the
+ * VSP starts writing to memory, which can be any corner of the
+ * image depending on the combination of flipping and rotation.
+ */
+
+ /*
+ * First take the partition left coordinate into account.
+ * Compute the offset to order the partitions correctly on the
+ * output based on whether flipping is enabled. Consider
+ * horizontal flipping when rotation is disabled but vertical
+ * flipping when rotation is enabled, as rotating the image
+ * switches the horizontal and vertical directions. The offset
+ * is applied horizontally or vertically accordingly.
+ */
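+	/*
+	 * For instance, with horizontal flipping and no rotation, a 256 pixel
+	 * wide partition starting at left = 512 in a 1920 pixel wide output is
+	 * written at offset 1920 - 512 - 256 = 1152 from the left edge.
+	 */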
+ flip = wpf->flip.active;
+
+ if (flip & BIT(WPF_CTRL_HFLIP) && !wpf->flip.rotate)
+ offset = format->width - left - width;
+ else if (flip & BIT(WPF_CTRL_VFLIP) && wpf->flip.rotate)
+ offset = format->height - left - width;
+ else
+ offset = left;
+
+ for (i = 0; i < format->num_planes; ++i) {
+ unsigned int hsub = i > 0 ? fmtinfo->hsub : 1;
+ unsigned int vsub = i > 0 ? fmtinfo->vsub : 1;
+
+ if (wpf->flip.rotate)
+ mem.addr[i] += offset / vsub
+ * format->plane_fmt[i].bytesperline;
+ else
+ mem.addr[i] += offset / hsub
+ * fmtinfo->bpp[i] / 8;
+ }
+
+ if (flip & BIT(WPF_CTRL_VFLIP)) {
+ /*
+		 * When rotating, the output (after rotation) image
+ * height is equal to the partition width (before
+ * rotation). Otherwise it is equal to the output
+ * image height.
+ */
+ if (wpf->flip.rotate)
+ height = width;
+ else
+ height = format->height;
+
+ mem.addr[0] += (height - 1)
+ * format->plane_fmt[0].bytesperline;
+
+ if (format->num_planes > 1) {
+ offset = (height / fmtinfo->vsub - 1)
+ * format->plane_fmt[1].bytesperline;
+ mem.addr[1] += offset;
+ mem.addr[2] += offset;
+ }
+ }
+
+ if (wpf->flip.rotate && !(flip & BIT(WPF_CTRL_HFLIP))) {
+ unsigned int hoffset = max(0, (int)format->width - 16);
+
+ /*
+ * Compute the output coordinate. The partition
+ * horizontal (left) offset becomes a vertical offset.
+ */
+ for (i = 0; i < format->num_planes; ++i) {
+ unsigned int hsub = i > 0 ? fmtinfo->hsub : 1;
+
+ mem.addr[i] += hoffset / hsub
+ * fmtinfo->bpp[i] / 8;
+ }
+ }
+
+ /*
+ * On Gen3+ hardware the SPUVS bit has no effect on 3-planar
+ * formats. Swap the U and V planes manually in that case.
+ */
+ if (vsp1->info->gen >= 3 && format->num_planes == 3 &&
+ fmtinfo->swap_uv)
+ swap(mem.addr[1], mem.addr[2]);
+
+ vsp1_wpf_write(wpf, dlb, VI6_WPF_DSTM_ADDR_Y, mem.addr[0]);
+ vsp1_wpf_write(wpf, dlb, VI6_WPF_DSTM_ADDR_C0, mem.addr[1]);
+ vsp1_wpf_write(wpf, dlb, VI6_WPF_DSTM_ADDR_C1, mem.addr[2]);
+
+ /*
+	 * Writeback operates in single-shot mode and lasts for a single frame;
+ * reset the writeback flag to false for the next frame.
+ */
+ wpf->writeback = false;
+}
+
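+/*
+ * When rotating, partitions are limited to 256 pixels, matching the rotation
+ * line memory width programmed in wpf_configure_stream().
+ */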
+static unsigned int wpf_max_width(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe)
+{
+ struct vsp1_rwpf *wpf = to_rwpf(&entity->subdev);
+
+ return wpf->flip.rotate ? 256 : wpf->max_width;
+}
+
+static void wpf_partition(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_partition *partition,
+ unsigned int partition_idx,
+ struct vsp1_partition_window *window)
+{
+ partition->wpf = *window;
+}
+
+static const struct vsp1_entity_operations wpf_entity_ops = {
+ .destroy = vsp1_wpf_destroy,
+ .configure_stream = wpf_configure_stream,
+ .configure_frame = wpf_configure_frame,
+ .configure_partition = wpf_configure_partition,
+ .max_width = wpf_max_width,
+ .partition = wpf_partition,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index)
+{
+ struct vsp1_rwpf *wpf;
+ char name[6];
+ int ret;
+
+ wpf = devm_kzalloc(vsp1->dev, sizeof(*wpf), GFP_KERNEL);
+ if (wpf == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ if (vsp1->info->gen == 2) {
+ wpf->max_width = WPF_GEN2_MAX_WIDTH;
+ wpf->max_height = WPF_GEN2_MAX_HEIGHT;
+ } else {
+ wpf->max_width = WPF_GEN3_MAX_WIDTH;
+ wpf->max_height = WPF_GEN3_MAX_HEIGHT;
+ }
+
+ wpf->entity.ops = &wpf_entity_ops;
+ wpf->entity.type = VSP1_ENTITY_WPF;
+ wpf->entity.index = index;
+
+ sprintf(name, "wpf.%u", index);
+ ret = vsp1_entity_init(vsp1, &wpf->entity, name, 2, &wpf_ops,
+ MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ /* Initialize the display list manager. */
+ wpf->dlm = vsp1_dlm_create(vsp1, index, 64);
+ if (!wpf->dlm) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* Initialize the control handler. */
+ ret = wpf_init_controls(wpf);
+ if (ret < 0) {
+ dev_err(vsp1->dev, "wpf%u: failed to initialize controls\n",
+ index);
+ goto error;
+ }
+
+ v4l2_ctrl_handler_setup(&wpf->ctrls);
+
+ return wpf;
+
+error:
+ vsp1_entity_destroy(&wpf->entity);
+ return ERR_PTR(ret);
+}