author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree       848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/media/platform/st
parent     Initial commit. (diff)
Adding upstream version 6.1.76. (ref: upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/media/platform/st')
-rw-r--r--  drivers/media/platform/st/Kconfig | 6
-rw-r--r--  drivers/media/platform/st/Makefile | 7
-rw-r--r--  drivers/media/platform/st/sti/Kconfig | 5
-rw-r--r--  drivers/media/platform/st/sti/Makefile | 6
-rw-r--r--  drivers/media/platform/st/sti/bdisp/Kconfig | 10
-rw-r--r--  drivers/media/platform/st/sti/bdisp/Makefile | 4
-rw-r--r--  drivers/media/platform/st/sti/bdisp/bdisp-debug.c | 658
-rw-r--r--  drivers/media/platform/st/sti/bdisp/bdisp-filter.h | 42
-rw-r--r--  drivers/media/platform/st/sti/bdisp/bdisp-hw.c | 1118
-rw-r--r--  drivers/media/platform/st/sti/bdisp/bdisp-reg.h | 235
-rw-r--r--  drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c | 1428
-rw-r--r--  drivers/media/platform/st/sti/bdisp/bdisp.h | 214
-rw-r--r--  drivers/media/platform/st/sti/c8sectpfe/Kconfig | 29
-rw-r--r--  drivers/media/platform/st/sti/c8sectpfe/Makefile | 8
-rw-r--r--  drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-common.c | 262
-rw-r--r--  drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-common.h | 60
-rw-r--r--  drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c | 1187
-rw-r--r--  drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.h | 285
-rw-r--r--  drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-debugfs.c | 244
-rw-r--r--  drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-debugfs.h | 18
-rw-r--r--  drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-dvb.c | 235
-rw-r--r--  drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-dvb.h | 17
-rw-r--r--  drivers/media/platform/st/sti/delta/Kconfig | 36
-rw-r--r--  drivers/media/platform/st/sti/delta/Makefile | 7
-rw-r--r--  drivers/media/platform/st/sti/delta/delta-cfg.h | 64
-rw-r--r--  drivers/media/platform/st/sti/delta/delta-debug.c | 72
-rw-r--r--  drivers/media/platform/st/sti/delta/delta-debug.h | 18
-rw-r--r--  drivers/media/platform/st/sti/delta/delta-ipc.c | 591
-rw-r--r--  drivers/media/platform/st/sti/delta/delta-ipc.h | 76
-rw-r--r--  drivers/media/platform/st/sti/delta/delta-mem.c | 51
-rw-r--r--  drivers/media/platform/st/sti/delta/delta-mem.h | 14
-rw-r--r--  drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c | 455
-rw-r--r--  drivers/media/platform/st/sti/delta/delta-mjpeg-fw.h | 225
-rw-r--r--  drivers/media/platform/st/sti/delta/delta-mjpeg-hdr.c | 149
-rw-r--r--  drivers/media/platform/st/sti/delta/delta-mjpeg.h | 35
-rw-r--r--  drivers/media/platform/st/sti/delta/delta-v4l2.c | 1970
-rw-r--r--  drivers/media/platform/st/sti/delta/delta.h | 566
-rw-r--r--  drivers/media/platform/st/sti/hva/Kconfig | 26
-rw-r--r--  drivers/media/platform/st/sti/hva/Makefile | 4
-rw-r--r--  drivers/media/platform/st/sti/hva/hva-debugfs.c | 396
-rw-r--r--  drivers/media/platform/st/sti/hva/hva-h264.c | 1063
-rw-r--r--  drivers/media/platform/st/sti/hva/hva-hw.c | 585
-rw-r--r--  drivers/media/platform/st/sti/hva/hva-hw.h | 45
-rw-r--r--  drivers/media/platform/st/sti/hva/hva-mem.c | 62
-rw-r--r--  drivers/media/platform/st/sti/hva/hva-mem.h | 34
-rw-r--r--  drivers/media/platform/st/sti/hva/hva-v4l2.c | 1476
-rw-r--r--  drivers/media/platform/st/sti/hva/hva.h | 409
-rw-r--r--  drivers/media/platform/st/stm32/Kconfig | 31
-rw-r--r--  drivers/media/platform/st/stm32/Makefile | 4
-rw-r--r--  drivers/media/platform/st/stm32/dma2d/dma2d-hw.c | 133
-rw-r--r--  drivers/media/platform/st/stm32/dma2d/dma2d-regs.h | 113
-rw-r--r--  drivers/media/platform/st/stm32/dma2d/dma2d.c | 736
-rw-r--r--  drivers/media/platform/st/stm32/dma2d/dma2d.h | 135
-rw-r--r--  drivers/media/platform/st/stm32/stm32-dcmi.c | 2229
54 files changed, 17888 insertions, 0 deletions
diff --git a/drivers/media/platform/st/Kconfig b/drivers/media/platform/st/Kconfig
new file mode 100644
index 000000000..b29c258ea
--- /dev/null
+++ b/drivers/media/platform/st/Kconfig
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+comment "STMicroelectronics media platform drivers"
+
+source "drivers/media/platform/st/sti/Kconfig"
+source "drivers/media/platform/st/stm32/Kconfig"
diff --git a/drivers/media/platform/st/Makefile b/drivers/media/platform/st/Makefile
new file mode 100644
index 000000000..a1f75b2a8
--- /dev/null
+++ b/drivers/media/platform/st/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-y += sti/bdisp/
+obj-y += sti/c8sectpfe/
+obj-y += sti/delta/
+obj-y += sti/hva/
+obj-y += stm32/
diff --git a/drivers/media/platform/st/sti/Kconfig b/drivers/media/platform/st/sti/Kconfig
new file mode 100644
index 000000000..60068e8b4
--- /dev/null
+++ b/drivers/media/platform/st/sti/Kconfig
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+source "drivers/media/platform/st/sti/bdisp/Kconfig"
+source "drivers/media/platform/st/sti/c8sectpfe/Kconfig"
+source "drivers/media/platform/st/sti/delta/Kconfig"
+source "drivers/media/platform/st/sti/hva/Kconfig"
diff --git a/drivers/media/platform/st/sti/Makefile b/drivers/media/platform/st/sti/Makefile
new file mode 100644
index 000000000..f9ce8169b
--- /dev/null
+++ b/drivers/media/platform/st/sti/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y += bdisp/
+obj-y += c8sectpfe/
+obj-y += delta/
+obj-y += hva/
+obj-y += stm32/
diff --git a/drivers/media/platform/st/sti/bdisp/Kconfig b/drivers/media/platform/st/sti/bdisp/Kconfig
new file mode 100644
index 000000000..496f8aedf
--- /dev/null
+++ b/drivers/media/platform/st/sti/bdisp/Kconfig
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VIDEO_STI_BDISP
+ tristate "STMicroelectronics BDISP 2D blitter driver"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on VIDEO_DEV
+ depends on ARCH_STI || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ This v4l2 mem2mem driver is a 2D blitter for STMicroelectronics SoC.
diff --git a/drivers/media/platform/st/sti/bdisp/Makefile b/drivers/media/platform/st/sti/bdisp/Makefile
new file mode 100644
index 000000000..39ade0a34
--- /dev/null
+++ b/drivers/media/platform/st/sti/bdisp/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_VIDEO_STI_BDISP) += bdisp.o
+
+bdisp-objs := bdisp-v4l2.o bdisp-hw.o bdisp-debug.o
diff --git a/drivers/media/platform/st/sti/bdisp/bdisp-debug.c b/drivers/media/platform/st/sti/bdisp/bdisp-debug.c
new file mode 100644
index 000000000..a27f638df
--- /dev/null
+++ b/drivers/media/platform/st/sti/bdisp/bdisp-debug.c
@@ -0,0 +1,658 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/pm_runtime.h>
+
+#include "bdisp.h"
+#include "bdisp-filter.h"
+#include "bdisp-reg.h"
+
+void bdisp_dbg_perf_begin(struct bdisp_dev *bdisp)
+{
+ bdisp->dbg.hw_start = ktime_get();
+}
+
+void bdisp_dbg_perf_end(struct bdisp_dev *bdisp)
+{
+ s64 time_us;
+
+ time_us = ktime_us_delta(ktime_get(), bdisp->dbg.hw_start);
+
+ if (!bdisp->dbg.min_duration)
+ bdisp->dbg.min_duration = time_us;
+ else
+ bdisp->dbg.min_duration = min(time_us, bdisp->dbg.min_duration);
+
+ bdisp->dbg.last_duration = time_us;
+ bdisp->dbg.max_duration = max(time_us, bdisp->dbg.max_duration);
+ bdisp->dbg.tot_duration += time_us;
+}
+
+static void bdisp_dbg_dump_ins(struct seq_file *s, u32 val)
+{
+ seq_printf(s, "INS\t0x%08X\t", val);
+
+ switch (val & BLT_INS_S1_MASK) {
+ case BLT_INS_S1_OFF:
+ break;
+ case BLT_INS_S1_MEM:
+ seq_puts(s, "SRC1=mem - ");
+ break;
+ case BLT_INS_S1_CF:
+ seq_puts(s, "SRC1=ColorFill - ");
+ break;
+ case BLT_INS_S1_COPY:
+ seq_puts(s, "SRC1=copy - ");
+ break;
+ case BLT_INS_S1_FILL:
+ seq_puts(s, "SRC1=fil - ");
+ break;
+ default:
+ seq_puts(s, "SRC1=??? - ");
+ break;
+ }
+
+ switch (val & BLT_INS_S2_MASK) {
+ case BLT_INS_S2_OFF:
+ break;
+ case BLT_INS_S2_MEM:
+ seq_puts(s, "SRC2=mem - ");
+ break;
+ case BLT_INS_S2_CF:
+ seq_puts(s, "SRC2=ColorFill - ");
+ break;
+ default:
+ seq_puts(s, "SRC2=??? - ");
+ break;
+ }
+
+ if ((val & BLT_INS_S3_MASK) == BLT_INS_S3_MEM)
+ seq_puts(s, "SRC3=mem - ");
+
+ if (val & BLT_INS_IVMX)
+ seq_puts(s, "IVMX - ");
+ if (val & BLT_INS_CLUT)
+ seq_puts(s, "CLUT - ");
+ if (val & BLT_INS_SCALE)
+ seq_puts(s, "Scale - ");
+ if (val & BLT_INS_FLICK)
+ seq_puts(s, "Flicker - ");
+ if (val & BLT_INS_CLIP)
+ seq_puts(s, "Clip - ");
+ if (val & BLT_INS_CKEY)
+ seq_puts(s, "ColorKey - ");
+ if (val & BLT_INS_OVMX)
+ seq_puts(s, "OVMX - ");
+ if (val & BLT_INS_DEI)
+ seq_puts(s, "Deint - ");
+ if (val & BLT_INS_PMASK)
+ seq_puts(s, "PlaneMask - ");
+ if (val & BLT_INS_VC1R)
+ seq_puts(s, "VC1R - ");
+ if (val & BLT_INS_ROTATE)
+ seq_puts(s, "Rotate - ");
+ if (val & BLT_INS_GRAD)
+ seq_puts(s, "GradFill - ");
+ if (val & BLT_INS_AQLOCK)
+ seq_puts(s, "AQLock - ");
+ if (val & BLT_INS_PACE)
+ seq_puts(s, "Pace - ");
+ if (val & BLT_INS_IRQ)
+ seq_puts(s, "IRQ - ");
+
+ seq_putc(s, '\n');
+}
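As a worked example of the decoding above: a node that fetches a three-plane YUV source and applies colour conversion plus scaling carries BLT_INS_S1_MEM | BLT_INS_S2_MEM | BLT_INS_S3_MEM | BLT_INS_IVMX | BLT_INS_SCALE, i.e. 0x00000169 with the register definitions from bdisp-reg.h further down in this patch, and this helper reports it as:

    INS    0x00000169    SRC1=mem - SRC2=mem - SRC3=mem - IVMX - Scale -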
+
+static void bdisp_dbg_dump_tty(struct seq_file *s, u32 val)
+{
+ seq_printf(s, "TTY\t0x%08X\t", val);
+ seq_printf(s, "Pitch=%d - ", val & 0xFFFF);
+
+ switch ((val & BLT_TTY_COL_MASK) >> BLT_TTY_COL_SHIFT) {
+ case BDISP_RGB565:
+ seq_puts(s, "RGB565 - ");
+ break;
+ case BDISP_RGB888:
+ seq_puts(s, "RGB888 - ");
+ break;
+ case BDISP_XRGB8888:
+ seq_puts(s, "xRGB888 - ");
+ break;
+ case BDISP_ARGB8888:
+ seq_puts(s, "ARGB8888 - ");
+ break;
+ case BDISP_NV12:
+ seq_puts(s, "NV12 - ");
+ break;
+ case BDISP_YUV_3B:
+ seq_puts(s, "YUV420P - ");
+ break;
+ default:
+ seq_puts(s, "ColorFormat ??? - ");
+ break;
+ }
+
+ if (val & BLT_TTY_ALPHA_R)
+ seq_puts(s, "AlphaRange - ");
+ if (val & BLT_TTY_CR_NOT_CB)
+ seq_puts(s, "CrNotCb - ");
+ if (val & BLT_TTY_MB)
+ seq_puts(s, "MB - ");
+ if (val & BLT_TTY_HSO)
+ seq_puts(s, "HSO inverse - ");
+ if (val & BLT_TTY_VSO)
+ seq_puts(s, "VSO inverse - ");
+ if (val & BLT_TTY_DITHER)
+ seq_puts(s, "Dither - ");
+ if (val & BLT_TTY_CHROMA)
+ seq_puts(s, "Write CHROMA - ");
+ if (val & BLT_TTY_BIG_END)
+ seq_puts(s, "BigEndian - ");
+
+ seq_putc(s, '\n');
+}
+
+static void bdisp_dbg_dump_xy(struct seq_file *s, u32 val, char *name)
+{
+ seq_printf(s, "%s\t0x%08X\t", name, val);
+ seq_printf(s, "(%d,%d)\n", val & 0xFFFF, (val >> 16));
+}
+
+static void bdisp_dbg_dump_sz(struct seq_file *s, u32 val, char *name)
+{
+ seq_printf(s, "%s\t0x%08X\t", name, val);
+ seq_printf(s, "%dx%d\n", val & 0x1FFF, (val >> 16) & 0x1FFF);
+}
+
+static void bdisp_dbg_dump_sty(struct seq_file *s,
+ u32 val, u32 addr, char *name)
+{
+ bool s1, s2, s3;
+
+ seq_printf(s, "%s\t0x%08X\t", name, val);
+
+ if (!addr || !name || (strlen(name) < 2))
+ goto done;
+
+ s1 = name[strlen(name) - 1] == '1';
+ s2 = name[strlen(name) - 1] == '2';
+ s3 = name[strlen(name) - 1] == '3';
+
+ seq_printf(s, "Pitch=%d - ", val & 0xFFFF);
+
+ switch ((val & BLT_TTY_COL_MASK) >> BLT_TTY_COL_SHIFT) {
+ case BDISP_RGB565:
+ seq_puts(s, "RGB565 - ");
+ break;
+ case BDISP_RGB888:
+ seq_puts(s, "RGB888 - ");
+ break;
+ case BDISP_XRGB8888:
+ seq_puts(s, "xRGB888 - ");
+ break;
+ case BDISP_ARGB8888:
+ seq_puts(s, "ARGB888 - ");
+ break;
+ case BDISP_NV12:
+ seq_puts(s, "NV12 - ");
+ break;
+ case BDISP_YUV_3B:
+ seq_puts(s, "YUV420P - ");
+ break;
+ default:
+ seq_puts(s, "ColorFormat ??? - ");
+ break;
+ }
+
+ if ((val & BLT_TTY_ALPHA_R) && !s3)
+ seq_puts(s, "AlphaRange - ");
+ if ((val & BLT_S1TY_A1_SUBSET) && !s3)
+ seq_puts(s, "A1SubSet - ");
+ if ((val & BLT_TTY_MB) && !s1)
+ seq_puts(s, "MB - ");
+ if (val & BLT_TTY_HSO)
+ seq_puts(s, "HSO inverse - ");
+ if (val & BLT_TTY_VSO)
+ seq_puts(s, "VSO inverse - ");
+ if ((val & BLT_S1TY_CHROMA_EXT) && (s1 || s2))
+ seq_puts(s, "ChromaExt - ");
+ if ((val & BLT_S3TY_BLANK_ACC) && s3)
+ seq_puts(s, "Blank Acc - ");
+ if ((val & BTL_S1TY_SUBBYTE) && !s3)
+ seq_puts(s, "SubByte - ");
+ if ((val & BLT_S1TY_RGB_EXP) && !s3)
+ seq_puts(s, "RGBExpand - ");
+ if ((val & BLT_TTY_BIG_END) && !s3)
+ seq_puts(s, "BigEndian - ");
+
+done:
+ seq_putc(s, '\n');
+}
+
+static void bdisp_dbg_dump_fctl(struct seq_file *s, u32 val)
+{
+ seq_printf(s, "FCTL\t0x%08X\t", val);
+
+ if ((val & BLT_FCTL_Y_HV_SCALE) == BLT_FCTL_Y_HV_SCALE)
+ seq_puts(s, "Resize Luma - ");
+ else if ((val & BLT_FCTL_Y_HV_SCALE) == BLT_FCTL_Y_HV_SAMPLE)
+ seq_puts(s, "Sample Luma - ");
+
+ if ((val & BLT_FCTL_HV_SCALE) == BLT_FCTL_HV_SCALE)
+ seq_puts(s, "Resize Chroma");
+ else if ((val & BLT_FCTL_HV_SCALE) == BLT_FCTL_HV_SAMPLE)
+ seq_puts(s, "Sample Chroma");
+
+ seq_putc(s, '\n');
+}
+
+static void bdisp_dbg_dump_rsf(struct seq_file *s, u32 val, char *name)
+{
+ u32 inc;
+
+ seq_printf(s, "%s\t0x%08X\t", name, val);
+
+ if (!val)
+ goto done;
+
+ inc = val & 0xFFFF;
+ seq_printf(s, "H: %d(6.10) / scale~%dx0.1 - ", inc, 1024 * 10 / inc);
+
+ inc = val >> 16;
+ seq_printf(s, "V: %d(6.10) / scale~%dx0.1", inc, 1024 * 10 / inc);
+
+done:
+ seq_putc(s, '\n');
+}
+
+static void bdisp_dbg_dump_rzi(struct seq_file *s, u32 val, char *name)
+{
+ seq_printf(s, "%s\t0x%08X\t", name, val);
+
+ if (!val)
+ goto done;
+
+ seq_printf(s, "H: init=%d repeat=%d - ", val & 0x3FF, (val >> 12) & 7);
+ val >>= 16;
+ seq_printf(s, "V: init=%d repeat=%d", val & 0x3FF, (val >> 12) & 7);
+
+done:
+ seq_putc(s, '\n');
+}
+
+static void bdisp_dbg_dump_ivmx(struct seq_file *s,
+ u32 c0, u32 c1, u32 c2, u32 c3)
+{
+ seq_printf(s, "IVMX0\t0x%08X\n", c0);
+ seq_printf(s, "IVMX1\t0x%08X\n", c1);
+ seq_printf(s, "IVMX2\t0x%08X\n", c2);
+ seq_printf(s, "IVMX3\t0x%08X\t", c3);
+
+ if (!c0 && !c1 && !c2 && !c3) {
+ seq_putc(s, '\n');
+ return;
+ }
+
+ if ((c0 == bdisp_rgb_to_yuv[0]) &&
+ (c1 == bdisp_rgb_to_yuv[1]) &&
+ (c2 == bdisp_rgb_to_yuv[2]) &&
+ (c3 == bdisp_rgb_to_yuv[3])) {
+ seq_puts(s, "RGB to YUV\n");
+ return;
+ }
+
+ if ((c0 == bdisp_yuv_to_rgb[0]) &&
+ (c1 == bdisp_yuv_to_rgb[1]) &&
+ (c2 == bdisp_yuv_to_rgb[2]) &&
+ (c3 == bdisp_yuv_to_rgb[3])) {
+ seq_puts(s, "YUV to RGB\n");
+ return;
+ }
+ seq_puts(s, "Unknown conversion\n");
+}
+
+static int last_nodes_show(struct seq_file *s, void *data)
+{
+ /* Not dumping all fields, focusing on significant ones */
+ struct bdisp_dev *bdisp = s->private;
+ struct bdisp_node *node;
+ int i = 0;
+
+ if (!bdisp->dbg.copy_node[0]) {
+ seq_puts(s, "No node built yet\n");
+ return 0;
+ }
+
+ do {
+ node = bdisp->dbg.copy_node[i];
+ if (!node)
+ break;
+ seq_printf(s, "--------\nNode %d:\n", i);
+ seq_puts(s, "-- General --\n");
+ seq_printf(s, "NIP\t0x%08X\n", node->nip);
+ seq_printf(s, "CIC\t0x%08X\n", node->cic);
+ bdisp_dbg_dump_ins(s, node->ins);
+ seq_printf(s, "ACK\t0x%08X\n", node->ack);
+ seq_puts(s, "-- Target --\n");
+ seq_printf(s, "TBA\t0x%08X\n", node->tba);
+ bdisp_dbg_dump_tty(s, node->tty);
+ bdisp_dbg_dump_xy(s, node->txy, "TXY");
+ bdisp_dbg_dump_sz(s, node->tsz, "TSZ");
+ /* Color Fill not dumped */
+ seq_puts(s, "-- Source 1 --\n");
+ seq_printf(s, "S1BA\t0x%08X\n", node->s1ba);
+ bdisp_dbg_dump_sty(s, node->s1ty, node->s1ba, "S1TY");
+ bdisp_dbg_dump_xy(s, node->s1xy, "S1XY");
+ seq_puts(s, "-- Source 2 --\n");
+ seq_printf(s, "S2BA\t0x%08X\n", node->s2ba);
+ bdisp_dbg_dump_sty(s, node->s2ty, node->s2ba, "S2TY");
+ bdisp_dbg_dump_xy(s, node->s2xy, "S2XY");
+ bdisp_dbg_dump_sz(s, node->s2sz, "S2SZ");
+ seq_puts(s, "-- Source 3 --\n");
+ seq_printf(s, "S3BA\t0x%08X\n", node->s3ba);
+ bdisp_dbg_dump_sty(s, node->s3ty, node->s3ba, "S3TY");
+ bdisp_dbg_dump_xy(s, node->s3xy, "S3XY");
+ bdisp_dbg_dump_sz(s, node->s3sz, "S3SZ");
+ /* Clipping not dumped */
+ /* CLUT not dumped */
+ seq_puts(s, "-- Filter & Mask --\n");
+ bdisp_dbg_dump_fctl(s, node->fctl);
+ /* PMK not dumped */
+ seq_puts(s, "-- Chroma Filter --\n");
+ bdisp_dbg_dump_rsf(s, node->rsf, "RSF");
+ bdisp_dbg_dump_rzi(s, node->rzi, "RZI");
+ seq_printf(s, "HFP\t0x%08X\n", node->hfp);
+ seq_printf(s, "VFP\t0x%08X\n", node->vfp);
+ seq_puts(s, "-- Luma Filter --\n");
+ bdisp_dbg_dump_rsf(s, node->y_rsf, "Y_RSF");
+ bdisp_dbg_dump_rzi(s, node->y_rzi, "Y_RZI");
+ seq_printf(s, "Y_HFP\t0x%08X\n", node->y_hfp);
+ seq_printf(s, "Y_VFP\t0x%08X\n", node->y_vfp);
+ /* Flicker not dumped */
+ /* Color key not dumped */
+ /* Reserved not dumped */
+ /* Static Address & User not dumped */
+ seq_puts(s, "-- Input Versatile Matrix --\n");
+ bdisp_dbg_dump_ivmx(s, node->ivmx0, node->ivmx1,
+ node->ivmx2, node->ivmx3);
+ /* Output Versatile Matrix not dumped */
+ /* Pace not dumped */
+ /* VC1R & DEI not dumped */
+ /* Gradient Fill not dumped */
+ } while ((++i < MAX_NB_NODE) && node->nip);
+
+ return 0;
+}
+
+static int last_nodes_raw_show(struct seq_file *s, void *data)
+{
+ struct bdisp_dev *bdisp = s->private;
+ struct bdisp_node *node;
+ u32 *val;
+ int j, i = 0;
+
+ if (!bdisp->dbg.copy_node[0]) {
+ seq_puts(s, "No node built yet\n");
+ return 0;
+ }
+
+ do {
+ node = bdisp->dbg.copy_node[i];
+ if (!node)
+ break;
+
+ seq_printf(s, "--------\nNode %d:\n", i);
+ val = (u32 *)node;
+ for (j = 0; j < sizeof(struct bdisp_node) / sizeof(u32); j++)
+ seq_printf(s, "0x%08X\n", *val++);
+ } while ((++i < MAX_NB_NODE) && node->nip);
+
+ return 0;
+}
+
+static const char *bdisp_fmt_to_str(struct bdisp_frame frame)
+{
+ switch (frame.fmt->pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ return "YUV420P";
+ case V4L2_PIX_FMT_NV12:
+ if (frame.field == V4L2_FIELD_INTERLACED)
+ return "NV12 interlaced";
+ else
+ return "NV12";
+ case V4L2_PIX_FMT_RGB565:
+ return "RGB16";
+ case V4L2_PIX_FMT_RGB24:
+ return "RGB24";
+ case V4L2_PIX_FMT_XBGR32:
+ return "XRGB";
+ case V4L2_PIX_FMT_ABGR32:
+ return "ARGB";
+ default:
+ return "????";
+ }
+}
+
+static int last_request_show(struct seq_file *s, void *data)
+{
+ struct bdisp_dev *bdisp = s->private;
+ struct bdisp_request *request = &bdisp->dbg.copy_request;
+ struct bdisp_frame src, dst;
+
+ if (!request->nb_req) {
+ seq_puts(s, "No request\n");
+ return 0;
+ }
+
+ src = request->src;
+ dst = request->dst;
+
+ seq_printf(s, "\nRequest #%d\n", request->nb_req);
+
+ seq_printf(s, "Format: %s\t\t\t%s\n",
+ bdisp_fmt_to_str(src), bdisp_fmt_to_str(dst));
+ seq_printf(s, "Crop area: %dx%d @ %d,%d ==>\t%dx%d @ %d,%d\n",
+ src.crop.width, src.crop.height,
+ src.crop.left, src.crop.top,
+ dst.crop.width, dst.crop.height,
+ dst.crop.left, dst.crop.top);
+ seq_printf(s, "Buff size: %dx%d\t\t%dx%d\n\n",
+ src.width, src.height, dst.width, dst.height);
+
+ if (request->hflip)
+ seq_puts(s, "Horizontal flip\n\n");
+
+ if (request->vflip)
+ seq_puts(s, "Vertical flip\n\n");
+
+ return 0;
+}
+
+#define DUMP(reg) seq_printf(s, #reg " \t0x%08X\n", readl(bdisp->regs + reg))
+
+static int regs_show(struct seq_file *s, void *data)
+{
+ struct bdisp_dev *bdisp = s->private;
+ int ret;
+ unsigned int i;
+
+ ret = pm_runtime_resume_and_get(bdisp->dev);
+ if (ret < 0) {
+ seq_puts(s, "Cannot wake up IP\n");
+ return 0;
+ }
+
+ seq_printf(s, "Reg @ = 0x%p\n", bdisp->regs);
+
+ seq_puts(s, "\nStatic:\n");
+ DUMP(BLT_CTL);
+ DUMP(BLT_ITS);
+ DUMP(BLT_STA1);
+ DUMP(BLT_AQ1_CTL);
+ DUMP(BLT_AQ1_IP);
+ DUMP(BLT_AQ1_LNA);
+ DUMP(BLT_AQ1_STA);
+ DUMP(BLT_ITM0);
+
+ seq_puts(s, "\nPlugs:\n");
+ DUMP(BLT_PLUGS1_OP2);
+ DUMP(BLT_PLUGS1_CHZ);
+ DUMP(BLT_PLUGS1_MSZ);
+ DUMP(BLT_PLUGS1_PGZ);
+ DUMP(BLT_PLUGS2_OP2);
+ DUMP(BLT_PLUGS2_CHZ);
+ DUMP(BLT_PLUGS2_MSZ);
+ DUMP(BLT_PLUGS2_PGZ);
+ DUMP(BLT_PLUGS3_OP2);
+ DUMP(BLT_PLUGS3_CHZ);
+ DUMP(BLT_PLUGS3_MSZ);
+ DUMP(BLT_PLUGS3_PGZ);
+ DUMP(BLT_PLUGT_OP2);
+ DUMP(BLT_PLUGT_CHZ);
+ DUMP(BLT_PLUGT_MSZ);
+ DUMP(BLT_PLUGT_PGZ);
+
+ seq_puts(s, "\nNode:\n");
+ DUMP(BLT_NIP);
+ DUMP(BLT_CIC);
+ DUMP(BLT_INS);
+ DUMP(BLT_ACK);
+ DUMP(BLT_TBA);
+ DUMP(BLT_TTY);
+ DUMP(BLT_TXY);
+ DUMP(BLT_TSZ);
+ DUMP(BLT_S1BA);
+ DUMP(BLT_S1TY);
+ DUMP(BLT_S1XY);
+ DUMP(BLT_S2BA);
+ DUMP(BLT_S2TY);
+ DUMP(BLT_S2XY);
+ DUMP(BLT_S2SZ);
+ DUMP(BLT_S3BA);
+ DUMP(BLT_S3TY);
+ DUMP(BLT_S3XY);
+ DUMP(BLT_S3SZ);
+ DUMP(BLT_FCTL);
+ DUMP(BLT_RSF);
+ DUMP(BLT_RZI);
+ DUMP(BLT_HFP);
+ DUMP(BLT_VFP);
+ DUMP(BLT_Y_RSF);
+ DUMP(BLT_Y_RZI);
+ DUMP(BLT_Y_HFP);
+ DUMP(BLT_Y_VFP);
+ DUMP(BLT_IVMX0);
+ DUMP(BLT_IVMX1);
+ DUMP(BLT_IVMX2);
+ DUMP(BLT_IVMX3);
+ DUMP(BLT_OVMX0);
+ DUMP(BLT_OVMX1);
+ DUMP(BLT_OVMX2);
+ DUMP(BLT_OVMX3);
+ DUMP(BLT_DEI);
+
+ seq_puts(s, "\nFilter:\n");
+ for (i = 0; i < BLT_NB_H_COEF; i++) {
+ seq_printf(s, "BLT_HFC%d \t0x%08X\n", i,
+ readl(bdisp->regs + BLT_HFC_N + i * 4));
+ }
+ for (i = 0; i < BLT_NB_V_COEF; i++) {
+ seq_printf(s, "BLT_VFC%d \t0x%08X\n", i,
+ readl(bdisp->regs + BLT_VFC_N + i * 4));
+ }
+
+ seq_puts(s, "\nLuma filter:\n");
+ for (i = 0; i < BLT_NB_H_COEF; i++) {
+ seq_printf(s, "BLT_Y_HFC%d \t0x%08X\n", i,
+ readl(bdisp->regs + BLT_Y_HFC_N + i * 4));
+ }
+ for (i = 0; i < BLT_NB_V_COEF; i++) {
+ seq_printf(s, "BLT_Y_VFC%d \t0x%08X\n", i,
+ readl(bdisp->regs + BLT_Y_VFC_N + i * 4));
+ }
+
+ pm_runtime_put(bdisp->dev);
+
+ return 0;
+}
+
+#define SECOND 1000000
+
+static int perf_show(struct seq_file *s, void *data)
+{
+ struct bdisp_dev *bdisp = s->private;
+ struct bdisp_request *request = &bdisp->dbg.copy_request;
+ s64 avg_time_us;
+ int avg_fps, min_fps, max_fps, last_fps;
+
+ if (!request->nb_req) {
+ seq_puts(s, "No request\n");
+ return 0;
+ }
+
+ avg_time_us = div64_s64(bdisp->dbg.tot_duration, request->nb_req);
+ if (avg_time_us > SECOND)
+ avg_fps = 0;
+ else
+ avg_fps = SECOND / (s32)avg_time_us;
+
+ if (bdisp->dbg.min_duration > SECOND)
+ min_fps = 0;
+ else
+ min_fps = SECOND / (s32)bdisp->dbg.min_duration;
+
+ if (bdisp->dbg.max_duration > SECOND)
+ max_fps = 0;
+ else
+ max_fps = SECOND / (s32)bdisp->dbg.max_duration;
+
+ if (bdisp->dbg.last_duration > SECOND)
+ last_fps = 0;
+ else
+ last_fps = SECOND / (s32)bdisp->dbg.last_duration;
+
+ seq_printf(s, "HW processing (%d requests):\n", request->nb_req);
+ seq_printf(s, " Average: %5lld us (%3d fps)\n",
+ avg_time_us, avg_fps);
+ seq_printf(s, " Min-Max: %5lld us (%3d fps) - %5lld us (%3d fps)\n",
+ bdisp->dbg.min_duration, min_fps,
+ bdisp->dbg.max_duration, max_fps);
+ seq_printf(s, " Last: %5lld us (%3d fps)\n",
+ bdisp->dbg.last_duration, last_fps);
+
+ return 0;
+}
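For example, 100 requests totalling 500000 us of hardware time give avg_time_us = 500000 / 100 = 5000 and an average rate of 1000000 / 5000 = 200 fps; any duration longer than one second is reported as 0 fps.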
+
+#define bdisp_dbg_create_entry(name) \
+ debugfs_create_file(#name, S_IRUGO, bdisp->dbg.debugfs_entry, bdisp, \
+ &name##_fops)
+
+DEFINE_SHOW_ATTRIBUTE(regs);
+DEFINE_SHOW_ATTRIBUTE(last_nodes);
+DEFINE_SHOW_ATTRIBUTE(last_nodes_raw);
+DEFINE_SHOW_ATTRIBUTE(last_request);
+DEFINE_SHOW_ATTRIBUTE(perf);
+
+void bdisp_debugfs_create(struct bdisp_dev *bdisp)
+{
+ char dirname[16];
+
+ snprintf(dirname, sizeof(dirname), "%s%d", BDISP_NAME, bdisp->id);
+ bdisp->dbg.debugfs_entry = debugfs_create_dir(dirname, NULL);
+
+ bdisp_dbg_create_entry(regs);
+ bdisp_dbg_create_entry(last_nodes);
+ bdisp_dbg_create_entry(last_nodes_raw);
+ bdisp_dbg_create_entry(last_request);
+ bdisp_dbg_create_entry(perf);
+}
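Assuming BDISP_NAME expands to "bdisp" (it is defined in bdisp.h, outside this hunk), the first instance therefore exposes the read-only files regs, last_nodes, last_nodes_raw, last_request and perf under /sys/kernel/debug/bdisp0/.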
+
+void bdisp_debugfs_remove(struct bdisp_dev *bdisp)
+{
+ debugfs_remove_recursive(bdisp->dbg.debugfs_entry);
+ bdisp->dbg.debugfs_entry = NULL;
+}
diff --git a/drivers/media/platform/st/sti/bdisp/bdisp-filter.h b/drivers/media/platform/st/sti/bdisp/bdisp-filter.h
new file mode 100644
index 000000000..9e1a95fd2
--- /dev/null
+++ b/drivers/media/platform/st/sti/bdisp/bdisp-filter.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
+ */
+
+#define BDISP_HF_NB 64
+#define BDISP_VF_NB 40
+
+/**
+ * struct bdisp_filter_h_spec - Horizontal filter specification
+ *
+ * @min: min scale factor for this filter (6.10 fixed point)
+ * @max: max scale factor for this filter (6.10 fixed point)
+ * @coef: filter coefficients
+ */
+struct bdisp_filter_h_spec {
+ const u16 min;
+ const u16 max;
+ const u8 coef[BDISP_HF_NB];
+};
+/**
+ * struct bdisp_filter_v_spec - Vertical filter specification
+ *
+ * @min: min scale factor for this filter (6.10 fixed point)
+ * @max: max scale factor for this filter (6.10 fixed point)
+ * @coef: filter coefficients
+ */
+struct bdisp_filter_v_spec {
+ const u16 min;
+ const u16 max;
+ const u8 coef[BDISP_VF_NB];
+};
+
+/* RGB YUV 601 standard conversion */
+static const u32 bdisp_rgb_to_yuv[] = {
+ 0x0e1e8bee, 0x08420419, 0xfb5ed471, 0x08004080,
+};
+
+static const u32 bdisp_yuv_to_rgb[] = {
+ 0x3324a800, 0xe604ab9c, 0x0004a957, 0x32121eeb,
+};
diff --git a/drivers/media/platform/st/sti/bdisp/bdisp-hw.c b/drivers/media/platform/st/sti/bdisp/bdisp-hw.c
new file mode 100644
index 000000000..a74e9fd65
--- /dev/null
+++ b/drivers/media/platform/st/sti/bdisp/bdisp-hw.c
@@ -0,0 +1,1118 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
+ */
+
+#include <linux/delay.h>
+
+#include "bdisp.h"
+#include "bdisp-filter.h"
+#include "bdisp-reg.h"
+
+/* Max width of the source frame in a single node */
+#define MAX_SRC_WIDTH 2048
+
+/* Reset & boot poll config */
+#define POLL_RST_MAX 500
+#define POLL_RST_DELAY_MS 2
+
+enum bdisp_target_plan {
+ BDISP_RGB,
+ BDISP_Y,
+ BDISP_CBCR
+};
+
+struct bdisp_op_cfg {
+ bool cconv; /* RGB - YUV conversion */
+ bool hflip; /* Horizontal flip */
+ bool vflip; /* Vertical flip */
+ bool wide; /* Wide (>MAX_SRC_WIDTH) */
+ bool scale; /* Scale */
+ u16 h_inc; /* Horizontal increment in 6.10 format */
+ u16 v_inc; /* Vertical increment in 6.10 format */
+ bool src_interlaced; /* is the src an interlaced buffer */
+ u8 src_nbp; /* nb of planes of the src */
+ bool src_yuv; /* is the src a YUV color format */
+ bool src_420; /* is the src 4:2:0 chroma subsampled */
+ u8 dst_nbp; /* nb of planes of the dst */
+ bool dst_yuv; /* is the dst a YUV color format */
+ bool dst_420; /* is the dst 4:2:0 chroma subsampled */
+};
+
+struct bdisp_filter_addr {
+ u16 min; /* Filter min scale factor (6.10 fixed point) */
+ u16 max; /* Filter max scale factor (6.10 fixed point) */
+ void *virt; /* Virtual address for filter table */
+ dma_addr_t paddr; /* Physical address for filter table */
+};
+
+static const struct bdisp_filter_h_spec bdisp_h_spec[] = {
+ {
+ .min = 0,
+ .max = 921,
+ .coef = {
+ 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0x07, 0x3d, 0xfc, 0x01, 0x00,
+ 0x00, 0x01, 0xfd, 0x11, 0x36, 0xf9, 0x02, 0x00,
+ 0x00, 0x01, 0xfb, 0x1b, 0x2e, 0xf9, 0x02, 0x00,
+ 0x00, 0x01, 0xf9, 0x26, 0x26, 0xf9, 0x01, 0x00,
+ 0x00, 0x02, 0xf9, 0x30, 0x19, 0xfb, 0x01, 0x00,
+ 0x00, 0x02, 0xf9, 0x39, 0x0e, 0xfd, 0x01, 0x00,
+ 0x00, 0x01, 0xfc, 0x3e, 0x06, 0xff, 0x00, 0x00
+ }
+ },
+ {
+ .min = 921,
+ .max = 1024,
+ .coef = {
+ 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
+ 0xff, 0x03, 0xfd, 0x08, 0x3e, 0xf9, 0x04, 0xfe,
+ 0xfd, 0x06, 0xf8, 0x13, 0x3b, 0xf4, 0x07, 0xfc,
+ 0xfb, 0x08, 0xf5, 0x1f, 0x34, 0xf1, 0x09, 0xfb,
+ 0xfb, 0x09, 0xf2, 0x2b, 0x2a, 0xf1, 0x09, 0xfb,
+ 0xfb, 0x09, 0xf2, 0x35, 0x1e, 0xf4, 0x08, 0xfb,
+ 0xfc, 0x07, 0xf5, 0x3c, 0x12, 0xf7, 0x06, 0xfd,
+ 0xfe, 0x04, 0xfa, 0x3f, 0x07, 0xfc, 0x03, 0xff
+ }
+ },
+ {
+ .min = 1024,
+ .max = 1126,
+ .coef = {
+ 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
+ 0xff, 0x03, 0xfd, 0x08, 0x3e, 0xf9, 0x04, 0xfe,
+ 0xfd, 0x06, 0xf8, 0x13, 0x3b, 0xf4, 0x07, 0xfc,
+ 0xfb, 0x08, 0xf5, 0x1f, 0x34, 0xf1, 0x09, 0xfb,
+ 0xfb, 0x09, 0xf2, 0x2b, 0x2a, 0xf1, 0x09, 0xfb,
+ 0xfb, 0x09, 0xf2, 0x35, 0x1e, 0xf4, 0x08, 0xfb,
+ 0xfc, 0x07, 0xf5, 0x3c, 0x12, 0xf7, 0x06, 0xfd,
+ 0xfe, 0x04, 0xfa, 0x3f, 0x07, 0xfc, 0x03, 0xff
+ }
+ },
+ {
+ .min = 1126,
+ .max = 1228,
+ .coef = {
+ 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
+ 0xff, 0x03, 0xfd, 0x08, 0x3e, 0xf9, 0x04, 0xfe,
+ 0xfd, 0x06, 0xf8, 0x13, 0x3b, 0xf4, 0x07, 0xfc,
+ 0xfb, 0x08, 0xf5, 0x1f, 0x34, 0xf1, 0x09, 0xfb,
+ 0xfb, 0x09, 0xf2, 0x2b, 0x2a, 0xf1, 0x09, 0xfb,
+ 0xfb, 0x09, 0xf2, 0x35, 0x1e, 0xf4, 0x08, 0xfb,
+ 0xfc, 0x07, 0xf5, 0x3c, 0x12, 0xf7, 0x06, 0xfd,
+ 0xfe, 0x04, 0xfa, 0x3f, 0x07, 0xfc, 0x03, 0xff
+ }
+ },
+ {
+ .min = 1228,
+ .max = 1331,
+ .coef = {
+ 0xfd, 0x04, 0xfc, 0x05, 0x39, 0x05, 0xfc, 0x04,
+ 0xfc, 0x06, 0xf9, 0x0c, 0x39, 0xfe, 0x00, 0x02,
+ 0xfb, 0x08, 0xf6, 0x17, 0x35, 0xf9, 0x02, 0x00,
+ 0xfc, 0x08, 0xf4, 0x20, 0x30, 0xf4, 0x05, 0xff,
+ 0xfd, 0x07, 0xf4, 0x29, 0x28, 0xf3, 0x07, 0xfd,
+ 0xff, 0x05, 0xf5, 0x31, 0x1f, 0xf3, 0x08, 0xfc,
+ 0x00, 0x02, 0xf9, 0x38, 0x14, 0xf6, 0x08, 0xfb,
+ 0x02, 0x00, 0xff, 0x3a, 0x0b, 0xf8, 0x06, 0xfc
+ }
+ },
+ {
+ .min = 1331,
+ .max = 1433,
+ .coef = {
+ 0xfc, 0x06, 0xf9, 0x09, 0x34, 0x09, 0xf9, 0x06,
+ 0xfd, 0x07, 0xf7, 0x10, 0x32, 0x02, 0xfc, 0x05,
+ 0xfe, 0x07, 0xf6, 0x17, 0x2f, 0xfc, 0xff, 0x04,
+ 0xff, 0x06, 0xf5, 0x20, 0x2a, 0xf9, 0x01, 0x02,
+ 0x00, 0x04, 0xf6, 0x27, 0x25, 0xf6, 0x04, 0x00,
+ 0x02, 0x01, 0xf9, 0x2d, 0x1d, 0xf5, 0x06, 0xff,
+ 0x04, 0xff, 0xfd, 0x31, 0x15, 0xf5, 0x07, 0xfe,
+ 0x05, 0xfc, 0x02, 0x35, 0x0d, 0xf7, 0x07, 0xfd
+ }
+ },
+ {
+ .min = 1433,
+ .max = 1536,
+ .coef = {
+ 0xfe, 0x06, 0xf8, 0x0b, 0x30, 0x0b, 0xf8, 0x06,
+ 0xff, 0x06, 0xf7, 0x12, 0x2d, 0x05, 0xfa, 0x06,
+ 0x00, 0x04, 0xf6, 0x18, 0x2c, 0x00, 0xfc, 0x06,
+ 0x01, 0x02, 0xf7, 0x1f, 0x27, 0xfd, 0xff, 0x04,
+ 0x03, 0x00, 0xf9, 0x24, 0x24, 0xf9, 0x00, 0x03,
+ 0x04, 0xff, 0xfd, 0x29, 0x1d, 0xf7, 0x02, 0x01,
+ 0x06, 0xfc, 0x00, 0x2d, 0x17, 0xf6, 0x04, 0x00,
+ 0x06, 0xfa, 0x05, 0x30, 0x0f, 0xf7, 0x06, 0xff
+ }
+ },
+ {
+ .min = 1536,
+ .max = 2048,
+ .coef = {
+ 0x05, 0xfd, 0xfb, 0x13, 0x25, 0x13, 0xfb, 0xfd,
+ 0x05, 0xfc, 0xfd, 0x17, 0x24, 0x0f, 0xf9, 0xff,
+ 0x04, 0xfa, 0xff, 0x1b, 0x24, 0x0b, 0xf9, 0x00,
+ 0x03, 0xf9, 0x01, 0x1f, 0x23, 0x08, 0xf8, 0x01,
+ 0x02, 0xf9, 0x04, 0x22, 0x20, 0x04, 0xf9, 0x02,
+ 0x01, 0xf8, 0x08, 0x25, 0x1d, 0x01, 0xf9, 0x03,
+ 0x00, 0xf9, 0x0c, 0x25, 0x1a, 0xfe, 0xfa, 0x04,
+ 0xff, 0xf9, 0x10, 0x26, 0x15, 0xfc, 0xfc, 0x05
+ }
+ },
+ {
+ .min = 2048,
+ .max = 3072,
+ .coef = {
+ 0xfc, 0xfd, 0x06, 0x13, 0x18, 0x13, 0x06, 0xfd,
+ 0xfc, 0xfe, 0x08, 0x15, 0x17, 0x12, 0x04, 0xfc,
+ 0xfb, 0xfe, 0x0a, 0x16, 0x18, 0x10, 0x03, 0xfc,
+ 0xfb, 0x00, 0x0b, 0x18, 0x17, 0x0f, 0x01, 0xfb,
+ 0xfb, 0x00, 0x0d, 0x19, 0x17, 0x0d, 0x00, 0xfb,
+ 0xfb, 0x01, 0x0f, 0x19, 0x16, 0x0b, 0x00, 0xfb,
+ 0xfc, 0x03, 0x11, 0x19, 0x15, 0x09, 0xfe, 0xfb,
+ 0xfc, 0x04, 0x12, 0x1a, 0x12, 0x08, 0xfe, 0xfc
+ }
+ },
+ {
+ .min = 3072,
+ .max = 4096,
+ .coef = {
+ 0xfe, 0x02, 0x09, 0x0f, 0x0e, 0x0f, 0x09, 0x02,
+ 0xff, 0x02, 0x09, 0x0f, 0x10, 0x0e, 0x08, 0x01,
+ 0xff, 0x03, 0x0a, 0x10, 0x10, 0x0d, 0x07, 0x00,
+ 0x00, 0x04, 0x0b, 0x10, 0x0f, 0x0c, 0x06, 0x00,
+ 0x00, 0x05, 0x0c, 0x10, 0x0e, 0x0c, 0x05, 0x00,
+ 0x00, 0x06, 0x0c, 0x11, 0x0e, 0x0b, 0x04, 0x00,
+ 0x00, 0x07, 0x0d, 0x11, 0x0f, 0x0a, 0x03, 0xff,
+ 0x01, 0x08, 0x0e, 0x11, 0x0e, 0x09, 0x02, 0xff
+ }
+ },
+ {
+ .min = 4096,
+ .max = 5120,
+ .coef = {
+ 0x00, 0x04, 0x09, 0x0c, 0x0e, 0x0c, 0x09, 0x04,
+ 0x01, 0x05, 0x09, 0x0c, 0x0d, 0x0c, 0x08, 0x04,
+ 0x01, 0x05, 0x0a, 0x0c, 0x0e, 0x0b, 0x08, 0x03,
+ 0x02, 0x06, 0x0a, 0x0d, 0x0c, 0x0b, 0x07, 0x03,
+ 0x02, 0x07, 0x0a, 0x0d, 0x0d, 0x0a, 0x07, 0x02,
+ 0x03, 0x07, 0x0b, 0x0d, 0x0c, 0x0a, 0x06, 0x02,
+ 0x03, 0x08, 0x0b, 0x0d, 0x0d, 0x0a, 0x05, 0x01,
+ 0x04, 0x08, 0x0c, 0x0d, 0x0c, 0x09, 0x05, 0x01
+ }
+ },
+ {
+ .min = 5120,
+ .max = 65535,
+ .coef = {
+ 0x03, 0x06, 0x09, 0x0b, 0x09, 0x0b, 0x09, 0x06,
+ 0x03, 0x06, 0x09, 0x0b, 0x0c, 0x0a, 0x08, 0x05,
+ 0x03, 0x06, 0x09, 0x0b, 0x0c, 0x0a, 0x08, 0x05,
+ 0x04, 0x07, 0x09, 0x0b, 0x0b, 0x0a, 0x08, 0x04,
+ 0x04, 0x07, 0x0a, 0x0b, 0x0b, 0x0a, 0x07, 0x04,
+ 0x04, 0x08, 0x0a, 0x0b, 0x0b, 0x09, 0x07, 0x04,
+ 0x05, 0x08, 0x0a, 0x0b, 0x0c, 0x09, 0x06, 0x03,
+ 0x05, 0x08, 0x0a, 0x0b, 0x0c, 0x09, 0x06, 0x03
+ }
+ }
+};
+
+#define NB_H_FILTER ARRAY_SIZE(bdisp_h_spec)
+
+
+static const struct bdisp_filter_v_spec bdisp_v_spec[] = {
+ {
+ .min = 0,
+ .max = 1024,
+ .coef = {
+ 0x00, 0x00, 0x40, 0x00, 0x00,
+ 0x00, 0x06, 0x3d, 0xfd, 0x00,
+ 0xfe, 0x0f, 0x38, 0xfb, 0x00,
+ 0xfd, 0x19, 0x2f, 0xfb, 0x00,
+ 0xfc, 0x24, 0x24, 0xfc, 0x00,
+ 0xfb, 0x2f, 0x19, 0xfd, 0x00,
+ 0xfb, 0x38, 0x0f, 0xfe, 0x00,
+ 0xfd, 0x3d, 0x06, 0x00, 0x00
+ }
+ },
+ {
+ .min = 1024,
+ .max = 1331,
+ .coef = {
+ 0xfc, 0x05, 0x3e, 0x05, 0xfc,
+ 0xf8, 0x0e, 0x3b, 0xff, 0x00,
+ 0xf5, 0x18, 0x38, 0xf9, 0x02,
+ 0xf4, 0x21, 0x31, 0xf5, 0x05,
+ 0xf4, 0x2a, 0x27, 0xf4, 0x07,
+ 0xf6, 0x30, 0x1e, 0xf4, 0x08,
+ 0xf9, 0x35, 0x15, 0xf6, 0x07,
+ 0xff, 0x37, 0x0b, 0xf9, 0x06
+ }
+ },
+ {
+ .min = 1331,
+ .max = 1433,
+ .coef = {
+ 0xf8, 0x0a, 0x3c, 0x0a, 0xf8,
+ 0xf6, 0x12, 0x3b, 0x02, 0xfb,
+ 0xf4, 0x1b, 0x35, 0xfd, 0xff,
+ 0xf4, 0x23, 0x30, 0xf8, 0x01,
+ 0xf6, 0x29, 0x27, 0xf6, 0x04,
+ 0xf9, 0x2e, 0x1e, 0xf5, 0x06,
+ 0xfd, 0x31, 0x16, 0xf6, 0x06,
+ 0x02, 0x32, 0x0d, 0xf8, 0x07
+ }
+ },
+ {
+ .min = 1433,
+ .max = 1536,
+ .coef = {
+ 0xf6, 0x0e, 0x38, 0x0e, 0xf6,
+ 0xf5, 0x15, 0x38, 0x06, 0xf8,
+ 0xf5, 0x1d, 0x33, 0x00, 0xfb,
+ 0xf6, 0x23, 0x2d, 0xfc, 0xfe,
+ 0xf9, 0x28, 0x26, 0xf9, 0x00,
+ 0xfc, 0x2c, 0x1e, 0xf7, 0x03,
+ 0x00, 0x2e, 0x18, 0xf6, 0x04,
+ 0x05, 0x2e, 0x11, 0xf7, 0x05
+ }
+ },
+ {
+ .min = 1536,
+ .max = 2048,
+ .coef = {
+ 0xfb, 0x13, 0x24, 0x13, 0xfb,
+ 0xfd, 0x17, 0x23, 0x0f, 0xfa,
+ 0xff, 0x1a, 0x23, 0x0b, 0xf9,
+ 0x01, 0x1d, 0x22, 0x07, 0xf9,
+ 0x04, 0x20, 0x1f, 0x04, 0xf9,
+ 0x07, 0x22, 0x1c, 0x01, 0xfa,
+ 0x0b, 0x24, 0x17, 0xff, 0xfb,
+ 0x0f, 0x24, 0x14, 0xfd, 0xfc
+ }
+ },
+ {
+ .min = 2048,
+ .max = 3072,
+ .coef = {
+ 0x05, 0x10, 0x16, 0x10, 0x05,
+ 0x06, 0x11, 0x16, 0x0f, 0x04,
+ 0x08, 0x13, 0x15, 0x0e, 0x02,
+ 0x09, 0x14, 0x16, 0x0c, 0x01,
+ 0x0b, 0x15, 0x15, 0x0b, 0x00,
+ 0x0d, 0x16, 0x13, 0x0a, 0x00,
+ 0x0f, 0x17, 0x13, 0x08, 0xff,
+ 0x11, 0x18, 0x12, 0x07, 0xfe
+ }
+ },
+ {
+ .min = 3072,
+ .max = 4096,
+ .coef = {
+ 0x09, 0x0f, 0x10, 0x0f, 0x09,
+ 0x09, 0x0f, 0x12, 0x0e, 0x08,
+ 0x0a, 0x10, 0x11, 0x0e, 0x07,
+ 0x0b, 0x11, 0x11, 0x0d, 0x06,
+ 0x0c, 0x11, 0x12, 0x0c, 0x05,
+ 0x0d, 0x12, 0x11, 0x0c, 0x04,
+ 0x0e, 0x12, 0x11, 0x0b, 0x04,
+ 0x0f, 0x13, 0x11, 0x0a, 0x03
+ }
+ },
+ {
+ .min = 4096,
+ .max = 5120,
+ .coef = {
+ 0x0a, 0x0e, 0x10, 0x0e, 0x0a,
+ 0x0b, 0x0e, 0x0f, 0x0e, 0x0a,
+ 0x0b, 0x0f, 0x10, 0x0d, 0x09,
+ 0x0c, 0x0f, 0x10, 0x0d, 0x08,
+ 0x0d, 0x0f, 0x0f, 0x0d, 0x08,
+ 0x0d, 0x10, 0x10, 0x0c, 0x07,
+ 0x0e, 0x10, 0x0f, 0x0c, 0x07,
+ 0x0f, 0x10, 0x10, 0x0b, 0x06
+ }
+ },
+ {
+ .min = 5120,
+ .max = 65535,
+ .coef = {
+ 0x0b, 0x0e, 0x0e, 0x0e, 0x0b,
+ 0x0b, 0x0e, 0x0f, 0x0d, 0x0b,
+ 0x0c, 0x0e, 0x0f, 0x0d, 0x0a,
+ 0x0c, 0x0e, 0x0f, 0x0d, 0x0a,
+ 0x0d, 0x0f, 0x0e, 0x0d, 0x09,
+ 0x0d, 0x0f, 0x0f, 0x0c, 0x09,
+ 0x0e, 0x0f, 0x0e, 0x0c, 0x09,
+ 0x0e, 0x0f, 0x0f, 0x0c, 0x08
+ }
+ }
+};
+
+#define NB_V_FILTER ARRAY_SIZE(bdisp_v_spec)
+
+static struct bdisp_filter_addr bdisp_h_filter[NB_H_FILTER];
+static struct bdisp_filter_addr bdisp_v_filter[NB_V_FILTER];
+
+/**
+ * bdisp_hw_reset
+ * @bdisp: bdisp entity
+ *
+ * Resets HW
+ *
+ * RETURNS:
+ * 0 on success.
+ */
+int bdisp_hw_reset(struct bdisp_dev *bdisp)
+{
+ unsigned int i;
+
+ dev_dbg(bdisp->dev, "%s\n", __func__);
+
+ /* Mask Interrupt */
+ writel(0, bdisp->regs + BLT_ITM0);
+
+ /* Reset */
+ writel(readl(bdisp->regs + BLT_CTL) | BLT_CTL_RESET,
+ bdisp->regs + BLT_CTL);
+ writel(0, bdisp->regs + BLT_CTL);
+
+ /* Wait for reset done */
+ for (i = 0; i < POLL_RST_MAX; i++) {
+ if (readl(bdisp->regs + BLT_STA1) & BLT_STA1_IDLE)
+ break;
+ udelay(POLL_RST_DELAY_MS * 1000);
+ }
+ if (i == POLL_RST_MAX)
+ dev_err(bdisp->dev, "Reset timeout\n");
+
+ return (i == POLL_RST_MAX) ? -EAGAIN : 0;
+}
+
+/**
+ * bdisp_hw_get_and_clear_irq
+ * @bdisp: bdisp entity
+ *
+ * Read then reset interrupt status
+ *
+ * RETURNS:
+ * 0 if expected interrupt was raised.
+ */
+int bdisp_hw_get_and_clear_irq(struct bdisp_dev *bdisp)
+{
+ u32 its;
+
+ its = readl(bdisp->regs + BLT_ITS);
+
+ /* Check for the only expected IT: LastNode of AQ1 */
+ if (!(its & BLT_ITS_AQ1_LNA)) {
+ dev_dbg(bdisp->dev, "Unexpected IT status: 0x%08X\n", its);
+ writel(its, bdisp->regs + BLT_ITS);
+ return -1;
+ }
+
+ /* Clear and mask */
+ writel(its, bdisp->regs + BLT_ITS);
+ writel(0, bdisp->regs + BLT_ITM0);
+
+ return 0;
+}
+
+/**
+ * bdisp_hw_free_nodes
+ * @ctx: bdisp context
+ *
+ * Free node memory
+ *
+ * RETURNS:
+ * None
+ */
+void bdisp_hw_free_nodes(struct bdisp_ctx *ctx)
+{
+ if (ctx && ctx->node[0])
+ dma_free_attrs(ctx->bdisp_dev->dev,
+ sizeof(struct bdisp_node) * MAX_NB_NODE,
+ ctx->node[0], ctx->node_paddr[0],
+ DMA_ATTR_WRITE_COMBINE);
+}
+
+/**
+ * bdisp_hw_alloc_nodes
+ * @ctx: bdisp context
+ *
+ * Allocate dma memory for nodes
+ *
+ * RETURNS:
+ * 0 on success
+ */
+int bdisp_hw_alloc_nodes(struct bdisp_ctx *ctx)
+{
+ struct device *dev = ctx->bdisp_dev->dev;
+ unsigned int i, node_size = sizeof(struct bdisp_node);
+ void *base;
+ dma_addr_t paddr;
+
+ /* Allocate all the nodes within a single memory page */
+ base = dma_alloc_attrs(dev, node_size * MAX_NB_NODE, &paddr,
+ GFP_KERNEL, DMA_ATTR_WRITE_COMBINE);
+ if (!base) {
+ dev_err(dev, "%s no mem\n", __func__);
+ return -ENOMEM;
+ }
+
+ memset(base, 0, node_size * MAX_NB_NODE);
+
+ for (i = 0; i < MAX_NB_NODE; i++) {
+ ctx->node[i] = base;
+ ctx->node_paddr[i] = paddr;
+ dev_dbg(dev, "node[%d]=0x%p (paddr=%pad)\n", i, ctx->node[i],
+ &paddr);
+ base += node_size;
+ paddr += node_size;
+ }
+
+ return 0;
+}
+
+/**
+ * bdisp_hw_free_filters
+ * @dev: device
+ *
+ * Free filters memory
+ *
+ * RETURNS:
+ * None
+ */
+void bdisp_hw_free_filters(struct device *dev)
+{
+ int size = (BDISP_HF_NB * NB_H_FILTER) + (BDISP_VF_NB * NB_V_FILTER);
+
+ if (bdisp_h_filter[0].virt)
+ dma_free_attrs(dev, size, bdisp_h_filter[0].virt,
+ bdisp_h_filter[0].paddr, DMA_ATTR_WRITE_COMBINE);
+}
+
+/**
+ * bdisp_hw_alloc_filters
+ * @dev: device
+ *
+ * Allocate dma memory for filters
+ *
+ * RETURNS:
+ * 0 on success
+ */
+int bdisp_hw_alloc_filters(struct device *dev)
+{
+ unsigned int i, size;
+ void *base;
+ dma_addr_t paddr;
+
+ /* Allocate all the filters within a single memory page */
+ size = (BDISP_HF_NB * NB_H_FILTER) + (BDISP_VF_NB * NB_V_FILTER);
+ base = dma_alloc_attrs(dev, size, &paddr, GFP_KERNEL,
+ DMA_ATTR_WRITE_COMBINE);
+ if (!base)
+ return -ENOMEM;
+
+ /* Setup filter addresses */
+ for (i = 0; i < NB_H_FILTER; i++) {
+ bdisp_h_filter[i].min = bdisp_h_spec[i].min;
+ bdisp_h_filter[i].max = bdisp_h_spec[i].max;
+ memcpy(base, bdisp_h_spec[i].coef, BDISP_HF_NB);
+ bdisp_h_filter[i].virt = base;
+ bdisp_h_filter[i].paddr = paddr;
+ base += BDISP_HF_NB;
+ paddr += BDISP_HF_NB;
+ }
+
+ for (i = 0; i < NB_V_FILTER; i++) {
+ bdisp_v_filter[i].min = bdisp_v_spec[i].min;
+ bdisp_v_filter[i].max = bdisp_v_spec[i].max;
+ memcpy(base, bdisp_v_spec[i].coef, BDISP_VF_NB);
+ bdisp_v_filter[i].virt = base;
+ bdisp_v_filter[i].paddr = paddr;
+ base += BDISP_VF_NB;
+ paddr += BDISP_VF_NB;
+ }
+
+ return 0;
+}
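With the tables defined above this is a single allocation of NB_H_FILTER * BDISP_HF_NB + NB_V_FILTER * BDISP_VF_NB = 12 * 64 + 9 * 40 = 1128 bytes, the horizontal coefficient tables laid out first and the vertical ones immediately after, comfortably within the single memory page the comment asks for.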
+
+/**
+ * bdisp_hw_get_hf_addr
+ * @inc: resize increment
+ *
+ * Find the horizontal filter table that fits the resize increment
+ *
+ * RETURNS:
+ * table physical address
+ */
+static dma_addr_t bdisp_hw_get_hf_addr(u16 inc)
+{
+ unsigned int i;
+
+ for (i = NB_H_FILTER - 1; i > 0; i--)
+ if ((bdisp_h_filter[i].min < inc) &&
+ (inc <= bdisp_h_filter[i].max))
+ break;
+
+ return bdisp_h_filter[i].paddr;
+}
+
+/**
+ * bdisp_hw_get_vf_addr
+ * @inc: resize increment
+ *
+ * Find the vertical filter table that fits the resize increment
+ *
+ * RETURNS:
+ * table physical address
+ */
+static dma_addr_t bdisp_hw_get_vf_addr(u16 inc)
+{
+ unsigned int i;
+
+ for (i = NB_V_FILTER - 1; i > 0; i--)
+ if ((bdisp_v_filter[i].min < inc) &&
+ (inc <= bdisp_v_filter[i].max))
+ break;
+
+ return bdisp_v_filter[i].paddr;
+}
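Both lookups walk the tables from the coarsest range downwards and pick the entry whose (min, max] interval contains the increment: for example inc = 1536 (a 1.5x downscale in 6.10 format) selects the 1433..1536 table, while any increment at or below 921 (i.e. upscaling) falls through to the first table.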
+
+/**
+ * bdisp_hw_get_inc
+ * @from: input size
+ * @to: output size
+ * @inc: resize increment in 6.10 format
+ *
+ * Computes the increment (inverse of scale) in 6.10 format
+ *
+ * RETURNS:
+ * 0 on success
+ */
+static int bdisp_hw_get_inc(u32 from, u32 to, u16 *inc)
+{
+ u32 tmp;
+
+ if (!to)
+ return -EINVAL;
+
+ if (to == from) {
+ *inc = 1 << 10;
+ return 0;
+ }
+
+ tmp = (from << 10) / to;
+ if ((tmp > 0xFFFF) || (!tmp))
+ /* overflow (downscale x 63) or too small (upscale x 1024) */
+ return -EINVAL;
+
+ *inc = (u16)tmp;
+
+ return 0;
+}
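For example, downscaling a 1920-pixel crop to 1280 pixels gives

    inc = (1920 << 10) / 1280 = 1536    /* 1.5 source pixels per output pixel */

while upscaling 640 to 1280 gives 512 (0.5); results above 0xFFFF (beyond the ~63x downscale limit noted in the code) or equal to zero (more than a 1024x upscale) are rejected with -EINVAL.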
+
+/**
+ * bdisp_hw_get_hv_inc
+ * @ctx: device context
+ * @h_inc: horizontal increment
+ * @v_inc: vertical increment
+ *
+ * Computes the horizontal & vertical increments (inverse of scale)
+ *
+ * RETURNS:
+ * 0 on success
+ */
+static int bdisp_hw_get_hv_inc(struct bdisp_ctx *ctx, u16 *h_inc, u16 *v_inc)
+{
+ u32 src_w, src_h, dst_w, dst_h;
+
+ src_w = ctx->src.crop.width;
+ src_h = ctx->src.crop.height;
+ dst_w = ctx->dst.crop.width;
+ dst_h = ctx->dst.crop.height;
+
+ if (bdisp_hw_get_inc(src_w, dst_w, h_inc) ||
+ bdisp_hw_get_inc(src_h, dst_h, v_inc)) {
+ dev_err(ctx->bdisp_dev->dev,
+ "scale factors failed (%dx%d)->(%dx%d)\n",
+ src_w, src_h, dst_w, dst_h);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * bdisp_hw_get_op_cfg
+ * @ctx: device context
+ * @c: operation configuration
+ *
+ * Check which blitter operations are expected and sets the scaling increments
+ *
+ * RETURNS:
+ * 0 on success
+ */
+static int bdisp_hw_get_op_cfg(struct bdisp_ctx *ctx, struct bdisp_op_cfg *c)
+{
+ struct device *dev = ctx->bdisp_dev->dev;
+ struct bdisp_frame *src = &ctx->src;
+ struct bdisp_frame *dst = &ctx->dst;
+
+ if (src->width > MAX_SRC_WIDTH * MAX_VERTICAL_STRIDES) {
+ dev_err(dev, "Image width out of HW caps\n");
+ return -EINVAL;
+ }
+
+ c->wide = src->width > MAX_SRC_WIDTH;
+
+ c->hflip = ctx->hflip;
+ c->vflip = ctx->vflip;
+
+ c->src_interlaced = (src->field == V4L2_FIELD_INTERLACED);
+
+ c->src_nbp = src->fmt->nb_planes;
+ c->src_yuv = (src->fmt->pixelformat == V4L2_PIX_FMT_NV12) ||
+ (src->fmt->pixelformat == V4L2_PIX_FMT_YUV420);
+ c->src_420 = c->src_yuv;
+
+ c->dst_nbp = dst->fmt->nb_planes;
+ c->dst_yuv = (dst->fmt->pixelformat == V4L2_PIX_FMT_NV12) ||
+ (dst->fmt->pixelformat == V4L2_PIX_FMT_YUV420);
+ c->dst_420 = c->dst_yuv;
+
+ c->cconv = (c->src_yuv != c->dst_yuv);
+
+ if (bdisp_hw_get_hv_inc(ctx, &c->h_inc, &c->v_inc)) {
+ dev_err(dev, "Scale factor out of HW caps\n");
+ return -EINVAL;
+ }
+
+ /* Deinterlacing adjustment : stretch a field to a frame */
+ if (c->src_interlaced)
+ c->v_inc /= 2;
+
+ if ((c->h_inc != (1 << 10)) || (c->v_inc != (1 << 10)))
+ c->scale = true;
+ else
+ c->scale = false;
+
+ return 0;
+}
+
+/**
+ * bdisp_hw_color_format
+ * @pixelformat: v4l2 pixel format
+ *
+ * v4l2 to bdisp pixel format convert
+ *
+ * RETURNS:
+ * bdisp pixel format
+ */
+static u32 bdisp_hw_color_format(u32 pixelformat)
+{
+ u32 ret;
+
+ switch (pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ ret = (BDISP_YUV_3B << BLT_TTY_COL_SHIFT);
+ break;
+ case V4L2_PIX_FMT_NV12:
+ ret = (BDISP_NV12 << BLT_TTY_COL_SHIFT) | BLT_TTY_BIG_END;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ ret = (BDISP_RGB565 << BLT_TTY_COL_SHIFT);
+ break;
+ case V4L2_PIX_FMT_XBGR32: /* This V4L format actually refers to xRGB */
+ ret = (BDISP_XRGB8888 << BLT_TTY_COL_SHIFT);
+ break;
+ case V4L2_PIX_FMT_RGB24: /* RGB888 format */
+ ret = (BDISP_RGB888 << BLT_TTY_COL_SHIFT) | BLT_TTY_BIG_END;
+ break;
+ case V4L2_PIX_FMT_ABGR32: /* This V4L format actually refers to ARGB */
+
+ default:
+ ret = (BDISP_ARGB8888 << BLT_TTY_COL_SHIFT) | BLT_TTY_ALPHA_R;
+ break;
+ }
+
+ return ret;
+}
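For instance V4L2_PIX_FMT_NV12 maps to (BDISP_NV12 << BLT_TTY_COL_SHIFT) | BLT_TTY_BIG_END = (0x16 << 16) | BIT(30) = 0x40160000, which the node builder below ORs into the *TY words together with the line pitch held in their low 16 bits.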
+
+/**
+ * bdisp_hw_build_node
+ * @ctx: device context
+ * @cfg: operation configuration
+ * @node: node to be set
+ * @t_plan: whether the node refers to a RGB/Y or a CbCr plane
+ * @src_x_offset: x offset in the source image
+ *
+ * Build a node
+ *
+ * RETURNS:
+ * None
+ */
+static void bdisp_hw_build_node(struct bdisp_ctx *ctx,
+ struct bdisp_op_cfg *cfg,
+ struct bdisp_node *node,
+ enum bdisp_target_plan t_plan, int src_x_offset)
+{
+ struct bdisp_frame *src = &ctx->src;
+ struct bdisp_frame *dst = &ctx->dst;
+ u16 h_inc, v_inc, yh_inc, yv_inc;
+ struct v4l2_rect src_rect = src->crop;
+ struct v4l2_rect dst_rect = dst->crop;
+ int dst_x_offset;
+ s32 dst_width = dst->crop.width;
+ u32 src_fmt, dst_fmt;
+ const u32 *ivmx;
+
+ dev_dbg(ctx->bdisp_dev->dev, "%s\n", __func__);
+
+ memset(node, 0, sizeof(*node));
+
+ /* Adjust src and dst areas wrt src_x_offset */
+ src_rect.left += src_x_offset;
+ src_rect.width -= src_x_offset;
+ src_rect.width = min_t(__s32, MAX_SRC_WIDTH, src_rect.width);
+
+ dst_x_offset = (src_x_offset * dst_width) / ctx->src.crop.width;
+ dst_rect.left += dst_x_offset;
+ dst_rect.width = (src_rect.width * dst_width) / ctx->src.crop.width;
+
+ /* General */
+ src_fmt = src->fmt->pixelformat;
+ dst_fmt = dst->fmt->pixelformat;
+
+ node->nip = 0;
+ node->cic = BLT_CIC_ALL_GRP;
+ node->ack = BLT_ACK_BYPASS_S2S3;
+
+ switch (cfg->src_nbp) {
+ case 1:
+ /* Src2 = RGB / Src1 = Src3 = off */
+ node->ins = BLT_INS_S1_OFF | BLT_INS_S2_MEM | BLT_INS_S3_OFF;
+ break;
+ case 2:
+ /* Src3 = Y
+ * Src2 = CbCr or ColorFill if writing the Y plane
+ * Src1 = off */
+ node->ins = BLT_INS_S1_OFF | BLT_INS_S3_MEM;
+ if (t_plan == BDISP_Y)
+ node->ins |= BLT_INS_S2_CF;
+ else
+ node->ins |= BLT_INS_S2_MEM;
+ break;
+ case 3:
+ default:
+ /* Src3 = Y
+ * Src2 = Cb or ColorFill if writing the Y plane
+ * Src1 = Cr or ColorFill if writing the Y plane */
+ node->ins = BLT_INS_S3_MEM;
+ if (t_plan == BDISP_Y)
+ node->ins |= BLT_INS_S2_CF | BLT_INS_S1_CF;
+ else
+ node->ins |= BLT_INS_S2_MEM | BLT_INS_S1_MEM;
+ break;
+ }
+
+ /* Color convert */
+ node->ins |= cfg->cconv ? BLT_INS_IVMX : 0;
+ /* Scale needed if scaling OR 4:2:0 up/downsampling */
+ node->ins |= (cfg->scale || cfg->src_420 || cfg->dst_420) ?
+ BLT_INS_SCALE : 0;
+
+ /* Target */
+ node->tba = (t_plan == BDISP_CBCR) ? dst->paddr[1] : dst->paddr[0];
+
+ node->tty = dst->bytesperline;
+ node->tty |= bdisp_hw_color_format(dst_fmt);
+ node->tty |= BLT_TTY_DITHER;
+ node->tty |= (t_plan == BDISP_CBCR) ? BLT_TTY_CHROMA : 0;
+ node->tty |= cfg->hflip ? BLT_TTY_HSO : 0;
+ node->tty |= cfg->vflip ? BLT_TTY_VSO : 0;
+
+ if (cfg->dst_420 && (t_plan == BDISP_CBCR)) {
+ /* 420 chroma downsampling */
+ dst_rect.height /= 2;
+ dst_rect.width /= 2;
+ dst_rect.left /= 2;
+ dst_rect.top /= 2;
+ dst_x_offset /= 2;
+ dst_width /= 2;
+ }
+
+ node->txy = cfg->vflip ? (dst_rect.height - 1) : dst_rect.top;
+ node->txy <<= 16;
+ node->txy |= cfg->hflip ? (dst_width - dst_x_offset - 1) :
+ dst_rect.left;
+
+ node->tsz = dst_rect.height << 16 | dst_rect.width;
+
+ if (cfg->src_interlaced) {
+ /* handle only the top field which is half height of a frame */
+ src_rect.top /= 2;
+ src_rect.height /= 2;
+ }
+
+ if (cfg->src_nbp == 1) {
+ /* Src 2 : RGB */
+ node->s2ba = src->paddr[0];
+
+ node->s2ty = src->bytesperline;
+ if (cfg->src_interlaced)
+ node->s2ty *= 2;
+
+ node->s2ty |= bdisp_hw_color_format(src_fmt);
+
+ node->s2xy = src_rect.top << 16 | src_rect.left;
+ node->s2sz = src_rect.height << 16 | src_rect.width;
+ } else {
+ /* Src 2 : Cb or CbCr */
+ if (cfg->src_420) {
+ /* 420 chroma upsampling */
+ src_rect.top /= 2;
+ src_rect.left /= 2;
+ src_rect.width /= 2;
+ src_rect.height /= 2;
+ }
+
+ node->s2ba = src->paddr[1];
+
+ node->s2ty = src->bytesperline;
+ if (cfg->src_nbp == 3)
+ node->s2ty /= 2;
+ if (cfg->src_interlaced)
+ node->s2ty *= 2;
+
+ node->s2ty |= bdisp_hw_color_format(src_fmt);
+
+ node->s2xy = src_rect.top << 16 | src_rect.left;
+ node->s2sz = src_rect.height << 16 | src_rect.width;
+
+ if (cfg->src_nbp == 3) {
+ /* Src 1 : Cr */
+ node->s1ba = src->paddr[2];
+
+ node->s1ty = node->s2ty;
+ node->s1xy = node->s2xy;
+ }
+
+ /* Src 3 : Y */
+ node->s3ba = src->paddr[0];
+
+ node->s3ty = src->bytesperline;
+ if (cfg->src_interlaced)
+ node->s3ty *= 2;
+ node->s3ty |= bdisp_hw_color_format(src_fmt);
+
+ if ((t_plan != BDISP_CBCR) && cfg->src_420) {
+ /* No chroma upsampling for output RGB / Y plane */
+ node->s3xy = node->s2xy * 2;
+ node->s3sz = node->s2sz * 2;
+ } else {
+ /* No need to read Y (Src3) when writing Chroma */
+ node->s3ty |= BLT_S3TY_BLANK_ACC;
+ node->s3xy = node->s2xy;
+ node->s3sz = node->s2sz;
+ }
+ }
+
+ /* Resize (scale OR 4:2:0 chroma up/downsampling) */
+ if (node->ins & BLT_INS_SCALE) {
+ /* no need to compute Y when writing CbCr from RGB input */
+ bool skip_y = (t_plan == BDISP_CBCR) && !cfg->src_yuv;
+
+ /* FCTL */
+ if (cfg->scale) {
+ node->fctl = BLT_FCTL_HV_SCALE;
+ if (!skip_y)
+ node->fctl |= BLT_FCTL_Y_HV_SCALE;
+ } else {
+ node->fctl = BLT_FCTL_HV_SAMPLE;
+ if (!skip_y)
+ node->fctl |= BLT_FCTL_Y_HV_SAMPLE;
+ }
+
+ /* RSF - Chroma may need to be up/downsampled */
+ h_inc = cfg->h_inc;
+ v_inc = cfg->v_inc;
+ if (!cfg->src_420 && cfg->dst_420 && (t_plan == BDISP_CBCR)) {
+ /* RGB to 4:2:0 for Chroma: downsample */
+ h_inc *= 2;
+ v_inc *= 2;
+ } else if (cfg->src_420 && !cfg->dst_420) {
+ /* 4:2:0 to RGB: upsample */
+ h_inc /= 2;
+ v_inc /= 2;
+ }
+ node->rsf = v_inc << 16 | h_inc;
+
+ /* RZI */
+ node->rzi = BLT_RZI_DEFAULT;
+
+ /* Filter table physical addr */
+ node->hfp = bdisp_hw_get_hf_addr(h_inc);
+ node->vfp = bdisp_hw_get_vf_addr(v_inc);
+
+ /* Y version */
+ if (!skip_y) {
+ yh_inc = cfg->h_inc;
+ yv_inc = cfg->v_inc;
+
+ node->y_rsf = yv_inc << 16 | yh_inc;
+ node->y_rzi = BLT_RZI_DEFAULT;
+ node->y_hfp = bdisp_hw_get_hf_addr(yh_inc);
+ node->y_vfp = bdisp_hw_get_vf_addr(yv_inc);
+ }
+ }
+
+ /* Versatile matrix for RGB / YUV conversion */
+ if (cfg->cconv) {
+ ivmx = cfg->src_yuv ? bdisp_yuv_to_rgb : bdisp_rgb_to_yuv;
+
+ node->ivmx0 = ivmx[0];
+ node->ivmx1 = ivmx[1];
+ node->ivmx2 = ivmx[2];
+ node->ivmx3 = ivmx[3];
+ }
+}
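As an example of the chroma handling: converting an NV12 (4:2:0) source to an ARGB destination of identical size yields h_inc = v_inc = 1024, so cfg->scale is false, yet BLT_INS_SCALE is still set because the source is 4:2:0; the node then takes the HV_SAMPLE filter mode, the chroma increments are halved to 512 (a 2x chroma upsample) and the luma path keeps 1024.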
+
+/**
+ * bdisp_hw_build_all_nodes
+ * @ctx: device context
+ *
+ * Build all the nodes for the blitter operation
+ *
+ * RETURNS:
+ * 0 on success
+ */
+static int bdisp_hw_build_all_nodes(struct bdisp_ctx *ctx)
+{
+ struct bdisp_op_cfg cfg;
+ unsigned int i, nid = 0;
+ int src_x_offset = 0;
+
+ for (i = 0; i < MAX_NB_NODE; i++)
+ if (!ctx->node[i]) {
+ dev_err(ctx->bdisp_dev->dev, "node %d is null\n", i);
+ return -EINVAL;
+ }
+
+ /* Get configuration (scale, flip, ...) */
+ if (bdisp_hw_get_op_cfg(ctx, &cfg))
+ return -EINVAL;
+
+ /* Split source in vertical strides (HW constraint) */
+ for (i = 0; i < MAX_VERTICAL_STRIDES; i++) {
+ /* Build RGB/Y node and link it to the previous node */
+ bdisp_hw_build_node(ctx, &cfg, ctx->node[nid],
+ cfg.dst_nbp == 1 ? BDISP_RGB : BDISP_Y,
+ src_x_offset);
+ if (nid)
+ ctx->node[nid - 1]->nip = ctx->node_paddr[nid];
+ nid++;
+
+ /* Build additional Cb(Cr) node, link it to the previous one */
+ if (cfg.dst_nbp > 1) {
+ bdisp_hw_build_node(ctx, &cfg, ctx->node[nid],
+ BDISP_CBCR, src_x_offset);
+ ctx->node[nid - 1]->nip = ctx->node_paddr[nid];
+ nid++;
+ }
+
+ /* Next stride until full width covered */
+ src_x_offset += MAX_SRC_WIDTH;
+ if (src_x_offset >= ctx->src.crop.width)
+ break;
+ }
+
+ /* Mark last node as the last */
+ ctx->node[nid - 1]->nip = 0;
+
+ return 0;
+}
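For example, a 3840-pixel-wide crop blitted to an NV12 destination needs two strides of at most MAX_SRC_WIDTH (2048) source pixels; each stride produces a luma node followed by a chroma node, so four nodes end up chained through their nip fields, with the final node's nip forced back to 0.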
+
+/**
+ * bdisp_hw_save_request
+ * @ctx: device context
+ *
+ * Save a copy of the request and of the built nodes
+ *
+ * RETURNS:
+ * None
+ */
+static void bdisp_hw_save_request(struct bdisp_ctx *ctx)
+{
+ struct bdisp_node **copy_node = ctx->bdisp_dev->dbg.copy_node;
+ struct bdisp_request *request = &ctx->bdisp_dev->dbg.copy_request;
+ struct bdisp_node **node = ctx->node;
+ int i;
+
+ /* Request copy */
+ request->src = ctx->src;
+ request->dst = ctx->dst;
+ request->hflip = ctx->hflip;
+ request->vflip = ctx->vflip;
+ request->nb_req++;
+
+ /* Nodes copy */
+ for (i = 0; i < MAX_NB_NODE; i++) {
+ /* Allocate memory if not done yet */
+ if (!copy_node[i]) {
+ copy_node[i] = devm_kzalloc(ctx->bdisp_dev->dev,
+ sizeof(*copy_node[i]),
+ GFP_ATOMIC);
+ if (!copy_node[i])
+ return;
+ }
+ *copy_node[i] = *node[i];
+ }
+}
+
+/**
+ * bdisp_hw_update
+ * @ctx: device context
+ *
+ * Send the request to the HW
+ *
+ * RETURNS:
+ * 0 on success
+ */
+int bdisp_hw_update(struct bdisp_ctx *ctx)
+{
+ int ret;
+ struct bdisp_dev *bdisp = ctx->bdisp_dev;
+ struct device *dev = bdisp->dev;
+ unsigned int node_id;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ /* build nodes */
+ ret = bdisp_hw_build_all_nodes(ctx);
+ if (ret) {
+ dev_err(dev, "cannot build nodes (%d)\n", ret);
+ return ret;
+ }
+
+ /* Save a copy of the request */
+ bdisp_hw_save_request(ctx);
+
+ /* Configure interrupt to 'Last Node Reached for AQ1' */
+ writel(BLT_AQ1_CTL_CFG, bdisp->regs + BLT_AQ1_CTL);
+ writel(BLT_ITS_AQ1_LNA, bdisp->regs + BLT_ITM0);
+
+ /* Write first node addr */
+ writel(ctx->node_paddr[0], bdisp->regs + BLT_AQ1_IP);
+
+ /* Find and write last node addr : this starts the HW processing */
+ for (node_id = 0; node_id < MAX_NB_NODE - 1; node_id++) {
+ if (!ctx->node[node_id]->nip)
+ break;
+ }
+ writel(ctx->node_paddr[node_id], bdisp->regs + BLT_AQ1_LNA);
+
+ return 0;
+}
diff --git a/drivers/media/platform/st/sti/bdisp/bdisp-reg.h b/drivers/media/platform/st/sti/bdisp/bdisp-reg.h
new file mode 100644
index 000000000..b07ecc903
--- /dev/null
+++ b/drivers/media/platform/st/sti/bdisp/bdisp-reg.h
@@ -0,0 +1,235 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
+ */
+
+struct bdisp_node {
+ /* 0 - General */
+ u32 nip;
+ u32 cic;
+ u32 ins;
+ u32 ack;
+ /* 1 - Target */
+ u32 tba;
+ u32 tty;
+ u32 txy;
+ u32 tsz;
+ /* 2 - Color Fill */
+ u32 s1cf;
+ u32 s2cf;
+ /* 3 - Source 1 */
+ u32 s1ba;
+ u32 s1ty;
+ u32 s1xy;
+ u32 s1sz_tsz;
+ /* 4 - Source 2 */
+ u32 s2ba;
+ u32 s2ty;
+ u32 s2xy;
+ u32 s2sz;
+ /* 5 - Source 3 */
+ u32 s3ba;
+ u32 s3ty;
+ u32 s3xy;
+ u32 s3sz;
+ /* 6 - Clipping */
+ u32 cwo;
+ u32 cws;
+ /* 7 - CLUT */
+ u32 cco;
+ u32 cml;
+ /* 8 - Filter & Mask */
+ u32 fctl;
+ u32 pmk;
+ /* 9 - Chroma Filter */
+ u32 rsf;
+ u32 rzi;
+ u32 hfp;
+ u32 vfp;
+ /* 10 - Luma Filter */
+ u32 y_rsf;
+ u32 y_rzi;
+ u32 y_hfp;
+ u32 y_vfp;
+ /* 11 - Flicker */
+ u32 ff0;
+ u32 ff1;
+ u32 ff2;
+ u32 ff3;
+ /* 12 - Color Key */
+ u32 key1;
+ u32 key2;
+ /* 14 - Static Address & User */
+ u32 sar;
+ u32 usr;
+ /* 15 - Input Versatile Matrix */
+ u32 ivmx0;
+ u32 ivmx1;
+ u32 ivmx2;
+ u32 ivmx3;
+ /* 16 - Output Versatile Matrix */
+ u32 ovmx0;
+ u32 ovmx1;
+ u32 ovmx2;
+ u32 ovmx3;
+ /* 17 - Pace */
+ u32 pace;
+ /* 18 - VC1R & DEI */
+ u32 vc1r;
+ u32 dei;
+ /* 19 - Gradient Fill */
+ u32 hgf;
+ u32 vgf;
+};
+
+/* HW registers : static */
+#define BLT_CTL 0x0A00
+#define BLT_ITS 0x0A04
+#define BLT_STA1 0x0A08
+#define BLT_AQ1_CTL 0x0A60
+#define BLT_AQ1_IP 0x0A64
+#define BLT_AQ1_LNA 0x0A68
+#define BLT_AQ1_STA 0x0A6C
+#define BLT_ITM0 0x0AD0
+/* HW registers : plugs */
+#define BLT_PLUGS1_OP2 0x0B04
+#define BLT_PLUGS1_CHZ 0x0B08
+#define BLT_PLUGS1_MSZ 0x0B0C
+#define BLT_PLUGS1_PGZ 0x0B10
+#define BLT_PLUGS2_OP2 0x0B24
+#define BLT_PLUGS2_CHZ 0x0B28
+#define BLT_PLUGS2_MSZ 0x0B2C
+#define BLT_PLUGS2_PGZ 0x0B30
+#define BLT_PLUGS3_OP2 0x0B44
+#define BLT_PLUGS3_CHZ 0x0B48
+#define BLT_PLUGS3_MSZ 0x0B4C
+#define BLT_PLUGS3_PGZ 0x0B50
+#define BLT_PLUGT_OP2 0x0B84
+#define BLT_PLUGT_CHZ 0x0B88
+#define BLT_PLUGT_MSZ 0x0B8C
+#define BLT_PLUGT_PGZ 0x0B90
+/* HW registers : node */
+#define BLT_NIP 0x0C00
+#define BLT_CIC 0x0C04
+#define BLT_INS 0x0C08
+#define BLT_ACK 0x0C0C
+#define BLT_TBA 0x0C10
+#define BLT_TTY 0x0C14
+#define BLT_TXY 0x0C18
+#define BLT_TSZ 0x0C1C
+#define BLT_S1BA 0x0C28
+#define BLT_S1TY 0x0C2C
+#define BLT_S1XY 0x0C30
+#define BLT_S2BA 0x0C38
+#define BLT_S2TY 0x0C3C
+#define BLT_S2XY 0x0C40
+#define BLT_S2SZ 0x0C44
+#define BLT_S3BA 0x0C48
+#define BLT_S3TY 0x0C4C
+#define BLT_S3XY 0x0C50
+#define BLT_S3SZ 0x0C54
+#define BLT_FCTL 0x0C68
+#define BLT_RSF 0x0C70
+#define BLT_RZI 0x0C74
+#define BLT_HFP 0x0C78
+#define BLT_VFP 0x0C7C
+#define BLT_Y_RSF 0x0C80
+#define BLT_Y_RZI 0x0C84
+#define BLT_Y_HFP 0x0C88
+#define BLT_Y_VFP 0x0C8C
+#define BLT_IVMX0 0x0CC0
+#define BLT_IVMX1 0x0CC4
+#define BLT_IVMX2 0x0CC8
+#define BLT_IVMX3 0x0CCC
+#define BLT_OVMX0 0x0CD0
+#define BLT_OVMX1 0x0CD4
+#define BLT_OVMX2 0x0CD8
+#define BLT_OVMX3 0x0CDC
+#define BLT_DEI 0x0CEC
+/* HW registers : filters */
+#define BLT_HFC_N 0x0D00
+#define BLT_VFC_N 0x0D90
+#define BLT_Y_HFC_N 0x0E00
+#define BLT_Y_VFC_N 0x0E90
+#define BLT_NB_H_COEF 16
+#define BLT_NB_V_COEF 10
+
+/* Registers values */
+#define BLT_CTL_RESET BIT(31) /* Global soft reset */
+
+#define BLT_ITS_AQ1_LNA BIT(12) /* AQ1 LNA reached */
+
+#define BLT_STA1_IDLE BIT(0) /* BDISP idle */
+
+#define BLT_AQ1_CTL_CFG 0x80400003 /* Enable, P3, LNA reached */
+
+#define BLT_INS_S1_MASK (BIT(0) | BIT(1) | BIT(2))
+#define BLT_INS_S1_OFF 0x00000000 /* src1 disabled */
+#define BLT_INS_S1_MEM 0x00000001 /* src1 fetched from memory */
+#define BLT_INS_S1_CF 0x00000003 /* src1 color fill */
+#define BLT_INS_S1_COPY 0x00000004 /* src1 direct copy */
+#define BLT_INS_S1_FILL 0x00000007 /* src1 direct fill */
+#define BLT_INS_S2_MASK (BIT(3) | BIT(4))
+#define BLT_INS_S2_OFF 0x00000000 /* src2 disabled */
+#define BLT_INS_S2_MEM 0x00000008 /* src2 fetched from memory */
+#define BLT_INS_S2_CF 0x00000018 /* src2 color fill */
+#define BLT_INS_S3_MASK BIT(5)
+#define BLT_INS_S3_OFF 0x00000000 /* src3 disabled */
+#define BLT_INS_S3_MEM 0x00000020 /* src3 fetched from memory */
+#define BLT_INS_IVMX BIT(6) /* Input versatile matrix */
+#define BLT_INS_CLUT BIT(7) /* Color Look Up Table */
+#define BLT_INS_SCALE BIT(8) /* Scaling */
+#define BLT_INS_FLICK BIT(9) /* Flicker filter */
+#define BLT_INS_CLIP BIT(10) /* Clipping */
+#define BLT_INS_CKEY BIT(11) /* Color key */
+#define BLT_INS_OVMX BIT(12) /* Output versatile matrix */
+#define BLT_INS_DEI BIT(13) /* Deinterlace */
+#define BLT_INS_PMASK BIT(14) /* Plane mask */
+#define BLT_INS_VC1R BIT(17) /* VC1 Range mapping */
+#define BLT_INS_ROTATE BIT(18) /* Rotation */
+#define BLT_INS_GRAD BIT(19) /* Gradient fill */
+#define BLT_INS_AQLOCK BIT(29) /* AQ lock */
+#define BLT_INS_PACE BIT(30) /* Pace down */
+#define BLT_INS_IRQ BIT(31) /* Raise IRQ when node done */
+#define BLT_CIC_ALL_GRP 0x000FDFFC /* all valid groups present */
+#define BLT_ACK_BYPASS_S2S3 0x00000007 /* Bypass src2 and src3 */
+
+#define BLT_TTY_COL_SHIFT 16 /* Color format */
+#define BLT_TTY_COL_MASK 0x001F0000 /* Color format mask */
+#define BLT_TTY_ALPHA_R BIT(21) /* Alpha range */
+#define BLT_TTY_CR_NOT_CB BIT(22) /* CR not Cb */
+#define BLT_TTY_MB BIT(23) /* MB frame / field */
+#define BLT_TTY_HSO BIT(24) /* H scan order */
+#define BLT_TTY_VSO BIT(25) /* V scan order */
+#define BLT_TTY_DITHER BIT(26) /* Dithering */
+#define BLT_TTY_CHROMA BIT(27) /* Write chroma / luma */
+#define BLT_TTY_BIG_END BIT(30) /* Big endianness */
+
+#define BLT_S1TY_A1_SUBSET BIT(22) /* A1 subset */
+#define BLT_S1TY_CHROMA_EXT BIT(26) /* Chroma Extended */
+#define BTL_S1TY_SUBBYTE BIT(28) /* Sub-byte fmt, pixel order */
+#define BLT_S1TY_RGB_EXP BIT(29) /* RGB expansion mode */
+
+#define BLT_S2TY_A1_SUBSET BIT(22) /* A1 subset */
+#define BLT_S2TY_CHROMA_EXT BIT(26) /* Chroma Extended */
+#define BTL_S2TY_SUBBYTE BIT(28) /* Sub-byte fmt, pixel order */
+#define BLT_S2TY_RGB_EXP BIT(29) /* RGB expansion mode */
+
+#define BLT_S3TY_BLANK_ACC BIT(26) /* Blank access */
+
+#define BLT_FCTL_HV_SCALE 0x00000055 /* H/V resize + color filter */
+#define BLT_FCTL_Y_HV_SCALE 0x33000000 /* Luma version */
+
+#define BLT_FCTL_HV_SAMPLE 0x00000044 /* H/V resize */
+#define BLT_FCTL_Y_HV_SAMPLE 0x22000000 /* Luma version */
+
+#define BLT_RZI_DEFAULT 0x20003000 /* H/V NB_repeat = 3/2 */
+
+/* Color format */
+#define BDISP_RGB565 0x00 /* RGB565 */
+#define BDISP_RGB888 0x01 /* RGB888 */
+#define BDISP_XRGB8888 0x02 /* RGB888_32 */
+#define BDISP_ARGB8888 0x05 /* ARGB888 */
+#define BDISP_NV12 0x16 /* YCbCr42x R2B */
+#define BDISP_YUV_3B 0x1E /* YUV (3 buffer) */
diff --git a/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c
new file mode 100644
index 000000000..080da254b
--- /dev/null
+++ b/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c
@@ -0,0 +1,1428 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
+ */
+
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+
+#include "bdisp.h"
+
+#define BDISP_MAX_CTRL_NUM 10
+
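+/* HW job timeout: 100 ms expressed in jiffies */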
+#define BDISP_WORK_TIMEOUT ((100 * HZ) / 1000)
+
+/* User configuration change */
+#define BDISP_PARAMS BIT(0) /* Config updated */
+#define BDISP_SRC_FMT BIT(1) /* Source set */
+#define BDISP_DST_FMT BIT(2) /* Destination set */
+#define BDISP_CTX_STOP_REQ BIT(3) /* Stop request */
+#define BDISP_CTX_ABORT BIT(4) /* Abort while device run */
+
+#define BDISP_MIN_W 1
+#define BDISP_MAX_W 8191
+#define BDISP_MIN_H 1
+#define BDISP_MAX_H 8191
+
+#define fh_to_ctx(__fh) container_of(__fh, struct bdisp_ctx, fh)
+
+enum bdisp_dev_flags {
+ ST_M2M_OPEN, /* Driver opened */
+ ST_M2M_RUNNING, /* HW device running */
+ ST_M2M_SUSPENDED, /* Driver suspended */
+ ST_M2M_SUSPENDING, /* Driver being suspended */
+};
+
+static const struct bdisp_fmt bdisp_formats[] = {
+ /* ARGB888. [31:0] A:R:G:B 8:8:8:8 little endian */
+ {
+ .pixelformat = V4L2_PIX_FMT_ABGR32, /* is actually ARGB */
+ .nb_planes = 1,
+ .bpp = 32,
+ .bpp_plane0 = 32,
+ .w_align = 1,
+ .h_align = 1
+ },
+ /* XRGB888. [31:0] x:R:G:B 8:8:8:8 little endian */
+ {
+ .pixelformat = V4L2_PIX_FMT_XBGR32, /* is actually xRGB */
+ .nb_planes = 1,
+ .bpp = 32,
+ .bpp_plane0 = 32,
+ .w_align = 1,
+ .h_align = 1
+ },
+ /* RGB565. [15:0] R:G:B 5:6:5 little endian */
+ {
+ .pixelformat = V4L2_PIX_FMT_RGB565,
+ .nb_planes = 1,
+ .bpp = 16,
+ .bpp_plane0 = 16,
+ .w_align = 1,
+ .h_align = 1
+ },
+ /* NV12. YUV420SP - 1 plane for Y + 1 plane for (CbCr) */
+ {
+ .pixelformat = V4L2_PIX_FMT_NV12,
+ .nb_planes = 2,
+ .bpp = 12,
+ .bpp_plane0 = 8,
+ .w_align = 2,
+ .h_align = 2
+ },
+ /* RGB888. [23:0] B:G:R 8:8:8 little endian */
+ {
+ .pixelformat = V4L2_PIX_FMT_RGB24,
+ .nb_planes = 1,
+ .bpp = 24,
+ .bpp_plane0 = 24,
+ .w_align = 1,
+ .h_align = 1
+ },
+ /* YU12. YUV420P - 1 plane for Y + 1 plane for Cb + 1 plane for Cr
+	 * Keep this as the LAST element of this table (not supported on capture)
+ */
+ {
+ .pixelformat = V4L2_PIX_FMT_YUV420,
+ .nb_planes = 3,
+ .bpp = 12,
+ .bpp_plane0 = 8,
+ .w_align = 2,
+ .h_align = 2
+ }
+};
+
+/* Default format: HD ARGB32 */
+#define BDISP_DEF_WIDTH 1920
+#define BDISP_DEF_HEIGHT 1080
+
+static const struct bdisp_frame bdisp_dflt_fmt = {
+ .width = BDISP_DEF_WIDTH,
+ .height = BDISP_DEF_HEIGHT,
+ .fmt = &bdisp_formats[0],
+ .field = V4L2_FIELD_NONE,
+ .bytesperline = BDISP_DEF_WIDTH * 4,
+ .sizeimage = BDISP_DEF_WIDTH * BDISP_DEF_HEIGHT * 4,
+ .colorspace = V4L2_COLORSPACE_REC709,
+ .crop = {0, 0, BDISP_DEF_WIDTH, BDISP_DEF_HEIGHT},
+ .paddr = {0, 0, 0, 0}
+};
+
+static inline void bdisp_ctx_state_lock_set(u32 state, struct bdisp_ctx *ctx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->bdisp_dev->slock, flags);
+ ctx->state |= state;
+ spin_unlock_irqrestore(&ctx->bdisp_dev->slock, flags);
+}
+
+static inline void bdisp_ctx_state_lock_clear(u32 state, struct bdisp_ctx *ctx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->bdisp_dev->slock, flags);
+ ctx->state &= ~state;
+ spin_unlock_irqrestore(&ctx->bdisp_dev->slock, flags);
+}
+
+static inline bool bdisp_ctx_state_is_set(u32 mask, struct bdisp_ctx *ctx)
+{
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&ctx->bdisp_dev->slock, flags);
+ ret = (ctx->state & mask) == mask;
+ spin_unlock_irqrestore(&ctx->bdisp_dev->slock, flags);
+
+ return ret;
+}
+
+static const struct bdisp_fmt *bdisp_find_fmt(u32 pixelformat)
+{
+ const struct bdisp_fmt *fmt;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(bdisp_formats); i++) {
+ fmt = &bdisp_formats[i];
+ if (fmt->pixelformat == pixelformat)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+static struct bdisp_frame *ctx_get_frame(struct bdisp_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return &ctx->src;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return &ctx->dst;
+ default:
+ dev_err(ctx->bdisp_dev->dev,
+ "Wrong buffer/video queue type (%d)\n", type);
+ break;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static void bdisp_job_finish(struct bdisp_ctx *ctx, int vb_state)
+{
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
+
+ if (WARN(!ctx || !ctx->fh.m2m_ctx, "Null hardware context\n"))
+ return;
+
+ dev_dbg(ctx->bdisp_dev->dev, "%s\n", __func__);
+
+ src_vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst_vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ if (src_vb && dst_vb) {
+ dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;
+ dst_vb->timecode = src_vb->timecode;
+ dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst_vb->flags |= src_vb->flags &
+ V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+
+ v4l2_m2m_buf_done(src_vb, vb_state);
+ v4l2_m2m_buf_done(dst_vb, vb_state);
+
+ v4l2_m2m_job_finish(ctx->bdisp_dev->m2m.m2m_dev,
+ ctx->fh.m2m_ctx);
+ }
+}
+
+static int bdisp_ctx_stop_req(struct bdisp_ctx *ctx)
+{
+ struct bdisp_ctx *curr_ctx;
+ struct bdisp_dev *bdisp = ctx->bdisp_dev;
+ int ret;
+
+ dev_dbg(ctx->bdisp_dev->dev, "%s\n", __func__);
+
+ cancel_delayed_work(&bdisp->timeout_work);
+
+ curr_ctx = v4l2_m2m_get_curr_priv(bdisp->m2m.m2m_dev);
+ if (!test_bit(ST_M2M_RUNNING, &bdisp->state) || (curr_ctx != ctx))
+ return 0;
+
+ bdisp_ctx_state_lock_set(BDISP_CTX_STOP_REQ, ctx);
+
+ ret = wait_event_timeout(bdisp->irq_queue,
+ !bdisp_ctx_state_is_set(BDISP_CTX_STOP_REQ, ctx),
+ BDISP_WORK_TIMEOUT);
+
+ if (!ret) {
+ dev_err(ctx->bdisp_dev->dev, "%s IRQ timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void __bdisp_job_abort(struct bdisp_ctx *ctx)
+{
+ int ret;
+
+ ret = bdisp_ctx_stop_req(ctx);
+ if ((ret == -ETIMEDOUT) || (ctx->state & BDISP_CTX_ABORT)) {
+ bdisp_ctx_state_lock_clear(BDISP_CTX_STOP_REQ | BDISP_CTX_ABORT,
+ ctx);
+ bdisp_job_finish(ctx, VB2_BUF_STATE_ERROR);
+ }
+}
+
+static void bdisp_job_abort(void *priv)
+{
+ __bdisp_job_abort((struct bdisp_ctx *)priv);
+}
+
+static int bdisp_get_addr(struct bdisp_ctx *ctx, struct vb2_buffer *vb,
+ struct bdisp_frame *frame, dma_addr_t *paddr)
+{
+ if (!vb || !frame)
+ return -EINVAL;
+
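+	/* Single-planar API: chroma plane(s) follow luma in the same contiguous buffer */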
+ paddr[0] = vb2_dma_contig_plane_dma_addr(vb, 0);
+
+ if (frame->fmt->nb_planes > 1)
+ /* UV (NV12) or U (420P) */
+ paddr[1] = (dma_addr_t)(paddr[0] +
+ frame->bytesperline * frame->height);
+
+ if (frame->fmt->nb_planes > 2)
+ /* V (420P) */
+ paddr[2] = (dma_addr_t)(paddr[1] +
+ (frame->bytesperline * frame->height) / 4);
+
+ if (frame->fmt->nb_planes > 3)
+ dev_dbg(ctx->bdisp_dev->dev, "ignoring some planes\n");
+
+ dev_dbg(ctx->bdisp_dev->dev,
+ "%s plane[0]=%pad plane[1]=%pad plane[2]=%pad\n",
+ __func__, &paddr[0], &paddr[1], &paddr[2]);
+
+ return 0;
+}
+
+static int bdisp_get_bufs(struct bdisp_ctx *ctx)
+{
+ struct bdisp_frame *src, *dst;
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
+ int ret;
+
+ src = &ctx->src;
+ dst = &ctx->dst;
+
+ src_vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ ret = bdisp_get_addr(ctx, &src_vb->vb2_buf, src, src->paddr);
+ if (ret)
+ return ret;
+
+ dst_vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ ret = bdisp_get_addr(ctx, &dst_vb->vb2_buf, dst, dst->paddr);
+ if (ret)
+ return ret;
+
+ dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;
+
+ return 0;
+}
+
+static void bdisp_device_run(void *priv)
+{
+ struct bdisp_ctx *ctx = priv;
+ struct bdisp_dev *bdisp;
+ unsigned long flags;
+ int err = 0;
+
+ if (WARN(!ctx, "Null hardware context\n"))
+ return;
+
+ bdisp = ctx->bdisp_dev;
+ dev_dbg(bdisp->dev, "%s\n", __func__);
+ spin_lock_irqsave(&bdisp->slock, flags);
+
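+	/* Running a different context: force a full parameter reload */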
+ if (bdisp->m2m.ctx != ctx) {
+ dev_dbg(bdisp->dev, "ctx updated: %p -> %p\n",
+ bdisp->m2m.ctx, ctx);
+ ctx->state |= BDISP_PARAMS;
+ bdisp->m2m.ctx = ctx;
+ }
+
+ if (ctx->state & BDISP_CTX_STOP_REQ) {
+ ctx->state &= ~BDISP_CTX_STOP_REQ;
+ ctx->state |= BDISP_CTX_ABORT;
+ wake_up(&bdisp->irq_queue);
+ goto out;
+ }
+
+ err = bdisp_get_bufs(ctx);
+ if (err) {
+ dev_err(bdisp->dev, "cannot get address\n");
+ goto out;
+ }
+
+ bdisp_dbg_perf_begin(bdisp);
+
+ err = bdisp_hw_reset(bdisp);
+ if (err) {
+ dev_err(bdisp->dev, "could not get HW ready\n");
+ goto out;
+ }
+
+ err = bdisp_hw_update(ctx);
+ if (err) {
+ dev_err(bdisp->dev, "could not send HW request\n");
+ goto out;
+ }
+
+ queue_delayed_work(bdisp->work_queue, &bdisp->timeout_work,
+ BDISP_WORK_TIMEOUT);
+ set_bit(ST_M2M_RUNNING, &bdisp->state);
+out:
+ ctx->state &= ~BDISP_PARAMS;
+ spin_unlock_irqrestore(&bdisp->slock, flags);
+ if (err)
+ bdisp_job_finish(ctx, VB2_BUF_STATE_ERROR);
+}
+
+static const struct v4l2_m2m_ops bdisp_m2m_ops = {
+ .device_run = bdisp_device_run,
+ .job_abort = bdisp_job_abort,
+};
+
+static int __bdisp_s_ctrl(struct bdisp_ctx *ctx, struct v4l2_ctrl *ctrl)
+{
+ if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ ctx->hflip = ctrl->val;
+ break;
+ case V4L2_CID_VFLIP:
+ ctx->vflip = ctrl->val;
+ break;
+ default:
+ dev_err(ctx->bdisp_dev->dev, "unknown control %d\n", ctrl->id);
+ return -EINVAL;
+ }
+
+ ctx->state |= BDISP_PARAMS;
+
+ return 0;
+}
+
+static int bdisp_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct bdisp_ctx *ctx = container_of(ctrl->handler, struct bdisp_ctx,
+ ctrl_handler);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ctx->bdisp_dev->slock, flags);
+ ret = __bdisp_s_ctrl(ctx, ctrl);
+ spin_unlock_irqrestore(&ctx->bdisp_dev->slock, flags);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops bdisp_c_ops = {
+ .s_ctrl = bdisp_s_ctrl,
+};
+
+static int bdisp_ctrls_create(struct bdisp_ctx *ctx)
+{
+ if (ctx->ctrls_rdy)
+ return 0;
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, BDISP_MAX_CTRL_NUM);
+
+ ctx->bdisp_ctrls.hflip = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+ &bdisp_c_ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
+ ctx->bdisp_ctrls.vflip = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+ &bdisp_c_ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ if (ctx->ctrl_handler.error) {
+ int err = ctx->ctrl_handler.error;
+
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ return err;
+ }
+
+ ctx->ctrls_rdy = true;
+
+ return 0;
+}
+
+static void bdisp_ctrls_delete(struct bdisp_ctx *ctx)
+{
+ if (ctx->ctrls_rdy) {
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ ctx->ctrls_rdy = false;
+ }
+}
+
+static int bdisp_queue_setup(struct vb2_queue *vq,
+ unsigned int *nb_buf, unsigned int *nb_planes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct bdisp_ctx *ctx = vb2_get_drv_priv(vq);
+ struct bdisp_frame *frame = ctx_get_frame(ctx, vq->type);
+
+ if (IS_ERR(frame)) {
+ dev_err(ctx->bdisp_dev->dev, "Invalid frame (%p)\n", frame);
+ return PTR_ERR(frame);
+ }
+
+ if (!frame->fmt) {
+ dev_err(ctx->bdisp_dev->dev, "Invalid format\n");
+ return -EINVAL;
+ }
+
+ if (*nb_planes)
+ return sizes[0] < frame->sizeimage ? -EINVAL : 0;
+
+ *nb_planes = 1;
+ sizes[0] = frame->sizeimage;
+
+ return 0;
+}
+
+static int bdisp_buf_prepare(struct vb2_buffer *vb)
+{
+ struct bdisp_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct bdisp_frame *frame = ctx_get_frame(ctx, vb->vb2_queue->type);
+
+ if (IS_ERR(frame)) {
+ dev_err(ctx->bdisp_dev->dev, "Invalid frame (%p)\n", frame);
+ return PTR_ERR(frame);
+ }
+
+ if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ vb2_set_plane_payload(vb, 0, frame->sizeimage);
+
+ return 0;
+}
+
+static void bdisp_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct bdisp_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ /* return to V4L2 any 0-size buffer so it can be dequeued by user */
+ if (!vb2_get_plane_payload(vb, 0)) {
+ dev_dbg(ctx->bdisp_dev->dev, "0 data buffer, skip it\n");
+ vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+ return;
+ }
+
+ if (ctx->fh.m2m_ctx)
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static int bdisp_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct bdisp_ctx *ctx = q->drv_priv;
+ struct vb2_v4l2_buffer *buf;
+ int ret = pm_runtime_resume_and_get(ctx->bdisp_dev->dev);
+
+ if (ret < 0) {
+ dev_err(ctx->bdisp_dev->dev, "failed to set runtime PM\n");
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ while ((buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED);
+ } else {
+ while ((buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED);
+ }
+
+ return ret;
+ }
+
+ return 0;
+}
+
+static void bdisp_stop_streaming(struct vb2_queue *q)
+{
+ struct bdisp_ctx *ctx = q->drv_priv;
+
+ __bdisp_job_abort(ctx);
+
+ pm_runtime_put(ctx->bdisp_dev->dev);
+}
+
+static const struct vb2_ops bdisp_qops = {
+ .queue_setup = bdisp_queue_setup,
+ .buf_prepare = bdisp_buf_prepare,
+ .buf_queue = bdisp_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .stop_streaming = bdisp_stop_streaming,
+ .start_streaming = bdisp_start_streaming,
+};
+
+static int queue_init(void *priv,
+ struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
+{
+ struct bdisp_ctx *ctx = priv;
+ int ret;
+
+ memset(src_vq, 0, sizeof(*src_vq));
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->ops = &bdisp_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->bdisp_dev->lock;
+ src_vq->dev = ctx->bdisp_dev->v4l2_dev.dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ memset(dst_vq, 0, sizeof(*dst_vq));
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->ops = &bdisp_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->bdisp_dev->lock;
+ dst_vq->dev = ctx->bdisp_dev->v4l2_dev.dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+static int bdisp_open(struct file *file)
+{
+ struct bdisp_dev *bdisp = video_drvdata(file);
+ struct bdisp_ctx *ctx = NULL;
+ int ret;
+
+ if (mutex_lock_interruptible(&bdisp->lock))
+ return -ERESTARTSYS;
+
+ /* Allocate memory for both context and node */
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+ ctx->bdisp_dev = bdisp;
+
+ if (bdisp_hw_alloc_nodes(ctx)) {
+ dev_err(bdisp->dev, "no memory for nodes\n");
+ ret = -ENOMEM;
+ goto mem_ctx;
+ }
+
+ v4l2_fh_init(&ctx->fh, bdisp->m2m.vdev);
+
+ ret = bdisp_ctrls_create(ctx);
+ if (ret) {
+ dev_err(bdisp->dev, "Failed to create control\n");
+ goto error_fh;
+ }
+
+ /* Use separate control handler per file handle */
+ ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ /* Default format */
+ ctx->src = bdisp_dflt_fmt;
+ ctx->dst = bdisp_dflt_fmt;
+
+ /* Setup the device context for mem2mem mode. */
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(bdisp->m2m.m2m_dev, ctx,
+ queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ dev_err(bdisp->dev, "Failed to initialize m2m context\n");
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ goto error_ctrls;
+ }
+
+ bdisp->m2m.refcnt++;
+ set_bit(ST_M2M_OPEN, &bdisp->state);
+
+ dev_dbg(bdisp->dev, "driver opened, ctx = 0x%p\n", ctx);
+
+ mutex_unlock(&bdisp->lock);
+
+ return 0;
+
+error_ctrls:
+ bdisp_ctrls_delete(ctx);
+ v4l2_fh_del(&ctx->fh);
+error_fh:
+ v4l2_fh_exit(&ctx->fh);
+ bdisp_hw_free_nodes(ctx);
+mem_ctx:
+ kfree(ctx);
+unlock:
+ mutex_unlock(&bdisp->lock);
+
+ return ret;
+}
+
+static int bdisp_release(struct file *file)
+{
+ struct bdisp_ctx *ctx = fh_to_ctx(file->private_data);
+ struct bdisp_dev *bdisp = ctx->bdisp_dev;
+
+ dev_dbg(bdisp->dev, "%s\n", __func__);
+
+ mutex_lock(&bdisp->lock);
+
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+
+ bdisp_ctrls_delete(ctx);
+
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+
+ if (--bdisp->m2m.refcnt <= 0)
+ clear_bit(ST_M2M_OPEN, &bdisp->state);
+
+ bdisp_hw_free_nodes(ctx);
+
+ kfree(ctx);
+
+ mutex_unlock(&bdisp->lock);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations bdisp_fops = {
+ .owner = THIS_MODULE,
+ .open = bdisp_open,
+ .release = bdisp_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static int bdisp_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct bdisp_ctx *ctx = fh_to_ctx(fh);
+ struct bdisp_dev *bdisp = ctx->bdisp_dev;
+
+ strscpy(cap->driver, bdisp->pdev->name, sizeof(cap->driver));
+ strscpy(cap->card, bdisp->pdev->name, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s%d",
+ BDISP_NAME, bdisp->id);
+ return 0;
+}
+
+static int bdisp_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+{
+ struct bdisp_ctx *ctx = fh_to_ctx(fh);
+ const struct bdisp_fmt *fmt;
+
+ if (f->index >= ARRAY_SIZE(bdisp_formats))
+ return -EINVAL;
+
+ fmt = &bdisp_formats[f->index];
+
+ if ((fmt->pixelformat == V4L2_PIX_FMT_YUV420) &&
+ (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)) {
+ dev_dbg(ctx->bdisp_dev->dev, "No YU12 on capture\n");
+ return -EINVAL;
+ }
+ f->pixelformat = fmt->pixelformat;
+
+ return 0;
+}
+
+static int bdisp_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct bdisp_ctx *ctx = fh_to_ctx(fh);
+ struct v4l2_pix_format *pix;
+ struct bdisp_frame *frame = ctx_get_frame(ctx, f->type);
+
+ if (IS_ERR(frame)) {
+ dev_err(ctx->bdisp_dev->dev, "Invalid frame (%p)\n", frame);
+ return PTR_ERR(frame);
+ }
+
+ pix = &f->fmt.pix;
+ pix->width = frame->width;
+ pix->height = frame->height;
+ pix->pixelformat = frame->fmt->pixelformat;
+ pix->field = frame->field;
+ pix->bytesperline = frame->bytesperline;
+ pix->sizeimage = frame->sizeimage;
+ pix->colorspace = (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) ?
+ frame->colorspace : bdisp_dflt_fmt.colorspace;
+
+ return 0;
+}
+
+static int bdisp_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct bdisp_ctx *ctx = fh_to_ctx(fh);
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ const struct bdisp_fmt *format;
+ u32 in_w, in_h;
+
+ format = bdisp_find_fmt(pix->pixelformat);
+ if (!format) {
+ dev_dbg(ctx->bdisp_dev->dev, "Unknown format 0x%x\n",
+ pix->pixelformat);
+ return -EINVAL;
+ }
+
+ /* YUV420P only supported for VIDEO_OUTPUT */
+ if ((format->pixelformat == V4L2_PIX_FMT_YUV420) &&
+ (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)) {
+ dev_dbg(ctx->bdisp_dev->dev, "No YU12 on capture\n");
+ return -EINVAL;
+ }
+
+ /* Field (interlaced only supported on OUTPUT) */
+ if ((f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) ||
+ (pix->field != V4L2_FIELD_INTERLACED))
+ pix->field = V4L2_FIELD_NONE;
+
+ /* Adjust width & height */
+ in_w = pix->width;
+ in_h = pix->height;
+ v4l_bound_align_image(&pix->width,
+ BDISP_MIN_W, BDISP_MAX_W,
+ ffs(format->w_align) - 1,
+ &pix->height,
+ BDISP_MIN_H, BDISP_MAX_H,
+ ffs(format->h_align) - 1,
+ 0);
+ if ((pix->width != in_w) || (pix->height != in_h))
+ dev_dbg(ctx->bdisp_dev->dev,
+ "%s size updated: %dx%d -> %dx%d\n", __func__,
+ in_w, in_h, pix->width, pix->height);
+
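+	/* Plane-0 stride derives from bpp_plane0; total image size from the overall bpp */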
+ pix->bytesperline = (pix->width * format->bpp_plane0) / 8;
+ pix->sizeimage = (pix->width * pix->height * format->bpp) / 8;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ pix->colorspace = bdisp_dflt_fmt.colorspace;
+
+ return 0;
+}
+
+static int bdisp_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct bdisp_ctx *ctx = fh_to_ctx(fh);
+ struct vb2_queue *vq;
+ struct bdisp_frame *frame;
+ struct v4l2_pix_format *pix;
+ int ret;
+ u32 state;
+
+ ret = bdisp_try_fmt(file, fh, f);
+ if (ret) {
+ dev_err(ctx->bdisp_dev->dev, "Cannot set format\n");
+ return ret;
+ }
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_streaming(vq)) {
+ dev_err(ctx->bdisp_dev->dev, "queue (%d) busy\n", f->type);
+ return -EBUSY;
+ }
+
+ frame = (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) ?
+ &ctx->src : &ctx->dst;
+ pix = &f->fmt.pix;
+ frame->fmt = bdisp_find_fmt(pix->pixelformat);
+ if (!frame->fmt) {
+ dev_err(ctx->bdisp_dev->dev, "Unknown format 0x%x\n",
+ pix->pixelformat);
+ return -EINVAL;
+ }
+
+ frame->width = pix->width;
+ frame->height = pix->height;
+ frame->bytesperline = pix->bytesperline;
+ frame->sizeimage = pix->sizeimage;
+ frame->field = pix->field;
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ frame->colorspace = pix->colorspace;
+
+ frame->crop.width = frame->width;
+ frame->crop.height = frame->height;
+ frame->crop.left = 0;
+ frame->crop.top = 0;
+
+ state = BDISP_PARAMS;
+ state |= (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) ?
+ BDISP_DST_FMT : BDISP_SRC_FMT;
+ bdisp_ctx_state_lock_set(state, ctx);
+
+ return 0;
+}
+
+static int bdisp_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct bdisp_frame *frame;
+ struct bdisp_ctx *ctx = fh_to_ctx(fh);
+
+ frame = ctx_get_frame(ctx, s->type);
+ if (IS_ERR(frame)) {
+ dev_err(ctx->bdisp_dev->dev, "Invalid frame (%p)\n", frame);
+ return PTR_ERR(frame);
+ }
+
+ switch (s->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP:
+ /* cropped frame */
+ s->r = frame->crop;
+ break;
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ /* complete frame */
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = frame->width;
+ s->r.height = frame->height;
+ break;
+ default:
+ dev_err(ctx->bdisp_dev->dev, "Invalid target\n");
+ return -EINVAL;
+ }
+ break;
+
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_COMPOSE_PADDED:
+ /* composed (cropped) frame */
+ s->r = frame->crop;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ /* complete frame */
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = frame->width;
+ s->r.height = frame->height;
+ break;
+ default:
+ dev_err(ctx->bdisp_dev->dev, "Invalid target\n");
+ return -EINVAL;
+ }
+ break;
+
+ default:
+ dev_err(ctx->bdisp_dev->dev, "Invalid type\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int is_rect_enclosed(struct v4l2_rect *a, struct v4l2_rect *b)
+{
+ /* Return 1 if a is enclosed in b, or 0 otherwise. */
+
+ if (a->left < b->left || a->top < b->top)
+ return 0;
+
+ if (a->left + a->width > b->left + b->width)
+ return 0;
+
+ if (a->top + a->height > b->top + b->height)
+ return 0;
+
+ return 1;
+}
+
+static int bdisp_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct bdisp_frame *frame;
+ struct bdisp_ctx *ctx = fh_to_ctx(fh);
+ struct v4l2_rect *in, out;
+ bool valid = false;
+
+ if ((s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) &&
+ (s->target == V4L2_SEL_TGT_CROP))
+ valid = true;
+
+ if ((s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
+ (s->target == V4L2_SEL_TGT_COMPOSE))
+ valid = true;
+
+ if (!valid) {
+ dev_err(ctx->bdisp_dev->dev, "Invalid type / target\n");
+ return -EINVAL;
+ }
+
+ frame = ctx_get_frame(ctx, s->type);
+ if (IS_ERR(frame)) {
+ dev_err(ctx->bdisp_dev->dev, "Invalid frame (%p)\n", frame);
+ return PTR_ERR(frame);
+ }
+
+ in = &s->r;
+ out = *in;
+
+ /* Align and check origin */
+ out.left = ALIGN(in->left, frame->fmt->w_align);
+ out.top = ALIGN(in->top, frame->fmt->h_align);
+
+ if ((out.left < 0) || (out.left >= frame->width) ||
+ (out.top < 0) || (out.top >= frame->height)) {
+ dev_err(ctx->bdisp_dev->dev,
+ "Invalid crop: %dx%d@(%d,%d) vs frame: %dx%d\n",
+ out.width, out.height, out.left, out.top,
+ frame->width, frame->height);
+ return -EINVAL;
+ }
+
+ /* Align and check size */
+ out.width = ALIGN(in->width, frame->fmt->w_align);
+	out.height = ALIGN(in->height, frame->fmt->h_align);
+
+ if (((out.left + out.width) > frame->width) ||
+ ((out.top + out.height) > frame->height)) {
+ dev_err(ctx->bdisp_dev->dev,
+ "Invalid crop: %dx%d@(%d,%d) vs frame: %dx%d\n",
+ out.width, out.height, out.left, out.top,
+ frame->width, frame->height);
+ return -EINVAL;
+ }
+
+ /* Checks adjust constraints flags */
+ if (s->flags & V4L2_SEL_FLAG_LE && !is_rect_enclosed(&out, in))
+ return -ERANGE;
+
+ if (s->flags & V4L2_SEL_FLAG_GE && !is_rect_enclosed(in, &out))
+ return -ERANGE;
+
+ if ((out.left != in->left) || (out.top != in->top) ||
+ (out.width != in->width) || (out.height != in->height)) {
+ dev_dbg(ctx->bdisp_dev->dev,
+ "%s crop updated: %dx%d@(%d,%d) -> %dx%d@(%d,%d)\n",
+ __func__, in->width, in->height, in->left, in->top,
+ out.width, out.height, out.left, out.top);
+ *in = out;
+ }
+
+ frame->crop = out;
+
+ bdisp_ctx_state_lock_set(BDISP_PARAMS, ctx);
+
+ return 0;
+}
+
+static int bdisp_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
+{
+ struct bdisp_ctx *ctx = fh_to_ctx(fh);
+
+ if ((type == V4L2_BUF_TYPE_VIDEO_OUTPUT) &&
+ !bdisp_ctx_state_is_set(BDISP_SRC_FMT, ctx)) {
+ dev_err(ctx->bdisp_dev->dev, "src not defined\n");
+ return -EINVAL;
+ }
+
+ if ((type == V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
+ !bdisp_ctx_state_is_set(BDISP_DST_FMT, ctx)) {
+ dev_err(ctx->bdisp_dev->dev, "dst not defined\n");
+ return -EINVAL;
+ }
+
+ return v4l2_m2m_streamon(file, ctx->fh.m2m_ctx, type);
+}
+
+static const struct v4l2_ioctl_ops bdisp_ioctl_ops = {
+ .vidioc_querycap = bdisp_querycap,
+ .vidioc_enum_fmt_vid_cap = bdisp_enum_fmt,
+ .vidioc_enum_fmt_vid_out = bdisp_enum_fmt,
+ .vidioc_g_fmt_vid_cap = bdisp_g_fmt,
+ .vidioc_g_fmt_vid_out = bdisp_g_fmt,
+ .vidioc_try_fmt_vid_cap = bdisp_try_fmt,
+ .vidioc_try_fmt_vid_out = bdisp_try_fmt,
+ .vidioc_s_fmt_vid_cap = bdisp_s_fmt,
+ .vidioc_s_fmt_vid_out = bdisp_s_fmt,
+ .vidioc_g_selection = bdisp_g_selection,
+ .vidioc_s_selection = bdisp_s_selection,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_streamon = bdisp_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static int bdisp_register_device(struct bdisp_dev *bdisp)
+{
+ int ret;
+
+ if (!bdisp)
+ return -ENODEV;
+
+ bdisp->vdev.fops = &bdisp_fops;
+ bdisp->vdev.ioctl_ops = &bdisp_ioctl_ops;
+ bdisp->vdev.release = video_device_release_empty;
+ bdisp->vdev.lock = &bdisp->lock;
+ bdisp->vdev.vfl_dir = VFL_DIR_M2M;
+ bdisp->vdev.v4l2_dev = &bdisp->v4l2_dev;
+ bdisp->vdev.device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M;
+ snprintf(bdisp->vdev.name, sizeof(bdisp->vdev.name), "%s.%d",
+ BDISP_NAME, bdisp->id);
+
+ video_set_drvdata(&bdisp->vdev, bdisp);
+
+ bdisp->m2m.vdev = &bdisp->vdev;
+ bdisp->m2m.m2m_dev = v4l2_m2m_init(&bdisp_m2m_ops);
+ if (IS_ERR(bdisp->m2m.m2m_dev)) {
+ dev_err(bdisp->dev, "failed to initialize v4l2-m2m device\n");
+ return PTR_ERR(bdisp->m2m.m2m_dev);
+ }
+
+ ret = video_register_device(&bdisp->vdev, VFL_TYPE_VIDEO, -1);
+ if (ret) {
+ dev_err(bdisp->dev,
+ "%s(): failed to register video device\n", __func__);
+ v4l2_m2m_release(bdisp->m2m.m2m_dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void bdisp_unregister_device(struct bdisp_dev *bdisp)
+{
+ if (!bdisp)
+ return;
+
+ if (bdisp->m2m.m2m_dev)
+ v4l2_m2m_release(bdisp->m2m.m2m_dev);
+
+ video_unregister_device(bdisp->m2m.vdev);
+}
+
+static irqreturn_t bdisp_irq_thread(int irq, void *priv)
+{
+ struct bdisp_dev *bdisp = priv;
+ struct bdisp_ctx *ctx;
+
+ spin_lock(&bdisp->slock);
+
+ bdisp_dbg_perf_end(bdisp);
+
+ cancel_delayed_work(&bdisp->timeout_work);
+
+ if (!test_and_clear_bit(ST_M2M_RUNNING, &bdisp->state))
+ goto isr_unlock;
+
+ if (test_and_clear_bit(ST_M2M_SUSPENDING, &bdisp->state)) {
+ set_bit(ST_M2M_SUSPENDED, &bdisp->state);
+ wake_up(&bdisp->irq_queue);
+ goto isr_unlock;
+ }
+
+ ctx = v4l2_m2m_get_curr_priv(bdisp->m2m.m2m_dev);
+ if (!ctx || !ctx->fh.m2m_ctx)
+ goto isr_unlock;
+
+ spin_unlock(&bdisp->slock);
+
+ bdisp_job_finish(ctx, VB2_BUF_STATE_DONE);
+
+ if (bdisp_ctx_state_is_set(BDISP_CTX_STOP_REQ, ctx)) {
+ bdisp_ctx_state_lock_clear(BDISP_CTX_STOP_REQ, ctx);
+ wake_up(&bdisp->irq_queue);
+ }
+
+ return IRQ_HANDLED;
+
+isr_unlock:
+ spin_unlock(&bdisp->slock);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t bdisp_irq_handler(int irq, void *priv)
+{
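+	/* Hard IRQ: only acknowledge the HW interrupt, processing is deferred to the thread */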
+ if (bdisp_hw_get_and_clear_irq((struct bdisp_dev *)priv))
+ return IRQ_NONE;
+ else
+ return IRQ_WAKE_THREAD;
+}
+
+static void bdisp_irq_timeout(struct work_struct *ptr)
+{
+ struct delayed_work *twork = to_delayed_work(ptr);
+ struct bdisp_dev *bdisp = container_of(twork, struct bdisp_dev,
+ timeout_work);
+ struct bdisp_ctx *ctx;
+
+ ctx = v4l2_m2m_get_curr_priv(bdisp->m2m.m2m_dev);
+
+ dev_err(ctx->bdisp_dev->dev, "Device work timeout\n");
+
+ spin_lock(&bdisp->slock);
+ clear_bit(ST_M2M_RUNNING, &bdisp->state);
+ spin_unlock(&bdisp->slock);
+
+ bdisp_hw_reset(bdisp);
+
+ bdisp_job_finish(ctx, VB2_BUF_STATE_ERROR);
+}
+
+static int bdisp_m2m_suspend(struct bdisp_dev *bdisp)
+{
+ unsigned long flags;
+ int timeout;
+
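+	/* If the HW is running, wait for the in-flight job to complete before suspending */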
+ spin_lock_irqsave(&bdisp->slock, flags);
+ if (!test_bit(ST_M2M_RUNNING, &bdisp->state)) {
+ spin_unlock_irqrestore(&bdisp->slock, flags);
+ return 0;
+ }
+ clear_bit(ST_M2M_SUSPENDED, &bdisp->state);
+ set_bit(ST_M2M_SUSPENDING, &bdisp->state);
+ spin_unlock_irqrestore(&bdisp->slock, flags);
+
+ timeout = wait_event_timeout(bdisp->irq_queue,
+ test_bit(ST_M2M_SUSPENDED, &bdisp->state),
+ BDISP_WORK_TIMEOUT);
+
+ clear_bit(ST_M2M_SUSPENDING, &bdisp->state);
+
+ if (!timeout) {
+ dev_err(bdisp->dev, "%s IRQ timeout\n", __func__);
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static int bdisp_m2m_resume(struct bdisp_dev *bdisp)
+{
+ struct bdisp_ctx *ctx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bdisp->slock, flags);
+ ctx = bdisp->m2m.ctx;
+ bdisp->m2m.ctx = NULL;
+ spin_unlock_irqrestore(&bdisp->slock, flags);
+
+ if (test_and_clear_bit(ST_M2M_SUSPENDED, &bdisp->state))
+ bdisp_job_finish(ctx, VB2_BUF_STATE_ERROR);
+
+ return 0;
+}
+
+static int bdisp_runtime_resume(struct device *dev)
+{
+ struct bdisp_dev *bdisp = dev_get_drvdata(dev);
+ int ret = clk_enable(bdisp->clock);
+
+ if (ret)
+ return ret;
+
+ return bdisp_m2m_resume(bdisp);
+}
+
+static int bdisp_runtime_suspend(struct device *dev)
+{
+ struct bdisp_dev *bdisp = dev_get_drvdata(dev);
+ int ret = bdisp_m2m_suspend(bdisp);
+
+ if (!ret)
+ clk_disable(bdisp->clock);
+
+ return ret;
+}
+
+static int bdisp_resume(struct device *dev)
+{
+ struct bdisp_dev *bdisp = dev_get_drvdata(dev);
+ unsigned long flags;
+ int opened;
+
+ spin_lock_irqsave(&bdisp->slock, flags);
+ opened = test_bit(ST_M2M_OPEN, &bdisp->state);
+ spin_unlock_irqrestore(&bdisp->slock, flags);
+
+ if (!opened)
+ return 0;
+
+ if (!pm_runtime_suspended(dev))
+ return bdisp_runtime_resume(dev);
+
+ return 0;
+}
+
+static int bdisp_suspend(struct device *dev)
+{
+ if (!pm_runtime_suspended(dev))
+ return bdisp_runtime_suspend(dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops bdisp_pm_ops = {
+ .suspend = bdisp_suspend,
+ .resume = bdisp_resume,
+ .runtime_suspend = bdisp_runtime_suspend,
+ .runtime_resume = bdisp_runtime_resume,
+};
+
+static int bdisp_remove(struct platform_device *pdev)
+{
+ struct bdisp_dev *bdisp = platform_get_drvdata(pdev);
+
+ bdisp_unregister_device(bdisp);
+
+ bdisp_hw_free_filters(bdisp->dev);
+
+ pm_runtime_disable(&pdev->dev);
+
+ bdisp_debugfs_remove(bdisp);
+
+ v4l2_device_unregister(&bdisp->v4l2_dev);
+
+ if (!IS_ERR(bdisp->clock))
+ clk_unprepare(bdisp->clock);
+
+ destroy_workqueue(bdisp->work_queue);
+
+ dev_dbg(&pdev->dev, "%s driver unloaded\n", pdev->name);
+
+ return 0;
+}
+
+static int bdisp_probe(struct platform_device *pdev)
+{
+ struct bdisp_dev *bdisp;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ bdisp = devm_kzalloc(dev, sizeof(struct bdisp_dev), GFP_KERNEL);
+ if (!bdisp)
+ return -ENOMEM;
+
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ bdisp->pdev = pdev;
+ bdisp->dev = dev;
+ platform_set_drvdata(pdev, bdisp);
+
+ if (dev->of_node)
+ bdisp->id = of_alias_get_id(pdev->dev.of_node, BDISP_NAME);
+ else
+ bdisp->id = pdev->id;
+
+ init_waitqueue_head(&bdisp->irq_queue);
+ INIT_DELAYED_WORK(&bdisp->timeout_work, bdisp_irq_timeout);
+ bdisp->work_queue = create_workqueue(BDISP_NAME);
+ if (!bdisp->work_queue)
+ return -ENOMEM;
+
+ spin_lock_init(&bdisp->slock);
+ mutex_init(&bdisp->lock);
+
+ /* get resources */
+ bdisp->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(bdisp->regs)) {
+ ret = PTR_ERR(bdisp->regs);
+ goto err_wq;
+ }
+
+ bdisp->clock = devm_clk_get(dev, BDISP_NAME);
+ if (IS_ERR(bdisp->clock)) {
+ dev_err(dev, "failed to get clock\n");
+ ret = PTR_ERR(bdisp->clock);
+ goto err_wq;
+ }
+
+ ret = clk_prepare(bdisp->clock);
+ if (ret < 0) {
+ dev_err(dev, "clock prepare failed\n");
+ bdisp->clock = ERR_PTR(-EINVAL);
+ goto err_wq;
+ }
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto err_clk;
+
+ ret = devm_request_threaded_irq(dev, ret, bdisp_irq_handler,
+ bdisp_irq_thread, IRQF_ONESHOT,
+ pdev->name, bdisp);
+ if (ret) {
+ dev_err(dev, "failed to install irq\n");
+ goto err_clk;
+ }
+
+ /* v4l2 register */
+ ret = v4l2_device_register(dev, &bdisp->v4l2_dev);
+ if (ret) {
+ dev_err(dev, "failed to register\n");
+ goto err_clk;
+ }
+
+ /* Debug */
+ bdisp_debugfs_create(bdisp);
+
+ /* Power management */
+ pm_runtime_enable(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to set PM\n");
+ goto err_remove;
+ }
+
+ /* Filters */
+ if (bdisp_hw_alloc_filters(bdisp->dev)) {
+ dev_err(bdisp->dev, "no memory for filters\n");
+ ret = -ENOMEM;
+ goto err_pm;
+ }
+
+ /* Register */
+ ret = bdisp_register_device(bdisp);
+ if (ret) {
+ dev_err(dev, "failed to register\n");
+ goto err_filter;
+ }
+
+ dev_info(dev, "%s%d registered as /dev/video%d\n", BDISP_NAME,
+ bdisp->id, bdisp->vdev.num);
+
+ pm_runtime_put(dev);
+
+ return 0;
+
+err_filter:
+ bdisp_hw_free_filters(bdisp->dev);
+err_pm:
+ pm_runtime_put(dev);
+err_remove:
+ pm_runtime_disable(dev);
+ bdisp_debugfs_remove(bdisp);
+ v4l2_device_unregister(&bdisp->v4l2_dev);
+err_clk:
+ clk_unprepare(bdisp->clock);
+err_wq:
+ destroy_workqueue(bdisp->work_queue);
+ return ret;
+}
+
+static const struct of_device_id bdisp_match_types[] = {
+ {
+ .compatible = "st,stih407-bdisp",
+ },
+ { /* end node */ }
+};
+
+MODULE_DEVICE_TABLE(of, bdisp_match_types);
+
+static struct platform_driver bdisp_driver = {
+ .probe = bdisp_probe,
+ .remove = bdisp_remove,
+ .driver = {
+ .name = BDISP_NAME,
+ .of_match_table = bdisp_match_types,
+ .pm = &bdisp_pm_ops,
+ },
+};
+
+module_platform_driver(bdisp_driver);
+
+MODULE_DESCRIPTION("2D blitter for STMicroelectronics SoC");
+MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/st/sti/bdisp/bdisp.h b/drivers/media/platform/st/sti/bdisp/bdisp.h
new file mode 100644
index 000000000..3fb009d24
--- /dev/null
+++ b/drivers/media/platform/st/sti/bdisp/bdisp.h
@@ -0,0 +1,214 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
+ */
+
+#include <linux/clk.h>
+#include <linux/ktime.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include <media/videobuf2-dma-contig.h>
+
+#define BDISP_NAME "bdisp"
+
+/*
+ * Max nb of nodes in node-list:
+ * - 2 nodes to handle wide 4K pictures
+ * - 2 nodes to handle two planes (Y & CbCr)
+ */
+#define MAX_OUTPUT_PLANES 2
+#define MAX_VERTICAL_STRIDES 2
+#define MAX_NB_NODE (MAX_OUTPUT_PLANES * MAX_VERTICAL_STRIDES)
+
+/**
+ * struct bdisp_ctrls - bdisp control set
+ * @hflip: horizontal flip
+ * @vflip: vertical flip
+ */
+struct bdisp_ctrls {
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+};
+
+/**
+ * struct bdisp_fmt - driver's internal color format data
+ * @pixelformat: fourcc code for this format
+ * @nb_planes: number of planes (ex: [0]=RGB/Y - [1]=Cb/Cr, ...)
+ * @bpp: bits per pixel (general)
+ * @bpp_plane0: bits per pixel for the 1st plane
+ * @w_align: width alignment in pixel (multiple of)
+ * @h_align: height alignment in pixel (multiple of)
+ */
+struct bdisp_fmt {
+ u32 pixelformat;
+ u8 nb_planes;
+ u8 bpp;
+ u8 bpp_plane0;
+ u8 w_align;
+ u8 h_align;
+};
+
+/**
+ * struct bdisp_frame - frame properties
+ *
+ * @width: frame width (including padding)
+ * @height: frame height (including padding)
+ * @fmt: pointer to frame format descriptor
+ * @field: frame / field type
+ * @bytesperline: stride of the 1st plane
+ * @sizeimage: image size in bytes
+ * @colorspace: colorspace
+ * @crop: crop area
+ * @paddr: image physical addresses per plane ([0]=RGB/Y - [1]=Cb/Cr, ...)
+ */
+struct bdisp_frame {
+ u32 width;
+ u32 height;
+ const struct bdisp_fmt *fmt;
+ enum v4l2_field field;
+ u32 bytesperline;
+ u32 sizeimage;
+ enum v4l2_colorspace colorspace;
+ struct v4l2_rect crop;
+ dma_addr_t paddr[4];
+};
+
+/**
+ * struct bdisp_request - bdisp request
+ *
+ * @src: source frame properties
+ * @dst: destination frame properties
+ * @hflip: horizontal flip
+ * @vflip: vertical flip
+ * @nb_req: number of run requests
+ */
+struct bdisp_request {
+ struct bdisp_frame src;
+ struct bdisp_frame dst;
+ unsigned int hflip:1;
+ unsigned int vflip:1;
+ int nb_req;
+};
+
+/**
+ * struct bdisp_ctx - device context data
+ *
+ * @src: source frame properties
+ * @dst: destination frame properties
+ * @state: flags to keep track of user configuration
+ * @hflip: horizontal flip
+ * @vflip: vertical flip
+ * @bdisp_dev: the device this context applies to
+ * @node: node array
+ * @node_paddr: node physical address array
+ * @fh: v4l2 file handle
+ * @ctrl_handler: v4l2 controls handler
+ * @bdisp_ctrls: bdisp control set
+ * @ctrls_rdy: true if the control handler is initialized
+ */
+struct bdisp_ctx {
+ struct bdisp_frame src;
+ struct bdisp_frame dst;
+ u32 state;
+ unsigned int hflip:1;
+ unsigned int vflip:1;
+ struct bdisp_dev *bdisp_dev;
+ struct bdisp_node *node[MAX_NB_NODE];
+ dma_addr_t node_paddr[MAX_NB_NODE];
+ struct v4l2_fh fh;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct bdisp_ctrls bdisp_ctrls;
+ bool ctrls_rdy;
+};
+
+/**
+ * struct bdisp_m2m_device - v4l2 memory-to-memory device data
+ *
+ * @vdev: video device node for v4l2 m2m mode
+ * @m2m_dev: v4l2 m2m device data
+ * @ctx: hardware context data
+ * @refcnt: reference counter
+ */
+struct bdisp_m2m_device {
+ struct video_device *vdev;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct bdisp_ctx *ctx;
+ int refcnt;
+};
+
+/**
+ * struct bdisp_dbg - debug info
+ *
+ * @debugfs_entry: debugfs
+ * @copy_node: array of last used nodes
+ * @copy_request: last bdisp request
+ * @hw_start: start time of last HW request
+ * @last_duration: last HW processing duration in microsecs
+ * @min_duration: min HW processing duration in microsecs
+ * @max_duration: max HW processing duration in microsecs
+ * @tot_duration: total HW processing duration in microsecs
+ */
+struct bdisp_dbg {
+ struct dentry *debugfs_entry;
+ struct bdisp_node *copy_node[MAX_NB_NODE];
+ struct bdisp_request copy_request;
+ ktime_t hw_start;
+ s64 last_duration;
+ s64 min_duration;
+ s64 max_duration;
+ s64 tot_duration;
+};
+
+/**
+ * struct bdisp_dev - abstraction for bdisp entity
+ *
+ * @v4l2_dev: v4l2 device
+ * @vdev: video device
+ * @pdev: platform device
+ * @dev: device
+ * @lock: mutex protecting this data structure
+ * @slock: spinlock protecting this data structure
+ * @id: device index
+ * @m2m: memory-to-memory V4L2 device information
+ * @state: flags used to synchronize m2m and capture mode operation
+ * @clock: IP clock
+ * @regs: registers
+ * @irq_queue: interrupt handler waitqueue
+ * @work_queue: workqueue to handle timeouts
+ * @timeout_work: IRQ timeout structure
+ * @dbg: debug info
+ */
+struct bdisp_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device vdev;
+ struct platform_device *pdev;
+ struct device *dev;
+ spinlock_t slock;
+ struct mutex lock;
+ u16 id;
+ struct bdisp_m2m_device m2m;
+ unsigned long state;
+ struct clk *clock;
+ void __iomem *regs;
+ wait_queue_head_t irq_queue;
+ struct workqueue_struct *work_queue;
+ struct delayed_work timeout_work;
+ struct bdisp_dbg dbg;
+};
+
+void bdisp_hw_free_nodes(struct bdisp_ctx *ctx);
+int bdisp_hw_alloc_nodes(struct bdisp_ctx *ctx);
+void bdisp_hw_free_filters(struct device *dev);
+int bdisp_hw_alloc_filters(struct device *dev);
+int bdisp_hw_reset(struct bdisp_dev *bdisp);
+int bdisp_hw_get_and_clear_irq(struct bdisp_dev *bdisp);
+int bdisp_hw_update(struct bdisp_ctx *ctx);
+
+void bdisp_debugfs_remove(struct bdisp_dev *bdisp);
+void bdisp_debugfs_create(struct bdisp_dev *bdisp);
+void bdisp_dbg_perf_begin(struct bdisp_dev *bdisp);
+void bdisp_dbg_perf_end(struct bdisp_dev *bdisp);
diff --git a/drivers/media/platform/st/sti/c8sectpfe/Kconfig b/drivers/media/platform/st/sti/c8sectpfe/Kconfig
new file mode 100644
index 000000000..702b91050
--- /dev/null
+++ b/drivers/media/platform/st/sti/c8sectpfe/Kconfig
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config DVB_C8SECTPFE
+ tristate "STMicroelectronics C8SECTPFE DVB support"
+ depends on DVB_PLATFORM_DRIVERS
+ depends on PINCTRL && DVB_CORE && I2C
+ depends on ARCH_STI || ARCH_MULTIPLATFORM || COMPILE_TEST
+ select FW_LOADER
+ select DEBUG_FS
+ select DVB_LNBP21 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_STV090x if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_STB6100 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_STV6110 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_STV0900 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_STV0367 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_TDA18212 if MEDIA_SUBDRV_AUTOSELECT
+
+ help
+ This adds support for DVB front-end cards connected
+	  to the TS inputs of the STiH407/410 SoC.
+
+ The driver currently supports C8SECTPFE's TS input block,
+ memdma engine, and HW PID filtering.
+
+ Supported DVB front-end cards are:
+ - STMicroelectronics DVB-T B2100A (STV0367 + TDA18212)
+ - STMicroelectronics DVB-S/S2 STV0903 + STV6110 + LNBP24 board
+
+ To compile this driver as a module, choose M here: the
+ module will be called c8sectpfe.
diff --git a/drivers/media/platform/st/sti/c8sectpfe/Makefile b/drivers/media/platform/st/sti/c8sectpfe/Makefile
new file mode 100644
index 000000000..aedfc725c
--- /dev/null
+++ b/drivers/media/platform/st/sti/c8sectpfe/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+c8sectpfe-y += c8sectpfe-core.o c8sectpfe-common.o c8sectpfe-dvb.o \
+ c8sectpfe-debugfs.o
+
+obj-$(CONFIG_DVB_C8SECTPFE) += c8sectpfe.o
+
+ccflags-y += -I $(srctree)/drivers/media/dvb-frontends/
+ccflags-y += -I $(srctree)/drivers/media/tuners/
diff --git a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-common.c b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-common.c
new file mode 100644
index 000000000..5df67da25
--- /dev/null
+++ b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-common.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * c8sectpfe-common.c - C8SECTPFE STi DVB driver
+ *
+ * Copyright (c) STMicroelectronics 2015
+ *
+ * Author: Peter Griffin <peter.griffin@linaro.org>
+ *
+ */
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dvb/dmx.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/wait.h>
+
+#include <media/dmxdev.h>
+#include <media/dvbdev.h>
+#include <media/dvb_demux.h>
+#include <media/dvb_frontend.h>
+#include <media/dvb_net.h>
+
+#include "c8sectpfe-common.h"
+#include "c8sectpfe-core.h"
+#include "c8sectpfe-dvb.h"
+
+static int register_dvb(struct stdemux *demux, struct dvb_adapter *adap,
+ void *start_feed, void *stop_feed,
+ struct c8sectpfei *fei)
+{
+ int result;
+
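+	/* Set up the software demux for this TS input, then its demux device and frontends */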
+ demux->dvb_demux.dmx.capabilities = DMX_TS_FILTERING |
+ DMX_SECTION_FILTERING |
+ DMX_MEMORY_BASED_FILTERING;
+
+ demux->dvb_demux.priv = demux;
+ demux->dvb_demux.filternum = C8SECTPFE_MAXCHANNEL;
+ demux->dvb_demux.feednum = C8SECTPFE_MAXCHANNEL;
+
+ demux->dvb_demux.start_feed = start_feed;
+ demux->dvb_demux.stop_feed = stop_feed;
+ demux->dvb_demux.write_to_decoder = NULL;
+
+ result = dvb_dmx_init(&demux->dvb_demux);
+ if (result < 0) {
+ dev_err(fei->dev, "dvb_dmx_init failed (errno = %d)\n",
+ result);
+ goto err_dmx;
+ }
+
+ demux->dmxdev.filternum = demux->dvb_demux.filternum;
+ demux->dmxdev.demux = &demux->dvb_demux.dmx;
+ demux->dmxdev.capabilities = 0;
+
+ result = dvb_dmxdev_init(&demux->dmxdev, adap);
+ if (result < 0) {
+ dev_err(fei->dev, "dvb_dmxdev_init failed (errno = %d)\n",
+ result);
+
+ goto err_dmxdev;
+ }
+
+ demux->hw_frontend.source = DMX_FRONTEND_0 + demux->tsin_index;
+
+ result = demux->dvb_demux.dmx.add_frontend(&demux->dvb_demux.dmx,
+ &demux->hw_frontend);
+ if (result < 0) {
+ dev_err(fei->dev, "add_frontend failed (errno = %d)\n", result);
+ goto err_fe_hw;
+ }
+
+ demux->mem_frontend.source = DMX_MEMORY_FE;
+ result = demux->dvb_demux.dmx.add_frontend(&demux->dvb_demux.dmx,
+ &demux->mem_frontend);
+ if (result < 0) {
+ dev_err(fei->dev, "add_frontend failed (%d)\n", result);
+ goto err_fe_mem;
+ }
+
+ result = demux->dvb_demux.dmx.connect_frontend(&demux->dvb_demux.dmx,
+ &demux->hw_frontend);
+ if (result < 0) {
+ dev_err(fei->dev, "connect_frontend (%d)\n", result);
+ goto err_fe_con;
+ }
+
+ return 0;
+
+err_fe_con:
+ demux->dvb_demux.dmx.remove_frontend(&demux->dvb_demux.dmx,
+ &demux->mem_frontend);
+err_fe_mem:
+ demux->dvb_demux.dmx.remove_frontend(&demux->dvb_demux.dmx,
+ &demux->hw_frontend);
+err_fe_hw:
+ dvb_dmxdev_release(&demux->dmxdev);
+err_dmxdev:
+ dvb_dmx_release(&demux->dvb_demux);
+err_dmx:
+ return result;
+
+}
+
+static void unregister_dvb(struct stdemux *demux)
+{
+
+ demux->dvb_demux.dmx.remove_frontend(&demux->dvb_demux.dmx,
+ &demux->mem_frontend);
+
+ demux->dvb_demux.dmx.remove_frontend(&demux->dvb_demux.dmx,
+ &demux->hw_frontend);
+
+ dvb_dmxdev_release(&demux->dmxdev);
+
+ dvb_dmx_release(&demux->dvb_demux);
+}
+
+static struct c8sectpfe *c8sectpfe_create(struct c8sectpfei *fei,
+ void *start_feed,
+ void *stop_feed)
+{
+ struct c8sectpfe *c8sectpfe;
+ int result;
+ int i, j;
+
+ short int ids[] = { -1 };
+
+ c8sectpfe = kzalloc(sizeof(struct c8sectpfe), GFP_KERNEL);
+ if (!c8sectpfe)
+ goto err1;
+
+ mutex_init(&c8sectpfe->lock);
+
+ c8sectpfe->device = fei->dev;
+
+ result = dvb_register_adapter(&c8sectpfe->adapter, "STi c8sectpfe",
+ THIS_MODULE, fei->dev, ids);
+ if (result < 0) {
+ dev_err(fei->dev, "dvb_register_adapter failed (errno = %d)\n",
+ result);
+ goto err2;
+ }
+
+ c8sectpfe->adapter.priv = fei;
+
+ for (i = 0; i < fei->tsin_count; i++) {
+
+ c8sectpfe->demux[i].tsin_index = i;
+ c8sectpfe->demux[i].c8sectpfei = fei;
+
+ result = register_dvb(&c8sectpfe->demux[i], &c8sectpfe->adapter,
+ start_feed, stop_feed, fei);
+ if (result < 0) {
+ dev_err(fei->dev,
+ "register_dvb feed=%d failed (errno = %d)\n",
+				i, result);
+
+			/* we take an all-or-nothing approach */
+ for (j = 0; j < i; j++)
+ unregister_dvb(&c8sectpfe->demux[j]);
+ goto err3;
+ }
+ }
+
+ c8sectpfe->num_feeds = fei->tsin_count;
+
+ return c8sectpfe;
+err3:
+ dvb_unregister_adapter(&c8sectpfe->adapter);
+err2:
+ kfree(c8sectpfe);
+err1:
+ return NULL;
+}
+
+static void c8sectpfe_delete(struct c8sectpfe *c8sectpfe)
+{
+ int i;
+
+ if (!c8sectpfe)
+ return;
+
+ for (i = 0; i < c8sectpfe->num_feeds; i++)
+ unregister_dvb(&c8sectpfe->demux[i]);
+
+ dvb_unregister_adapter(&c8sectpfe->adapter);
+
+ kfree(c8sectpfe);
+}
+
+void c8sectpfe_tuner_unregister_frontend(struct c8sectpfe *c8sectpfe,
+ struct c8sectpfei *fei)
+{
+ int n;
+ struct channel_info *tsin;
+
+ for (n = 0; n < fei->tsin_count; n++) {
+
+ tsin = fei->channel_data[n];
+
+ if (tsin) {
+ if (tsin->frontend) {
+ dvb_unregister_frontend(tsin->frontend);
+ dvb_frontend_detach(tsin->frontend);
+ }
+
+ i2c_put_adapter(tsin->i2c_adapter);
+
+ if (tsin->i2c_client) {
+ module_put(tsin->i2c_client->dev.driver->owner);
+ i2c_unregister_device(tsin->i2c_client);
+ }
+ }
+ }
+
+ c8sectpfe_delete(c8sectpfe);
+}
+
+int c8sectpfe_tuner_register_frontend(struct c8sectpfe **c8sectpfe,
+ struct c8sectpfei *fei,
+ void *start_feed,
+ void *stop_feed)
+{
+ struct channel_info *tsin;
+ struct dvb_frontend *frontend;
+ int n, res;
+
+ *c8sectpfe = c8sectpfe_create(fei, start_feed, stop_feed);
+ if (!*c8sectpfe)
+ return -ENOMEM;
+
+ for (n = 0; n < fei->tsin_count; n++) {
+ tsin = fei->channel_data[n];
+
+ res = c8sectpfe_frontend_attach(&frontend, *c8sectpfe, tsin, n);
+ if (res)
+ goto err;
+
+ res = dvb_register_frontend(&c8sectpfe[0]->adapter, frontend);
+ if (res < 0) {
+ dev_err(fei->dev, "dvb_register_frontend failed (%d)\n",
+ res);
+ goto err;
+ }
+
+ tsin->frontend = frontend;
+ }
+
+ return 0;
+
+err:
+ c8sectpfe_tuner_unregister_frontend(*c8sectpfe, fei);
+ return res;
+}
diff --git a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-common.h b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-common.h
new file mode 100644
index 000000000..f8d97841f
--- /dev/null
+++ b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-common.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * c8sectpfe-common.h - C8SECTPFE STi DVB driver
+ *
+ * Copyright (c) STMicroelectronics 2015
+ *
+ * Author: Peter Griffin <peter.griffin@linaro.org>
+ *
+ */
+#ifndef _C8SECTPFE_COMMON_H_
+#define _C8SECTPFE_COMMON_H_
+
+#include <linux/dvb/dmx.h>
+#include <linux/dvb/frontend.h>
+#include <linux/gpio.h>
+
+#include <media/dmxdev.h>
+#include <media/dvb_demux.h>
+#include <media/dvb_frontend.h>
+#include <media/dvb_net.h>
+
+/* Maximum number of channels */
+#define C8SECTPFE_MAXADAPTER (4)
+#define C8SECTPFE_MAXCHANNEL 64
+#define STPTI_MAXCHANNEL 64
+
+#define MAX_INPUTBLOCKS 7
+
+struct c8sectpfe;
+struct stdemux;
+
+struct stdemux {
+ struct dvb_demux dvb_demux;
+ struct dmxdev dmxdev;
+ struct dmx_frontend hw_frontend;
+ struct dmx_frontend mem_frontend;
+ int tsin_index;
+ int running_feed_count;
+ struct c8sectpfei *c8sectpfei;
+};
+
+struct c8sectpfe {
+ struct stdemux demux[MAX_INPUTBLOCKS];
+ struct mutex lock;
+ struct dvb_adapter adapter;
+ struct device *device;
+ int mapping;
+ int num_feeds;
+};
+
+/* Channel registration */
+int c8sectpfe_tuner_register_frontend(struct c8sectpfe **c8sectpfe,
+ struct c8sectpfei *fei,
+ void *start_feed,
+ void *stop_feed);
+
+void c8sectpfe_tuner_unregister_frontend(struct c8sectpfe *c8sectpfe,
+ struct c8sectpfei *fei);
+
+#endif
diff --git a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
new file mode 100644
index 000000000..1dbb89f0d
--- /dev/null
+++ b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
@@ -0,0 +1,1187 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * c8sectpfe-core.c - C8SECTPFE STi DVB driver
+ *
+ * Copyright (c) STMicroelectronics 2015
+ *
+ * Author: Peter Bennett <peter.bennett@st.com>
+ * Peter Griffin <peter.griffin@linaro.org>
+ *
+ */
+#include <linux/atomic.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dvb/dmx.h>
+#include <linux/dvb/frontend.h>
+#include <linux/errno.h>
+#include <linux/firmware.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/usb.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/wait.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "c8sectpfe-core.h"
+#include "c8sectpfe-common.h"
+#include "c8sectpfe-debugfs.h"
+#include <media/dmxdev.h>
+#include <media/dvb_demux.h>
+#include <media/dvb_frontend.h>
+#include <media/dvb_net.h>
+
+#define FIRMWARE_MEMDMA "pti_memdma_h407.elf"
+MODULE_FIRMWARE(FIRMWARE_MEMDMA);
+
+#define PID_TABLE_SIZE 1024
+#define POLL_MSECS 50
+
+static int load_c8sectpfe_fw(struct c8sectpfei *fei);
+
+#define TS_PKT_SIZE 188
+#define HEADER_SIZE (4)
+#define PACKET_SIZE (TS_PKT_SIZE+HEADER_SIZE)
+
+#define FEI_ALIGNMENT (32)
+/* hw requires a minimum of 8*PACKET_SIZE, padded to an 8-byte boundary */
+#define FEI_BUFFER_SIZE (8*PACKET_SIZE*340)
+
+#define FIFO_LEN 1024
+
+static void c8sectpfe_timer_interrupt(struct timer_list *t)
+{
+ struct c8sectpfei *fei = from_timer(fei, t, timer);
+ struct channel_info *channel;
+ int chan_num;
+
+ /* iterate through input block channels */
+ for (chan_num = 0; chan_num < fei->tsin_count; chan_num++) {
+ channel = fei->channel_data[chan_num];
+
+ /* is this descriptor initialised and TP enabled */
+ if (channel->irec && readl(channel->irec + DMA_PRDS_TPENABLE))
+ tasklet_schedule(&channel->tsklet);
+ }
+
+ fei->timer.expires = jiffies + msecs_to_jiffies(POLL_MSECS);
+ add_timer(&fei->timer);
+}
+
+static void channel_swdemux_tsklet(struct tasklet_struct *t)
+{
+ struct channel_info *channel = from_tasklet(channel, t, tsklet);
+ struct c8sectpfei *fei;
+ unsigned long wp, rp;
+ int pos, num_packets, n, size;
+ u8 *buf;
+
+ if (unlikely(!channel || !channel->irec))
+ return;
+
+ fei = channel->fei;
+
+ wp = readl(channel->irec + DMA_PRDS_BUSWP_TP(0));
+ rp = readl(channel->irec + DMA_PRDS_BUSRP_TP(0));
+
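+	/* wp/rp hold the memdma write/read bus addresses within the back buffer ring */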
+ pos = rp - channel->back_buffer_busaddr;
+
+ /* has it wrapped */
+ if (wp < rp)
+ wp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE;
+
+ size = wp - rp;
+ num_packets = size / PACKET_SIZE;
+
+ /* manage cache so data is visible to CPU */
+ dma_sync_single_for_cpu(fei->dev,
+ rp,
+ size,
+ DMA_FROM_DEVICE);
+
+ buf = channel->back_buffer_aligned;
+
+ dev_dbg(fei->dev,
+ "chan=%d channel=%p num_packets = %d, buf = %p, pos = 0x%x\n\trp=0x%lx, wp=0x%lx\n",
+ channel->tsin_id, channel, num_packets, buf, pos, rp, wp);
+
+ for (n = 0; n < num_packets; n++) {
+ dvb_dmx_swfilter_packets(
+ &fei->c8sectpfe[0]->
+ demux[channel->demux_mapping].dvb_demux,
+ &buf[pos], 1);
+
+ pos += PACKET_SIZE;
+ }
+
+ /* advance the read pointer */
+ if (wp == (channel->back_buffer_busaddr + FEI_BUFFER_SIZE))
+ writel(channel->back_buffer_busaddr, channel->irec +
+ DMA_PRDS_BUSRP_TP(0));
+ else
+ writel(wp, channel->irec + DMA_PRDS_BUSRP_TP(0));
+}
+
+static int c8sectpfe_start_feed(struct dvb_demux_feed *dvbdmxfeed)
+{
+ struct dvb_demux *demux = dvbdmxfeed->demux;
+ struct stdemux *stdemux = (struct stdemux *)demux->priv;
+ struct c8sectpfei *fei = stdemux->c8sectpfei;
+ struct channel_info *channel;
+ u32 tmp;
+ unsigned long *bitmap;
+ int ret;
+
+ switch (dvbdmxfeed->type) {
+ case DMX_TYPE_TS:
+ break;
+ case DMX_TYPE_SEC:
+ break;
+ default:
+ dev_err(fei->dev, "%s:%d Error bailing\n"
+ , __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ if (dvbdmxfeed->type == DMX_TYPE_TS) {
+ switch (dvbdmxfeed->pes_type) {
+ case DMX_PES_VIDEO:
+ case DMX_PES_AUDIO:
+ case DMX_PES_TELETEXT:
+ case DMX_PES_PCR:
+ case DMX_PES_OTHER:
+ break;
+ default:
+ dev_err(fei->dev, "%s:%d Error bailing\n"
+ , __func__, __LINE__);
+ return -EINVAL;
+ }
+ }
+
+ if (!atomic_read(&fei->fw_loaded)) {
+ ret = load_c8sectpfe_fw(fei);
+ if (ret)
+ return ret;
+ }
+
+ mutex_lock(&fei->lock);
+
+ channel = fei->channel_data[stdemux->tsin_index];
+
+ bitmap = channel->pid_buffer_aligned;
+
+	/*
+	 * PID 8192 (0x2000) is the DVB wildcard meaning "pass the whole
+	 * transport stream", so disable hardware PID filtering.
+	 */
+ if (dvbdmxfeed->pid == 8192) {
+ tmp = readl(fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
+ tmp &= ~C8SECTPFE_PID_ENABLE;
+ writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
+
+ } else {
+ bitmap_set(bitmap, dvbdmxfeed->pid, 1);
+ }
+
+ /* manage cache so PID bitmap is visible to HW */
+ dma_sync_single_for_device(fei->dev,
+ channel->pid_buffer_busaddr,
+ PID_TABLE_SIZE,
+ DMA_TO_DEVICE);
+
+ channel->active = 1;
+
+ if (fei->global_feed_count == 0) {
+		fei->timer.expires = jiffies + msecs_to_jiffies(POLL_MSECS);
+
+ add_timer(&fei->timer);
+ }
+
+ if (stdemux->running_feed_count == 0) {
+
+ dev_dbg(fei->dev, "Starting channel=%p\n", channel);
+
+ tasklet_setup(&channel->tsklet, channel_swdemux_tsklet);
+
+ /* Reset the internal inputblock sram pointers */
+ writel(channel->fifo,
+ fei->io + C8SECTPFE_IB_BUFF_STRT(channel->tsin_id));
+ writel(channel->fifo + FIFO_LEN - 1,
+ fei->io + C8SECTPFE_IB_BUFF_END(channel->tsin_id));
+
+ writel(channel->fifo,
+ fei->io + C8SECTPFE_IB_READ_PNT(channel->tsin_id));
+ writel(channel->fifo,
+ fei->io + C8SECTPFE_IB_WRT_PNT(channel->tsin_id));
+
+
+ /* reset read / write memdma ptrs for this channel */
+ writel(channel->back_buffer_busaddr, channel->irec +
+ DMA_PRDS_BUSBASE_TP(0));
+
+ tmp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
+ writel(tmp, channel->irec + DMA_PRDS_BUSTOP_TP(0));
+
+ writel(channel->back_buffer_busaddr, channel->irec +
+ DMA_PRDS_BUSWP_TP(0));
+
+ /* Issue a reset and enable InputBlock */
+ writel(C8SECTPFE_SYS_ENABLE | C8SECTPFE_SYS_RESET
+ , fei->io + C8SECTPFE_IB_SYS(channel->tsin_id));
+
+ /* and enable the tp */
+ writel(0x1, channel->irec + DMA_PRDS_TPENABLE);
+
+ dev_dbg(fei->dev, "%s:%d Starting DMA feed on stdemux=%p\n"
+ , __func__, __LINE__, stdemux);
+ }
+
+ stdemux->running_feed_count++;
+ fei->global_feed_count++;
+
+ mutex_unlock(&fei->lock);
+
+ return 0;
+}
+
+static int c8sectpfe_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
+{
+
+ struct dvb_demux *demux = dvbdmxfeed->demux;
+ struct stdemux *stdemux = (struct stdemux *)demux->priv;
+ struct c8sectpfei *fei = stdemux->c8sectpfei;
+ struct channel_info *channel;
+ int idlereq;
+ u32 tmp;
+ int ret;
+ unsigned long *bitmap;
+
+ if (!atomic_read(&fei->fw_loaded)) {
+ ret = load_c8sectpfe_fw(fei);
+ if (ret)
+ return ret;
+ }
+
+ mutex_lock(&fei->lock);
+
+ channel = fei->channel_data[stdemux->tsin_index];
+
+ bitmap = channel->pid_buffer_aligned;
+
+ if (dvbdmxfeed->pid == 8192) {
+ tmp = readl(fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
+ tmp |= C8SECTPFE_PID_ENABLE;
+ writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
+ } else {
+ bitmap_clear(bitmap, dvbdmxfeed->pid, 1);
+ }
+
+ /* manage cache so data is visible to HW */
+ dma_sync_single_for_device(fei->dev,
+ channel->pid_buffer_busaddr,
+ PID_TABLE_SIZE,
+ DMA_TO_DEVICE);
+
+ if (--stdemux->running_feed_count == 0) {
+
+ channel = fei->channel_data[stdemux->tsin_index];
+
+ /* TP re-configuration on page 168 of functional spec */
+
+ /* disable IB (prevents more TS data going to memdma) */
+ writel(0, fei->io + C8SECTPFE_IB_SYS(channel->tsin_id));
+
+ /* disable this channels descriptor */
+ writel(0, channel->irec + DMA_PRDS_TPENABLE);
+
+ tasklet_disable(&channel->tsklet);
+
+ /* now request memdma channel goes idle */
+ idlereq = (1 << channel->tsin_id) | IDLEREQ;
+ writel(idlereq, fei->io + DMA_IDLE_REQ);
+
+ /* wait for idle irq handler to signal completion */
+ ret = wait_for_completion_timeout(&channel->idle_completion,
+ msecs_to_jiffies(100));
+
+ if (ret == 0)
+ dev_warn(fei->dev,
+ "Timeout waiting for idle irq on tsin%d\n",
+ channel->tsin_id);
+
+ reinit_completion(&channel->idle_completion);
+
+ /* reset read / write ptrs for this channel */
+
+ writel(channel->back_buffer_busaddr,
+ channel->irec + DMA_PRDS_BUSBASE_TP(0));
+
+ tmp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
+ writel(tmp, channel->irec + DMA_PRDS_BUSTOP_TP(0));
+
+ writel(channel->back_buffer_busaddr,
+ channel->irec + DMA_PRDS_BUSWP_TP(0));
+
+ dev_dbg(fei->dev,
+ "%s:%d stopping DMA feed on stdemux=%p channel=%d\n",
+ __func__, __LINE__, stdemux, channel->tsin_id);
+
+ /* turn off all PIDS in the bitmap */
+ memset(channel->pid_buffer_aligned, 0, PID_TABLE_SIZE);
+
+ /* manage cache so data is visible to HW */
+ dma_sync_single_for_device(fei->dev,
+ channel->pid_buffer_busaddr,
+ PID_TABLE_SIZE,
+ DMA_TO_DEVICE);
+
+ channel->active = 0;
+ }
+
+ if (--fei->global_feed_count == 0) {
+ dev_dbg(fei->dev, "%s:%d global_feed_count=%d\n"
+ , __func__, __LINE__, fei->global_feed_count);
+
+ del_timer(&fei->timer);
+ }
+
+ mutex_unlock(&fei->lock);
+
+ return 0;
+}
+
+static struct channel_info *find_channel(struct c8sectpfei *fei, int tsin_num)
+{
+ int i;
+
+ for (i = 0; i < C8SECTPFE_MAX_TSIN_CHAN; i++) {
+ if (!fei->channel_data[i])
+ continue;
+
+ if (fei->channel_data[i]->tsin_id == tsin_num)
+ return fei->channel_data[i];
+ }
+
+ return NULL;
+}
+
+static void c8sectpfe_getconfig(struct c8sectpfei *fei)
+{
+ struct c8sectpfe_hw *hw = &fei->hw_stats;
+
+ hw->num_ib = readl(fei->io + SYS_CFG_NUM_IB);
+ hw->num_mib = readl(fei->io + SYS_CFG_NUM_MIB);
+ hw->num_swts = readl(fei->io + SYS_CFG_NUM_SWTS);
+ hw->num_tsout = readl(fei->io + SYS_CFG_NUM_TSOUT);
+ hw->num_ccsc = readl(fei->io + SYS_CFG_NUM_CCSC);
+ hw->num_ram = readl(fei->io + SYS_CFG_NUM_RAM);
+ hw->num_tp = readl(fei->io + SYS_CFG_NUM_TP);
+
+ dev_info(fei->dev, "C8SECTPFE hw supports the following:\n");
+ dev_info(fei->dev, "Input Blocks: %d\n", hw->num_ib);
+ dev_info(fei->dev, "Merged Input Blocks: %d\n", hw->num_mib);
+ dev_info(fei->dev, "Software Transport Stream Inputs: %d\n"
+ , hw->num_swts);
+ dev_info(fei->dev, "Transport Stream Output: %d\n", hw->num_tsout);
+ dev_info(fei->dev, "Cable Card Converter: %d\n", hw->num_ccsc);
+ dev_info(fei->dev, "RAMs supported by C8SECTPFE: %d\n", hw->num_ram);
+ dev_info(fei->dev, "Tango TPs supported by C8SECTPFE: %d\n"
+ , hw->num_tp);
+}
+
+static irqreturn_t c8sectpfe_idle_irq_handler(int irq, void *priv)
+{
+ struct c8sectpfei *fei = priv;
+ struct channel_info *chan;
+ int bit;
+ unsigned long tmp = readl(fei->io + DMA_IDLE_REQ);
+
+	/*
+	 * Page 168 of the functional spec: clear the idle request by
+	 * writing 0 to the C8SECTPFE_DMA_IDLE_REQ register.
+	 */
+
+ /* signal idle completion */
+ for_each_set_bit(bit, &tmp, fei->hw_stats.num_ib) {
+
+ chan = find_channel(fei, bit);
+
+ if (chan)
+ complete(&chan->idle_completion);
+ }
+
+ writel(0, fei->io + DMA_IDLE_REQ);
+
+ return IRQ_HANDLED;
+}
+
+
+static void free_input_block(struct c8sectpfei *fei, struct channel_info *tsin)
+{
+ if (!fei || !tsin)
+ return;
+
+ if (tsin->back_buffer_busaddr)
+ if (!dma_mapping_error(fei->dev, tsin->back_buffer_busaddr))
+ dma_unmap_single(fei->dev, tsin->back_buffer_busaddr,
+ FEI_BUFFER_SIZE, DMA_BIDIRECTIONAL);
+
+ kfree(tsin->back_buffer_start);
+
+ if (tsin->pid_buffer_busaddr)
+ if (!dma_mapping_error(fei->dev, tsin->pid_buffer_busaddr))
+ dma_unmap_single(fei->dev, tsin->pid_buffer_busaddr,
+ PID_TABLE_SIZE, DMA_BIDIRECTIONAL);
+
+ kfree(tsin->pid_buffer_start);
+}
+
+#define MAX_NAME 20
+
+static int configure_memdma_and_inputblock(struct c8sectpfei *fei,
+ struct channel_info *tsin)
+{
+ int ret;
+ u32 tmp;
+ char tsin_pin_name[MAX_NAME];
+
+ if (!fei || !tsin)
+ return -EINVAL;
+
+ dev_dbg(fei->dev, "%s:%d Configuring channel=%p tsin=%d\n"
+ , __func__, __LINE__, tsin, tsin->tsin_id);
+
+ init_completion(&tsin->idle_completion);
+
+ tsin->back_buffer_start = kzalloc(FEI_BUFFER_SIZE + FEI_ALIGNMENT, GFP_KERNEL);
+ if (!tsin->back_buffer_start) {
+ ret = -ENOMEM;
+ goto err_unmap;
+ }
+
+ /* Ensure backbuffer is 32byte aligned */
+ tsin->back_buffer_aligned = tsin->back_buffer_start + FEI_ALIGNMENT;
+
+ tsin->back_buffer_aligned = PTR_ALIGN(tsin->back_buffer_aligned, FEI_ALIGNMENT);
+
+ tsin->back_buffer_busaddr = dma_map_single(fei->dev,
+ tsin->back_buffer_aligned,
+ FEI_BUFFER_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(fei->dev, tsin->back_buffer_busaddr)) {
+ dev_err(fei->dev, "failed to map back_buffer\n");
+ ret = -EFAULT;
+ goto err_unmap;
+ }
+
+ /*
+ * The pid buffer can be configured (in hw) for byte or bit
+ * per pid. By powers of deduction we conclude stih407 family
+ * is configured (at SoC design stage) for bit per pid.
+ */
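+	/*
+	 * Over-allocate so that PTR_ALIGN() below can place the table on a
+	 * PID_TABLE_SIZE (1024-byte) boundary within the allocation.
+	 */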
+ tsin->pid_buffer_start = kzalloc(PID_TABLE_SIZE + PID_TABLE_SIZE, GFP_KERNEL);
+ if (!tsin->pid_buffer_start) {
+ ret = -ENOMEM;
+ goto err_unmap;
+ }
+
+ /*
+ * PID buffer needs to be aligned to size of the pid table
+ * which at bit per pid is 1024 bytes (8192 pids / 8).
+ * PIDF_BASE register enforces this alignment when writing
+ * the register.
+ */
+
+ tsin->pid_buffer_aligned = tsin->pid_buffer_start + PID_TABLE_SIZE;
+
+ tsin->pid_buffer_aligned = PTR_ALIGN(tsin->pid_buffer_aligned, PID_TABLE_SIZE);
+
+ tsin->pid_buffer_busaddr = dma_map_single(fei->dev,
+ tsin->pid_buffer_aligned,
+ PID_TABLE_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(fei->dev, tsin->pid_buffer_busaddr)) {
+ dev_err(fei->dev, "failed to map pid_bitmap\n");
+ ret = -EFAULT;
+ goto err_unmap;
+ }
+
+ /* manage cache so pid bitmap is visible to HW */
+ dma_sync_single_for_device(fei->dev,
+ tsin->pid_buffer_busaddr,
+ PID_TABLE_SIZE,
+ DMA_TO_DEVICE);
+
+ snprintf(tsin_pin_name, MAX_NAME, "tsin%d-%s", tsin->tsin_id,
+ (tsin->serial_not_parallel ? "serial" : "parallel"));
+
+ tsin->pstate = pinctrl_lookup_state(fei->pinctrl, tsin_pin_name);
+ if (IS_ERR(tsin->pstate)) {
+ dev_err(fei->dev, "%s: pinctrl_lookup_state couldn't find %s state\n"
+ , __func__, tsin_pin_name);
+ ret = PTR_ERR(tsin->pstate);
+ goto err_unmap;
+ }
+
+ ret = pinctrl_select_state(fei->pinctrl, tsin->pstate);
+
+ if (ret) {
+ dev_err(fei->dev, "%s: pinctrl_select_state failed\n"
+ , __func__);
+ goto err_unmap;
+ }
+
+ /* Enable this input block */
+ tmp = readl(fei->io + SYS_INPUT_CLKEN);
+ tmp |= BIT(tsin->tsin_id);
+ writel(tmp, fei->io + SYS_INPUT_CLKEN);
+
+ if (tsin->serial_not_parallel)
+ tmp |= C8SECTPFE_SERIAL_NOT_PARALLEL;
+
+ if (tsin->invert_ts_clk)
+ tmp |= C8SECTPFE_INVERT_TSCLK;
+
+ if (tsin->async_not_sync)
+ tmp |= C8SECTPFE_ASYNC_NOT_SYNC;
+
+ tmp |= C8SECTPFE_ALIGN_BYTE_SOP | C8SECTPFE_BYTE_ENDIANNESS_MSB;
+
+ writel(tmp, fei->io + C8SECTPFE_IB_IP_FMT_CFG(tsin->tsin_id));
+
+ writel(C8SECTPFE_SYNC(0x9) |
+ C8SECTPFE_DROP(0x9) |
+ C8SECTPFE_TOKEN(0x47),
+ fei->io + C8SECTPFE_IB_SYNCLCKDRP_CFG(tsin->tsin_id));
+
+ writel(TS_PKT_SIZE, fei->io + C8SECTPFE_IB_PKT_LEN(tsin->tsin_id));
+
+ /* Place the FIFO's at the end of the irec descriptors */
+
+ tsin->fifo = (tsin->tsin_id * FIFO_LEN);
+
+ writel(tsin->fifo, fei->io + C8SECTPFE_IB_BUFF_STRT(tsin->tsin_id));
+ writel(tsin->fifo + FIFO_LEN - 1,
+ fei->io + C8SECTPFE_IB_BUFF_END(tsin->tsin_id));
+
+ writel(tsin->fifo, fei->io + C8SECTPFE_IB_READ_PNT(tsin->tsin_id));
+ writel(tsin->fifo, fei->io + C8SECTPFE_IB_WRT_PNT(tsin->tsin_id));
+
+ writel(tsin->pid_buffer_busaddr,
+ fei->io + PIDF_BASE(tsin->tsin_id));
+
+ dev_dbg(fei->dev, "chan=%d PIDF_BASE=0x%x pid_bus_addr=%pad\n",
+ tsin->tsin_id, readl(fei->io + PIDF_BASE(tsin->tsin_id)),
+ &tsin->pid_buffer_busaddr);
+
+ /* Configure and enable HW PID filtering */
+
+ /*
+ * The PID value is created by assembling the first 8 bytes of
+ * the TS packet into a 64-bit word in big-endian format. A
+ * slice of that 64-bit word is taken from
+ * (PID_OFFSET+PID_NUM_BITS-1) to PID_OFFSET.
+ */
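+	/*
+	 * For a standard MPEG-TS header, byte 0 (sync) sits at bits 63..56
+	 * of that big-endian word, so the 13-bit PID (low 5 bits of byte 1
+	 * plus byte 2) occupies bits 52..40, hence PID_OFFSET(40) and
+	 * PID_NUMBITS(13) below.
+	 */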
+ tmp = (C8SECTPFE_PID_ENABLE | C8SECTPFE_PID_NUMBITS(13)
+ | C8SECTPFE_PID_OFFSET(40));
+
+ writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(tsin->tsin_id));
+
+ dev_dbg(fei->dev, "chan=%d setting wp: %d, rp: %d, buf: %d-%d\n",
+ tsin->tsin_id,
+ readl(fei->io + C8SECTPFE_IB_WRT_PNT(tsin->tsin_id)),
+ readl(fei->io + C8SECTPFE_IB_READ_PNT(tsin->tsin_id)),
+ readl(fei->io + C8SECTPFE_IB_BUFF_STRT(tsin->tsin_id)),
+ readl(fei->io + C8SECTPFE_IB_BUFF_END(tsin->tsin_id)));
+
+	/* Get base address of the pointer record block from DMEM */
+ tsin->irec = fei->io + DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET +
+ readl(fei->io + DMA_PTRREC_BASE);
+
+ /* fill out pointer record data structure */
+
+ /* advance pointer record block to our channel */
+ tsin->irec += (tsin->tsin_id * DMA_PRDS_SIZE);
+
+ writel(tsin->fifo, tsin->irec + DMA_PRDS_MEMBASE);
+
+ writel(tsin->fifo + FIFO_LEN - 1, tsin->irec + DMA_PRDS_MEMTOP);
+
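+	/*
+	 * (188 + 7) & ~7 rounds the 188-byte TS packet up to 192, the next
+	 * multiple of 8, as required for DMA_PRDS_PKTSIZE.
+	 */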
+ writel((188 + 7)&~7, tsin->irec + DMA_PRDS_PKTSIZE);
+
+ writel(0x1, tsin->irec + DMA_PRDS_TPENABLE);
+
+ /* read/write pointers with physical bus address */
+
+ writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSBASE_TP(0));
+
+ tmp = tsin->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
+ writel(tmp, tsin->irec + DMA_PRDS_BUSTOP_TP(0));
+
+ writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSWP_TP(0));
+ writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSRP_TP(0));
+
+ /* initialize tasklet */
+ tasklet_setup(&tsin->tsklet, channel_swdemux_tsklet);
+
+ return 0;
+
+err_unmap:
+ free_input_block(fei, tsin);
+ return ret;
+}
+
+static irqreturn_t c8sectpfe_error_irq_handler(int irq, void *priv)
+{
+ struct c8sectpfei *fei = priv;
+
+ dev_err(fei->dev, "%s: error handling not yet implemented\n"
+ , __func__);
+
+ /*
+ * TODO FIXME we should detect some error conditions here
+ * and ideally do something about them!
+ */
+
+ return IRQ_HANDLED;
+}
+
+static int c8sectpfe_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *child, *np = dev->of_node;
+ struct c8sectpfei *fei;
+ struct resource *res;
+ int ret, index = 0;
+ struct channel_info *tsin;
+
+ /* Allocate the c8sectpfei structure */
+ fei = devm_kzalloc(dev, sizeof(struct c8sectpfei), GFP_KERNEL);
+ if (!fei)
+ return -ENOMEM;
+
+ fei->dev = dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "c8sectpfe");
+ fei->io = devm_ioremap_resource(dev, res);
+ if (IS_ERR(fei->io))
+ return PTR_ERR(fei->io);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "c8sectpfe-ram");
+ fei->sram = devm_ioremap_resource(dev, res);
+ if (IS_ERR(fei->sram))
+ return PTR_ERR(fei->sram);
+
+ fei->sram_size = resource_size(res);
+
+ fei->idle_irq = platform_get_irq_byname(pdev, "c8sectpfe-idle-irq");
+ if (fei->idle_irq < 0)
+ return fei->idle_irq;
+
+ fei->error_irq = platform_get_irq_byname(pdev, "c8sectpfe-error-irq");
+ if (fei->error_irq < 0)
+ return fei->error_irq;
+
+ platform_set_drvdata(pdev, fei);
+
+ fei->c8sectpfeclk = devm_clk_get(dev, "c8sectpfe");
+ if (IS_ERR(fei->c8sectpfeclk)) {
+ dev_err(dev, "c8sectpfe clk not found\n");
+ return PTR_ERR(fei->c8sectpfeclk);
+ }
+
+ ret = clk_prepare_enable(fei->c8sectpfeclk);
+ if (ret) {
+ dev_err(dev, "Failed to enable c8sectpfe clock\n");
+ return ret;
+ }
+
+ /* to save power disable all IP's (on by default) */
+ writel(0, fei->io + SYS_INPUT_CLKEN);
+
+ /* Enable memdma clock */
+ writel(MEMDMAENABLE, fei->io + SYS_OTHER_CLKEN);
+
+ /* clear internal sram */
+ memset_io(fei->sram, 0x0, fei->sram_size);
+
+ c8sectpfe_getconfig(fei);
+
+ ret = devm_request_irq(dev, fei->idle_irq, c8sectpfe_idle_irq_handler,
+ 0, "c8sectpfe-idle-irq", fei);
+ if (ret) {
+ dev_err(dev, "Can't register c8sectpfe-idle-irq IRQ.\n");
+ goto err_clk_disable;
+ }
+
+ ret = devm_request_irq(dev, fei->error_irq,
+ c8sectpfe_error_irq_handler, 0,
+ "c8sectpfe-error-irq", fei);
+ if (ret) {
+ dev_err(dev, "Can't register c8sectpfe-error-irq IRQ.\n");
+ goto err_clk_disable;
+ }
+
+ fei->tsin_count = of_get_child_count(np);
+
+ if (fei->tsin_count > C8SECTPFE_MAX_TSIN_CHAN ||
+ fei->tsin_count > fei->hw_stats.num_ib) {
+
+ dev_err(dev, "More tsin declared than exist on SoC!\n");
+ ret = -EINVAL;
+ goto err_clk_disable;
+ }
+
+ fei->pinctrl = devm_pinctrl_get(dev);
+
+ if (IS_ERR(fei->pinctrl)) {
+ dev_err(dev, "Error getting tsin pins\n");
+ ret = PTR_ERR(fei->pinctrl);
+ goto err_clk_disable;
+ }
+
+ for_each_child_of_node(np, child) {
+ struct device_node *i2c_bus;
+
+ fei->channel_data[index] = devm_kzalloc(dev,
+ sizeof(struct channel_info),
+ GFP_KERNEL);
+
+ if (!fei->channel_data[index]) {
+ ret = -ENOMEM;
+ goto err_node_put;
+ }
+
+ tsin = fei->channel_data[index];
+
+ tsin->fei = fei;
+
+ ret = of_property_read_u32(child, "tsin-num", &tsin->tsin_id);
+ if (ret) {
+ dev_err(&pdev->dev, "No tsin_num found\n");
+ goto err_node_put;
+ }
+
+ /* sanity check value */
+ if (tsin->tsin_id > fei->hw_stats.num_ib) {
+ dev_err(&pdev->dev,
+ "tsin-num %d specified greater than number\n\tof input block hw in SoC! (%d)",
+ tsin->tsin_id, fei->hw_stats.num_ib);
+ ret = -EINVAL;
+ goto err_node_put;
+ }
+
+ tsin->invert_ts_clk = of_property_read_bool(child,
+ "invert-ts-clk");
+
+ tsin->serial_not_parallel = of_property_read_bool(child,
+ "serial-not-parallel");
+
+ tsin->async_not_sync = of_property_read_bool(child,
+ "async-not-sync");
+
+ ret = of_property_read_u32(child, "dvb-card",
+ &tsin->dvb_card);
+ if (ret) {
+ dev_err(&pdev->dev, "No dvb-card found\n");
+ goto err_node_put;
+ }
+
+ i2c_bus = of_parse_phandle(child, "i2c-bus", 0);
+ if (!i2c_bus) {
+ dev_err(&pdev->dev, "No i2c-bus found\n");
+ ret = -ENODEV;
+ goto err_node_put;
+ }
+ tsin->i2c_adapter =
+ of_find_i2c_adapter_by_node(i2c_bus);
+ if (!tsin->i2c_adapter) {
+ dev_err(&pdev->dev, "No i2c adapter found\n");
+ of_node_put(i2c_bus);
+ ret = -ENODEV;
+ goto err_node_put;
+ }
+ of_node_put(i2c_bus);
+
+ tsin->rst_gpio = of_get_named_gpio(child, "reset-gpios", 0);
+
+ ret = gpio_is_valid(tsin->rst_gpio);
+ if (!ret) {
+ dev_err(dev,
+ "reset gpio for tsin%d not valid (gpio=%d)\n",
+ tsin->tsin_id, tsin->rst_gpio);
+ ret = -EINVAL;
+ goto err_node_put;
+ }
+
+ ret = devm_gpio_request_one(dev, tsin->rst_gpio,
+ GPIOF_OUT_INIT_LOW, "NIM reset");
+ if (ret && ret != -EBUSY) {
+ dev_err(dev, "Can't request tsin%d reset gpio\n"
+ , fei->channel_data[index]->tsin_id);
+ goto err_node_put;
+ }
+
+ if (!ret) {
+ /* toggle reset lines */
+ gpio_direction_output(tsin->rst_gpio, 0);
+ usleep_range(3500, 5000);
+ gpio_direction_output(tsin->rst_gpio, 1);
+ usleep_range(3000, 5000);
+ }
+
+ tsin->demux_mapping = index;
+
+ dev_dbg(fei->dev,
+ "channel=%p n=%d tsin_num=%d, invert-ts-clk=%d\n\tserial-not-parallel=%d pkt-clk-valid=%d dvb-card=%d\n",
+ fei->channel_data[index], index,
+ tsin->tsin_id, tsin->invert_ts_clk,
+ tsin->serial_not_parallel, tsin->async_not_sync,
+ tsin->dvb_card);
+
+ index++;
+ }
+
+ /* Setup timer interrupt */
+ timer_setup(&fei->timer, c8sectpfe_timer_interrupt, 0);
+
+ mutex_init(&fei->lock);
+
+ /* Get the configuration information about the tuners */
+ ret = c8sectpfe_tuner_register_frontend(&fei->c8sectpfe[0],
+ (void *)fei,
+ c8sectpfe_start_feed,
+ c8sectpfe_stop_feed);
+ if (ret) {
+ dev_err(dev, "c8sectpfe_tuner_register_frontend failed (%d)\n",
+ ret);
+ goto err_clk_disable;
+ }
+
+ c8sectpfe_debugfs_init(fei);
+
+ return 0;
+
+err_node_put:
+ of_node_put(child);
+err_clk_disable:
+ clk_disable_unprepare(fei->c8sectpfeclk);
+ return ret;
+}
+
+static int c8sectpfe_remove(struct platform_device *pdev)
+{
+ struct c8sectpfei *fei = platform_get_drvdata(pdev);
+ struct channel_info *channel;
+ int i;
+
+ wait_for_completion(&fei->fw_ack);
+
+ c8sectpfe_tuner_unregister_frontend(fei->c8sectpfe[0], fei);
+
+ /*
+ * Now loop through and un-configure each of the InputBlock resources
+ */
+ for (i = 0; i < fei->tsin_count; i++) {
+ channel = fei->channel_data[i];
+ free_input_block(fei, channel);
+ }
+
+ c8sectpfe_debugfs_exit(fei);
+
+ dev_info(fei->dev, "Stopping memdma SLIM core\n");
+ if (readl(fei->io + DMA_CPU_RUN))
+ writel(0x0, fei->io + DMA_CPU_RUN);
+
+ /* unclock all internal IP's */
+ if (readl(fei->io + SYS_INPUT_CLKEN))
+ writel(0, fei->io + SYS_INPUT_CLKEN);
+
+ if (readl(fei->io + SYS_OTHER_CLKEN))
+ writel(0, fei->io + SYS_OTHER_CLKEN);
+
+ clk_disable_unprepare(fei->c8sectpfeclk);
+
+ return 0;
+}
+
+
+static int configure_channels(struct c8sectpfei *fei)
+{
+ int index = 0, ret;
+ struct device_node *child, *np = fei->dev->of_node;
+
+ /* iterate round each tsin and configure memdma descriptor and IB hw */
+ for_each_child_of_node(np, child) {
+ ret = configure_memdma_and_inputblock(fei,
+ fei->channel_data[index]);
+ if (ret) {
+ dev_err(fei->dev,
+ "configure_memdma_and_inputblock failed\n");
+ of_node_put(child);
+ goto err_unmap;
+ }
+ index++;
+ }
+
+ return 0;
+
+err_unmap:
+ while (--index >= 0)
+ free_input_block(fei, fei->channel_data[index]);
+
+ return ret;
+}
+
+static int
+c8sectpfe_elf_sanity_check(struct c8sectpfei *fei, const struct firmware *fw)
+{
+ struct elf32_hdr *ehdr;
+ char class;
+
+ if (!fw) {
+ dev_err(fei->dev, "failed to load %s\n", FIRMWARE_MEMDMA);
+ return -EINVAL;
+ }
+
+ if (fw->size < sizeof(struct elf32_hdr)) {
+ dev_err(fei->dev, "Image is too small\n");
+ return -EINVAL;
+ }
+
+ ehdr = (struct elf32_hdr *)fw->data;
+
+ /* We only support ELF32 at this point */
+ class = ehdr->e_ident[EI_CLASS];
+ if (class != ELFCLASS32) {
+ dev_err(fei->dev, "Unsupported class: %d\n", class);
+ return -EINVAL;
+ }
+
+ if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) {
+ dev_err(fei->dev, "Unsupported firmware endianness\n");
+ return -EINVAL;
+ }
+
+ if (fw->size < ehdr->e_shoff + sizeof(struct elf32_shdr)) {
+ dev_err(fei->dev, "Image is too small\n");
+ return -EINVAL;
+ }
+
+ if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
+ dev_err(fei->dev, "Image is corrupted (bad magic)\n");
+ return -EINVAL;
+ }
+
+ /* Check ELF magic */
+ ehdr = (Elf32_Ehdr *)fw->data;
+ if (ehdr->e_ident[EI_MAG0] != ELFMAG0 ||
+ ehdr->e_ident[EI_MAG1] != ELFMAG1 ||
+ ehdr->e_ident[EI_MAG2] != ELFMAG2 ||
+ ehdr->e_ident[EI_MAG3] != ELFMAG3) {
+ dev_err(fei->dev, "Invalid ELF magic\n");
+ return -EINVAL;
+ }
+
+ if (ehdr->e_type != ET_EXEC) {
+ dev_err(fei->dev, "Unsupported ELF header type\n");
+ return -EINVAL;
+ }
+
+ if (ehdr->e_phoff > fw->size) {
+ dev_err(fei->dev, "Firmware size is too small\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+static void load_imem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
+ const struct firmware *fw, u8 __iomem *dest,
+ int seg_num)
+{
+ const u8 *imem_src = fw->data + phdr->p_offset;
+ int i;
+
+ /*
+ * For IMEM segments, the segment contains 24-bit
+ * instructions which must be padded to 32-bit
+ * instructions before being written. The written
+ * segment is padded with NOP instructions.
+ */
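+	/*
+	 * Every 3 source bytes therefore expand to 4 destination bytes,
+	 * which is why the debug print below reports a destination size of
+	 * p_memsz + p_memsz / 3.
+	 */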
+
+ dev_dbg(fei->dev,
+ "Loading IMEM segment %d 0x%08x\n\t (0x%x bytes) -> 0x%p (0x%x bytes)\n",
+ seg_num, phdr->p_paddr, phdr->p_filesz, dest,
+ phdr->p_memsz + phdr->p_memsz / 3);
+
+ for (i = 0; i < phdr->p_filesz; i++) {
+
+ writeb(readb((void __iomem *)imem_src), (void __iomem *)dest);
+
+ /* Every 3 bytes, add an additional
+ * padding zero in destination */
+ if (i % 3 == 2) {
+ dest++;
+ writeb(0x00, (void __iomem *)dest);
+ }
+
+ dest++;
+ imem_src++;
+ }
+}
+
+static void load_dmem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
+ const struct firmware *fw, u8 __iomem *dst, int seg_num)
+{
+ /*
+ * For DMEM segments copy the segment data from the ELF
+ * file and pad segment with zeroes
+ */
+
+ dev_dbg(fei->dev,
+ "Loading DMEM segment %d 0x%08x\n\t(0x%x bytes) -> 0x%p (0x%x bytes)\n",
+ seg_num, phdr->p_paddr, phdr->p_filesz,
+ dst, phdr->p_memsz);
+
+ memcpy((void __force *)dst, (void *)fw->data + phdr->p_offset,
+ phdr->p_filesz);
+
+ memset((void __force *)dst + phdr->p_filesz, 0,
+ phdr->p_memsz - phdr->p_filesz);
+}
+
+static int load_slim_core_fw(const struct firmware *fw, struct c8sectpfei *fei)
+{
+ Elf32_Ehdr *ehdr;
+ Elf32_Phdr *phdr;
+ u8 __iomem *dst;
+ int err = 0, i;
+
+ if (!fw || !fei)
+ return -EINVAL;
+
+ ehdr = (Elf32_Ehdr *)fw->data;
+ phdr = (Elf32_Phdr *)(fw->data + ehdr->e_phoff);
+
+ /* go through the available ELF segments */
+ for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
+
+ /* Only consider LOAD segments */
+ if (phdr->p_type != PT_LOAD)
+ continue;
+
+ /*
+ * Check segment is contained within the fw->data buffer
+ */
+ if (phdr->p_offset + phdr->p_filesz > fw->size) {
+ dev_err(fei->dev,
+ "Segment %d is outside of firmware file\n", i);
+ err = -EINVAL;
+ break;
+ }
+
+		/*
+		 * Segments with the executable flag set are loaded into the
+		 * MEMDMA IMEM; otherwise the segment is loaded into DMEM.
+		 */
+
+ if (phdr->p_flags & PF_X) {
+ dst = (u8 __iomem *) fei->io + DMA_MEMDMA_IMEM;
+ /*
+ * The Slim ELF file uses 32-bit word addressing for
+ * load offsets.
+ */
+ dst += (phdr->p_paddr & 0xFFFFF) * sizeof(unsigned int);
+ load_imem_segment(fei, phdr, fw, dst, i);
+ } else {
+ dst = (u8 __iomem *) fei->io + DMA_MEMDMA_DMEM;
+ /*
+ * The Slim ELF file uses 32-bit word addressing for
+ * load offsets.
+ */
+ dst += (phdr->p_paddr & 0xFFFFF) * sizeof(unsigned int);
+ load_dmem_segment(fei, phdr, fw, dst, i);
+ }
+ }
+
+ release_firmware(fw);
+ return err;
+}
+
+static int load_c8sectpfe_fw(struct c8sectpfei *fei)
+{
+ const struct firmware *fw;
+ int err;
+
+ dev_info(fei->dev, "Loading firmware: %s\n", FIRMWARE_MEMDMA);
+
+ err = request_firmware(&fw, FIRMWARE_MEMDMA, fei->dev);
+ if (err)
+ return err;
+
+ err = c8sectpfe_elf_sanity_check(fei, fw);
+ if (err) {
+ dev_err(fei->dev, "c8sectpfe_elf_sanity_check failed err=(%d)\n"
+ , err);
+ release_firmware(fw);
+ return err;
+ }
+
+ err = load_slim_core_fw(fw, fei);
+ if (err) {
+ dev_err(fei->dev, "load_slim_core_fw failed err=(%d)\n", err);
+ return err;
+ }
+
+ /* now the firmware is loaded configure the input blocks */
+ err = configure_channels(fei);
+ if (err) {
+ dev_err(fei->dev, "configure_channels failed err=(%d)\n", err);
+ return err;
+ }
+
+ /*
+ * STBus target port can access IMEM and DMEM ports
+ * without waiting for CPU
+ */
+ writel(0x1, fei->io + DMA_PER_STBUS_SYNC);
+
+ dev_info(fei->dev, "Boot the memdma SLIM core\n");
+ writel(0x1, fei->io + DMA_CPU_RUN);
+
+ atomic_set(&fei->fw_loaded, 1);
+
+ return 0;
+}
+
+static const struct of_device_id c8sectpfe_match[] = {
+ { .compatible = "st,stih407-c8sectpfe" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, c8sectpfe_match);
+
+static struct platform_driver c8sectpfe_driver = {
+ .driver = {
+ .name = "c8sectpfe",
+ .of_match_table = of_match_ptr(c8sectpfe_match),
+ },
+ .probe = c8sectpfe_probe,
+ .remove = c8sectpfe_remove,
+};
+
+module_platform_driver(c8sectpfe_driver);
+
+MODULE_AUTHOR("Peter Bennett <peter.bennett@st.com>");
+MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
+MODULE_DESCRIPTION("C8SECTPFE STi DVB Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.h b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.h
new file mode 100644
index 000000000..c9d602190
--- /dev/null
+++ b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.h
@@ -0,0 +1,285 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * c8sectpfe-core.h - C8SECTPFE STi DVB driver
+ *
+ * Copyright (c) STMicroelectronics 2015
+ *
+ * Author: Peter Bennett <peter.bennett@st.com>
+ * Peter Griffin <peter.griffin@linaro.org>
+ *
+ */
+#ifndef _C8SECTPFE_CORE_H_
+#define _C8SECTPFE_CORE_H_
+
+#define C8SECTPFEI_MAXCHANNEL 16
+#define C8SECTPFEI_MAXADAPTER 3
+
+#define C8SECTPFE_MAX_TSIN_CHAN 8
+
+struct channel_info {
+
+ int tsin_id;
+ bool invert_ts_clk;
+ bool serial_not_parallel;
+ bool async_not_sync;
+ int i2c;
+ int dvb_card;
+
+ int rst_gpio;
+
+ struct i2c_adapter *i2c_adapter;
+ struct i2c_adapter *tuner_i2c;
+ struct i2c_adapter *lnb_i2c;
+ struct i2c_client *i2c_client;
+ struct dvb_frontend *frontend;
+
+ struct pinctrl_state *pstate;
+
+ int demux_mapping;
+ int active;
+
+ void *back_buffer_start;
+ void *back_buffer_aligned;
+ dma_addr_t back_buffer_busaddr;
+
+ void *pid_buffer_start;
+ void *pid_buffer_aligned;
+ dma_addr_t pid_buffer_busaddr;
+
+ unsigned long fifo;
+
+ struct completion idle_completion;
+ struct tasklet_struct tsklet;
+
+ struct c8sectpfei *fei;
+ void __iomem *irec;
+
+};
+
+struct c8sectpfe_hw {
+ int num_ib;
+ int num_mib;
+ int num_swts;
+ int num_tsout;
+ int num_ccsc;
+ int num_ram;
+ int num_tp;
+};
+
+struct c8sectpfei {
+
+ struct device *dev;
+ struct pinctrl *pinctrl;
+
+ struct dentry *root;
+ struct debugfs_regset32 *regset;
+ struct completion fw_ack;
+ atomic_t fw_loaded;
+
+ int tsin_count;
+
+ struct c8sectpfe_hw hw_stats;
+
+ struct c8sectpfe *c8sectpfe[C8SECTPFEI_MAXADAPTER];
+
+ int mapping[C8SECTPFEI_MAXCHANNEL];
+
+ struct mutex lock;
+
+ struct timer_list timer; /* timer interrupts for outputs */
+
+ void __iomem *io;
+ void __iomem *sram;
+
+ unsigned long sram_size;
+
+ struct channel_info *channel_data[C8SECTPFE_MAX_TSIN_CHAN];
+
+ struct clk *c8sectpfeclk;
+ int nima_rst_gpio;
+ int nimb_rst_gpio;
+
+ int idle_irq;
+ int error_irq;
+
+ int global_feed_count;
+};
+
+/* C8SECTPFE SYS Regs list */
+
+#define SYS_INPUT_ERR_STATUS 0x0
+#define SYS_OTHER_ERR_STATUS 0x8
+#define SYS_INPUT_ERR_MASK 0x10
+#define SYS_OTHER_ERR_MASK 0x18
+#define SYS_DMA_ROUTE 0x20
+#define SYS_INPUT_CLKEN 0x30
+#define IBENABLE_MASK 0x7F
+
+#define SYS_OTHER_CLKEN 0x38
+#define TSDMAENABLE BIT(1)
+#define MEMDMAENABLE BIT(0)
+
+#define SYS_CFG_NUM_IB 0x200
+#define SYS_CFG_NUM_MIB 0x204
+#define SYS_CFG_NUM_SWTS 0x208
+#define SYS_CFG_NUM_TSOUT 0x20C
+#define SYS_CFG_NUM_CCSC 0x210
+#define SYS_CFG_NUM_RAM 0x214
+#define SYS_CFG_NUM_TP 0x218
+
+/* Input Block Regs */
+
+#define C8SECTPFE_INPUTBLK_OFFSET 0x1000
+#define C8SECTPFE_CHANNEL_OFFSET(x) ((x*0x40) + C8SECTPFE_INPUTBLK_OFFSET)
+
+#define C8SECTPFE_IB_IP_FMT_CFG(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x00)
+#define C8SECTPFE_IGNORE_ERR_AT_SOP BIT(7)
+#define C8SECTPFE_IGNORE_ERR_IN_PKT BIT(6)
+#define C8SECTPFE_IGNORE_ERR_IN_BYTE BIT(5)
+#define C8SECTPFE_INVERT_TSCLK BIT(4)
+#define C8SECTPFE_ALIGN_BYTE_SOP BIT(3)
+#define C8SECTPFE_ASYNC_NOT_SYNC BIT(2)
+#define C8SECTPFE_BYTE_ENDIANNESS_MSB BIT(1)
+#define C8SECTPFE_SERIAL_NOT_PARALLEL BIT(0)
+
+#define C8SECTPFE_IB_SYNCLCKDRP_CFG(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x04)
+#define C8SECTPFE_SYNC(x) (x & 0xf)
+#define C8SECTPFE_DROP(x)			((x << 4) & 0xf0)
+#define C8SECTPFE_TOKEN(x) ((x<<8) & 0xff00)
+#define C8SECTPFE_SLDENDIANNESS BIT(16)
+
+#define C8SECTPFE_IB_TAGBYTES_CFG(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x08)
+#define C8SECTPFE_TAG_HEADER(x) (x << 16)
+#define C8SECTPFE_TAG_COUNTER(x) ((x<<1) & 0x7fff)
+#define C8SECTPFE_TAG_ENABLE BIT(0)
+
+#define C8SECTPFE_IB_PID_SET(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x0C)
+#define C8SECTPFE_PID_OFFSET(x) (x & 0x3f)
+#define C8SECTPFE_PID_NUMBITS(x) ((x << 6) & 0xfff)
+#define C8SECTPFE_PID_ENABLE BIT(31)
+
+#define C8SECTPFE_IB_PKT_LEN(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x10)
+
+#define C8SECTPFE_IB_BUFF_STRT(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x14)
+#define C8SECTPFE_IB_BUFF_END(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x18)
+#define C8SECTPFE_IB_READ_PNT(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x1C)
+#define C8SECTPFE_IB_WRT_PNT(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x20)
+
+#define C8SECTPFE_IB_PRI_THRLD(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x24)
+#define C8SECTPFE_PRI_VALUE(x) (x & 0x7fffff)
+#define C8SECTPFE_PRI_LOWPRI(x) ((x & 0xf) << 24)
+#define C8SECTPFE_PRI_HIGHPRI(x) ((x & 0xf) << 28)
+
+#define C8SECTPFE_IB_STAT(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x28)
+#define C8SECTPFE_STAT_FIFO_OVERFLOW(x) (x & 0x1)
+#define C8SECTPFE_STAT_BUFFER_OVERFLOW(x) (x & 0x2)
+#define C8SECTPFE_STAT_OUTOFORDERRP(x) (x & 0x4)
+#define C8SECTPFE_STAT_PID_OVERFLOW(x) (x & 0x8)
+#define C8SECTPFE_STAT_PKT_OVERFLOW(x) (x & 0x10)
+#define C8SECTPFE_STAT_ERROR_PACKETS(x) ((x >> 8) & 0xf)
+#define C8SECTPFE_STAT_SHORT_PACKETS(x) ((x >> 12) & 0xf)
+
+#define C8SECTPFE_IB_MASK(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x2C)
+#define C8SECTPFE_MASK_FIFO_OVERFLOW BIT(0)
+#define C8SECTPFE_MASK_BUFFER_OVERFLOW BIT(1)
+#define C8SECTPFE_MASK_OUTOFORDERRP(x) BIT(2)
+#define C8SECTPFE_MASK_PID_OVERFLOW(x) BIT(3)
+#define C8SECTPFE_MASK_PKT_OVERFLOW(x) BIT(4)
+#define C8SECTPFE_MASK_ERROR_PACKETS(x) ((x & 0xf) << 8)
+#define C8SECTPFE_MASK_SHORT_PACKETS(x)	((x & 0xf) << 12)
+
+#define C8SECTPFE_IB_SYS(x) (C8SECTPFE_CHANNEL_OFFSET(x) + 0x30)
+#define C8SECTPFE_SYS_RESET BIT(1)
+#define C8SECTPFE_SYS_ENABLE BIT(0)
+
+/*
+ * Pointer record data structure required for each input block
+ * see Table 82 on page 167 of functional specification.
+ */
+
+#define DMA_PRDS_MEMBASE 0x0 /* Internal sram base address */
+#define DMA_PRDS_MEMTOP 0x4 /* Internal sram top address */
+
+/*
+ * TS packet size, including tag bytes added by input block,
+ * rounded up to the next multiple of 8 bytes. The packet size,
+ * including any tagging bytes and rounded up to the nearest
+ * multiple of 8 bytes must be less than 255 bytes.
+ */
+#define DMA_PRDS_PKTSIZE 0x8
+#define DMA_PRDS_TPENABLE 0xc
+
+#define TP0_OFFSET 0x10
+#define DMA_PRDS_BUSBASE_TP(x) ((0x10*x) + TP0_OFFSET)
+#define DMA_PRDS_BUSTOP_TP(x) ((0x10*x) + TP0_OFFSET + 0x4)
+#define DMA_PRDS_BUSWP_TP(x) ((0x10*x) + TP0_OFFSET + 0x8)
+#define DMA_PRDS_BUSRP_TP(x) ((0x10*x) + TP0_OFFSET + 0xc)
+
+#define DMA_PRDS_SIZE (0x20)
+
+#define DMA_MEMDMA_OFFSET 0x4000
+#define DMA_IMEM_OFFSET 0x0
+#define DMA_DMEM_OFFSET 0x4000
+#define DMA_CPU 0x8000
+#define DMA_PER_OFFSET 0xb000
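+/*
+ * Relative to the c8sectpfe register base this places the MEMDMA IMEM at
+ * 0x4000, the DMEM at 0x8000, the XP70 CPU registers at 0xc000 and the
+ * peripheral registers at 0xf000.
+ */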
+
+#define DMA_MEMDMA_DMEM (DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET)
+#define DMA_MEMDMA_IMEM (DMA_MEMDMA_OFFSET + DMA_IMEM_OFFSET)
+
+/* XP70 Slim core regs */
+#define DMA_CPU_ID (DMA_MEMDMA_OFFSET + DMA_CPU + 0x0)
+#define DMA_CPU_VCR (DMA_MEMDMA_OFFSET + DMA_CPU + 0x4)
+#define DMA_CPU_RUN (DMA_MEMDMA_OFFSET + DMA_CPU + 0x8)
+#define DMA_CPU_CLOCKGATE (DMA_MEMDMA_OFFSET + DMA_CPU + 0xc)
+#define DMA_CPU_PC (DMA_MEMDMA_OFFSET + DMA_CPU + 0x20)
+
+/* Enable Interrupt for a IB */
+#define DMA_PER_TPn_DREQ_MASK (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xd00)
+/* Ack interrupt by setting corresponding bit */
+#define DMA_PER_TPn_DACK_SET (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xd80)
+#define DMA_PER_TPn_DREQ (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xe00)
+#define DMA_PER_TPn_DACK (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xe80)
+#define DMA_PER_DREQ_MODE (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xf80)
+#define DMA_PER_STBUS_SYNC (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xf88)
+#define DMA_PER_STBUS_ACCESS (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xf8c)
+#define DMA_PER_STBUS_ADDRESS (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xf90)
+#define DMA_PER_IDLE_INT (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfa8)
+#define DMA_PER_PRIORITY (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfac)
+#define DMA_PER_MAX_OPCODE (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfb0)
+#define DMA_PER_MAX_CHUNK (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfb4)
+#define DMA_PER_PAGE_SIZE (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfbc)
+#define DMA_PER_MBOX_STATUS (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfc0)
+#define DMA_PER_MBOX_SET (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfc8)
+#define DMA_PER_MBOX_CLEAR (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfd0)
+#define DMA_PER_MBOX_MASK (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfd8)
+#define DMA_PER_INJECT_PKT_SRC (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfe0)
+#define DMA_PER_INJECT_PKT_DEST (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfe4)
+#define DMA_PER_INJECT_PKT_ADDR (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfe8)
+#define DMA_PER_INJECT_PKT (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xfec)
+#define DMA_PER_PAT_PTR_INIT (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xff0)
+#define DMA_PER_PAT_PTR (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xff4)
+#define DMA_PER_SLEEP_MASK (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xff8)
+#define DMA_PER_SLEEP_COUNTER (DMA_MEMDMA_OFFSET + DMA_PER_OFFSET + 0xffc)
+/* #define DMA_RF_CPUREGn DMA_RFBASEADDR (n = 0 to 15) slim regs */
+
+/* The following are from DMA_DMEM_BaseAddress */
+#define DMA_FIRMWARE_VERSION (DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET + 0x0)
+#define DMA_PTRREC_BASE (DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET + 0x4)
+#define DMA_PTRREC_INPUT_OFFSET (DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET + 0x8)
+#define DMA_ERRREC_BASE (DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET + 0xc)
+#define DMA_ERROR_RECORD(n) ((n*4) + DMA_ERRREC_BASE + 0x4)
+#define DMA_IDLE_REQ (DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET + 0x10)
+#define IDLEREQ BIT(31)
+
+#define DMA_FIRMWARE_CONFIG (DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET + 0x14)
+
+/* Regs for PID Filter */
+
+#define PIDF_OFFSET 0x2800
+#define PIDF_BASE(n) ((n*4) + PIDF_OFFSET)
+#define PIDF_LEAK_ENABLE (PIDF_OFFSET + 0x100)
+#define PIDF_LEAK_STATUS (PIDF_OFFSET + 0x108)
+#define PIDF_LEAK_COUNT_RESET (PIDF_OFFSET + 0x110)
+#define PIDF_LEAK_COUNTER (PIDF_OFFSET + 0x114)
+
+#endif /* _C8SECTPFE_CORE_H_ */
diff --git a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-debugfs.c b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-debugfs.c
new file mode 100644
index 000000000..301fa10f4
--- /dev/null
+++ b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-debugfs.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * c8sectpfe-debugfs.c - C8SECTPFE STi DVB driver
+ *
+ * Copyright (c) STMicroelectronics 2015
+ *
+ * Author: Peter Griffin <peter.griffin@linaro.org>
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "c8sectpfe-debugfs.h"
+
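+/*
+ * dump_register() stringifies the register macro name (#nm) to use as the
+ * debugfs entry name and takes its value as the register offset.
+ */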
+#define dump_register(nm ...) \
+{ \
+ .name = #nm, \
+ .offset = nm, \
+}
+
+static const struct debugfs_reg32 fei_sys_regs[] = {
+ dump_register(SYS_INPUT_ERR_STATUS),
+ dump_register(SYS_OTHER_ERR_STATUS),
+ dump_register(SYS_INPUT_ERR_MASK),
+ dump_register(SYS_DMA_ROUTE),
+ dump_register(SYS_INPUT_CLKEN),
+ dump_register(IBENABLE_MASK),
+ dump_register(SYS_OTHER_CLKEN),
+ dump_register(SYS_CFG_NUM_IB),
+ dump_register(SYS_CFG_NUM_MIB),
+ dump_register(SYS_CFG_NUM_SWTS),
+ dump_register(SYS_CFG_NUM_TSOUT),
+ dump_register(SYS_CFG_NUM_CCSC),
+ dump_register(SYS_CFG_NUM_RAM),
+ dump_register(SYS_CFG_NUM_TP),
+
+ dump_register(C8SECTPFE_IB_IP_FMT_CFG(0)),
+ dump_register(C8SECTPFE_IB_TAGBYTES_CFG(0)),
+ dump_register(C8SECTPFE_IB_PID_SET(0)),
+ dump_register(C8SECTPFE_IB_PKT_LEN(0)),
+ dump_register(C8SECTPFE_IB_BUFF_STRT(0)),
+ dump_register(C8SECTPFE_IB_BUFF_END(0)),
+ dump_register(C8SECTPFE_IB_READ_PNT(0)),
+ dump_register(C8SECTPFE_IB_WRT_PNT(0)),
+ dump_register(C8SECTPFE_IB_PRI_THRLD(0)),
+ dump_register(C8SECTPFE_IB_STAT(0)),
+ dump_register(C8SECTPFE_IB_MASK(0)),
+ dump_register(C8SECTPFE_IB_SYS(0)),
+
+ dump_register(C8SECTPFE_IB_IP_FMT_CFG(1)),
+ dump_register(C8SECTPFE_IB_TAGBYTES_CFG(1)),
+ dump_register(C8SECTPFE_IB_PID_SET(1)),
+ dump_register(C8SECTPFE_IB_PKT_LEN(1)),
+ dump_register(C8SECTPFE_IB_BUFF_STRT(1)),
+ dump_register(C8SECTPFE_IB_BUFF_END(1)),
+ dump_register(C8SECTPFE_IB_READ_PNT(1)),
+ dump_register(C8SECTPFE_IB_WRT_PNT(1)),
+ dump_register(C8SECTPFE_IB_PRI_THRLD(1)),
+ dump_register(C8SECTPFE_IB_STAT(1)),
+ dump_register(C8SECTPFE_IB_MASK(1)),
+ dump_register(C8SECTPFE_IB_SYS(1)),
+
+ dump_register(C8SECTPFE_IB_IP_FMT_CFG(2)),
+ dump_register(C8SECTPFE_IB_TAGBYTES_CFG(2)),
+ dump_register(C8SECTPFE_IB_PID_SET(2)),
+ dump_register(C8SECTPFE_IB_PKT_LEN(2)),
+ dump_register(C8SECTPFE_IB_BUFF_STRT(2)),
+ dump_register(C8SECTPFE_IB_BUFF_END(2)),
+ dump_register(C8SECTPFE_IB_READ_PNT(2)),
+ dump_register(C8SECTPFE_IB_WRT_PNT(2)),
+ dump_register(C8SECTPFE_IB_PRI_THRLD(2)),
+ dump_register(C8SECTPFE_IB_STAT(2)),
+ dump_register(C8SECTPFE_IB_MASK(2)),
+ dump_register(C8SECTPFE_IB_SYS(2)),
+
+ dump_register(C8SECTPFE_IB_IP_FMT_CFG(3)),
+ dump_register(C8SECTPFE_IB_TAGBYTES_CFG(3)),
+ dump_register(C8SECTPFE_IB_PID_SET(3)),
+ dump_register(C8SECTPFE_IB_PKT_LEN(3)),
+ dump_register(C8SECTPFE_IB_BUFF_STRT(3)),
+ dump_register(C8SECTPFE_IB_BUFF_END(3)),
+ dump_register(C8SECTPFE_IB_READ_PNT(3)),
+ dump_register(C8SECTPFE_IB_WRT_PNT(3)),
+ dump_register(C8SECTPFE_IB_PRI_THRLD(3)),
+ dump_register(C8SECTPFE_IB_STAT(3)),
+ dump_register(C8SECTPFE_IB_MASK(3)),
+ dump_register(C8SECTPFE_IB_SYS(3)),
+
+ dump_register(C8SECTPFE_IB_IP_FMT_CFG(4)),
+ dump_register(C8SECTPFE_IB_TAGBYTES_CFG(4)),
+ dump_register(C8SECTPFE_IB_PID_SET(4)),
+ dump_register(C8SECTPFE_IB_PKT_LEN(4)),
+ dump_register(C8SECTPFE_IB_BUFF_STRT(4)),
+ dump_register(C8SECTPFE_IB_BUFF_END(4)),
+ dump_register(C8SECTPFE_IB_READ_PNT(4)),
+ dump_register(C8SECTPFE_IB_WRT_PNT(4)),
+ dump_register(C8SECTPFE_IB_PRI_THRLD(4)),
+ dump_register(C8SECTPFE_IB_STAT(4)),
+ dump_register(C8SECTPFE_IB_MASK(4)),
+ dump_register(C8SECTPFE_IB_SYS(4)),
+
+ dump_register(C8SECTPFE_IB_IP_FMT_CFG(5)),
+ dump_register(C8SECTPFE_IB_TAGBYTES_CFG(5)),
+ dump_register(C8SECTPFE_IB_PID_SET(5)),
+ dump_register(C8SECTPFE_IB_PKT_LEN(5)),
+ dump_register(C8SECTPFE_IB_BUFF_STRT(5)),
+ dump_register(C8SECTPFE_IB_BUFF_END(5)),
+ dump_register(C8SECTPFE_IB_READ_PNT(5)),
+ dump_register(C8SECTPFE_IB_WRT_PNT(5)),
+ dump_register(C8SECTPFE_IB_PRI_THRLD(5)),
+ dump_register(C8SECTPFE_IB_STAT(5)),
+ dump_register(C8SECTPFE_IB_MASK(5)),
+ dump_register(C8SECTPFE_IB_SYS(5)),
+
+ dump_register(C8SECTPFE_IB_IP_FMT_CFG(6)),
+ dump_register(C8SECTPFE_IB_TAGBYTES_CFG(6)),
+ dump_register(C8SECTPFE_IB_PID_SET(6)),
+ dump_register(C8SECTPFE_IB_PKT_LEN(6)),
+ dump_register(C8SECTPFE_IB_BUFF_STRT(6)),
+ dump_register(C8SECTPFE_IB_BUFF_END(6)),
+ dump_register(C8SECTPFE_IB_READ_PNT(6)),
+ dump_register(C8SECTPFE_IB_WRT_PNT(6)),
+ dump_register(C8SECTPFE_IB_PRI_THRLD(6)),
+ dump_register(C8SECTPFE_IB_STAT(6)),
+ dump_register(C8SECTPFE_IB_MASK(6)),
+ dump_register(C8SECTPFE_IB_SYS(6)),
+
+ dump_register(DMA_CPU_ID),
+ dump_register(DMA_CPU_VCR),
+ dump_register(DMA_CPU_RUN),
+ dump_register(DMA_CPU_PC),
+
+ dump_register(DMA_PER_TPn_DREQ_MASK),
+ dump_register(DMA_PER_TPn_DACK_SET),
+ dump_register(DMA_PER_TPn_DREQ),
+ dump_register(DMA_PER_TPn_DACK),
+ dump_register(DMA_PER_DREQ_MODE),
+ dump_register(DMA_PER_STBUS_SYNC),
+ dump_register(DMA_PER_STBUS_ACCESS),
+ dump_register(DMA_PER_STBUS_ADDRESS),
+ dump_register(DMA_PER_IDLE_INT),
+ dump_register(DMA_PER_PRIORITY),
+ dump_register(DMA_PER_MAX_OPCODE),
+ dump_register(DMA_PER_MAX_CHUNK),
+ dump_register(DMA_PER_PAGE_SIZE),
+ dump_register(DMA_PER_MBOX_STATUS),
+ dump_register(DMA_PER_MBOX_SET),
+ dump_register(DMA_PER_MBOX_CLEAR),
+ dump_register(DMA_PER_MBOX_MASK),
+ dump_register(DMA_PER_INJECT_PKT_SRC),
+ dump_register(DMA_PER_INJECT_PKT_DEST),
+ dump_register(DMA_PER_INJECT_PKT_ADDR),
+ dump_register(DMA_PER_INJECT_PKT),
+ dump_register(DMA_PER_PAT_PTR_INIT),
+ dump_register(DMA_PER_PAT_PTR),
+ dump_register(DMA_PER_SLEEP_MASK),
+ dump_register(DMA_PER_SLEEP_COUNTER),
+
+ dump_register(DMA_FIRMWARE_VERSION),
+ dump_register(DMA_PTRREC_BASE),
+ dump_register(DMA_PTRREC_INPUT_OFFSET),
+ dump_register(DMA_ERRREC_BASE),
+
+ dump_register(DMA_ERROR_RECORD(0)),
+ dump_register(DMA_ERROR_RECORD(1)),
+ dump_register(DMA_ERROR_RECORD(2)),
+ dump_register(DMA_ERROR_RECORD(3)),
+ dump_register(DMA_ERROR_RECORD(4)),
+ dump_register(DMA_ERROR_RECORD(5)),
+ dump_register(DMA_ERROR_RECORD(6)),
+ dump_register(DMA_ERROR_RECORD(7)),
+ dump_register(DMA_ERROR_RECORD(8)),
+ dump_register(DMA_ERROR_RECORD(9)),
+ dump_register(DMA_ERROR_RECORD(10)),
+ dump_register(DMA_ERROR_RECORD(11)),
+ dump_register(DMA_ERROR_RECORD(12)),
+ dump_register(DMA_ERROR_RECORD(13)),
+ dump_register(DMA_ERROR_RECORD(14)),
+ dump_register(DMA_ERROR_RECORD(15)),
+ dump_register(DMA_ERROR_RECORD(16)),
+ dump_register(DMA_ERROR_RECORD(17)),
+ dump_register(DMA_ERROR_RECORD(18)),
+ dump_register(DMA_ERROR_RECORD(19)),
+ dump_register(DMA_ERROR_RECORD(20)),
+ dump_register(DMA_ERROR_RECORD(21)),
+ dump_register(DMA_ERROR_RECORD(22)),
+
+ dump_register(DMA_IDLE_REQ),
+ dump_register(DMA_FIRMWARE_CONFIG),
+
+ dump_register(PIDF_BASE(0)),
+ dump_register(PIDF_BASE(1)),
+ dump_register(PIDF_BASE(2)),
+ dump_register(PIDF_BASE(3)),
+ dump_register(PIDF_BASE(4)),
+ dump_register(PIDF_BASE(5)),
+ dump_register(PIDF_BASE(6)),
+ dump_register(PIDF_BASE(7)),
+ dump_register(PIDF_BASE(8)),
+ dump_register(PIDF_BASE(9)),
+ dump_register(PIDF_BASE(10)),
+ dump_register(PIDF_BASE(11)),
+ dump_register(PIDF_BASE(12)),
+ dump_register(PIDF_BASE(13)),
+ dump_register(PIDF_BASE(14)),
+ dump_register(PIDF_BASE(15)),
+ dump_register(PIDF_BASE(16)),
+ dump_register(PIDF_BASE(17)),
+ dump_register(PIDF_BASE(18)),
+ dump_register(PIDF_BASE(19)),
+ dump_register(PIDF_BASE(20)),
+ dump_register(PIDF_BASE(21)),
+ dump_register(PIDF_BASE(22)),
+ dump_register(PIDF_LEAK_ENABLE),
+ dump_register(PIDF_LEAK_STATUS),
+ dump_register(PIDF_LEAK_COUNT_RESET),
+ dump_register(PIDF_LEAK_COUNTER),
+};
+
+void c8sectpfe_debugfs_init(struct c8sectpfei *fei)
+{
+ fei->regset = devm_kzalloc(fei->dev, sizeof(*fei->regset), GFP_KERNEL);
+ if (!fei->regset)
+ return;
+
+ fei->regset->regs = fei_sys_regs;
+ fei->regset->nregs = ARRAY_SIZE(fei_sys_regs);
+ fei->regset->base = fei->io;
+
+ fei->root = debugfs_create_dir("c8sectpfe", NULL);
+ debugfs_create_regset32("registers", S_IRUGO, fei->root, fei->regset);
+}
+
+void c8sectpfe_debugfs_exit(struct c8sectpfei *fei)
+{
+ debugfs_remove_recursive(fei->root);
+ fei->root = NULL;
+}
diff --git a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-debugfs.h b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-debugfs.h
new file mode 100644
index 000000000..d2c35fb32
--- /dev/null
+++ b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-debugfs.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * c8sectpfe-debugfs.h - C8SECTPFE STi DVB driver debugfs header
+ *
+ * Copyright (c) STMicroelectronics 2015
+ *
+ * Authors: Peter Griffin <peter.griffin@linaro.org>
+ */
+
+#ifndef __C8SECTPFE_DEBUG_H
+#define __C8SECTPFE_DEBUG_H
+
+#include "c8sectpfe-core.h"
+
+void c8sectpfe_debugfs_init(struct c8sectpfei *);
+void c8sectpfe_debugfs_exit(struct c8sectpfei *);
+
+#endif /* __C8SECTPFE_DEBUG_H */
diff --git a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-dvb.c b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-dvb.c
new file mode 100644
index 000000000..feb48cb54
--- /dev/null
+++ b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-dvb.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * c8sectpfe-dvb.c - C8SECTPFE STi DVB driver
+ *
+ * Copyright (c) STMicroelectronics 2015
+ *
+ * Author: Peter Griffin <peter.griffin@linaro.org>
+ *
+ */
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+
+#include <dt-bindings/media/c8sectpfe.h>
+
+#include "c8sectpfe-common.h"
+#include "c8sectpfe-core.h"
+#include "c8sectpfe-dvb.h"
+
+#include "dvb-pll.h"
+#include "lnbh24.h"
+#include "stv0367.h"
+#include "stv0367_priv.h"
+#include "stv6110x.h"
+#include "stv090x.h"
+#include "tda18212.h"
+
+static inline const char *dvb_card_str(unsigned int c)
+{
+ switch (c) {
+ case STV0367_TDA18212_NIMA_1: return "STV0367_TDA18212_NIMA_1";
+ case STV0367_TDA18212_NIMA_2: return "STV0367_TDA18212_NIMA_2";
+ case STV0367_TDA18212_NIMB_1: return "STV0367_TDA18212_NIMB_1";
+ case STV0367_TDA18212_NIMB_2: return "STV0367_TDA18212_NIMB_2";
+ case STV0903_6110_LNB24_NIMA: return "STV0903_6110_LNB24_NIMA";
+ case STV0903_6110_LNB24_NIMB: return "STV0903_6110_LNB24_NIMB";
+ default: return "unknown dvb frontend card";
+ }
+}
+
+static struct stv090x_config stv090x_config = {
+ .device = STV0903,
+ .demod_mode = STV090x_SINGLE,
+ .clk_mode = STV090x_CLK_EXT,
+ .xtal = 16000000,
+ .address = 0x69,
+
+ .ts1_mode = STV090x_TSMODE_SERIAL_CONTINUOUS,
+ .ts2_mode = STV090x_TSMODE_SERIAL_CONTINUOUS,
+
+ .repeater_level = STV090x_RPTLEVEL_64,
+
+ .tuner_init = NULL,
+ .tuner_set_mode = NULL,
+ .tuner_set_frequency = NULL,
+ .tuner_get_frequency = NULL,
+ .tuner_set_bandwidth = NULL,
+ .tuner_get_bandwidth = NULL,
+ .tuner_set_bbgain = NULL,
+ .tuner_get_bbgain = NULL,
+ .tuner_set_refclk = NULL,
+ .tuner_get_status = NULL,
+};
+
+static struct stv6110x_config stv6110x_config = {
+ .addr = 0x60,
+ .refclk = 16000000,
+};
+
+#define NIMA 0
+#define NIMB 1
+
+static struct stv0367_config stv0367_tda18212_config[] = {
+ {
+ .demod_address = 0x1c,
+ .xtal = 16000000,
+ .if_khz = 4500,
+ .if_iq_mode = FE_TER_NORMAL_IF_TUNER,
+ .ts_mode = STV0367_SERIAL_PUNCT_CLOCK,
+ .clk_pol = STV0367_CLOCKPOLARITY_DEFAULT,
+ }, {
+ .demod_address = 0x1d,
+ .xtal = 16000000,
+ .if_khz = 4500,
+ .if_iq_mode = FE_TER_NORMAL_IF_TUNER,
+ .ts_mode = STV0367_SERIAL_PUNCT_CLOCK,
+ .clk_pol = STV0367_CLOCKPOLARITY_DEFAULT,
+ }, {
+ .demod_address = 0x1e,
+ .xtal = 16000000,
+ .if_khz = 4500,
+ .if_iq_mode = FE_TER_NORMAL_IF_TUNER,
+ .ts_mode = STV0367_SERIAL_PUNCT_CLOCK,
+ .clk_pol = STV0367_CLOCKPOLARITY_DEFAULT,
+ },
+};
+
+static struct tda18212_config tda18212_conf = {
+ .if_dvbt_6 = 4150,
+ .if_dvbt_7 = 4150,
+ .if_dvbt_8 = 4500,
+ .if_dvbc = 5000,
+};
+
+int c8sectpfe_frontend_attach(struct dvb_frontend **fe,
+ struct c8sectpfe *c8sectpfe,
+ struct channel_info *tsin, int chan_num)
+{
+ struct tda18212_config *tda18212;
+ const struct stv6110x_devctl *fe2;
+ struct i2c_client *client;
+ struct i2c_board_info tda18212_info = {
+ .type = "tda18212",
+ .addr = 0x60,
+ };
+
+ if (!tsin)
+ return -EINVAL;
+
+ switch (tsin->dvb_card) {
+
+ case STV0367_TDA18212_NIMA_1:
+ case STV0367_TDA18212_NIMA_2:
+ case STV0367_TDA18212_NIMB_1:
+ case STV0367_TDA18212_NIMB_2:
+ if (tsin->dvb_card == STV0367_TDA18212_NIMA_1)
+ *fe = dvb_attach(stv0367ter_attach,
+ &stv0367_tda18212_config[0],
+ tsin->i2c_adapter);
+ else if (tsin->dvb_card == STV0367_TDA18212_NIMB_1)
+ *fe = dvb_attach(stv0367ter_attach,
+ &stv0367_tda18212_config[1],
+ tsin->i2c_adapter);
+ else
+ *fe = dvb_attach(stv0367ter_attach,
+ &stv0367_tda18212_config[2],
+ tsin->i2c_adapter);
+
+ if (!*fe) {
+ dev_err(c8sectpfe->device,
+ "%s: stv0367ter_attach failed for NIM card %s\n"
+ , __func__, dvb_card_str(tsin->dvb_card));
+ return -ENODEV;
+ }
+
+ /*
+ * init the demod so that i2c gate_ctrl
+ * to the tuner works correctly
+ */
+ (*fe)->ops.init(*fe);
+
+ /* Allocate the tda18212 structure */
+ tda18212 = devm_kzalloc(c8sectpfe->device,
+ sizeof(struct tda18212_config),
+ GFP_KERNEL);
+ if (!tda18212) {
+ dev_err(c8sectpfe->device,
+ "%s: devm_kzalloc failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ memcpy(tda18212, &tda18212_conf,
+ sizeof(struct tda18212_config));
+
+ tda18212->fe = (*fe);
+
+ tda18212_info.platform_data = tda18212;
+
+ /* attach tuner */
+ request_module("tda18212");
+ client = i2c_new_client_device(tsin->i2c_adapter,
+ &tda18212_info);
+ if (!i2c_client_has_driver(client)) {
+ dvb_frontend_detach(*fe);
+ return -ENODEV;
+ }
+
+ if (!try_module_get(client->dev.driver->owner)) {
+ i2c_unregister_device(client);
+ dvb_frontend_detach(*fe);
+ return -ENODEV;
+ }
+
+ tsin->i2c_client = client;
+
+ break;
+
+ case STV0903_6110_LNB24_NIMA:
+ *fe = dvb_attach(stv090x_attach, &stv090x_config,
+ tsin->i2c_adapter, STV090x_DEMODULATOR_0);
+ if (!*fe) {
+ dev_err(c8sectpfe->device, "%s: stv090x_attach failed\n"
+ "\tfor NIM card %s\n",
+ __func__, dvb_card_str(tsin->dvb_card));
+ return -ENODEV;
+ }
+
+ fe2 = dvb_attach(stv6110x_attach, *fe,
+ &stv6110x_config, tsin->i2c_adapter);
+ if (!fe2) {
+ dev_err(c8sectpfe->device,
+ "%s: stv6110x_attach failed for NIM card %s\n"
+ , __func__, dvb_card_str(tsin->dvb_card));
+ return -ENODEV;
+ }
+
+ stv090x_config.tuner_init = fe2->tuner_init;
+ stv090x_config.tuner_set_mode = fe2->tuner_set_mode;
+ stv090x_config.tuner_set_frequency = fe2->tuner_set_frequency;
+ stv090x_config.tuner_get_frequency = fe2->tuner_get_frequency;
+ stv090x_config.tuner_set_bandwidth = fe2->tuner_set_bandwidth;
+ stv090x_config.tuner_get_bandwidth = fe2->tuner_get_bandwidth;
+ stv090x_config.tuner_set_bbgain = fe2->tuner_set_bbgain;
+ stv090x_config.tuner_get_bbgain = fe2->tuner_get_bbgain;
+ stv090x_config.tuner_set_refclk = fe2->tuner_set_refclk;
+ stv090x_config.tuner_get_status = fe2->tuner_get_status;
+
+ dvb_attach(lnbh24_attach, *fe, tsin->i2c_adapter, 0, 0, 0x9);
+ break;
+
+ default:
+ dev_err(c8sectpfe->device,
+ "%s: DVB frontend card %s not yet supported\n",
+ __func__, dvb_card_str(tsin->dvb_card));
+ return -ENODEV;
+ }
+
+ (*fe)->id = chan_num;
+
+ dev_info(c8sectpfe->device,
+ "DVB frontend card %s successfully attached",
+ dvb_card_str(tsin->dvb_card));
+ return 0;
+}
diff --git a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-dvb.h b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-dvb.h
new file mode 100644
index 000000000..3d87a9ae8
--- /dev/null
+++ b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-dvb.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * c8sectpfe-dvb.h - C8SECTPFE STi DVB driver
+ *
+ * Copyright (c) STMicroelectronics 2015
+ *
+ * Author: Peter Griffin <peter.griffin@linaro.org>
+ *
+ */
+#ifndef _C8SECTPFE_DVB_H_
+#define _C8SECTPFE_DVB_H_
+
+int c8sectpfe_frontend_attach(struct dvb_frontend **fe,
+ struct c8sectpfe *c8sectpfe, struct channel_info *tsin,
+ int chan_num);
+
+#endif
diff --git a/drivers/media/platform/st/sti/delta/Kconfig b/drivers/media/platform/st/sti/delta/Kconfig
new file mode 100644
index 000000000..efa936b1c
--- /dev/null
+++ b/drivers/media/platform/st/sti/delta/Kconfig
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VIDEO_STI_DELTA
+ tristate "STMicroelectronics DELTA multi-format video decoder V4L2 driver"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on VIDEO_DEV
+ depends on ARCH_STI || COMPILE_TEST
+ help
+	  This V4L2 driver enables the DELTA multi-format video decoder
+	  of the STMicroelectronics STiH4xx SoC series, allowing hardware
+	  decoding of various compressed video bitstream formats into
+	  raw uncompressed format.
+
+ Use this option to see the decoders available for such
+ hardware.
+
+	  Please note that the driver will only be built if
+	  at least one of the DELTA decoders below is selected.
+
+config VIDEO_STI_DELTA_MJPEG
+ bool "STMicroelectronics DELTA MJPEG support"
+ default y
+ depends on VIDEO_STI_DELTA
+ help
+ Enables DELTA MJPEG hardware support.
+
+ To compile this driver as a module, choose M here:
+ the module will be called st-delta.
+
+config VIDEO_STI_DELTA_DRIVER
+ tristate
+ depends on VIDEO_STI_DELTA
+ depends on VIDEO_STI_DELTA_MJPEG
+ default VIDEO_STI_DELTA_MJPEG
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ select RPMSG
diff --git a/drivers/media/platform/st/sti/delta/Makefile b/drivers/media/platform/st/sti/delta/Makefile
new file mode 100644
index 000000000..32412fa4c
--- /dev/null
+++ b/drivers/media/platform/st/sti/delta/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_VIDEO_STI_DELTA_DRIVER) += st-delta.o
+st-delta-y := delta-v4l2.o delta-mem.o delta-ipc.o delta-debug.o
+
+# MJPEG support
+st-delta-$(CONFIG_VIDEO_STI_DELTA_MJPEG) += delta-mjpeg-hdr.o
+st-delta-$(CONFIG_VIDEO_STI_DELTA_MJPEG) += delta-mjpeg-dec.o
diff --git a/drivers/media/platform/st/sti/delta/delta-cfg.h b/drivers/media/platform/st/sti/delta/delta-cfg.h
new file mode 100644
index 000000000..f47c6e6ff
--- /dev/null
+++ b/drivers/media/platform/st/sti/delta/delta-cfg.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Author: Hugues Fruchet <hugues.fruchet@st.com> for STMicroelectronics.
+ */
+
+#ifndef DELTA_CFG_H
+#define DELTA_CFG_H
+
+#define DELTA_FW_VERSION "21.1-3"
+
+#define DELTA_MIN_WIDTH 32
+#define DELTA_MAX_WIDTH 4096
+#define DELTA_MIN_HEIGHT 32
+#define DELTA_MAX_HEIGHT 2400
+
+/* DELTA requires a 32x32 pixels alignment for frames */
+#define DELTA_WIDTH_ALIGNMENT 32
+#define DELTA_HEIGHT_ALIGNMENT 32
+
+#define DELTA_DEFAULT_WIDTH DELTA_MIN_WIDTH
+#define DELTA_DEFAULT_HEIGHT DELTA_MIN_HEIGHT
+#define DELTA_DEFAULT_FRAMEFORMAT V4L2_PIX_FMT_NV12
+#define DELTA_DEFAULT_STREAMFORMAT V4L2_PIX_FMT_MJPEG
+
+#define DELTA_MAX_RESO (DELTA_MAX_WIDTH * DELTA_MAX_HEIGHT)
+
+/* guard value for number of access units */
+#define DELTA_MAX_AUS 10
+
+/* IP perf dependent, can be tuned */
+#define DELTA_PEAK_FRAME_SMOOTHING 2
+
+/*
+ * guard output frame count:
+ * - at least 1 frame needed for display
+ * - at worst 21
+ * ( max h264 dpb (16) +
+ * decoding peak smoothing (2) +
+ * user display pipeline (3) )
+ */
+#define DELTA_MIN_FRAME_USER 1
+#define DELTA_MAX_DPB 16
+#define DELTA_MAX_FRAME_USER 3 /* platform/use-case dependent */
+#define DELTA_MAX_FRAMES (DELTA_MAX_DPB + DELTA_PEAK_FRAME_SMOOTHING +\
+ DELTA_MAX_FRAME_USER)
+
+#if DELTA_MAX_FRAMES > VIDEO_MAX_FRAME
+#undef DELTA_MAX_FRAMES
+#define DELTA_MAX_FRAMES (VIDEO_MAX_FRAME)
+#endif
+
+/* extra space to be allocated to store codec specific data per frame */
+#define DELTA_MAX_FRAME_PRIV_SIZE 100
+
+/* PM runtime auto power-off after 5ms of inactivity */
+#define DELTA_HW_AUTOSUSPEND_DELAY_MS 5
+
+#define DELTA_MAX_DECODERS 10
+#ifdef CONFIG_VIDEO_STI_DELTA_MJPEG
+extern const struct delta_dec mjpegdec;
+#endif
+
+#endif /* DELTA_CFG_H */
diff --git a/drivers/media/platform/st/sti/delta/delta-debug.c b/drivers/media/platform/st/sti/delta/delta-debug.c
new file mode 100644
index 000000000..4b2eb6b63
--- /dev/null
+++ b/drivers/media/platform/st/sti/delta/delta-debug.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Hugues Fruchet <hugues.fruchet@st.com>
+ * Fabrice Lecoultre <fabrice.lecoultre@st.com>
+ * for STMicroelectronics.
+ */
+
+#include "delta.h"
+#include "delta-debug.h"
+
+char *delta_streaminfo_str(struct delta_streaminfo *s, char *str,
+ unsigned int len)
+{
+ if (!s)
+ return NULL;
+
+ snprintf(str, len,
+ "%4.4s %dx%d %s %s dpb=%d %s %s %s%dx%d@(%d,%d) %s%d/%d",
+ (char *)&s->streamformat, s->width, s->height,
+ s->profile, s->level, s->dpb,
+ (s->field == V4L2_FIELD_NONE) ? "progressive" : "interlaced",
+ s->other,
+ s->flags & DELTA_STREAMINFO_FLAG_CROP ? "crop=" : "",
+ s->crop.width, s->crop.height,
+ s->crop.left, s->crop.top,
+ s->flags & DELTA_STREAMINFO_FLAG_PIXELASPECT ? "par=" : "",
+ s->pixelaspect.numerator,
+ s->pixelaspect.denominator);
+
+ return str;
+}
+
+char *delta_frameinfo_str(struct delta_frameinfo *f, char *str,
+ unsigned int len)
+{
+ if (!f)
+ return NULL;
+
+ snprintf(str, len,
+ "%4.4s %dx%d aligned %dx%d %s %s%dx%d@(%d,%d) %s%d/%d",
+ (char *)&f->pixelformat, f->width, f->height,
+ f->aligned_width, f->aligned_height,
+ (f->field == V4L2_FIELD_NONE) ? "progressive" : "interlaced",
+ f->flags & DELTA_STREAMINFO_FLAG_CROP ? "crop=" : "",
+ f->crop.width, f->crop.height,
+ f->crop.left, f->crop.top,
+ f->flags & DELTA_STREAMINFO_FLAG_PIXELASPECT ? "par=" : "",
+ f->pixelaspect.numerator,
+ f->pixelaspect.denominator);
+
+ return str;
+}
+
+void delta_trace_summary(struct delta_ctx *ctx)
+{
+ struct delta_dev *delta = ctx->dev;
+ struct delta_streaminfo *s = &ctx->streaminfo;
+ unsigned char str[100] = "";
+
+ if (!(ctx->flags & DELTA_FLAG_STREAMINFO))
+ return;
+
+ dev_dbg(delta->dev, "%s %s, %d frames decoded, %d frames output, %d frames dropped, %d stream errors, %d decode errors",
+ ctx->name,
+ delta_streaminfo_str(s, str, sizeof(str)),
+ ctx->decoded_frames,
+ ctx->output_frames,
+ ctx->dropped_frames,
+ ctx->stream_errors,
+ ctx->decode_errors);
+}
diff --git a/drivers/media/platform/st/sti/delta/delta-debug.h b/drivers/media/platform/st/sti/delta/delta-debug.h
new file mode 100644
index 000000000..fa9025262
--- /dev/null
+++ b/drivers/media/platform/st/sti/delta/delta-debug.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Hugues Fruchet <hugues.fruchet@st.com>
+ * Fabrice Lecoultre <fabrice.lecoultre@st.com>
+ * for STMicroelectronics.
+ */
+
+#ifndef DELTA_DEBUG_H
+#define DELTA_DEBUG_H
+
+char *delta_streaminfo_str(struct delta_streaminfo *s, char *str,
+ unsigned int len);
+char *delta_frameinfo_str(struct delta_frameinfo *f, char *str,
+ unsigned int len);
+void delta_trace_summary(struct delta_ctx *ctx);
+
+#endif /* DELTA_DEBUG_H */
diff --git a/drivers/media/platform/st/sti/delta/delta-ipc.c b/drivers/media/platform/st/sti/delta/delta-ipc.c
new file mode 100644
index 000000000..21d3e08e2
--- /dev/null
+++ b/drivers/media/platform/st/sti/delta/delta-ipc.c
@@ -0,0 +1,591 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Author: Hugues Fruchet <hugues.fruchet@st.com> for STMicroelectronics.
+ */
+
+#include <linux/rpmsg.h>
+
+#include "delta.h"
+#include "delta-ipc.h"
+#include "delta-mem.h"
+
+#define IPC_TIMEOUT 100
+#define IPC_SANITY_TAG 0xDEADBEEF
+
+enum delta_ipc_fw_command {
+ DELTA_IPC_OPEN,
+ DELTA_IPC_SET_STREAM,
+ DELTA_IPC_DECODE,
+ DELTA_IPC_CLOSE
+};
+
+#define to_rpmsg_driver(__drv) container_of(__drv, struct rpmsg_driver, drv)
+#define to_delta(__d) container_of(__d, struct delta_dev, rpmsg_driver)
+
+#define to_ctx(hdl) ((struct delta_ipc_ctx *)hdl)
+#define to_pctx(ctx) container_of(ctx, struct delta_ctx, ipc_ctx)
+
+struct delta_ipc_header_msg {
+ u32 tag;
+ void *host_hdl;
+ u32 copro_hdl;
+ u32 command;
+};
+
+#define to_host_hdl(ctx) ((void *)ctx)
+
+#define msg_to_ctx(msg) ((struct delta_ipc_ctx *)(msg)->header.host_hdl)
+#define msg_to_copro_hdl(msg) ((msg)->header.copro_hdl)
+
+static inline dma_addr_t to_paddr(struct delta_ipc_ctx *ctx, void *vaddr)
+{
+ return (ctx->ipc_buf->paddr + (vaddr - ctx->ipc_buf->vaddr));
+}
+
+static inline bool is_valid_data(struct delta_ipc_ctx *ctx,
+ void *data, u32 size)
+{
+ return ((data >= ctx->ipc_buf->vaddr) &&
+ ((data + size) <= (ctx->ipc_buf->vaddr + ctx->ipc_buf->size)));
+}
+
+/*
+ * IPC shared memory (@ipc_buf_size, @ipc_buf_paddr) is sent to the copro
+ * at each instance opening. This memory is allocated by the IPC client
+ * and given through delta_ipc_open(). All message parameters
+ * (open, set_stream, decode) have their physical address within
+ * this IPC shared memory, avoiding extra copies inside delta-ipc.
+ * All the message structures below are used on both the host and firmware
+ * sides and are packed (only 32-bit sized fields are used in the message
+ * structures to ensure packing):
+ * - struct delta_ipc_open_msg
+ * - struct delta_ipc_set_stream_msg
+ * - struct delta_ipc_decode_msg
+ * - struct delta_ipc_close_msg
+ * - struct delta_ipc_cb_msg
+ */
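+
+/*
+ * Command sequence, as implemented by the functions below; each command
+ * is acknowledged by a delta_ipc_cb_msg callback carrying the firmware
+ * completion status:
+ *   DELTA_IPC_OPEN -> DELTA_IPC_SET_STREAM (if needed by the decoder) ->
+ *   DELTA_IPC_DECODE (one per access unit) -> DELTA_IPC_CLOSE
+ */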
+struct delta_ipc_open_msg {
+ struct delta_ipc_header_msg header;
+ u32 ipc_buf_size;
+ dma_addr_t ipc_buf_paddr;
+ char name[32];
+ u32 param_size;
+ dma_addr_t param_paddr;
+};
+
+struct delta_ipc_set_stream_msg {
+ struct delta_ipc_header_msg header;
+ u32 param_size;
+ dma_addr_t param_paddr;
+};
+
+struct delta_ipc_decode_msg {
+ struct delta_ipc_header_msg header;
+ u32 param_size;
+ dma_addr_t param_paddr;
+ u32 status_size;
+ dma_addr_t status_paddr;
+};
+
+struct delta_ipc_close_msg {
+ struct delta_ipc_header_msg header;
+};
+
+struct delta_ipc_cb_msg {
+ struct delta_ipc_header_msg header;
+ int err;
+};
+
+static void build_msg_header(struct delta_ipc_ctx *ctx,
+ enum delta_ipc_fw_command command,
+ struct delta_ipc_header_msg *header)
+{
+ header->tag = IPC_SANITY_TAG;
+ header->host_hdl = to_host_hdl(ctx);
+ header->copro_hdl = ctx->copro_hdl;
+ header->command = command;
+}
+
+int delta_ipc_open(struct delta_ctx *pctx, const char *name,
+ struct delta_ipc_param *param, u32 ipc_buf_size,
+ struct delta_buf **ipc_buf, void **hdl)
+{
+ struct delta_dev *delta = pctx->dev;
+ struct rpmsg_device *rpmsg_device = delta->rpmsg_device;
+ struct delta_ipc_ctx *ctx = &pctx->ipc_ctx;
+ struct delta_ipc_open_msg msg;
+ struct delta_buf *buf = &ctx->ipc_buf_struct;
+ int ret;
+
+ if (!rpmsg_device) {
+ dev_err(delta->dev,
+ "%s ipc: failed to open, rpmsg is not initialized\n",
+ pctx->name);
+ pctx->sys_errors++;
+ return -EINVAL;
+ }
+
+ if (!name) {
+ dev_err(delta->dev,
+ "%s ipc: failed to open, no name given\n",
+ pctx->name);
+ return -EINVAL;
+ }
+
+ if (!param || !param->data || !param->size) {
+ dev_err(delta->dev,
+ "%s ipc: failed to open, empty parameter\n",
+ pctx->name);
+ return -EINVAL;
+ }
+
+ if (!ipc_buf_size) {
+ dev_err(delta->dev,
+ "%s ipc: failed to open, no size given for ipc buffer\n",
+ pctx->name);
+ return -EINVAL;
+ }
+
+ if (param->size > ipc_buf_size) {
+ dev_err(delta->dev,
+ "%s ipc: failed to open, too large ipc parameter (%d bytes while max %d expected)\n",
+ pctx->name,
+ param->size, ipc_buf_size);
+ return -EINVAL;
+ }
+
+ /* init */
+ init_completion(&ctx->done);
+
+ /*
+ * allocation of contiguous buffer for
+ * data of commands exchanged between
+ * host and firmware coprocessor
+ */
+ ret = hw_alloc(pctx, ipc_buf_size,
+ "ipc data buffer", buf);
+ if (ret)
+ return ret;
+ ctx->ipc_buf = buf;
+
+ /* build rpmsg message */
+ build_msg_header(ctx, DELTA_IPC_OPEN, &msg.header);
+
+ msg.ipc_buf_size = ipc_buf_size;
+ msg.ipc_buf_paddr = ctx->ipc_buf->paddr;
+
+ strscpy(msg.name, name, sizeof(msg.name));
+
+ msg.param_size = param->size;
+ memcpy(ctx->ipc_buf->vaddr, param->data, msg.param_size);
+ msg.param_paddr = ctx->ipc_buf->paddr;
+
+ /* send it */
+ ret = rpmsg_send(rpmsg_device->ept, &msg, sizeof(msg));
+ if (ret) {
+ dev_err(delta->dev,
+ "%s ipc: failed to open, rpmsg_send failed (%d) for DELTA_IPC_OPEN (name=%s, size=%d, data=%p)\n",
+ pctx->name,
+ ret, name, param->size, param->data);
+ goto err;
+ }
+
+ /* wait for acknowledge */
+ if (!wait_for_completion_timeout
+ (&ctx->done, msecs_to_jiffies(IPC_TIMEOUT))) {
+ dev_err(delta->dev,
+ "%s ipc: failed to open, timeout waiting for DELTA_IPC_OPEN callback (name=%s, size=%d, data=%p)\n",
+ pctx->name,
+ name, param->size, param->data);
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ /* command completed, check error */
+ if (ctx->cb_err) {
+ dev_err(delta->dev,
+ "%s ipc: failed to open, DELTA_IPC_OPEN completed but with error (%d) (name=%s, size=%d, data=%p)\n",
+ pctx->name,
+ ctx->cb_err, name, param->size, param->data);
+ ret = -EIO;
+ goto err;
+ }
+
+ *ipc_buf = ctx->ipc_buf;
+ *hdl = (void *)ctx;
+
+ return 0;
+
+err:
+ pctx->sys_errors++;
+ hw_free(pctx, ctx->ipc_buf);
+ ctx->ipc_buf = NULL;
+
+ return ret;
+}
+
+int delta_ipc_set_stream(void *hdl, struct delta_ipc_param *param)
+{
+ struct delta_ipc_ctx *ctx = to_ctx(hdl);
+ struct delta_ctx *pctx = to_pctx(ctx);
+ struct delta_dev *delta = pctx->dev;
+ struct rpmsg_device *rpmsg_device = delta->rpmsg_device;
+ struct delta_ipc_set_stream_msg msg;
+ int ret;
+
+ if (!hdl) {
+ dev_err(delta->dev,
+ "%s ipc: failed to set stream, invalid ipc handle\n",
+ pctx->name);
+ return -EINVAL;
+ }
+
+ if (!rpmsg_device) {
+ dev_err(delta->dev,
+ "%s ipc: failed to set stream, rpmsg is not initialized\n",
+ pctx->name);
+ return -EINVAL;
+ }
+
+ if (!param || !param->data || !param->size) {
+ dev_err(delta->dev,
+ "%s ipc: failed to set stream, empty parameter\n",
+ pctx->name);
+ return -EINVAL;
+ }
+
+ if (param->size > ctx->ipc_buf->size) {
+ dev_err(delta->dev,
+ "%s ipc: failed to set stream, too large ipc parameter(%d bytes while max %d expected)\n",
+ pctx->name,
+ param->size, ctx->ipc_buf->size);
+ return -EINVAL;
+ }
+
+ if (!is_valid_data(ctx, param->data, param->size)) {
+ dev_err(delta->dev,
+ "%s ipc: failed to set stream, parameter is not in expected address range (size=%d, data=%p not in %p..%p)\n",
+ pctx->name,
+ param->size,
+ param->data,
+ ctx->ipc_buf->vaddr,
+ ctx->ipc_buf->vaddr + ctx->ipc_buf->size - 1);
+ return -EINVAL;
+ }
+
+ /* build rpmsg message */
+ build_msg_header(ctx, DELTA_IPC_SET_STREAM, &msg.header);
+
+ msg.param_size = param->size;
+ msg.param_paddr = to_paddr(ctx, param->data);
+
+ /* send it */
+ ret = rpmsg_send(rpmsg_device->ept, &msg, sizeof(msg));
+ if (ret) {
+ dev_err(delta->dev,
+ "%s ipc: failed to set stream, rpmsg_send failed (%d) for DELTA_IPC_SET_STREAM (size=%d, data=%p)\n",
+ pctx->name,
+ ret, param->size, param->data);
+ pctx->sys_errors++;
+ return ret;
+ }
+
+ /* wait for acknowledge */
+ if (!wait_for_completion_timeout
+ (&ctx->done, msecs_to_jiffies(IPC_TIMEOUT))) {
+ dev_err(delta->dev,
+ "%s ipc: failed to set stream, timeout waiting for DELTA_IPC_SET_STREAM callback (size=%d, data=%p)\n",
+ pctx->name,
+ param->size, param->data);
+ pctx->sys_errors++;
+ return -ETIMEDOUT;
+ }
+
+ /* command completed, check status */
+ if (ctx->cb_err) {
+ dev_err(delta->dev,
+ "%s ipc: failed to set stream, DELTA_IPC_SET_STREAM completed but with error (%d) (size=%d, data=%p)\n",
+ pctx->name,
+ ctx->cb_err, param->size, param->data);
+ pctx->sys_errors++;
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int delta_ipc_decode(void *hdl, struct delta_ipc_param *param,
+ struct delta_ipc_param *status)
+{
+ struct delta_ipc_ctx *ctx = to_ctx(hdl);
+ struct delta_ctx *pctx = to_pctx(ctx);
+ struct delta_dev *delta = pctx->dev;
+ struct rpmsg_device *rpmsg_device = delta->rpmsg_device;
+ struct delta_ipc_decode_msg msg;
+ int ret;
+
+ if (!hdl) {
+ dev_err(delta->dev,
+ "%s ipc: failed to decode, invalid ipc handle\n",
+ pctx->name);
+ return -EINVAL;
+ }
+
+ if (!rpmsg_device) {
+ dev_err(delta->dev,
+ "%s ipc: failed to decode, rpmsg is not initialized\n",
+ pctx->name);
+ return -EINVAL;
+ }
+
+ if (!param || !param->data || !param->size) {
+ dev_err(delta->dev,
+ "%s ipc: failed to decode, empty parameter\n",
+ pctx->name);
+ return -EINVAL;
+ }
+
+ if (!status || !status->data || !status->size) {
+ dev_err(delta->dev,
+ "%s ipc: failed to decode, empty status\n",
+ pctx->name);
+ return -EINVAL;
+ }
+
+ if (param->size + status->size > ctx->ipc_buf->size) {
+ dev_err(delta->dev,
+ "%s ipc: failed to decode, too large ipc parameter (%d bytes (param) + %d bytes (status) while max %d expected)\n",
+ pctx->name,
+ param->size,
+ status->size,
+ ctx->ipc_buf->size);
+ return -EINVAL;
+ }
+
+ if (!is_valid_data(ctx, param->data, param->size)) {
+ dev_err(delta->dev,
+ "%s ipc: failed to decode, parameter is not in expected address range (size=%d, data=%p not in %p..%p)\n",
+ pctx->name,
+ param->size,
+ param->data,
+ ctx->ipc_buf->vaddr,
+ ctx->ipc_buf->vaddr + ctx->ipc_buf->size - 1);
+ return -EINVAL;
+ }
+
+ if (!is_valid_data(ctx, status->data, status->size)) {
+ dev_err(delta->dev,
+ "%s ipc: failed to decode, status is not in expected address range (size=%d, data=%p not in %p..%p)\n",
+ pctx->name,
+ status->size,
+ status->data,
+ ctx->ipc_buf->vaddr,
+ ctx->ipc_buf->vaddr + ctx->ipc_buf->size - 1);
+ return -EINVAL;
+ }
+
+ /* build rpmsg message */
+ build_msg_header(ctx, DELTA_IPC_DECODE, &msg.header);
+
+ msg.param_size = param->size;
+ msg.param_paddr = to_paddr(ctx, param->data);
+
+ msg.status_size = status->size;
+ msg.status_paddr = to_paddr(ctx, status->data);
+
+ /* send it */
+ ret = rpmsg_send(rpmsg_device->ept, &msg, sizeof(msg));
+ if (ret) {
+ dev_err(delta->dev,
+ "%s ipc: failed to decode, rpmsg_send failed (%d) for DELTA_IPC_DECODE (size=%d, data=%p)\n",
+ pctx->name,
+ ret, param->size, param->data);
+ pctx->sys_errors++;
+ return ret;
+ }
+
+ /* wait for acknowledge */
+ if (!wait_for_completion_timeout
+ (&ctx->done, msecs_to_jiffies(IPC_TIMEOUT))) {
+ dev_err(delta->dev,
+ "%s ipc: failed to decode, timeout waiting for DELTA_IPC_DECODE callback (size=%d, data=%p)\n",
+ pctx->name,
+ param->size, param->data);
+ pctx->sys_errors++;
+ return -ETIMEDOUT;
+ }
+
+ /* command completed, check status */
+ if (ctx->cb_err) {
+ dev_err(delta->dev,
+ "%s ipc: failed to decode, DELTA_IPC_DECODE completed but with error (%d) (size=%d, data=%p)\n",
+ pctx->name,
+ ctx->cb_err, param->size, param->data);
+ pctx->sys_errors++;
+ return -EIO;
+ }
+
+ return 0;
+}
+
+void delta_ipc_close(void *hdl)
+{
+ struct delta_ipc_ctx *ctx = to_ctx(hdl);
+ struct delta_ctx *pctx = to_pctx(ctx);
+ struct delta_dev *delta = pctx->dev;
+ struct rpmsg_device *rpmsg_device = delta->rpmsg_device;
+ struct delta_ipc_close_msg msg;
+ int ret;
+
+ if (!hdl) {
+ dev_err(delta->dev,
+ "%s ipc: failed to close, invalid ipc handle\n",
+ pctx->name);
+ return;
+ }
+
+ if (ctx->ipc_buf) {
+ hw_free(pctx, ctx->ipc_buf);
+ ctx->ipc_buf = NULL;
+ }
+
+ if (!rpmsg_device) {
+ dev_err(delta->dev,
+ "%s ipc: failed to close, rpmsg is not initialized\n",
+ pctx->name);
+ return;
+ }
+
+ /* build rpmsg message */
+ build_msg_header(ctx, DELTA_IPC_CLOSE, &msg.header);
+
+ /* send it */
+ ret = rpmsg_send(rpmsg_device->ept, &msg, sizeof(msg));
+ if (ret) {
+ dev_err(delta->dev,
+ "%s ipc: failed to close, rpmsg_send failed (%d) for DELTA_IPC_CLOSE\n",
+ pctx->name, ret);
+ pctx->sys_errors++;
+ return;
+ }
+
+ /* wait for acknowledge */
+ if (!wait_for_completion_timeout
+ (&ctx->done, msecs_to_jiffies(IPC_TIMEOUT))) {
+ dev_err(delta->dev,
+ "%s ipc: failed to close, timeout waiting for DELTA_IPC_CLOSE callback\n",
+ pctx->name);
+ pctx->sys_errors++;
+ return;
+ }
+
+ /* command completed, check status */
+ if (ctx->cb_err) {
+ dev_err(delta->dev,
+ "%s ipc: failed to close, DELTA_IPC_CLOSE completed but with error (%d)\n",
+ pctx->name, ctx->cb_err);
+ pctx->sys_errors++;
+ }
+}
+
+static int delta_ipc_cb(struct rpmsg_device *rpdev, void *data,
+ int len, void *priv, u32 src)
+{
+ struct delta_ipc_ctx *ctx;
+ struct delta_ipc_cb_msg *msg;
+
+ /* sanity check */
+ if (!rpdev) {
+ dev_err(NULL, "rpdev is NULL\n");
+ return -EINVAL;
+ }
+
+ if (!data || !len) {
+ dev_err(&rpdev->dev,
+ "unexpected empty message received from src=%d\n", src);
+ return -EINVAL;
+ }
+
+ if (len != sizeof(*msg)) {
+ dev_err(&rpdev->dev,
+ "unexpected message length received from src=%d (received %d bytes while %zu bytes expected)\n",
+ src, len, sizeof(*msg));
+ return -EINVAL;
+ }
+
+ msg = (struct delta_ipc_cb_msg *)data;
+ if (msg->header.tag != IPC_SANITY_TAG) {
+ dev_err(&rpdev->dev,
+ "unexpected message tag received from src=%d (received %x tag while %x expected)\n",
+ src, msg->header.tag, IPC_SANITY_TAG);
+ return -EINVAL;
+ }
+
+ ctx = msg_to_ctx(msg);
+ if (!ctx) {
+ dev_err(&rpdev->dev,
+ "unexpected message with NULL host_hdl received from src=%d\n",
+ src);
+ return -EINVAL;
+ }
+
+ /*
+ * if not already known, save copro instance context
+ * to ensure re-entrance on copro side
+ */
+ if (!ctx->copro_hdl)
+ ctx->copro_hdl = msg_to_copro_hdl(msg);
+
+ /*
+ * all is fine,
+ * update status & complete command
+ */
+ ctx->cb_err = msg->err;
+ complete(&ctx->done);
+
+ return 0;
+}
+
+static int delta_ipc_probe(struct rpmsg_device *rpmsg_device)
+{
+ struct rpmsg_driver *rpdrv = to_rpmsg_driver(rpmsg_device->dev.driver);
+ struct delta_dev *delta = to_delta(rpdrv);
+
+ delta->rpmsg_device = rpmsg_device;
+
+ return 0;
+}
+
+static void delta_ipc_remove(struct rpmsg_device *rpmsg_device)
+{
+ struct rpmsg_driver *rpdrv = to_rpmsg_driver(rpmsg_device->dev.driver);
+ struct delta_dev *delta = to_delta(rpdrv);
+
+ delta->rpmsg_device = NULL;
+}
+
+static struct rpmsg_device_id delta_ipc_device_id_table[] = {
+ {.name = "rpmsg-delta"},
+ {},
+};
+
+static struct rpmsg_driver delta_rpmsg_driver = {
+ .drv = {.name = KBUILD_MODNAME},
+ .id_table = delta_ipc_device_id_table,
+ .probe = delta_ipc_probe,
+ .callback = delta_ipc_cb,
+ .remove = delta_ipc_remove,
+};
+
+int delta_ipc_init(struct delta_dev *delta)
+{
+ delta->rpmsg_driver = delta_rpmsg_driver;
+
+ return register_rpmsg_driver(&delta->rpmsg_driver);
+}
+
+void delta_ipc_exit(struct delta_dev *delta)
+{
+ unregister_rpmsg_driver(&delta->rpmsg_driver);
+}
diff --git a/drivers/media/platform/st/sti/delta/delta-ipc.h b/drivers/media/platform/st/sti/delta/delta-ipc.h
new file mode 100644
index 000000000..9fba6b5d1
--- /dev/null
+++ b/drivers/media/platform/st/sti/delta/delta-ipc.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Author: Hugues Fruchet <hugues.fruchet@st.com> for STMicroelectronics.
+ */
+
+#ifndef DELTA_IPC_H
+#define DELTA_IPC_H
+
+int delta_ipc_init(struct delta_dev *delta);
+void delta_ipc_exit(struct delta_dev *delta);
+
+/*
+ * delta_ipc_open - open a decoding instance on firmware side
+ * @ctx: (in) delta context
+ * @name: (in) name of decoder to be used
+ * @param: (in) open command parameters specific to decoder
+ * @param.size: (in) size of parameter
+ * @param.data: (in) virtual address of parameter
+ * @ipc_buf_size: (in) size of IPC shared buffer between host
+ * and copro used to share command data.
+ * The client has to set here the size of the biggest
+ * command parameters (+ status if any).
+ * Allocation is done in this function, which
+ * gives back to the client in @ipc_buf the virtual
+ * & physical addresses & size of the shared IPC buffer.
+ * All further command data (parameters + status)
+ * have to be written in this shared IPC buffer
+ * virtual memory. This is done to avoid
+ * unnecessary copies of command data.
+ * @ipc_buf: (out) allocated IPC shared buffer
+ * @ipc_buf.size: (out) allocated size
+ * @ipc_buf.vaddr: (out) virtual address where to copy
+ * further command data
+ * @hdl: (out) handle of decoding instance.
+ */
+
+int delta_ipc_open(struct delta_ctx *ctx, const char *name,
+ struct delta_ipc_param *param, u32 ipc_buf_size,
+ struct delta_buf **ipc_buf, void **hdl);
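+
+/*
+ * Open example: a minimal sketch based on the MJPEG decoder usage in
+ * this series, where "pctx", "params" and "ipc_buf_size" are the
+ * caller's decoder-specific values:
+ *
+ *   struct delta_ipc_param p = { .size = sizeof(*params), .data = params };
+ *   struct delta_buf *ipc_buf;
+ *   void *hdl;
+ *
+ *   ret = delta_ipc_open(pctx, "JPEG_DECODER_HW0", &p,
+ *                        ipc_buf_size, &ipc_buf, &hdl);
+ */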
+
+/*
+ * delta_ipc_set_stream - set information about stream to decoder
+ * @hdl: (in) handle of decoding instance.
+ * @param: (in) set stream command parameters specific to decoder
+ * @param.size: (in) size of parameter
+ * @param.data: (in) virtual address of parameter. Must be
+ * within IPC shared buffer range
+ */
+int delta_ipc_set_stream(void *hdl, struct delta_ipc_param *param);
+
+/*
+ * delta_ipc_decode - frame decoding synchronous request, returns only
+ * after decoding completion on firmware side.
+ * @hdl: (in) handle of decoding instance.
+ * @param: (in) decode command parameters specific to decoder
+ * @param.size: (in) size of parameter
+ * @param.data: (in) virtual address of parameter. Must be
+ * within IPC shared buffer range
+ * @status: (in/out) decode command status specific to decoder
+ * @status.size: (in) size of status
+ * @status.data: (in/out) virtual address of status. Must be
+ * within IPC shared buffer range.
+ * Status is filled by decoding instance
+ * after decoding completion.
+ */
+int delta_ipc_decode(void *hdl, struct delta_ipc_param *param,
+ struct delta_ipc_param *status);
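+
+/*
+ * Decode example: a minimal sketch following the MJPEG decoder usage in
+ * this series; both parameter and status data must point inside the IPC
+ * shared buffer returned by delta_ipc_open():
+ *
+ *   params = ipc_buf->vaddr;
+ *   status = ipc_buf->vaddr + sizeof(*params);
+ *   ... fill params and preset status ...
+ *   ipc_param.size = sizeof(*params);
+ *   ipc_param.data = params;
+ *   ipc_status.size = sizeof(*status);
+ *   ipc_status.data = status;
+ *   ret = delta_ipc_decode(hdl, &ipc_param, &ipc_status);
+ */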
+
+/*
+ * delta_ipc_close - close decoding instance
+ * @hdl: (in) handle of decoding instance to close.
+ */
+void delta_ipc_close(void *hdl);
+
+#endif /* DELTA_IPC_H */
diff --git a/drivers/media/platform/st/sti/delta/delta-mem.c b/drivers/media/platform/st/sti/delta/delta-mem.c
new file mode 100644
index 000000000..aeccd5058
--- /dev/null
+++ b/drivers/media/platform/st/sti/delta/delta-mem.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Author: Hugues Fruchet <hugues.fruchet@st.com> for STMicroelectronics.
+ */
+
+#include "delta.h"
+#include "delta-mem.h"
+
+int hw_alloc(struct delta_ctx *ctx, u32 size, const char *name,
+ struct delta_buf *buf)
+{
+ struct delta_dev *delta = ctx->dev;
+ dma_addr_t dma_addr;
+ void *addr;
+ unsigned long attrs = DMA_ATTR_WRITE_COMBINE;
+
+ addr = dma_alloc_attrs(delta->dev, size, &dma_addr,
+ GFP_KERNEL | __GFP_NOWARN, attrs);
+ if (!addr) {
+ dev_err(delta->dev,
+ "%s hw_alloc:dma_alloc_coherent failed for %s (size=%d)\n",
+ ctx->name, name, size);
+ ctx->sys_errors++;
+ return -ENOMEM;
+ }
+
+ buf->size = size;
+ buf->paddr = dma_addr;
+ buf->vaddr = addr;
+ buf->name = name;
+ buf->attrs = attrs;
+
+ dev_dbg(delta->dev,
+ "%s allocate %d bytes of HW memory @(virt=0x%p, phy=0x%pad): %s\n",
+ ctx->name, size, buf->vaddr, &buf->paddr, buf->name);
+
+ return 0;
+}
+
+void hw_free(struct delta_ctx *ctx, struct delta_buf *buf)
+{
+ struct delta_dev *delta = ctx->dev;
+
+ dev_dbg(delta->dev,
+ "%s free %d bytes of HW memory @(virt=0x%p, phy=0x%pad): %s\n",
+ ctx->name, buf->size, buf->vaddr, &buf->paddr, buf->name);
+
+ dma_free_attrs(delta->dev, buf->size,
+ buf->vaddr, buf->paddr, buf->attrs);
+}
diff --git a/drivers/media/platform/st/sti/delta/delta-mem.h b/drivers/media/platform/st/sti/delta/delta-mem.h
new file mode 100644
index 000000000..ff7d02f00
--- /dev/null
+++ b/drivers/media/platform/st/sti/delta/delta-mem.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Author: Hugues Fruchet <hugues.fruchet@st.com> for STMicroelectronics.
+ */
+
+#ifndef DELTA_MEM_H
+#define DELTA_MEM_H
+
+int hw_alloc(struct delta_ctx *ctx, u32 size, const char *name,
+ struct delta_buf *buf);
+void hw_free(struct delta_ctx *ctx, struct delta_buf *buf);
+
+#endif /* DELTA_MEM_H */
diff --git a/drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c b/drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c
new file mode 100644
index 000000000..0533d4a08
--- /dev/null
+++ b/drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c
@@ -0,0 +1,455 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics SA 2013
+ * Author: Hugues Fruchet <hugues.fruchet@st.com> for STMicroelectronics.
+ */
+
+#include <linux/slab.h>
+
+#include "delta.h"
+#include "delta-ipc.h"
+#include "delta-mjpeg.h"
+#include "delta-mjpeg-fw.h"
+
+#define DELTA_MJPEG_MAX_RESO DELTA_MAX_RESO
+
+struct delta_mjpeg_ctx {
+ /* jpeg header */
+ struct mjpeg_header header_struct;
+ struct mjpeg_header *header;
+
+ /* ipc */
+ void *ipc_hdl;
+ struct delta_buf *ipc_buf;
+
+ /* decoded output frame */
+ struct delta_frame *out_frame;
+
+ unsigned char str[3000];
+};
+
+#define to_ctx(ctx) ((struct delta_mjpeg_ctx *)(ctx)->priv)
+
+static char *ipc_open_param_str(struct jpeg_video_decode_init_params_t *p,
+ char *str, unsigned int len)
+{
+ char *b = str;
+
+ if (!p)
+ return "";
+
+ b += snprintf(b, len,
+ "jpeg_video_decode_init_params_t\n"
+ "circular_buffer_begin_addr_p 0x%x\n"
+ "circular_buffer_end_addr_p 0x%x\n",
+ p->circular_buffer_begin_addr_p,
+ p->circular_buffer_end_addr_p);
+
+ return str;
+}
+
+static char *ipc_decode_param_str(struct jpeg_decode_params_t *p,
+ char *str, unsigned int len)
+{
+ char *b = str;
+
+ if (!p)
+ return "";
+
+ b += snprintf(b, len,
+ "jpeg_decode_params_t\n"
+ "picture_start_addr_p 0x%x\n"
+ "picture_end_addr_p 0x%x\n"
+ "decoding_mode %d\n"
+ "display_buffer_addr.display_decimated_luma_p 0x%x\n"
+ "display_buffer_addr.display_decimated_chroma_p 0x%x\n"
+ "main_aux_enable %d\n"
+ "additional_flags 0x%x\n"
+ "field_flag %x\n"
+ "is_jpeg_image %x\n",
+ p->picture_start_addr_p,
+ p->picture_end_addr_p,
+ p->decoding_mode,
+ p->display_buffer_addr.display_decimated_luma_p,
+ p->display_buffer_addr.display_decimated_chroma_p,
+ p->main_aux_enable, p->additional_flags,
+ p->field_flag,
+ p->is_jpeg_image);
+
+ return str;
+}
+
+static inline bool is_stream_error(enum jpeg_decoding_error_t err)
+{
+ switch (err) {
+ case JPEG_DECODER_UNDEFINED_HUFF_TABLE:
+ case JPEG_DECODER_BAD_RESTART_MARKER:
+ case JPEG_DECODER_BAD_SOS_SPECTRAL:
+ case JPEG_DECODER_BAD_SOS_SUCCESSIVE:
+ case JPEG_DECODER_BAD_HEADER_LENGTH:
+ case JPEG_DECODER_BAD_COUNT_VALUE:
+ case JPEG_DECODER_BAD_DHT_MARKER:
+ case JPEG_DECODER_BAD_INDEX_VALUE:
+ case JPEG_DECODER_BAD_NUMBER_HUFFMAN_TABLES:
+ case JPEG_DECODER_BAD_QUANT_TABLE_LENGTH:
+ case JPEG_DECODER_BAD_NUMBER_QUANT_TABLES:
+ case JPEG_DECODER_BAD_COMPONENT_COUNT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline const char *err_str(enum jpeg_decoding_error_t err)
+{
+ switch (err) {
+ case JPEG_DECODER_NO_ERROR:
+ return "JPEG_DECODER_NO_ERROR";
+ case JPEG_DECODER_UNDEFINED_HUFF_TABLE:
+ return "JPEG_DECODER_UNDEFINED_HUFF_TABLE";
+ case JPEG_DECODER_UNSUPPORTED_MARKER:
+ return "JPEG_DECODER_UNSUPPORTED_MARKER";
+ case JPEG_DECODER_UNABLE_ALLOCATE_MEMORY:
+ return "JPEG_DECODER_UNABLE_ALLOCATE_MEMORY";
+ case JPEG_DECODER_NON_SUPPORTED_SAMP_FACTORS:
+ return "JPEG_DECODER_NON_SUPPORTED_SAMP_FACTORS";
+ case JPEG_DECODER_BAD_PARAMETER:
+ return "JPEG_DECODER_BAD_PARAMETER";
+ case JPEG_DECODER_DECODE_ERROR:
+ return "JPEG_DECODER_DECODE_ERROR";
+ case JPEG_DECODER_BAD_RESTART_MARKER:
+ return "JPEG_DECODER_BAD_RESTART_MARKER";
+ case JPEG_DECODER_UNSUPPORTED_COLORSPACE:
+ return "JPEG_DECODER_UNSUPPORTED_COLORSPACE";
+ case JPEG_DECODER_BAD_SOS_SPECTRAL:
+ return "JPEG_DECODER_BAD_SOS_SPECTRAL";
+ case JPEG_DECODER_BAD_SOS_SUCCESSIVE:
+ return "JPEG_DECODER_BAD_SOS_SUCCESSIVE";
+ case JPEG_DECODER_BAD_HEADER_LENGTH:
+ return "JPEG_DECODER_BAD_HEADER_LENGTH";
+ case JPEG_DECODER_BAD_COUNT_VALUE:
+ return "JPEG_DECODER_BAD_COUNT_VALUE";
+ case JPEG_DECODER_BAD_DHT_MARKER:
+ return "JPEG_DECODER_BAD_DHT_MARKER";
+ case JPEG_DECODER_BAD_INDEX_VALUE:
+ return "JPEG_DECODER_BAD_INDEX_VALUE";
+ case JPEG_DECODER_BAD_NUMBER_HUFFMAN_TABLES:
+ return "JPEG_DECODER_BAD_NUMBER_HUFFMAN_TABLES";
+ case JPEG_DECODER_BAD_QUANT_TABLE_LENGTH:
+ return "JPEG_DECODER_BAD_QUANT_TABLE_LENGTH";
+ case JPEG_DECODER_BAD_NUMBER_QUANT_TABLES:
+ return "JPEG_DECODER_BAD_NUMBER_QUANT_TABLES";
+ case JPEG_DECODER_BAD_COMPONENT_COUNT:
+ return "JPEG_DECODER_BAD_COMPONENT_COUNT";
+ case JPEG_DECODER_DIVIDE_BY_ZERO_ERROR:
+ return "JPEG_DECODER_DIVIDE_BY_ZERO_ERROR";
+ case JPEG_DECODER_NOT_JPG_IMAGE:
+ return "JPEG_DECODER_NOT_JPG_IMAGE";
+ case JPEG_DECODER_UNSUPPORTED_ROTATION_ANGLE:
+ return "JPEG_DECODER_UNSUPPORTED_ROTATION_ANGLE";
+ case JPEG_DECODER_UNSUPPORTED_SCALING:
+ return "JPEG_DECODER_UNSUPPORTED_SCALING";
+ case JPEG_DECODER_INSUFFICIENT_OUTPUTBUFFER_SIZE:
+ return "JPEG_DECODER_INSUFFICIENT_OUTPUTBUFFER_SIZE";
+ case JPEG_DECODER_BAD_HWCFG_GP_VERSION_VALUE:
+ return "JPEG_DECODER_BAD_HWCFG_GP_VERSION_VALUE";
+ case JPEG_DECODER_BAD_VALUE_FROM_RED:
+ return "JPEG_DECODER_BAD_VALUE_FROM_RED";
+ case JPEG_DECODER_BAD_SUBREGION_PARAMETERS:
+ return "JPEG_DECODER_BAD_SUBREGION_PARAMETERS";
+ case JPEG_DECODER_PROGRESSIVE_DECODE_NOT_SUPPORTED:
+ return "JPEG_DECODER_PROGRESSIVE_DECODE_NOT_SUPPORTED";
+ case JPEG_DECODER_ERROR_TASK_TIMEOUT:
+ return "JPEG_DECODER_ERROR_TASK_TIMEOUT";
+ case JPEG_DECODER_ERROR_FEATURE_NOT_SUPPORTED:
+ return "JPEG_DECODER_ERROR_FEATURE_NOT_SUPPORTED";
+ default:
+ return "!unknown MJPEG error!";
+ }
+}
+
+static bool delta_mjpeg_check_status(struct delta_ctx *pctx,
+ struct jpeg_decode_return_params_t *status)
+{
+ struct delta_dev *delta = pctx->dev;
+ bool dump = false;
+
+ if (status->error_code == JPEG_DECODER_NO_ERROR)
+ goto out;
+
+ if (is_stream_error(status->error_code)) {
+ dev_warn_ratelimited(delta->dev,
+ "%s firmware: stream error @ frame %d (%s)\n",
+ pctx->name, pctx->decoded_frames,
+ err_str(status->error_code));
+ pctx->stream_errors++;
+ } else {
+ dev_warn_ratelimited(delta->dev,
+ "%s firmware: decode error @ frame %d (%s)\n",
+ pctx->name, pctx->decoded_frames,
+ err_str(status->error_code));
+ pctx->decode_errors++;
+ dump = true;
+ }
+
+out:
+ dev_dbg(delta->dev,
+ "%s firmware: decoding time(us)=%d\n", pctx->name,
+ status->decode_time_in_us);
+
+ return dump;
+}
+
+static int delta_mjpeg_ipc_open(struct delta_ctx *pctx)
+{
+ struct delta_dev *delta = pctx->dev;
+ struct delta_mjpeg_ctx *ctx = to_ctx(pctx);
+ int ret = 0;
+ struct jpeg_video_decode_init_params_t params_struct;
+ struct jpeg_video_decode_init_params_t *params = &params_struct;
+ struct delta_buf *ipc_buf;
+ u32 ipc_buf_size;
+ struct delta_ipc_param ipc_param;
+ void *hdl;
+
+ memset(params, 0, sizeof(*params));
+ params->circular_buffer_begin_addr_p = 0x00000000;
+ params->circular_buffer_end_addr_p = 0xffffffff;
+
+ dev_vdbg(delta->dev,
+ "%s %s\n", pctx->name,
+ ipc_open_param_str(params, ctx->str, sizeof(ctx->str)));
+
+ ipc_param.size = sizeof(*params);
+ ipc_param.data = params;
+ ipc_buf_size = sizeof(struct jpeg_decode_params_t) +
+ sizeof(struct jpeg_decode_return_params_t);
+ ret = delta_ipc_open(pctx, "JPEG_DECODER_HW0", &ipc_param,
+ ipc_buf_size, &ipc_buf, &hdl);
+ if (ret) {
+ dev_err(delta->dev,
+ "%s dumping command %s\n", pctx->name,
+ ipc_open_param_str(params, ctx->str, sizeof(ctx->str)));
+ return ret;
+ }
+
+ ctx->ipc_buf = ipc_buf;
+ ctx->ipc_hdl = hdl;
+
+ return 0;
+}
+
+static int delta_mjpeg_ipc_decode(struct delta_ctx *pctx, struct delta_au *au)
+{
+ struct delta_dev *delta = pctx->dev;
+ struct delta_mjpeg_ctx *ctx = to_ctx(pctx);
+ int ret = 0;
+ struct jpeg_decode_params_t *params = ctx->ipc_buf->vaddr;
+ struct jpeg_decode_return_params_t *status =
+ ctx->ipc_buf->vaddr + sizeof(*params);
+ struct delta_frame *frame;
+ struct delta_ipc_param ipc_param, ipc_status;
+
+ ret = delta_get_free_frame(pctx, &frame);
+ if (ret)
+ return ret;
+
+ memset(params, 0, sizeof(*params));
+
+ params->picture_start_addr_p = (u32)(au->paddr);
+ params->picture_end_addr_p = (u32)(au->paddr + au->size - 1);
+
+ /*
+ * !WARNING!
+ * the NV12 decoded frame is only available
+ * on decimated output when enabling flag
+ * "JPEG_ADDITIONAL_FLAG_420MB"...
+ * the non decimated output gives YUV422SP
+ */
+ params->main_aux_enable = JPEG_DISP_AUX_EN;
+ params->additional_flags = JPEG_ADDITIONAL_FLAG_420MB;
+ params->horizontal_decimation_factor = JPEG_HDEC_1;
+ params->vertical_decimation_factor = JPEG_VDEC_1;
+ params->decoding_mode = JPEG_NORMAL_DECODE;
+
+ params->display_buffer_addr.struct_size =
+ sizeof(struct jpeg_display_buffer_address_t);
+ params->display_buffer_addr.display_decimated_luma_p =
+ (u32)frame->paddr;
+ params->display_buffer_addr.display_decimated_chroma_p =
+ (u32)(frame->paddr
+ + frame->info.aligned_width * frame->info.aligned_height);
+
+ dev_vdbg(delta->dev,
+ "%s %s\n", pctx->name,
+ ipc_decode_param_str(params, ctx->str, sizeof(ctx->str)));
+
+ /* status */
+ memset(status, 0, sizeof(*status));
+ status->error_code = JPEG_DECODER_NO_ERROR;
+
+ ipc_param.size = sizeof(*params);
+ ipc_param.data = params;
+ ipc_status.size = sizeof(*status);
+ ipc_status.data = status;
+ ret = delta_ipc_decode(ctx->ipc_hdl, &ipc_param, &ipc_status);
+ if (ret) {
+ dev_err(delta->dev,
+ "%s dumping command %s\n", pctx->name,
+ ipc_decode_param_str(params, ctx->str,
+ sizeof(ctx->str)));
+ return ret;
+ }
+
+ pctx->decoded_frames++;
+
+ /* check firmware decoding status */
+ if (delta_mjpeg_check_status(pctx, status)) {
+ dev_err(delta->dev,
+ "%s dumping command %s\n", pctx->name,
+ ipc_decode_param_str(params, ctx->str,
+ sizeof(ctx->str)));
+ }
+
+ frame->field = V4L2_FIELD_NONE;
+ frame->flags = V4L2_BUF_FLAG_KEYFRAME;
+ frame->state |= DELTA_FRAME_DEC;
+
+ ctx->out_frame = frame;
+
+ return 0;
+}
+
+static int delta_mjpeg_open(struct delta_ctx *pctx)
+{
+ struct delta_mjpeg_ctx *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ pctx->priv = ctx;
+
+ return 0;
+}
+
+static int delta_mjpeg_close(struct delta_ctx *pctx)
+{
+ struct delta_mjpeg_ctx *ctx = to_ctx(pctx);
+
+ if (ctx->ipc_hdl) {
+ delta_ipc_close(ctx->ipc_hdl);
+ ctx->ipc_hdl = NULL;
+ }
+
+ kfree(ctx);
+
+ return 0;
+}
+
+static int delta_mjpeg_get_streaminfo(struct delta_ctx *pctx,
+ struct delta_streaminfo *streaminfo)
+{
+ struct delta_mjpeg_ctx *ctx = to_ctx(pctx);
+
+ if (!ctx->header)
+ goto nodata;
+
+ streaminfo->streamformat = V4L2_PIX_FMT_MJPEG;
+ streaminfo->width = ctx->header->frame_width;
+ streaminfo->height = ctx->header->frame_height;
+
+ /* progressive stream */
+ streaminfo->field = V4L2_FIELD_NONE;
+
+ streaminfo->dpb = 1;
+
+ return 0;
+
+nodata:
+ return -ENODATA;
+}
+
+static int delta_mjpeg_decode(struct delta_ctx *pctx, struct delta_au *pau)
+{
+ struct delta_dev *delta = pctx->dev;
+ struct delta_mjpeg_ctx *ctx = to_ctx(pctx);
+ int ret;
+ struct delta_au au = *pau;
+ unsigned int data_offset = 0;
+ struct mjpeg_header *header = &ctx->header_struct;
+
+ if (!ctx->header) {
+ ret = delta_mjpeg_read_header(pctx, au.vaddr, au.size,
+ header, &data_offset);
+ if (ret) {
+ pctx->stream_errors++;
+ goto err;
+ }
+ if (header->frame_width * header->frame_height >
+ DELTA_MJPEG_MAX_RESO) {
+ dev_err(delta->dev,
+ "%s stream resolution too large: %dx%d > %d pixels budget\n",
+ pctx->name,
+ header->frame_width,
+ header->frame_height, DELTA_MJPEG_MAX_RESO);
+ ret = -EINVAL;
+ goto err;
+ }
+ ctx->header = header;
+ goto out;
+ }
+
+ if (!ctx->ipc_hdl) {
+ ret = delta_mjpeg_ipc_open(pctx);
+ if (ret)
+ goto err;
+ }
+
+ ret = delta_mjpeg_read_header(pctx, au.vaddr, au.size,
+ ctx->header, &data_offset);
+ if (ret) {
+ pctx->stream_errors++;
+ goto err;
+ }
+
+ au.paddr += data_offset;
+ au.vaddr += data_offset;
+
+ ret = delta_mjpeg_ipc_decode(pctx, &au);
+ if (ret)
+ goto err;
+
+out:
+ return 0;
+
+err:
+ return ret;
+}
+
+static int delta_mjpeg_get_frame(struct delta_ctx *pctx,
+ struct delta_frame **frame)
+{
+ struct delta_mjpeg_ctx *ctx = to_ctx(pctx);
+
+ if (!ctx->out_frame)
+ return -ENODATA;
+
+ *frame = ctx->out_frame;
+
+ ctx->out_frame = NULL;
+
+ return 0;
+}
+
+const struct delta_dec mjpegdec = {
+ .name = "MJPEG",
+ .streamformat = V4L2_PIX_FMT_MJPEG,
+ .pixelformat = V4L2_PIX_FMT_NV12,
+ .open = delta_mjpeg_open,
+ .close = delta_mjpeg_close,
+ .get_streaminfo = delta_mjpeg_get_streaminfo,
+ .get_frameinfo = delta_get_frameinfo_default,
+ .decode = delta_mjpeg_decode,
+ .get_frame = delta_mjpeg_get_frame,
+ .recycle = delta_recycle_default,
+};
diff --git a/drivers/media/platform/st/sti/delta/delta-mjpeg-fw.h b/drivers/media/platform/st/sti/delta/delta-mjpeg-fw.h
new file mode 100644
index 000000000..5a9404f4d
--- /dev/null
+++ b/drivers/media/platform/st/sti/delta/delta-mjpeg-fw.h
@@ -0,0 +1,225 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Author: Hugues Fruchet <hugues.fruchet@st.com> for STMicroelectronics.
+ */
+
+#ifndef DELTA_MJPEG_FW_H
+#define DELTA_MJPEG_FW_H
+
+/*
+ * struct jpeg_decoded_buffer_address_t
+ *
+ * defines the addresses where the decoded picture/additional
+ * info related to the block structures will be stored
+ *
+ * @luma_p: address of the luma buffer
+ * @chroma_p: address of the chroma buffer
+ */
+struct jpeg_decoded_buffer_address_t {
+ u32 luma_p;
+ u32 chroma_p;
+};
+
+/*
+ * struct jpeg_display_buffer_address_t
+ *
+ * defines the addresses (used by the Display Reconstruction block)
+ * where the pictures to be displayed will be stored
+ *
+ * @struct_size: size of the structure in bytes
+ * @display_luma_p: address of the luma buffer
+ * @display_chroma_p: address of the chroma buffer
+ * @display_decimated_luma_p: address of the decimated luma buffer
+ * @display_decimated_chroma_p: address of the decimated chroma buffer
+ */
+struct jpeg_display_buffer_address_t {
+ u32 struct_size;
+ u32 display_luma_p;
+ u32 display_chroma_p;
+ u32 display_decimated_luma_p;
+ u32 display_decimated_chroma_p;
+};
+
+/*
+ * used for enabling main/aux outputs for both display &
+ * reference reconstruction blocks
+ */
+enum jpeg_rcn_ref_disp_enable_t {
+ /* enable decimated (for display) reconstruction */
+ JPEG_DISP_AUX_EN = 0x00000010,
+ /* enable main (for display) reconstruction */
+ JPEG_DISP_MAIN_EN = 0x00000020,
+ /* enable both main & decimated (for display) reconstruction */
+ JPEG_DISP_AUX_MAIN_EN = 0x00000030,
+ /* enable only reference output (e.g. for trick modes) */
+ JPEG_REF_MAIN_EN = 0x00000100,
+ /*
+ * enable reference output with decimated
+ * (for display) reconstruction
+ */
+ JPEG_REF_MAIN_DISP_AUX_EN = 0x00000110,
+ /*
+ * enable reference output with main
+ * (for display) reconstruction
+ */
+ JPEG_REF_MAIN_DISP_MAIN_EN = 0x00000120,
+ /*
+ * enable reference output with main & decimated
+ * (for display) reconstruction
+ */
+ JPEG_REF_MAIN_DISP_MAIN_AUX_EN = 0x00000130
+};
+
+/* identifies the horizontal decimation factor */
+enum jpeg_horizontal_deci_factor_t {
+ /* no resize */
+ JPEG_HDEC_1 = 0x00000000,
+ /* Advanced H/2 resize using improved 8-tap filters */
+ JPEG_HDEC_ADVANCED_2 = 0x00000101,
+ /* Advanced H/4 resize using improved 8-tap filters */
+ JPEG_HDEC_ADVANCED_4 = 0x00000102
+};
+
+/* identifies the vertical decimation factor */
+enum jpeg_vertical_deci_factor_t {
+ /* no resize */
+ JPEG_VDEC_1 = 0x00000000,
+ /* V/2 , progressive resize */
+ JPEG_VDEC_ADVANCED_2_PROG = 0x00000204,
+ /* V/2 , interlaced resize */
+ JPEG_VDEC_ADVANCED_2_INT = 0x00000208
+};
+
+/* status of the decoding process */
+enum jpeg_decoding_error_t {
+ JPEG_DECODER_NO_ERROR = 0,
+ JPEG_DECODER_UNDEFINED_HUFF_TABLE = 1,
+ JPEG_DECODER_UNSUPPORTED_MARKER = 2,
+ JPEG_DECODER_UNABLE_ALLOCATE_MEMORY = 3,
+ JPEG_DECODER_NON_SUPPORTED_SAMP_FACTORS = 4,
+ JPEG_DECODER_BAD_PARAMETER = 5,
+ JPEG_DECODER_DECODE_ERROR = 6,
+ JPEG_DECODER_BAD_RESTART_MARKER = 7,
+ JPEG_DECODER_UNSUPPORTED_COLORSPACE = 8,
+ JPEG_DECODER_BAD_SOS_SPECTRAL = 9,
+ JPEG_DECODER_BAD_SOS_SUCCESSIVE = 10,
+ JPEG_DECODER_BAD_HEADER_LENGTH = 11,
+ JPEG_DECODER_BAD_COUNT_VALUE = 12,
+ JPEG_DECODER_BAD_DHT_MARKER = 13,
+ JPEG_DECODER_BAD_INDEX_VALUE = 14,
+ JPEG_DECODER_BAD_NUMBER_HUFFMAN_TABLES = 15,
+ JPEG_DECODER_BAD_QUANT_TABLE_LENGTH = 16,
+ JPEG_DECODER_BAD_NUMBER_QUANT_TABLES = 17,
+ JPEG_DECODER_BAD_COMPONENT_COUNT = 18,
+ JPEG_DECODER_DIVIDE_BY_ZERO_ERROR = 19,
+ JPEG_DECODER_NOT_JPG_IMAGE = 20,
+ JPEG_DECODER_UNSUPPORTED_ROTATION_ANGLE = 21,
+ JPEG_DECODER_UNSUPPORTED_SCALING = 22,
+ JPEG_DECODER_INSUFFICIENT_OUTPUTBUFFER_SIZE = 23,
+ JPEG_DECODER_BAD_HWCFG_GP_VERSION_VALUE = 24,
+ JPEG_DECODER_BAD_VALUE_FROM_RED = 25,
+ JPEG_DECODER_BAD_SUBREGION_PARAMETERS = 26,
+ JPEG_DECODER_PROGRESSIVE_DECODE_NOT_SUPPORTED = 27,
+ JPEG_DECODER_ERROR_TASK_TIMEOUT = 28,
+ JPEG_DECODER_ERROR_FEATURE_NOT_SUPPORTED = 29
+};
+
+/* identifies the decoding mode */
+enum jpeg_decoding_mode_t {
+ JPEG_NORMAL_DECODE = 0,
+};
+
+enum jpeg_additional_flags_t {
+ JPEG_ADDITIONAL_FLAG_NONE = 0,
+ /* request firmware to return values of the CEH registers */
+ JPEG_ADDITIONAL_FLAG_CEH = 1,
+ /* output storage of auxiliary reconstruction in Raster format. */
+ JPEG_ADDITIONAL_FLAG_RASTER = 64,
+ /* output storage of auxiliary reconstruction in 420MB format. */
+ JPEG_ADDITIONAL_FLAG_420MB = 128
+};
+
+/*
+ * struct jpeg_video_decode_init_params_t - initialization command parameters
+ *
+ * @circular_buffer_begin_addr_p: start address of fw circular buffer
+ * @circular_buffer_end_addr_p: end address of fw circular buffer
+ */
+struct jpeg_video_decode_init_params_t {
+ u32 circular_buffer_begin_addr_p;
+ u32 circular_buffer_end_addr_p;
+ u32 reserved;
+};
+
+/*
+ * struct jpeg_decode_params_t - decode command parameters
+ *
+ * @picture_start_addr_p: start address of jpeg picture
+ * @picture_end_addr_p: end address of jpeg picture
+ * @decoded_buffer_addr: decoded picture buffer
+ * @display_buffer_addr: display picture buffer
+ * @main_aux_enable: enable main and/or aux outputs
+ * @horizontal_decimation_factor: horizontal decimation factor
+ * @vertical_decimation_factor: vertical decimation factor
+ * @xvalue0: the x(0) coordinate for subregion decoding
+ * @xvalue1: the x(1) coordinate for subregion decoding
+ * @yvalue0: the y(0) coordinate for subregion decoding
+ * @yvalue1: the y(1) coordinate for subregion decoding
+ * @decoding_mode: decoding mode
+ * @additional_flags: additional flags
+ * @field_flag: determines frame/field scan
+ * @is_jpeg_image: 1 = still jpeg, 0 = motion jpeg
+ */
+struct jpeg_decode_params_t {
+ u32 picture_start_addr_p;
+ u32 picture_end_addr_p;
+ struct jpeg_decoded_buffer_address_t decoded_buffer_addr;
+ struct jpeg_display_buffer_address_t display_buffer_addr;
+ enum jpeg_rcn_ref_disp_enable_t main_aux_enable;
+ enum jpeg_horizontal_deci_factor_t horizontal_decimation_factor;
+ enum jpeg_vertical_deci_factor_t vertical_decimation_factor;
+ u32 xvalue0;
+ u32 xvalue1;
+ u32 yvalue0;
+ u32 yvalue1;
+ enum jpeg_decoding_mode_t decoding_mode;
+ u32 additional_flags;
+ u32 field_flag;
+ u32 reserved;
+ u32 is_jpeg_image;
+};
+
+/*
+ * struct jpeg_decode_return_params_t
+ *
+ * status returned by firmware after decoding
+ *
+ * @decode_time_in_us: decoding time in microseconds
+ * @pm_cycles: profiling information
+ * @pm_dmiss: profiling information
+ * @pm_imiss: profiling information
+ * @pm_bundles: profiling information
+ * @pm_pft: profiling information
+ * @error_code: status of the decoding process
+ * @ceh_registers: array where values of the Contrast Enhancement
+ * Histogram (CEH) registers will be stored.
+ * ceh_registers[0] corresponds to register MBE_CEH_0_7,
+ * ceh_registers[1] corresponds to register MBE_CEH_8_15,
+ * ceh_registers[2] corresponds to register MBE_CEH_16_23.
+ * Note that elements of this array will be updated only
+ * if additional_flags has JPEG_ADDITIONAL_FLAG_CEH set.
+ */
+struct jpeg_decode_return_params_t {
+ /* profiling info */
+ u32 decode_time_in_us;
+ u32 pm_cycles;
+ u32 pm_dmiss;
+ u32 pm_imiss;
+ u32 pm_bundles;
+ u32 pm_pft;
+ enum jpeg_decoding_error_t error_code;
+ u32 ceh_registers[32];
+};
+
+#endif /* DELTA_MJPEG_FW_H */
diff --git a/drivers/media/platform/st/sti/delta/delta-mjpeg-hdr.c b/drivers/media/platform/st/sti/delta/delta-mjpeg-hdr.c
new file mode 100644
index 000000000..90e5b2f72
--- /dev/null
+++ b/drivers/media/platform/st/sti/delta/delta-mjpeg-hdr.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics SA 2013
+ * Author: Hugues Fruchet <hugues.fruchet@st.com> for STMicroelectronics.
+ */
+
+#include "delta.h"
+#include "delta-mjpeg.h"
+
+#define MJPEG_SOF_0 0xc0
+#define MJPEG_SOF_1 0xc1
+#define MJPEG_SOI 0xd8
+#define MJPEG_MARKER 0xff
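+
+/*
+ * Byte layout of the SOF0/SOF1 segment parsed by delta_mjpeg_read_sof()
+ * below (multi-byte fields are big-endian):
+ *   0xff, 0xc0|0xc1, length(2), sample precision(1), frame height(2),
+ *   frame width(2), nb of components(1), per-component data...
+ */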
+
+static char *header_str(struct mjpeg_header *header,
+ char *str,
+ unsigned int len)
+{
+ char *cur = str;
+ unsigned int left = len;
+
+ if (!header)
+ return "";
+
+ snprintf(cur, left, "[MJPEG header]\n"
+ "|- length = %d\n"
+ "|- precision = %d\n"
+ "|- width = %d\n"
+ "|- height = %d\n"
+ "|- components = %d\n",
+ header->length,
+ header->sample_precision,
+ header->frame_width,
+ header->frame_height,
+ header->nb_of_components);
+
+ return str;
+}
+
+static int delta_mjpeg_read_sof(struct delta_ctx *pctx,
+ unsigned char *data, unsigned int size,
+ struct mjpeg_header *header)
+{
+ struct delta_dev *delta = pctx->dev;
+ unsigned int offset = 0;
+
+ if (size < 64)
+ goto err_no_more;
+
+ memset(header, 0, sizeof(*header));
+ header->length = be16_to_cpu(*(__be16 *)(data + offset));
+ offset += sizeof(u16);
+ header->sample_precision = *(u8 *)(data + offset);
+ offset += sizeof(u8);
+ header->frame_height = be16_to_cpu(*(__be16 *)(data + offset));
+ offset += sizeof(u16);
+ header->frame_width = be16_to_cpu(*(__be16 *)(data + offset));
+ offset += sizeof(u16);
+ header->nb_of_components = *(u8 *)(data + offset);
+ offset += sizeof(u8);
+
+ if (header->nb_of_components >= MJPEG_MAX_COMPONENTS) {
+ dev_err(delta->dev,
+ "%s unsupported number of components (%d > %d)\n",
+ pctx->name, header->nb_of_components,
+ MJPEG_MAX_COMPONENTS);
+ return -EINVAL;
+ }
+
+ if ((offset + header->nb_of_components *
+ sizeof(header->components[0])) > size)
+ goto err_no_more;
+
+ return 0;
+
+err_no_more:
+ dev_err(delta->dev,
+ "%s sof: reached end of %d size input stream\n",
+ pctx->name, size);
+ return -ENODATA;
+}
+
+int delta_mjpeg_read_header(struct delta_ctx *pctx,
+ unsigned char *data, unsigned int size,
+ struct mjpeg_header *header,
+ unsigned int *data_offset)
+{
+ struct delta_dev *delta = pctx->dev;
+ unsigned char str[200];
+
+ int ret = 0;
+ unsigned int offset = 0;
+ unsigned int soi = 0;
+
+ if (size < 2)
+ goto err_no_more;
+
+ offset = 0;
+ while (1) {
+ if (data[offset] == MJPEG_MARKER)
+ switch (data[offset + 1]) {
+ case MJPEG_SOI:
+ soi = 1;
+ *data_offset = offset;
+ break;
+
+ case MJPEG_SOF_0:
+ case MJPEG_SOF_1:
+ if (!soi) {
+ dev_err(delta->dev,
+ "%s wrong sequence, got SOF while SOI not seen\n",
+ pctx->name);
+ return -EINVAL;
+ }
+
+ ret = delta_mjpeg_read_sof(pctx,
+ &data[offset + 2],
+ size - (offset + 2),
+ header);
+ if (ret)
+ goto err;
+
+ goto done;
+
+ default:
+ break;
+ }
+
+ offset++;
+ if ((offset + 2) >= size)
+ goto err_no_more;
+ }
+
+done:
+ dev_dbg(delta->dev,
+ "%s found header @ offset %d:\n%s", pctx->name,
+ *data_offset,
+ header_str(header, str, sizeof(str)));
+ return 0;
+
+err_no_more:
+ dev_err(delta->dev,
+ "%s no header found within %d bytes input stream\n",
+ pctx->name, size);
+ return -ENODATA;
+
+err:
+ return ret;
+}
diff --git a/drivers/media/platform/st/sti/delta/delta-mjpeg.h b/drivers/media/platform/st/sti/delta/delta-mjpeg.h
new file mode 100644
index 000000000..43f7a88b6
--- /dev/null
+++ b/drivers/media/platform/st/sti/delta/delta-mjpeg.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) STMicroelectronics SA 2013
+ * Author: Hugues Fruchet <hugues.fruchet@st.com> for STMicroelectronics.
+ */
+
+#ifndef DELTA_MJPEG_H
+#define DELTA_MJPEG_H
+
+#include "delta.h"
+
+struct mjpeg_component {
+ unsigned int id; /* 1=Y, 2=Cb, 3=Cr, 4=L, 5=Q */
+ unsigned int h_sampling_factor;
+ unsigned int v_sampling_factor;
+ unsigned int quant_table_index;
+};
+
+#define MJPEG_MAX_COMPONENTS 5
+
+struct mjpeg_header {
+ unsigned int length;
+ unsigned int sample_precision;
+ unsigned int frame_width;
+ unsigned int frame_height;
+ unsigned int nb_of_components;
+ struct mjpeg_component components[MJPEG_MAX_COMPONENTS];
+};
+
+int delta_mjpeg_read_header(struct delta_ctx *pctx,
+ unsigned char *data, unsigned int size,
+ struct mjpeg_header *header,
+ unsigned int *data_offset);
+
+#endif /* DELTA_MJPEG_H */
diff --git a/drivers/media/platform/st/sti/delta/delta-v4l2.c b/drivers/media/platform/st/sti/delta/delta-v4l2.c
new file mode 100644
index 000000000..03eaee6d1
--- /dev/null
+++ b/drivers/media/platform/st/sti/delta/delta-v4l2.c
@@ -0,0 +1,1970 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Hugues Fruchet <hugues.fruchet@st.com>
+ * Jean-Christophe Trotin <jean-christophe.trotin@st.com>
+ * for STMicroelectronics.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "delta.h"
+#include "delta-debug.h"
+#include "delta-ipc.h"
+
+#define DELTA_NAME "st-delta"
+
+#define DELTA_PREFIX "[---:----]"
+
+#define to_ctx(__fh) container_of(__fh, struct delta_ctx, fh)
+#define to_au(__vbuf) container_of(__vbuf, struct delta_au, vbuf)
+#define to_frame(__vbuf) container_of(__vbuf, struct delta_frame, vbuf)
+
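+/*
+ * call_dec_op() invokes an optional decoder callback and evaluates to 0
+ * when either the decoder or the requested callback is not implemented.
+ */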
+#define call_dec_op(dec, op, args...)\
+ ((dec && (dec)->op) ? (dec)->op(args) : 0)
+
+/* registry of available decoders */
+static const struct delta_dec *delta_decoders[] = {
+#ifdef CONFIG_VIDEO_STI_DELTA_MJPEG
+ &mjpegdec,
+#endif
+};
+
+static inline int frame_size(u32 w, u32 h, u32 fmt)
+{
+ switch (fmt) {
+ case V4L2_PIX_FMT_NV12:
+ return (w * h * 3) / 2;
+ default:
+ return 0;
+ }
+}
+
+static inline int frame_stride(u32 w, u32 fmt)
+{
+ switch (fmt) {
+ case V4L2_PIX_FMT_NV12:
+ return w;
+ default:
+ return 0;
+ }
+}
+
+static void dump_au(struct delta_ctx *ctx, struct delta_au *au)
+{
+ struct delta_dev *delta = ctx->dev;
+ u32 size = 10; /* dump first & last 10 bytes */
+ u8 *data = (u8 *)(au->vaddr);
+
+ if (au->size <= (size * 2))
+ dev_dbg(delta->dev, "%s dump au[%d] dts=%lld size=%d data=%*ph\n",
+ ctx->name, au->vbuf.vb2_buf.index, au->dts, au->size,
+ au->size, data);
+ else
+ dev_dbg(delta->dev, "%s dump au[%d] dts=%lld size=%d data=%*ph..%*ph\n",
+ ctx->name, au->vbuf.vb2_buf.index, au->dts, au->size,
+ size, data, size, data + au->size - size);
+}
+
+static void dump_frame(struct delta_ctx *ctx, struct delta_frame *frame)
+{
+ struct delta_dev *delta = ctx->dev;
+ u32 size = 10; /* dump first 10 bytes */
+ u8 *data = (u8 *)(frame->vaddr);
+
+ dev_dbg(delta->dev, "%s dump frame[%d] dts=%lld type=%s field=%s data=%*ph\n",
+ ctx->name, frame->index, frame->dts,
+ frame_type_str(frame->flags),
+ frame_field_str(frame->field),
+ size, data);
+}
+
+static void delta_au_done(struct delta_ctx *ctx, struct delta_au *au, int err)
+{
+ struct vb2_v4l2_buffer *vbuf;
+
+ vbuf = &au->vbuf;
+ vbuf->sequence = ctx->au_num++;
+ v4l2_m2m_buf_done(vbuf, err ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+}
+
+static void delta_frame_done(struct delta_ctx *ctx, struct delta_frame *frame,
+ int err)
+{
+ struct vb2_v4l2_buffer *vbuf;
+
+ dump_frame(ctx, frame);
+
+ /* decoded frame is now output to user */
+ frame->state |= DELTA_FRAME_OUT;
+
+ vbuf = &frame->vbuf;
+ vbuf->sequence = ctx->frame_num++;
+ v4l2_m2m_buf_done(vbuf, err ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+
+ if (frame->info.size) /* ignore EOS */
+ ctx->output_frames++;
+}
+
+static void requeue_free_frames(struct delta_ctx *ctx)
+{
+ struct vb2_v4l2_buffer *vbuf;
+ struct delta_frame *frame;
+ unsigned int i;
+
+ /* requeue all free frames */
+ for (i = 0; i < ctx->nb_of_frames; i++) {
+ frame = ctx->frames[i];
+ if (frame->state == DELTA_FRAME_FREE) {
+ vbuf = &frame->vbuf;
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+ frame->state = DELTA_FRAME_M2M;
+ }
+ }
+}
+
+static int delta_recycle(struct delta_ctx *ctx, struct delta_frame *frame)
+{
+ const struct delta_dec *dec = ctx->dec;
+
+ /* recycle frame on decoder side */
+ call_dec_op(dec, recycle, ctx, frame);
+
+ /* this frame is no more output */
+ frame->state &= ~DELTA_FRAME_OUT;
+
+ /* requeue free frame */
+ if (frame->state == DELTA_FRAME_FREE) {
+ struct vb2_v4l2_buffer *vbuf = &frame->vbuf;
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+ frame->state = DELTA_FRAME_M2M;
+ }
+
+ /* reset other frame fields */
+ frame->flags = 0;
+ frame->dts = 0;
+
+ return 0;
+}
+
+static void delta_push_dts(struct delta_ctx *ctx, u64 val)
+{
+ struct delta_dts *dts;
+
+ dts = kzalloc(sizeof(*dts), GFP_KERNEL);
+ if (!dts)
+ return;
+
+ INIT_LIST_HEAD(&dts->list);
+
+ /*
+ * protected by global lock acquired
+ * by V4L2 when calling delta_vb2_au_queue
+ */
+ dts->val = val;
+ list_add_tail(&dts->list, &ctx->dts);
+}
+
+static void delta_pop_dts(struct delta_ctx *ctx, u64 *val)
+{
+ struct delta_dev *delta = ctx->dev;
+ struct delta_dts *dts;
+
+ /*
+ * protected by global lock acquired
+ * by V4L2 when calling delta_vb2_au_queue
+ */
+ if (list_empty(&ctx->dts)) {
+ dev_warn(delta->dev, "%s no dts to pop ... output dts = 0\n",
+ ctx->name);
+ *val = 0;
+ return;
+ }
+
+ dts = list_first_entry(&ctx->dts, struct delta_dts, list);
+ list_del(&dts->list);
+
+ *val = dts->val;
+
+ kfree(dts);
+}
+
+static void delta_flush_dts(struct delta_ctx *ctx)
+{
+ struct delta_dts *dts;
+ struct delta_dts *next;
+
+ /*
+ * protected by global lock acquired
+ * by V4L2 when calling delta_vb2_au_queue
+ */
+
+ /* free all pending dts */
+ list_for_each_entry_safe(dts, next, &ctx->dts, list)
+ kfree(dts);
+
+ /* reset list */
+ INIT_LIST_HEAD(&ctx->dts);
+}
+
+static inline int frame_alignment(u32 fmt)
+{
+ switch (fmt) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ /* multiple of 2 */
+ return 2;
+ default:
+ return 1;
+ }
+}
+
+static inline int estimated_au_size(u32 w, u32 h)
+{
+ /*
+ * for a MJPEG stream encoded from YUV422 pixel format,
+ * assuming a compression ratio of 2, the maximum size
+ * of an access unit is (width x height x 2) / 2,
+ * so (width x height)
+ */
+ return (w * h);
+}
+
+static void set_default_params(struct delta_ctx *ctx)
+{
+ struct delta_frameinfo *frameinfo = &ctx->frameinfo;
+ struct delta_streaminfo *streaminfo = &ctx->streaminfo;
+
+ memset(frameinfo, 0, sizeof(*frameinfo));
+ frameinfo->pixelformat = V4L2_PIX_FMT_NV12;
+ frameinfo->width = DELTA_DEFAULT_WIDTH;
+ frameinfo->height = DELTA_DEFAULT_HEIGHT;
+ frameinfo->aligned_width = ALIGN(frameinfo->width,
+ DELTA_WIDTH_ALIGNMENT);
+ frameinfo->aligned_height = ALIGN(frameinfo->height,
+ DELTA_HEIGHT_ALIGNMENT);
+ frameinfo->size = frame_size(frameinfo->aligned_width,
+ frameinfo->aligned_height,
+ frameinfo->pixelformat);
+ frameinfo->field = V4L2_FIELD_NONE;
+ frameinfo->colorspace = V4L2_COLORSPACE_REC709;
+ frameinfo->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ frameinfo->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ frameinfo->quantization = V4L2_QUANTIZATION_DEFAULT;
+
+ memset(streaminfo, 0, sizeof(*streaminfo));
+ streaminfo->streamformat = DELTA_DEFAULT_STREAMFORMAT;
+ streaminfo->width = DELTA_DEFAULT_WIDTH;
+ streaminfo->height = DELTA_DEFAULT_HEIGHT;
+ streaminfo->field = V4L2_FIELD_NONE;
+ streaminfo->colorspace = V4L2_COLORSPACE_REC709;
+ streaminfo->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ streaminfo->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ streaminfo->quantization = V4L2_QUANTIZATION_DEFAULT;
+
+ ctx->max_au_size = estimated_au_size(streaminfo->width,
+ streaminfo->height);
+}
+
+static const struct delta_dec *delta_find_decoder(struct delta_ctx *ctx,
+ u32 streamformat,
+ u32 pixelformat)
+{
+ struct delta_dev *delta = ctx->dev;
+ const struct delta_dec *dec;
+ unsigned int i;
+
+ for (i = 0; i < delta->nb_of_decoders; i++) {
+ dec = delta->decoders[i];
+ if ((dec->pixelformat == pixelformat) &&
+ (dec->streamformat == streamformat))
+ return dec;
+ }
+
+ return NULL;
+}
+
+static void register_format(u32 format, u32 formats[], u32 *nb_of_formats)
+{
+ u32 i;
+
+ for (i = 0; i < *nb_of_formats; i++) {
+ if (format == formats[i])
+ return;
+ }
+
+ formats[(*nb_of_formats)++] = format;
+}
+
+static void register_formats(struct delta_dev *delta)
+{
+ unsigned int i;
+
+ for (i = 0; i < delta->nb_of_decoders; i++) {
+ register_format(delta->decoders[i]->pixelformat,
+ delta->pixelformats,
+ &delta->nb_of_pixelformats);
+
+ register_format(delta->decoders[i]->streamformat,
+ delta->streamformats,
+ &delta->nb_of_streamformats);
+ }
+}
+
+static void register_decoders(struct delta_dev *delta)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(delta_decoders); i++) {
+ if (delta->nb_of_decoders >= DELTA_MAX_DECODERS) {
+ dev_dbg(delta->dev,
+ "%s failed to register %s decoder (%d maximum reached)\n",
+ DELTA_PREFIX, delta_decoders[i]->name,
+ DELTA_MAX_DECODERS);
+ return;
+ }
+
+ delta->decoders[delta->nb_of_decoders++] = delta_decoders[i];
+ dev_info(delta->dev, "%s %s decoder registered\n",
+ DELTA_PREFIX, delta_decoders[i]->name);
+ }
+}
+
+static int delta_open_decoder(struct delta_ctx *ctx, u32 streamformat,
+ u32 pixelformat, const struct delta_dec **pdec)
+{
+ struct delta_dev *delta = ctx->dev;
+ const struct delta_dec *dec;
+ int ret;
+
+ dec = delta_find_decoder(ctx, streamformat, ctx->frameinfo.pixelformat);
+ if (!dec) {
+ dev_err(delta->dev, "%s no decoder found matching %4.4s => %4.4s\n",
+ ctx->name, (char *)&streamformat, (char *)&pixelformat);
+ return -EINVAL;
+ }
+
+ dev_dbg(delta->dev, "%s one decoder matching %4.4s => %4.4s\n",
+ ctx->name, (char *)&streamformat, (char *)&pixelformat);
+
+ /* update instance name */
+ snprintf(ctx->name, sizeof(ctx->name), "[%3d:%4.4s]",
+ delta->instance_id, (char *)&streamformat);
+
+ /* open decoder instance */
+ ret = call_dec_op(dec, open, ctx);
+ if (ret) {
+ dev_err(delta->dev, "%s failed to open decoder instance (%d)\n",
+ ctx->name, ret);
+ return ret;
+ }
+
+ dev_dbg(delta->dev, "%s %s decoder opened\n", ctx->name, dec->name);
+
+ *pdec = dec;
+
+ return ret;
+}
+
+/*
+ * V4L2 ioctl operations
+ */
+
+static int delta_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_dev *delta = ctx->dev;
+
+ strscpy(cap->driver, DELTA_NAME, sizeof(cap->driver));
+ strscpy(cap->card, delta->vdev->name, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ delta->pdev->name);
+
+ return 0;
+}
+
+static int delta_enum_fmt_stream(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_dev *delta = ctx->dev;
+
+ if (unlikely(f->index >= delta->nb_of_streamformats))
+ return -EINVAL;
+
+ f->pixelformat = delta->streamformats[f->index];
+
+ return 0;
+}
+
+static int delta_enum_fmt_frame(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_dev *delta = ctx->dev;
+
+ if (unlikely(f->index >= delta->nb_of_pixelformats))
+ return -EINVAL;
+
+ f->pixelformat = delta->pixelformats[f->index];
+
+ return 0;
+}
+
+static int delta_g_fmt_stream(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_dev *delta = ctx->dev;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct delta_streaminfo *streaminfo = &ctx->streaminfo;
+ unsigned char str[100] = "";
+
+ if (!(ctx->flags & DELTA_FLAG_STREAMINFO))
+ dev_dbg(delta->dev,
+ "%s V4L2 GET_FMT (OUTPUT): no stream information available, default to %s\n",
+ ctx->name,
+ delta_streaminfo_str(streaminfo, str, sizeof(str)));
+
+ pix->pixelformat = streaminfo->streamformat;
+ pix->width = streaminfo->width;
+ pix->height = streaminfo->height;
+ pix->field = streaminfo->field;
+ pix->bytesperline = 0;
+ pix->sizeimage = ctx->max_au_size;
+ pix->colorspace = streaminfo->colorspace;
+ pix->xfer_func = streaminfo->xfer_func;
+ pix->ycbcr_enc = streaminfo->ycbcr_enc;
+ pix->quantization = streaminfo->quantization;
+
+ return 0;
+}
+
+static int delta_g_fmt_frame(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_dev *delta = ctx->dev;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct delta_frameinfo *frameinfo = &ctx->frameinfo;
+ struct delta_streaminfo *streaminfo = &ctx->streaminfo;
+ unsigned char str[100] = "";
+
+ if (!(ctx->flags & DELTA_FLAG_FRAMEINFO))
+ dev_dbg(delta->dev,
+ "%s V4L2 GET_FMT (CAPTURE): no frame information available, default to %s\n",
+ ctx->name,
+ delta_frameinfo_str(frameinfo, str, sizeof(str)));
+
+ pix->pixelformat = frameinfo->pixelformat;
+ pix->width = frameinfo->aligned_width;
+ pix->height = frameinfo->aligned_height;
+ pix->field = frameinfo->field;
+ pix->bytesperline = frame_stride(frameinfo->aligned_width,
+ frameinfo->pixelformat);
+ pix->sizeimage = frameinfo->size;
+
+ if (ctx->flags & DELTA_FLAG_STREAMINFO) {
+ /* align colorspace & friends on stream ones if any set */
+ frameinfo->colorspace = streaminfo->colorspace;
+ frameinfo->xfer_func = streaminfo->xfer_func;
+ frameinfo->ycbcr_enc = streaminfo->ycbcr_enc;
+ frameinfo->quantization = streaminfo->quantization;
+ }
+ pix->colorspace = frameinfo->colorspace;
+ pix->xfer_func = frameinfo->xfer_func;
+ pix->ycbcr_enc = frameinfo->ycbcr_enc;
+ pix->quantization = frameinfo->quantization;
+
+ return 0;
+}
+
+static int delta_try_fmt_stream(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_dev *delta = ctx->dev;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ u32 streamformat = pix->pixelformat;
+ const struct delta_dec *dec;
+ u32 width, height;
+ u32 au_size;
+
+ dec = delta_find_decoder(ctx, streamformat, ctx->frameinfo.pixelformat);
+ if (!dec) {
+ dev_dbg(delta->dev,
+ "%s V4L2 TRY_FMT (OUTPUT): unsupported format %4.4s\n",
+ ctx->name, (char *)&pix->pixelformat);
+ return -EINVAL;
+ }
+
+ /* adjust width & height */
+ width = pix->width;
+ height = pix->height;
+ v4l_bound_align_image
+ (&pix->width,
+ DELTA_MIN_WIDTH,
+ dec->max_width ? dec->max_width : DELTA_MAX_WIDTH,
+ 0,
+ &pix->height,
+ DELTA_MIN_HEIGHT,
+ dec->max_height ? dec->max_height : DELTA_MAX_HEIGHT,
+ 0, 0);
+
+ if ((pix->width != width) || (pix->height != height))
+ dev_dbg(delta->dev,
+ "%s V4L2 TRY_FMT (OUTPUT): resolution updated %dx%d -> %dx%d to fit min/max/alignment\n",
+ ctx->name, width, height,
+ pix->width, pix->height);
+
+ au_size = estimated_au_size(pix->width, pix->height);
+ if (pix->sizeimage < au_size) {
+ dev_dbg(delta->dev,
+ "%s V4L2 TRY_FMT (OUTPUT): size updated %d -> %d to fit estimated size\n",
+ ctx->name, pix->sizeimage, au_size);
+ pix->sizeimage = au_size;
+ }
+
+ pix->bytesperline = 0;
+
+ if (pix->field == V4L2_FIELD_ANY)
+ pix->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static int delta_try_fmt_frame(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_dev *delta = ctx->dev;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ u32 pixelformat = pix->pixelformat;
+ const struct delta_dec *dec;
+ u32 width, height;
+
+ dec = delta_find_decoder(ctx, ctx->streaminfo.streamformat,
+ pixelformat);
+ if (!dec) {
+ dev_dbg(delta->dev,
+ "%s V4L2 TRY_FMT (CAPTURE): unsupported format %4.4s\n",
+ ctx->name, (char *)&pixelformat);
+ return -EINVAL;
+ }
+
+ /* adjust width & height */
+ width = pix->width;
+ height = pix->height;
+ v4l_bound_align_image(&pix->width,
+ DELTA_MIN_WIDTH, DELTA_MAX_WIDTH,
+ frame_alignment(pixelformat) - 1,
+ &pix->height,
+ DELTA_MIN_HEIGHT, DELTA_MAX_HEIGHT,
+ frame_alignment(pixelformat) - 1, 0);
+
+ if ((pix->width != width) || (pix->height != height))
+ dev_dbg(delta->dev,
+ "%s V4L2 TRY_FMT (CAPTURE): resolution updated %dx%d -> %dx%d to fit min/max/alignment\n",
+ ctx->name, width, height, pix->width, pix->height);
+
+ /* default decoder alignment constraint */
+ width = ALIGN(pix->width, DELTA_WIDTH_ALIGNMENT);
+ height = ALIGN(pix->height, DELTA_HEIGHT_ALIGNMENT);
+ if ((pix->width != width) || (pix->height != height))
+ dev_dbg(delta->dev,
+ "%s V4L2 TRY_FMT (CAPTURE): resolution updated %dx%d -> %dx%d to fit decoder alignment\n",
+ ctx->name, width, height, pix->width, pix->height);
+
+ if (!pix->colorspace) {
+ pix->colorspace = V4L2_COLORSPACE_REC709;
+ pix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ pix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ pix->quantization = V4L2_QUANTIZATION_DEFAULT;
+ }
+
+ pix->width = width;
+ pix->height = height;
+ pix->bytesperline = frame_stride(pix->width, pixelformat);
+ pix->sizeimage = frame_size(pix->width, pix->height, pixelformat);
+
+ if (pix->field == V4L2_FIELD_ANY)
+ pix->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static int delta_s_fmt_stream(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_dev *delta = ctx->dev;
+ struct vb2_queue *vq;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ int ret;
+
+ ret = delta_try_fmt_stream(file, fh, f);
+ if (ret) {
+ dev_dbg(delta->dev,
+ "%s V4L2 S_FMT (OUTPUT): unsupported format %4.4s\n",
+ ctx->name, (char *)&pix->pixelformat);
+ return ret;
+ }
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_streaming(vq)) {
+ dev_dbg(delta->dev, "%s V4L2 S_FMT (OUTPUT): queue busy\n",
+ ctx->name);
+ return -EBUSY;
+ }
+
+ ctx->max_au_size = pix->sizeimage;
+ ctx->streaminfo.width = pix->width;
+ ctx->streaminfo.height = pix->height;
+ ctx->streaminfo.streamformat = pix->pixelformat;
+ ctx->streaminfo.colorspace = pix->colorspace;
+ ctx->streaminfo.xfer_func = pix->xfer_func;
+ ctx->streaminfo.ycbcr_enc = pix->ycbcr_enc;
+ ctx->streaminfo.quantization = pix->quantization;
+ ctx->flags |= DELTA_FLAG_STREAMINFO;
+
+ return 0;
+}
+
+static int delta_s_fmt_frame(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_dev *delta = ctx->dev;
+ const struct delta_dec *dec = ctx->dec;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct delta_frameinfo frameinfo;
+ unsigned char str[100] = "";
+ struct vb2_queue *vq;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_streaming(vq)) {
+ dev_dbg(delta->dev, "%s V4L2 S_FMT (CAPTURE): queue busy\n",
+ ctx->name);
+ return -EBUSY;
+ }
+
+ if (ctx->state < DELTA_STATE_READY) {
+ /*
+ * decoder not yet opened and no valid stream header found:
+ * the format cannot be negotiated with the decoder yet, so at
+ * least check the pixel format and negotiate resolution
+ * boundaries and alignment...
+ */
+ ret = delta_try_fmt_frame(file, fh, f);
+ if (ret) {
+ dev_dbg(delta->dev,
+ "%s V4L2 S_FMT (CAPTURE): unsupported format %4.4s\n",
+ ctx->name, (char *)&pix->pixelformat);
+ return ret;
+ }
+
+ return 0;
+ }
+
+ /* set frame information to decoder */
+ memset(&frameinfo, 0, sizeof(frameinfo));
+ frameinfo.pixelformat = pix->pixelformat;
+ frameinfo.width = pix->width;
+ frameinfo.height = pix->height;
+ frameinfo.aligned_width = pix->width;
+ frameinfo.aligned_height = pix->height;
+ frameinfo.size = pix->sizeimage;
+ frameinfo.field = pix->field;
+ frameinfo.colorspace = pix->colorspace;
+ frameinfo.xfer_func = pix->xfer_func;
+ frameinfo.ycbcr_enc = pix->ycbcr_enc;
+ frameinfo.quantization = pix->quantization;
+ ret = call_dec_op(dec, set_frameinfo, ctx, &frameinfo);
+ if (ret)
+ return ret;
+
+ /* then get what decoder can really do */
+ ret = call_dec_op(dec, get_frameinfo, ctx, &frameinfo);
+ if (ret)
+ return ret;
+
+ ctx->flags |= DELTA_FLAG_FRAMEINFO;
+ ctx->frameinfo = frameinfo;
+ dev_dbg(delta->dev,
+ "%s V4L2 SET_FMT (CAPTURE): frameinfo updated to %s\n",
+ ctx->name,
+ delta_frameinfo_str(&frameinfo, str, sizeof(str)));
+
+ pix->pixelformat = frameinfo.pixelformat;
+ pix->width = frameinfo.aligned_width;
+ pix->height = frameinfo.aligned_height;
+ pix->bytesperline = frame_stride(pix->width, pix->pixelformat);
+ pix->sizeimage = frameinfo.size;
+ pix->field = frameinfo.field;
+ pix->colorspace = frameinfo.colorspace;
+ pix->xfer_func = frameinfo.xfer_func;
+ pix->ycbcr_enc = frameinfo.ycbcr_enc;
+ pix->quantization = frameinfo.quantization;
+
+ return 0;
+}
+
+static int delta_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct delta_ctx *ctx = to_ctx(fh);
+ struct delta_frameinfo *frameinfo = &ctx->frameinfo;
+ struct v4l2_rect crop;
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ if ((ctx->flags & DELTA_FLAG_FRAMEINFO) &&
+ (frameinfo->flags & DELTA_FRAMEINFO_FLAG_CROP)) {
+ crop = frameinfo->crop;
+ } else {
+ /* default to video dimensions */
+ crop.left = 0;
+ crop.top = 0;
+ crop.width = frameinfo->width;
+ crop.height = frameinfo->height;
+ }
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ /* visible area inside video */
+ s->r = crop;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_PADDED:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ /* up to aligned dimensions */
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = frameinfo->aligned_width;
+ s->r.height = frameinfo->aligned_height;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void delta_complete_eos(struct delta_ctx *ctx,
+ struct delta_frame *frame)
+{
+ struct delta_dev *delta = ctx->dev;
+ const struct v4l2_event ev = {.type = V4L2_EVENT_EOS};
+
+ /*
+ * Send EOS to user:
+ * - by returning an empty frame flagged with V4L2_BUF_FLAG_LAST
+ * - and then by sending the EOS event
+ */
+
+ /* empty frame */
+ frame->info.size = 0;
+
+ /* set the last buffer flag */
+ frame->flags |= V4L2_BUF_FLAG_LAST;
+
+ /* release frame to user */
+ delta_frame_done(ctx, frame, 0);
+
+ /* send EOS event */
+ v4l2_event_queue_fh(&ctx->fh, &ev);
+
+ dev_dbg(delta->dev, "%s EOS completed\n", ctx->name);
+}
+
+static int delta_try_decoder_cmd(struct file *file, void *fh,
+ struct v4l2_decoder_cmd *cmd)
+{
+ if (cmd->cmd != V4L2_DEC_CMD_STOP)
+ return -EINVAL;
+
+ if (cmd->flags & V4L2_DEC_CMD_STOP_TO_BLACK)
+ return -EINVAL;
+
+ if (!(cmd->flags & V4L2_DEC_CMD_STOP_IMMEDIATELY) &&
+ (cmd->stop.pts != 0))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int delta_decoder_stop_cmd(struct delta_ctx *ctx, void *fh)
+{
+ const struct delta_dec *dec = ctx->dec;
+ struct delta_dev *delta = ctx->dev;
+ struct delta_frame *frame = NULL;
+ int ret = 0;
+
+ dev_dbg(delta->dev, "%s EOS received\n", ctx->name);
+
+ if (ctx->state != DELTA_STATE_READY)
+ return 0;
+
+ /* drain the decoder */
+ call_dec_op(dec, drain, ctx);
+
+ /* release to user drained frames */
+ while (1) {
+ frame = NULL;
+ ret = call_dec_op(dec, get_frame, ctx, &frame);
+ if (ret == -ENODATA) {
+ /* no more decoded frames */
+ break;
+ }
+ if (frame) {
+ dev_dbg(delta->dev, "%s drain frame[%d]\n",
+ ctx->name, frame->index);
+
+ /* pop timestamp and mark frame with it */
+ delta_pop_dts(ctx, &frame->dts);
+
+ /* release decoded frame to user */
+ delta_frame_done(ctx, frame, 0);
+ }
+ }
+
+ /* try to complete EOS */
+ ret = delta_get_free_frame(ctx, &frame);
+ if (ret)
+ goto delay_eos;
+
+ /* new frame available, EOS can now be completed */
+ delta_complete_eos(ctx, frame);
+
+ ctx->state = DELTA_STATE_EOS;
+
+ return 0;
+
+delay_eos:
+ /*
+ * EOS completion from the driver is delayed because
+ * no free empty frame is available.
+ * EOS completion is therefore postponed until the next
+ * frame_queue() call, to be sure a free empty frame is available.
+ */
+ ctx->state = DELTA_STATE_WF_EOS;
+ dev_dbg(delta->dev, "%s EOS delayed\n", ctx->name);
+
+ return 0;
+}
+
+static int delta_decoder_cmd(struct file *file, void *fh,
+ struct v4l2_decoder_cmd *cmd)
+{
+ struct delta_ctx *ctx = to_ctx(fh);
+ int ret = 0;
+
+ ret = delta_try_decoder_cmd(file, fh, cmd);
+ if (ret)
+ return ret;
+
+ return delta_decoder_stop_cmd(ctx, fh);
+}
+
+static int delta_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_EOS:
+ return v4l2_event_subscribe(fh, sub, 2, NULL);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* v4l2 ioctl ops */
+static const struct v4l2_ioctl_ops delta_ioctl_ops = {
+ .vidioc_querycap = delta_querycap,
+ .vidioc_enum_fmt_vid_cap = delta_enum_fmt_frame,
+ .vidioc_g_fmt_vid_cap = delta_g_fmt_frame,
+ .vidioc_try_fmt_vid_cap = delta_try_fmt_frame,
+ .vidioc_s_fmt_vid_cap = delta_s_fmt_frame,
+ .vidioc_enum_fmt_vid_out = delta_enum_fmt_stream,
+ .vidioc_g_fmt_vid_out = delta_g_fmt_stream,
+ .vidioc_try_fmt_vid_out = delta_try_fmt_stream,
+ .vidioc_s_fmt_vid_out = delta_s_fmt_stream,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+ .vidioc_g_selection = delta_g_selection,
+ .vidioc_try_decoder_cmd = delta_try_decoder_cmd,
+ .vidioc_decoder_cmd = delta_decoder_cmd,
+ .vidioc_subscribe_event = delta_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/*
+ * mem-to-mem operations
+ */
+
+static void delta_run_work(struct work_struct *work)
+{
+ struct delta_ctx *ctx = container_of(work, struct delta_ctx, run_work);
+ struct delta_dev *delta = ctx->dev;
+ const struct delta_dec *dec = ctx->dec;
+ struct delta_au *au;
+ struct delta_frame *frame = NULL;
+ int ret = 0;
+ bool discard = false;
+ struct vb2_v4l2_buffer *vbuf;
+
+ if (!dec) {
+ dev_err(delta->dev, "%s no decoder opened yet\n", ctx->name);
+ return;
+ }
+
+ /* protect instance against reentrancy */
+ mutex_lock(&ctx->lock);
+
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ if (!vbuf) {
+ dev_err(delta->dev, "%s no buffer to decode\n", ctx->name);
+ mutex_unlock(&ctx->lock);
+ return;
+ }
+ au = to_au(vbuf);
+ au->size = vb2_get_plane_payload(&vbuf->vb2_buf, 0);
+ au->dts = vbuf->vb2_buf.timestamp;
+
+ /* dump access unit */
+ dump_au(ctx, au);
+
+ /* enable the hardware */
+ if (!dec->pm) {
+ ret = delta_get_sync(ctx);
+ if (ret)
+ goto err;
+ }
+
+ /* decode this access unit */
+ ret = call_dec_op(dec, decode, ctx, au);
+
+ /*
+ * an -ENODATA return value refers to the interlaced stream case,
+ * in which 2 access units are needed to produce 1 frame.
+ * So this return value doesn't mean that decoding failed; it
+ * indicates that the timestamp information of the access unit shall
+ * not be taken into account, and that the V4L2 buffer associated with
+ * the access unit shall be flagged with V4L2_BUF_FLAG_ERROR to inform
+ * the user of this situation
+ */
+ if (ret == -ENODATA) {
+ discard = true;
+ } else if (ret) {
+ dev_err(delta->dev, "%s decoding failed (%d)\n",
+ ctx->name, ret);
+
+ /* disable the hardware */
+ if (!dec->pm)
+ delta_put_autosuspend(ctx);
+
+ goto err;
+ }
+
+ /* disable the hardware */
+ if (!dec->pm)
+ delta_put_autosuspend(ctx);
+
+ /* push au timestamp in FIFO */
+ if (!discard)
+ delta_push_dts(ctx, au->dts);
+
+ /* get available decoded frames */
+ while (1) {
+ ret = call_dec_op(dec, get_frame, ctx, &frame);
+ if (ret == -ENODATA) {
+ /* no more decoded frames */
+ goto out;
+ }
+ if (ret) {
+ dev_err(delta->dev, "%s cannot get decoded frame (%d)\n",
+ ctx->name, ret);
+ goto out;
+ }
+ if (!frame) {
+ dev_err(delta->dev,
+ "%s NULL decoded frame\n",
+ ctx->name);
+ goto out;
+ }
+
+ /* pop timestamp and mark frame with it */
+ delta_pop_dts(ctx, &frame->dts);
+
+ /* release decoded frame to user */
+ delta_frame_done(ctx, frame, 0);
+ }
+
+out:
+ requeue_free_frames(ctx);
+ delta_au_done(ctx, au, (discard ? -ENODATA : 0));
+ mutex_unlock(&ctx->lock);
+ v4l2_m2m_job_finish(delta->m2m_dev, ctx->fh.m2m_ctx);
+ return;
+
+err:
+ requeue_free_frames(ctx);
+ delta_au_done(ctx, au, ret);
+ mutex_unlock(&ctx->lock);
+ v4l2_m2m_job_finish(delta->m2m_dev, ctx->fh.m2m_ctx);
+}
+
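+/*
+ * delta_device_run() is called by the v4l2-m2m framework for each ready
+ * job; the job is deferred to the device work queue so that the
+ * synchronous decode done in delta_run_work() runs in process context,
+ * and v4l2_m2m_job_finish() is called there to schedule the next job.
+ */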
+static void delta_device_run(void *priv)
+{
+ struct delta_ctx *ctx = priv;
+ struct delta_dev *delta = ctx->dev;
+
+ queue_work(delta->work_queue, &ctx->run_work);
+}
+
+static void delta_job_abort(void *priv)
+{
+ struct delta_ctx *ctx = priv;
+ struct delta_dev *delta = ctx->dev;
+
+ dev_dbg(delta->dev, "%s aborting job\n", ctx->name);
+
+ ctx->aborting = true;
+}
+
+static int delta_job_ready(void *priv)
+{
+ struct delta_ctx *ctx = priv;
+ struct delta_dev *delta = ctx->dev;
+ int src_bufs = v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx);
+
+ if (!src_bufs) {
+ dev_dbg(delta->dev, "%s not ready: not enough video buffers.\n",
+ ctx->name);
+ return 0;
+ }
+
+ if (!v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx)) {
+ dev_dbg(delta->dev, "%s not ready: not enough video capture buffers.\n",
+ ctx->name);
+ return 0;
+ }
+
+ if (ctx->aborting) {
+ dev_dbg(delta->dev, "%s job not ready: aborting\n", ctx->name);
+ return 0;
+ }
+
+ dev_dbg(delta->dev, "%s job ready\n", ctx->name);
+
+ return 1;
+}
+
+/* mem-to-mem ops */
+static const struct v4l2_m2m_ops delta_m2m_ops = {
+ .device_run = delta_device_run,
+ .job_ready = delta_job_ready,
+ .job_abort = delta_job_abort,
+};
+
+/*
+ * VB2 queue operations
+ */
+
+static int delta_vb2_au_queue_setup(struct vb2_queue *vq,
+ unsigned int *num_buffers,
+ unsigned int *num_planes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct delta_ctx *ctx = vb2_get_drv_priv(vq);
+ unsigned int size = ctx->max_au_size;
+
+ if (*num_planes)
+ return sizes[0] < size ? -EINVAL : 0;
+
+ *num_planes = 1;
+ if (*num_buffers < 1)
+ *num_buffers = 1;
+ if (*num_buffers > DELTA_MAX_AUS)
+ *num_buffers = DELTA_MAX_AUS;
+
+ sizes[0] = size;
+
+ return 0;
+}
+
+static int delta_vb2_au_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ struct delta_ctx *ctx = vb2_get_drv_priv(q);
+ struct delta_dev *delta = ctx->dev;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct delta_au *au = to_au(vbuf);
+
+ if (!au->prepared) {
+ /* get memory addresses */
+ au->vaddr = vb2_plane_vaddr(&au->vbuf.vb2_buf, 0);
+ au->paddr = vb2_dma_contig_plane_dma_addr
+ (&au->vbuf.vb2_buf, 0);
+ au->prepared = true;
+ dev_dbg(delta->dev, "%s au[%d] prepared; virt=0x%p, phy=0x%pad\n",
+ ctx->name, vb->index, au->vaddr, &au->paddr);
+ }
+
+ if (vbuf->field == V4L2_FIELD_ANY)
+ vbuf->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static int delta_setup_frame(struct delta_ctx *ctx,
+ struct delta_frame *frame)
+{
+ struct delta_dev *delta = ctx->dev;
+ const struct delta_dec *dec = ctx->dec;
+
+ if (frame->index >= DELTA_MAX_FRAMES) {
+ dev_err(delta->dev,
+ "%s frame index=%d exceeds output frame count (%d)\n",
+ ctx->name, frame->index, DELTA_MAX_FRAMES);
+ return -EINVAL;
+ }
+
+ if (ctx->nb_of_frames >= DELTA_MAX_FRAMES) {
+ dev_err(delta->dev,
+ "%s number of frames exceeds output frame count (%d > %d)\n",
+ ctx->name, ctx->nb_of_frames, DELTA_MAX_FRAMES);
+ return -EINVAL;
+ }
+
+ if (frame->index != ctx->nb_of_frames) {
+ dev_warn(delta->dev,
+ "%s frame index discontinuity detected, expected %d, got %d\n",
+ ctx->name, ctx->nb_of_frames, frame->index);
+ }
+
+ frame->state = DELTA_FRAME_FREE;
+ ctx->frames[ctx->nb_of_frames] = frame;
+ ctx->nb_of_frames++;
+
+ /* setup frame on decoder side */
+ return call_dec_op(dec, setup_frame, ctx, frame);
+}
+
+/*
+ * default implementation of the get_frameinfo decoder op,
+ * deriving frame information from stream information,
+ * with default pixel format & default alignment.
+ */
+int delta_get_frameinfo_default(struct delta_ctx *ctx,
+ struct delta_frameinfo *frameinfo)
+{
+ struct delta_streaminfo *streaminfo = &ctx->streaminfo;
+
+ memset(frameinfo, 0, sizeof(*frameinfo));
+ frameinfo->pixelformat = V4L2_PIX_FMT_NV12;
+ frameinfo->width = streaminfo->width;
+ frameinfo->height = streaminfo->height;
+ frameinfo->aligned_width = ALIGN(streaminfo->width,
+ DELTA_WIDTH_ALIGNMENT);
+ frameinfo->aligned_height = ALIGN(streaminfo->height,
+ DELTA_HEIGHT_ALIGNMENT);
+ frameinfo->size = frame_size(frameinfo->aligned_width,
+ frameinfo->aligned_height,
+ frameinfo->pixelformat);
+ if (streaminfo->flags & DELTA_STREAMINFO_FLAG_CROP) {
+ frameinfo->flags |= DELTA_FRAMEINFO_FLAG_CROP;
+ frameinfo->crop = streaminfo->crop;
+ }
+ if (streaminfo->flags & DELTA_STREAMINFO_FLAG_PIXELASPECT) {
+ frameinfo->flags |= DELTA_FRAMEINFO_FLAG_PIXELASPECT;
+ frameinfo->pixelaspect = streaminfo->pixelaspect;
+ }
+ frameinfo->field = streaminfo->field;
+
+ return 0;
+}
+
+/*
+ * default implementation of the recycle decoder op,
+ * which simply clears the "decoded" frame state
+ */
+int delta_recycle_default(struct delta_ctx *pctx,
+ struct delta_frame *frame)
+{
+ frame->state &= ~DELTA_FRAME_DEC;
+
+ return 0;
+}
+
+static void dump_frames_status(struct delta_ctx *ctx)
+{
+ struct delta_dev *delta = ctx->dev;
+ unsigned int i;
+ struct delta_frame *frame;
+ unsigned char str[100] = "";
+
+ dev_info(delta->dev,
+ "%s dumping frames status...\n", ctx->name);
+
+ for (i = 0; i < ctx->nb_of_frames; i++) {
+ frame = ctx->frames[i];
+ dev_info(delta->dev,
+ "%s frame[%d] %s\n",
+ ctx->name, frame->index,
+ frame_state_str(frame->state,
+ str, sizeof(str)));
+ }
+}
+
+int delta_get_free_frame(struct delta_ctx *ctx,
+ struct delta_frame **pframe)
+{
+ struct delta_dev *delta = ctx->dev;
+ struct vb2_v4l2_buffer *vbuf;
+ struct delta_frame *frame;
+
+ *pframe = NULL;
+
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ if (!vbuf) {
+ dev_err(delta->dev, "%s no frame available",
+ ctx->name);
+ return -EIO;
+ }
+
+ frame = to_frame(vbuf);
+ frame->state &= ~DELTA_FRAME_M2M;
+ if (frame->state != DELTA_FRAME_FREE) {
+ dev_err(delta->dev,
+ "%s frame[%d] is not free\n",
+ ctx->name, frame->index);
+ dump_frames_status(ctx);
+ return -ENODATA;
+ }
+
+ dev_dbg(delta->dev,
+ "%s get free frame[%d]\n", ctx->name, frame->index);
+
+ *pframe = frame;
+ return 0;
+}
+
+int delta_get_sync(struct delta_ctx *ctx)
+{
+ struct delta_dev *delta = ctx->dev;
+ int ret = 0;
+
+ /* enable the hardware */
+ ret = pm_runtime_resume_and_get(delta->dev);
+ if (ret < 0) {
+ dev_err(delta->dev, "%s pm_runtime_resume_and_get failed (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void delta_put_autosuspend(struct delta_ctx *ctx)
+{
+ struct delta_dev *delta = ctx->dev;
+
+ pm_runtime_put_autosuspend(delta->dev);
+}
+
+static void delta_vb2_au_queue(struct vb2_buffer *vb)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ struct delta_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static int delta_vb2_au_start_streaming(struct vb2_queue *q,
+ unsigned int count)
+{
+ struct delta_ctx *ctx = vb2_get_drv_priv(q);
+ struct delta_dev *delta = ctx->dev;
+ const struct delta_dec *dec = ctx->dec;
+ struct delta_au *au;
+ int ret = 0;
+ struct vb2_v4l2_buffer *vbuf = NULL;
+ struct delta_streaminfo *streaminfo = &ctx->streaminfo;
+ struct delta_frameinfo *frameinfo = &ctx->frameinfo;
+ unsigned char str1[100] = "";
+ unsigned char str2[100] = "";
+
+ if ((ctx->state != DELTA_STATE_WF_FORMAT) &&
+ (ctx->state != DELTA_STATE_WF_STREAMINFO))
+ return 0;
+
+ if (ctx->state == DELTA_STATE_WF_FORMAT) {
+ /* open decoder if not yet done */
+ ret = delta_open_decoder(ctx,
+ ctx->streaminfo.streamformat,
+ ctx->frameinfo.pixelformat, &dec);
+ if (ret)
+ goto err;
+ ctx->dec = dec;
+ ctx->state = DELTA_STATE_WF_STREAMINFO;
+ }
+
+ /*
+ * the first buffer should contain the stream header;
+ * decode it to get the stream-related information
+ * such as width, height, dpb, ...
+ */
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ if (!vbuf) {
+ dev_err(delta->dev, "%s failed to start streaming, no stream header buffer enqueued\n",
+ ctx->name);
+ ret = -EINVAL;
+ goto err;
+ }
+ au = to_au(vbuf);
+ au->size = vb2_get_plane_payload(&vbuf->vb2_buf, 0);
+ au->dts = vbuf->vb2_buf.timestamp;
+
+ delta_push_dts(ctx, au->dts);
+
+ /* dump access unit */
+ dump_au(ctx, au);
+
+ /* decode this access unit */
+ ret = call_dec_op(dec, decode, ctx, au);
+ if (ret) {
+ dev_err(delta->dev, "%s failed to start streaming, header decoding failed (%d)\n",
+ ctx->name, ret);
+ goto err;
+ }
+
+ ret = call_dec_op(dec, get_streaminfo, ctx, streaminfo);
+ if (ret) {
+ dev_dbg_ratelimited(delta->dev,
+ "%s failed to start streaming, valid stream header not yet decoded\n",
+ ctx->name);
+ goto err;
+ }
+ ctx->flags |= DELTA_FLAG_STREAMINFO;
+
+ ret = call_dec_op(dec, get_frameinfo, ctx, frameinfo);
+ if (ret)
+ goto err;
+ ctx->flags |= DELTA_FLAG_FRAMEINFO;
+
+ ctx->state = DELTA_STATE_READY;
+
+ dev_dbg(delta->dev, "%s %s => %s\n", ctx->name,
+ delta_streaminfo_str(streaminfo, str1, sizeof(str1)),
+ delta_frameinfo_str(frameinfo, str2, sizeof(str2)));
+
+ delta_au_done(ctx, au, ret);
+ return 0;
+
+err:
+ /*
+ * return all buffers to vb2 in QUEUED state.
+ * This will give ownership back to userspace
+ */
+ if (vbuf)
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_QUEUED);
+
+ while ((vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_QUEUED);
+ return ret;
+}
+
+static void delta_vb2_au_stop_streaming(struct vb2_queue *q)
+{
+ struct delta_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_v4l2_buffer *vbuf;
+
+ delta_flush_dts(ctx);
+
+ /* return all buffers to vb2 in ERROR state */
+ while ((vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+
+ ctx->au_num = 0;
+
+ ctx->aborting = false;
+}
+
+static int delta_vb2_frame_queue_setup(struct vb2_queue *vq,
+ unsigned int *num_buffers,
+ unsigned int *num_planes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct delta_ctx *ctx = vb2_get_drv_priv(vq);
+ struct delta_dev *delta = ctx->dev;
+ struct delta_streaminfo *streaminfo = &ctx->streaminfo;
+ struct delta_frameinfo *frameinfo = &ctx->frameinfo;
+ unsigned int size = frameinfo->size;
+
+ /*
+ * the number of output buffers needed for decoding =
+ * user need (*num_buffers given, usually for display pipeline) +
+ * stream need (streaminfo->dpb) +
+ * decoding peak smoothing (depends on DELTA IP perf)
+ */
+ if (*num_buffers < DELTA_MIN_FRAME_USER) {
+ dev_dbg(delta->dev,
+ "%s num_buffers too low (%d), increasing to %d\n",
+ ctx->name, *num_buffers, DELTA_MIN_FRAME_USER);
+ *num_buffers = DELTA_MIN_FRAME_USER;
+ }
+
+ *num_buffers += streaminfo->dpb + DELTA_PEAK_FRAME_SMOOTHING;
+
+ if (*num_buffers > DELTA_MAX_FRAMES) {
+ dev_dbg(delta->dev,
+ "%s output frame count too high (%d), cut to %d\n",
+ ctx->name, *num_buffers, DELTA_MAX_FRAMES);
+ *num_buffers = DELTA_MAX_FRAMES;
+ }
+
+ if (*num_planes)
+ return sizes[0] < size ? -EINVAL : 0;
+
+ /* single plane for Y and CbCr */
+ *num_planes = 1;
+
+ sizes[0] = size;
+
+ ctx->nb_of_frames = 0;
+
+ return 0;
+}
+
+static int delta_vb2_frame_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ struct delta_ctx *ctx = vb2_get_drv_priv(q);
+ struct delta_dev *delta = ctx->dev;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct delta_frame *frame = to_frame(vbuf);
+ int ret = 0;
+
+ if (!frame->prepared) {
+ frame->index = vbuf->vb2_buf.index;
+ frame->vaddr = vb2_plane_vaddr(&vbuf->vb2_buf, 0);
+ frame->paddr = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 0);
+ frame->info = ctx->frameinfo;
+
+ ret = delta_setup_frame(ctx, frame);
+ if (ret) {
+ dev_err(delta->dev,
+ "%s setup_frame() failed (%d)\n",
+ ctx->name, ret);
+ return ret;
+ }
+ frame->prepared = true;
+ dev_dbg(delta->dev,
+ "%s frame[%d] prepared; virt=0x%p, phy=0x%pad\n",
+ ctx->name, vb->index, frame->vaddr,
+ &frame->paddr);
+ }
+
+ frame->flags = vbuf->flags;
+
+ return 0;
+}
+
+static void delta_vb2_frame_finish(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct delta_frame *frame = to_frame(vbuf);
+
+ /* update V4L2 fields for user */
+ vb2_set_plane_payload(&vbuf->vb2_buf, 0, frame->info.size);
+ vb->timestamp = frame->dts;
+ vbuf->field = frame->field;
+ vbuf->flags = frame->flags;
+}
+
+static void delta_vb2_frame_queue(struct vb2_buffer *vb)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ struct delta_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct delta_frame *frame = to_frame(vbuf);
+
+ if (ctx->state == DELTA_STATE_WF_EOS) {
+ /* new frame available, EOS can now be completed */
+ delta_complete_eos(ctx, frame);
+
+ ctx->state = DELTA_STATE_EOS;
+
+ /* return, no need to recycle this buffer to decoder */
+ return;
+ }
+
+ /* recycle this frame */
+ delta_recycle(ctx, frame);
+}
+
+static void delta_vb2_frame_stop_streaming(struct vb2_queue *q)
+{
+ struct delta_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_v4l2_buffer *vbuf;
+ struct delta_frame *frame;
+ const struct delta_dec *dec = ctx->dec;
+ unsigned int i;
+
+ delta_flush_dts(ctx);
+
+ call_dec_op(dec, flush, ctx);
+
+ /*
+ * return all buffers to vb2 in ERROR state
+ * & reset each frame state to OUT
+ */
+ for (i = 0; i < ctx->nb_of_frames; i++) {
+ frame = ctx->frames[i];
+ if (!(frame->state & DELTA_FRAME_OUT)) {
+ vbuf = &frame->vbuf;
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ }
+ frame->state = DELTA_FRAME_OUT;
+ }
+
+ ctx->frame_num = 0;
+
+ ctx->aborting = false;
+}
+
+/* VB2 queue ops */
+static const struct vb2_ops delta_vb2_au_ops = {
+ .queue_setup = delta_vb2_au_queue_setup,
+ .buf_prepare = delta_vb2_au_prepare,
+ .buf_queue = delta_vb2_au_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = delta_vb2_au_start_streaming,
+ .stop_streaming = delta_vb2_au_stop_streaming,
+};
+
+static const struct vb2_ops delta_vb2_frame_ops = {
+ .queue_setup = delta_vb2_frame_queue_setup,
+ .buf_prepare = delta_vb2_frame_prepare,
+ .buf_finish = delta_vb2_frame_finish,
+ .buf_queue = delta_vb2_frame_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .stop_streaming = delta_vb2_frame_stop_streaming,
+};
+
+/*
+ * V4L2 file operations
+ */
+
+static int queue_init(void *priv,
+ struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
+{
+ struct vb2_queue *q;
+ struct delta_ctx *ctx = priv;
+ struct delta_dev *delta = ctx->dev;
+ int ret;
+
+ /* setup vb2 queue for stream input */
+ q = src_vq;
+ q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ q->io_modes = VB2_MMAP | VB2_DMABUF;
+ q->drv_priv = ctx;
+ /* overload vb2 buf with private au struct */
+ q->buf_struct_size = sizeof(struct delta_au);
+ q->ops = &delta_vb2_au_ops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ q->lock = &delta->lock;
+ q->dev = delta->dev;
+
+ ret = vb2_queue_init(q);
+ if (ret)
+ return ret;
+
+ /* setup vb2 queue for frame output */
+ q = dst_vq;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_DMABUF;
+ q->drv_priv = ctx;
+ /* overload vb2 buf with private frame struct */
+ q->buf_struct_size = sizeof(struct delta_frame)
+ + DELTA_MAX_FRAME_PRIV_SIZE;
+ q->ops = &delta_vb2_frame_ops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ q->lock = &delta->lock;
+ q->dev = delta->dev;
+
+ return vb2_queue_init(q);
+}
+
+static int delta_open(struct file *file)
+{
+ struct delta_dev *delta = video_drvdata(file);
+ struct delta_ctx *ctx = NULL;
+ int ret = 0;
+
+ mutex_lock(&delta->lock);
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ ctx->dev = delta;
+
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ INIT_WORK(&ctx->run_work, delta_run_work);
+ mutex_init(&ctx->lock);
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(delta->m2m_dev, ctx,
+ queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ dev_err(delta->dev, "%s failed to initialize m2m context (%d)\n",
+ DELTA_PREFIX, ret);
+ goto err_fh_del;
+ }
+
+ /*
+ * wait for the stream format to determine
+ * which decoder to open
+ */
+ ctx->state = DELTA_STATE_WF_FORMAT;
+
+ INIT_LIST_HEAD(&ctx->dts);
+
+ /* set the instance name */
+ delta->instance_id++;
+ snprintf(ctx->name, sizeof(ctx->name), "[%3d:----]",
+ delta->instance_id);
+
+ /* default parameters for frame and stream */
+ set_default_params(ctx);
+
+ /* enable ST231 clocks */
+ if (clk_prepare_enable(delta->clk_st231))
+ dev_warn(delta->dev, "failed to enable st231 clk\n");
+
+ /* enable FLASH_PROMIP clock */
+ if (clk_prepare_enable(delta->clk_flash_promip))
+ dev_warn(delta->dev, "failed to enable delta promip clk\n");
+
+ mutex_unlock(&delta->lock);
+
+ dev_dbg(delta->dev, "%s decoder instance created\n", ctx->name);
+
+ return 0;
+
+err_fh_del:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+err:
+ mutex_unlock(&delta->lock);
+
+ return ret;
+}
+
+static int delta_release(struct file *file)
+{
+ struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_dev *delta = ctx->dev;
+ const struct delta_dec *dec = ctx->dec;
+
+ mutex_lock(&delta->lock);
+
+ /* close decoder */
+ call_dec_op(dec, close, ctx);
+
+ /*
+ * trace a summary of the instance
+ * before closing (for debug purposes)
+ */
+ delta_trace_summary(ctx);
+
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+
+ /* disable ST231 clocks */
+ clk_disable_unprepare(delta->clk_st231);
+
+ /* disable FLASH_PROMIP clock */
+ clk_disable_unprepare(delta->clk_flash_promip);
+
+ dev_dbg(delta->dev, "%s decoder instance released\n", ctx->name);
+
+ kfree(ctx);
+
+ mutex_unlock(&delta->lock);
+ return 0;
+}
+
+/* V4L2 file ops */
+static const struct v4l2_file_operations delta_fops = {
+ .owner = THIS_MODULE,
+ .open = delta_open,
+ .release = delta_release,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+ .poll = v4l2_m2m_fop_poll,
+};
+
+/*
+ * Platform device operations
+ */
+
+static int delta_register_device(struct delta_dev *delta)
+{
+ int ret;
+ struct video_device *vdev;
+
+ if (!delta)
+ return -ENODEV;
+
+ delta->m2m_dev = v4l2_m2m_init(&delta_m2m_ops);
+ if (IS_ERR(delta->m2m_dev)) {
+ dev_err(delta->dev, "%s failed to initialize v4l2-m2m device\n",
+ DELTA_PREFIX);
+ ret = PTR_ERR(delta->m2m_dev);
+ goto err;
+ }
+
+ vdev = video_device_alloc();
+ if (!vdev) {
+ dev_err(delta->dev, "%s failed to allocate video device\n",
+ DELTA_PREFIX);
+ ret = -ENOMEM;
+ goto err_m2m_release;
+ }
+
+ vdev->fops = &delta_fops;
+ vdev->ioctl_ops = &delta_ioctl_ops;
+ vdev->release = video_device_release;
+ vdev->lock = &delta->lock;
+ vdev->vfl_dir = VFL_DIR_M2M;
+ vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M;
+ vdev->v4l2_dev = &delta->v4l2_dev;
+ snprintf(vdev->name, sizeof(vdev->name), "%s-%s",
+ DELTA_NAME, DELTA_FW_VERSION);
+
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
+ if (ret) {
+ dev_err(delta->dev, "%s failed to register video device\n",
+ DELTA_PREFIX);
+ goto err_vdev_release;
+ }
+
+ delta->vdev = vdev;
+ video_set_drvdata(vdev, delta);
+ return 0;
+
+err_vdev_release:
+ video_device_release(vdev);
+err_m2m_release:
+ v4l2_m2m_release(delta->m2m_dev);
+err:
+ return ret;
+}
+
+static void delta_unregister_device(struct delta_dev *delta)
+{
+ if (!delta)
+ return;
+
+ if (delta->m2m_dev)
+ v4l2_m2m_release(delta->m2m_dev);
+
+ video_unregister_device(delta->vdev);
+}
+
+static int delta_probe(struct platform_device *pdev)
+{
+ struct delta_dev *delta;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ delta = devm_kzalloc(dev, sizeof(*delta), GFP_KERNEL);
+ if (!delta) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ delta->dev = dev;
+ delta->pdev = pdev;
+ platform_set_drvdata(pdev, delta);
+
+ mutex_init(&delta->lock);
+
+ /* get clock resources */
+ delta->clk_delta = devm_clk_get(dev, "delta");
+ if (IS_ERR(delta->clk_delta)) {
+ dev_dbg(dev, "%s can't get delta clock\n", DELTA_PREFIX);
+ delta->clk_delta = NULL;
+ }
+
+ delta->clk_st231 = devm_clk_get(dev, "delta-st231");
+ if (IS_ERR(delta->clk_st231)) {
+ dev_dbg(dev, "%s can't get delta-st231 clock\n", DELTA_PREFIX);
+ delta->clk_st231 = NULL;
+ }
+
+ delta->clk_flash_promip = devm_clk_get(dev, "delta-flash-promip");
+ if (IS_ERR(delta->clk_flash_promip)) {
+ dev_dbg(dev, "%s can't get delta-flash-promip clock\n",
+ DELTA_PREFIX);
+ delta->clk_flash_promip = NULL;
+ }
+
+ /* init pm_runtime used for power management */
+ pm_runtime_set_autosuspend_delay(dev, DELTA_HW_AUTOSUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_enable(dev);
+
+ /* init firmware ipc channel */
+ ret = delta_ipc_init(delta);
+ if (ret) {
+ dev_err(delta->dev, "%s failed to initialize firmware ipc channel\n",
+ DELTA_PREFIX);
+ goto err_pm_disable;
+ }
+
+ /* register all available decoders */
+ register_decoders(delta);
+
+ /* register all supported formats */
+ register_formats(delta);
+
+ /* register on V4L2 */
+ ret = v4l2_device_register(dev, &delta->v4l2_dev);
+ if (ret) {
+ dev_err(delta->dev, "%s failed to register V4L2 device\n",
+ DELTA_PREFIX);
+ goto err_pm_disable;
+ }
+
+ delta->work_queue = create_workqueue(DELTA_NAME);
+ if (!delta->work_queue) {
+ dev_err(delta->dev, "%s failed to allocate work queue\n",
+ DELTA_PREFIX);
+ ret = -ENOMEM;
+ goto err_v4l2;
+ }
+
+ /* register device */
+ ret = delta_register_device(delta);
+ if (ret)
+ goto err_work_queue;
+
+ dev_info(dev, "%s %s registered as /dev/video%d\n",
+ DELTA_PREFIX, delta->vdev->name, delta->vdev->num);
+
+ return 0;
+
+err_work_queue:
+ destroy_workqueue(delta->work_queue);
+err_v4l2:
+ v4l2_device_unregister(&delta->v4l2_dev);
+err_pm_disable:
+ pm_runtime_disable(dev);
+err:
+ return ret;
+}
+
+static int delta_remove(struct platform_device *pdev)
+{
+ struct delta_dev *delta = platform_get_drvdata(pdev);
+
+ delta_ipc_exit(delta);
+
+ delta_unregister_device(delta);
+
+ destroy_workqueue(delta->work_queue);
+
+ pm_runtime_put_autosuspend(delta->dev);
+ pm_runtime_disable(delta->dev);
+
+ v4l2_device_unregister(&delta->v4l2_dev);
+
+ return 0;
+}
+
+static int delta_runtime_suspend(struct device *dev)
+{
+ struct delta_dev *delta = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(delta->clk_delta);
+
+ return 0;
+}
+
+static int delta_runtime_resume(struct device *dev)
+{
+ struct delta_dev *delta = dev_get_drvdata(dev);
+
+ if (clk_prepare_enable(delta->clk_delta))
+ dev_warn(dev, "failed to prepare/enable delta clk\n");
+
+ return 0;
+}
+
+/* PM ops */
+static const struct dev_pm_ops delta_pm_ops = {
+ .runtime_suspend = delta_runtime_suspend,
+ .runtime_resume = delta_runtime_resume,
+};
+
+static const struct of_device_id delta_match_types[] = {
+ {
+ .compatible = "st,st-delta",
+ },
+ {
+ /* end node */
+ }
+};
+
+MODULE_DEVICE_TABLE(of, delta_match_types);
+
+static struct platform_driver delta_driver = {
+ .probe = delta_probe,
+ .remove = delta_remove,
+ .driver = {
+ .name = DELTA_NAME,
+ .of_match_table = delta_match_types,
+ .pm = &delta_pm_ops},
+};
+
+module_platform_driver(delta_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Hugues Fruchet <hugues.fruchet@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics DELTA video decoder V4L2 driver");
diff --git a/drivers/media/platform/st/sti/delta/delta.h b/drivers/media/platform/st/sti/delta/delta.h
new file mode 100644
index 000000000..914556030
--- /dev/null
+++ b/drivers/media/platform/st/sti/delta/delta.h
@@ -0,0 +1,566 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Author: Hugues Fruchet <hugues.fruchet@st.com> for STMicroelectronics.
+ */
+
+#ifndef DELTA_H
+#define DELTA_H
+
+#include <linux/rpmsg.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "delta-cfg.h"
+
+/*
+ * enum delta_state - state of decoding instance
+ *
+ *@DELTA_STATE_WF_FORMAT:
+ * Wait for the compressed format to be set by the V4L2 client in
+ * order to know which decoder to open.
+ *
+ *@DELTA_STATE_WF_STREAMINFO:
+ * Wait for stream information to be available (bitstream
+ * header parsing is done).
+ *
+ *@DELTA_STATE_READY:
+ * Decoding instance is ready to decode compressed access unit.
+ *
+ *@DELTA_STATE_WF_EOS:
+ * Decoding instance is waiting for EOS (End Of Stream) completion.
+ *
+ *@DELTA_STATE_EOS:
+ * EOS (End Of Stream) is completed (signaled to user). Decoding instance
+ * should then be closed.
+ */
+enum delta_state {
+ DELTA_STATE_WF_FORMAT,
+ DELTA_STATE_WF_STREAMINFO,
+ DELTA_STATE_READY,
+ DELTA_STATE_WF_EOS,
+ DELTA_STATE_EOS
+};
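+
+/*
+ * Typical state flow of an instance, as driven by delta-v4l2.c
+ * (informational sketch):
+ *
+ *	WF_FORMAT -> WF_STREAMINFO  (decoder opened at start_streaming)
+ *	WF_STREAMINFO -> READY      (stream header successfully decoded)
+ *	READY -> EOS                (V4L2_DEC_CMD_STOP, free frame available)
+ *	READY -> WF_EOS -> EOS      (V4L2_DEC_CMD_STOP, EOS completed at the
+ *	                             next frame queue)
+ */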
+
+/*
+ * struct delta_streaminfo - information about stream to decode
+ *
+ * @flags: validity of fields (crop, pixelaspect, other)
+ * @width: width of video stream
+ * @height: height of video stream
+ * @streamformat: fourcc compressed format of video (MJPEG, MPEG2, ...)
+ * @dpb: number of frames needed to decode a single frame
+ * (h264 dpb, up to 16)
+ * @crop: cropping window inside decoded frame (1920x1080@0,0
+ * inside 1920x1088 frame for ex.)
+ * @pixelaspect: pixel aspect ratio of video (4/3, 5/4)
+ * @field: interlaced or not
+ * @profile: profile string
+ * @level: level string
+ * @other: other string information from codec
+ * @colorspace: colorspace identifier
+ * @xfer_func: transfer function identifier
+ * @ycbcr_enc: Y'CbCr encoding identifier
+ * @quantization: quantization identifier
+ */
+struct delta_streaminfo {
+ u32 flags;
+ u32 streamformat;
+ u32 width;
+ u32 height;
+ u32 dpb;
+ struct v4l2_rect crop;
+ struct v4l2_fract pixelaspect;
+ enum v4l2_field field;
+ u8 profile[32];
+ u8 level[32];
+ u8 other[32];
+ enum v4l2_colorspace colorspace;
+ enum v4l2_xfer_func xfer_func;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_quantization quantization;
+};
+
+#define DELTA_STREAMINFO_FLAG_CROP 0x0001
+#define DELTA_STREAMINFO_FLAG_PIXELASPECT 0x0002
+#define DELTA_STREAMINFO_FLAG_OTHER 0x0004
+
+/*
+ * struct delta_au - access unit structure.
+ *
+ * @vbuf: video buffer information for V4L2
+ * @list: V4L2 m2m list that the access unit belongs to
+ * @prepared: if set vaddr/paddr are resolved
+ * @size: size of the access unit data
+ * @vaddr: virtual address (kernel can read/write)
+ * @paddr: physical address (for hardware)
+ * @flags: access unit type (V4L2_BUF_FLAG_KEYFRAME/PFRAME/BFRAME)
+ * @dts: decoding timestamp of this access unit
+ */
+struct delta_au {
+ struct vb2_v4l2_buffer vbuf; /* keep first */
+ struct list_head list; /* keep second */
+
+ bool prepared;
+ u32 size;
+ void *vaddr;
+ dma_addr_t paddr;
+ u32 flags;
+ u64 dts;
+};
+
+/*
+ * struct delta_frameinfo - information about decoded frame
+ *
+ * @flags: validity of fields (crop, pixelaspect)
+ * @pixelformat: fourcc code for uncompressed video format
+ * @width: width of frame
+ * @height: height of frame
+ * @aligned_width: width of frame (with encoder or decoder alignment
+ * constraint)
+ * @aligned_height: height of frame (with encoder or decoder alignment
+ * constraint)
+ * @size: maximum size in bytes required for data
+ * @crop: cropping window inside frame (1920x1080@0,0
+ * inside 1920x1088 frame for ex.)
+ * @pixelaspect: pixel aspect ratio of video (4/3, 5/4)
+ * @field: interlaced mode
+ * @colorspace: colorspace identifier
+ * @xfer_func: transfer function identifier
+ * @ycbcr_enc: Y'CbCr encoding identifier
+ * @quantization: quantization identifier
+ */
+struct delta_frameinfo {
+ u32 flags;
+ u32 pixelformat;
+ u32 width;
+ u32 height;
+ u32 aligned_width;
+ u32 aligned_height;
+ u32 size;
+ struct v4l2_rect crop;
+ struct v4l2_fract pixelaspect;
+ enum v4l2_field field;
+ enum v4l2_colorspace colorspace;
+ enum v4l2_xfer_func xfer_func;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_quantization quantization;
+};
+
+#define DELTA_FRAMEINFO_FLAG_CROP 0x0001
+#define DELTA_FRAMEINFO_FLAG_PIXELASPECT 0x0002
+
+/*
+ * struct delta_frame - frame structure.
+ *
+ * @vbuf: video buffer information for V4L2
+ * @list: V4L2 m2m list that the frame belongs to
+ * @info: frame information (width, height, format, alignment...)
+ * @prepared: if set pix/vaddr/paddr are resolved
+ * @index: frame index, matching the V4L2 buffer index
+ * @vaddr: virtual address (kernel can read/write)
+ * @paddr: physical address (for hardware)
+ * @state: frame state for frame lifecycle tracking
+ * (DELTA_FRAME_FREE/DEC/OUT/REC/...)
+ * @flags: frame type (V4L2_BUF_FLAG_KEYFRAME/PFRAME/BFRAME)
+ * @dts: decoding timestamp of this frame
+ * @field: field order for interlaced frame
+ */
+struct delta_frame {
+ struct vb2_v4l2_buffer vbuf; /* keep first */
+ struct list_head list; /* keep second */
+
+ struct delta_frameinfo info;
+ bool prepared;
+ u32 index;
+ void *vaddr;
+ dma_addr_t paddr;
+ u32 state;
+ u32 flags;
+ u64 dts;
+ enum v4l2_field field;
+};
+
+/* frame state for frame lifecycle tracking */
+#define DELTA_FRAME_FREE 0x00 /* is free and can be used for decoding */
+#define DELTA_FRAME_REF 0x01 /* is a reference frame */
+#define DELTA_FRAME_BSY 0x02 /* is owned by decoder and busy */
+#define DELTA_FRAME_DEC 0x04 /* contains decoded content */
+#define DELTA_FRAME_OUT 0x08 /* has been given to user */
+#define DELTA_FRAME_RDY 0x10 /* is ready but still held by decoder */
+#define DELTA_FRAME_M2M 0x20 /* is owned by mem2mem framework */
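+
+/*
+ * These state values are bit flags and may be combined (a frame can for
+ * instance be both decoded and output to user: DELTA_FRAME_DEC |
+ * DELTA_FRAME_OUT). A frame is requeued to the decoder only once its
+ * state is back to DELTA_FRAME_FREE, see delta_recycle() in delta-v4l2.c.
+ */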
+
+/*
+ * struct delta_dts - decoding timestamp.
+ *
+ * @list: list to chain timestamps
+ * @val: timestamp in microseconds
+ */
+struct delta_dts {
+ struct list_head list;
+ u64 val;
+};
+
+struct delta_buf {
+ u32 size;
+ void *vaddr;
+ dma_addr_t paddr;
+ const char *name;
+ unsigned long attrs;
+};
+
+struct delta_ipc_ctx {
+ int cb_err;
+ u32 copro_hdl;
+ struct completion done;
+ struct delta_buf ipc_buf_struct;
+ struct delta_buf *ipc_buf;
+};
+
+struct delta_ipc_param {
+ u32 size;
+ void *data;
+};
+
+struct delta_ctx;
+
+/*
+ * struct delta_dec - decoder structure.
+ *
+ * @name: name of this decoder
+ * @streamformat: input stream format that this decoder supports
+ * @pixelformat: pixel format of decoded frame that this decoder supports
+ * @max_width: (optional) maximum width that this decoder can decode;
+ * if not set, maximum width is DELTA_MAX_WIDTH
+ * @max_height: (optional) maximum height that this decoder can decode;
+ * if not set, maximum height is DELTA_MAX_HEIGHT
+ * @pm: (optional) if set, decoder will manage power on its own
+ * @open: open this decoder
+ * @close: close this decoder
+ * @setup_frame: setup frame to be used by decoder, see below
+ * @get_streaminfo: get stream related infos, see below
+ * @get_frameinfo: get decoded frame related infos, see below
+ * @set_frameinfo: (optional) set decoded frame related infos, see below
+ * @decode: decode a single access unit, see below
+ * @get_frame: get the next decoded frame available, see below
+ * @recycle: recycle the given frame, see below
+ * @flush: (optional) flush decoder, see below
+ * @drain: (optional) drain decoder, see below
+ */
+struct delta_dec {
+ const char *name;
+ u32 streamformat;
+ u32 pixelformat;
+ u32 max_width;
+ u32 max_height;
+ bool pm;
+
+ /*
+ * decoder ops
+ */
+ int (*open)(struct delta_ctx *ctx);
+ int (*close)(struct delta_ctx *ctx);
+
+ /*
+ * setup_frame() - setup frame to be used by decoder
+ * @ctx: (in) instance
+ * @frame: (in) frame to use
+ * @frame.index (in) identifier of frame
+ * @frame.vaddr (in) virtual address (kernel can read/write)
+ * @frame.paddr (in) physical address (for hardware)
+ *
+ * Frame is to be allocated by caller, then given
+ * to decoder through this call.
+ * Several frames must be given to decoder (dpb),
+ * each frame is identified using its index.
+ */
+ int (*setup_frame)(struct delta_ctx *ctx, struct delta_frame *frame);
+
+ /*
+ * get_streaminfo() - get stream related infos
+ * @ctx: (in) instance
+ * @streaminfo: (out) width, height, dpb,...
+ *
+ * Precondition: stream header must have been successfully
+ * parsed to have this call successful & @streaminfo valid.
+ * Header parsing must be done using decode(), explicitly giving
+ * the header access unit or the first access unit of the bitstream.
+ * If no valid header is found, get_streaminfo will return -ENODATA;
+ * in this case the next bitstream access unit must be decoded until
+ * get_streaminfo becomes successful.
+ */
+ int (*get_streaminfo)(struct delta_ctx *ctx,
+ struct delta_streaminfo *streaminfo);
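+
+ /*
+ * A minimal caller-side sketch of the probing sequence described
+ * above (delta-v4l2.c does this once at start_streaming time, the
+ * retry loop over access units is only illustrative):
+ *
+ *	ret = decode(ctx, au);
+ *	if (!ret)
+ *		ret = get_streaminfo(ctx, &streaminfo);
+ *	// retry with the next access unit while ret == -ENODATA
+ */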
+
+ /*
+ * get_frameinfo() - get decoded frame related infos
+ * @ctx: (in) instance
+ * @frameinfo: (out) width, height, alignment, crop, ...
+ *
+ * Precondition: get_streaminfo() must be successful
+ */
+ int (*get_frameinfo)(struct delta_ctx *ctx,
+ struct delta_frameinfo *frameinfo);
+
+ /*
+ * set_frameinfo() - set decoded frame related infos
+ * @ctx: (in) instance
+ * @frameinfo: (out) width, height, alignment, crop, ...
+ *
+ * Optional.
+ * Typically used to negotiate the output frame with the
+ * decoder if the decoder can do post-processing.
+ */
+ int (*set_frameinfo)(struct delta_ctx *ctx,
+ struct delta_frameinfo *frameinfo);
+
+ /*
+ * decode() - decode a single access unit
+ * @ctx: (in) instance
+ * @au: (in/out) access unit
+ * @au.size (in) size of au to decode
+ * @au.vaddr (in) virtual address (kernel can read/write)
+ * @au.paddr (in) physical address (for hardware)
+ * @au.flags (out) au type (V4L2_BUF_FLAG_KEYFRAME/
+ * PFRAME/BFRAME)
+ *
+ * Decode the given access unit. Decoding is synchronous;
+ * the access unit memory is no longer needed after this call.
+ * After this call, zero, one or several frames may
+ * have been decoded; they can be retrieved using
+ * get_frame().
+ */
+ int (*decode)(struct delta_ctx *ctx, struct delta_au *au);
+
+ /*
+ * get_frame() - get the next decoded frame available
+ * @ctx: (in) instance
+ * @frame: (out) frame with decoded data:
+ * @frame.index (out) identifier of frame
+ * @frame.field (out) field order for interlaced frame
+ * @frame.state (out) frame state for frame lifecycle tracking
+ * @frame.flags (out) frame type (V4L2_BUF_FLAG_KEYFRAME/
+ * PFRAME/BFRAME)
+ *
+ * Get the next available decoded frame.
+ * If no frame is available, -ENODATA is returned.
+ * If a frame is available, the frame structure is filled with
+ * relevant data, frame.index identifying this exact frame.
+ * When this frame is no longer needed by upper layers,
+ * recycle() must be called with this frame identifier.
+ */
+ int (*get_frame)(struct delta_ctx *ctx, struct delta_frame **frame);
+
+ /*
+ * recycle() - recycle the given frame
+ * @ctx: (in) instance
+ * @frame: (in) frame to recycle:
+ * @frame.index (in) identifier of frame
+ *
+ * recycle() is to be called by the user when the decoded frame
+ * is no longer needed (composition/display done).
+ * This frame will then be reused by the decoder to proceed
+ * with the next frame decoding.
+ * If not enough frames have been provided through setup_frame(),
+ * or recycle() is not called fast enough, the decoder can run out
+ * of available frames to proceed with decoding (starvation).
+ * This case is guarded by wq_recycle wait queue which ensures that
+ * decoder is called only if at least one frame is available.
+ */
+ int (*recycle)(struct delta_ctx *ctx, struct delta_frame *frame);
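+
+ /*
+ * Frame lifecycle sketch, as implemented in delta_run_work() and
+ * delta_vb2_frame_queue() of delta-v4l2.c (informational only):
+ *
+ *	decode(ctx, au);                // may complete 0..n frames
+ *	while (!get_frame(ctx, &frame))
+ *		delta_frame_done(ctx, frame, 0);  // output frame to user
+ *	...
+ *	recycle(ctx, frame);            // when user requeues the buffer
+ */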
+
+ /*
+ * flush() - flush decoder
+ * @ctx: (in) instance
+ *
+ * Optional.
+ * Reset the decoder context and discard all internal buffers.
+ * This allows the implementation of seek, which introduces a
+ * discontinuity in the input bitstream that the decoder must be
+ * aware of in order to restart its internal decoding logic.
+ */
+ int (*flush)(struct delta_ctx *ctx);
+
+ /*
+ * drain() - drain decoder
+ * @ctx: (in) instance
+ *
+ * Optional.
+ * Mark decoder pending frames (decoded but not yet output) as ready
+ * so that they can be output to client at EOS (End Of Stream).
+ * get_frame() is to be called in a loop right after drain() to
+ * get all those pending frames.
+ */
+ int (*drain)(struct delta_ctx *ctx);
+};
+
+struct delta_dev;
+
+/*
+ * struct delta_ctx - instance structure.
+ *
+ * @flags: validity of fields (streaminfo)
+ * @fh: V4L2 file handle
+ * @dev: device context
+ * @dec: selected decoder context for this instance
+ * @ipc_ctx: context of IPC communication with firmware
+ * @state: instance state
+ * @frame_num: frame number
+ * @au_num: access unit number
+ * @max_au_size: max size of an access unit
+ * @streaminfo: stream information (width, height, dpb, interlacing...)
+ * @frameinfo: frame information (width, height, format, alignment...)
+ * @nb_of_frames: number of frames available for decoding
+ * @frames: array of decoding frames to keep track of frame
+ * state and manage frame recycling
+ * @decoded_frames: nb of frames decoded since opening
+ * @output_frames: nb of frames output since opening
+ * @dropped_frames: nb of frames dropped (i.e. access unit not parsed
+ * or frame decoded but not output)
+ * @stream_errors: nb of stream errors (corrupted, not supported, ...)
+ * @decode_errors: nb of decode errors (firmware error)
+ * @sys_errors: nb of system errors (memory, ipc, ...)
+ * @dts: FIFO of decoding timestamps;
+ * output frames are timestamped with the incoming
+ * access unit timestamps using this FIFO.
+ * @name: string naming this instance (debug purpose)
+ * @run_work: decoding work
+ * @lock: lock for decoding work serialization
+ * @aborting: true if current job aborted
+ * @priv: private decoder context for this instance, allocated
+ * by the decoder at open() time.
+ */
+struct delta_ctx {
+ u32 flags;
+ struct v4l2_fh fh;
+ struct delta_dev *dev;
+ const struct delta_dec *dec;
+ struct delta_ipc_ctx ipc_ctx;
+
+ enum delta_state state;
+ u32 frame_num;
+ u32 au_num;
+ size_t max_au_size;
+ struct delta_streaminfo streaminfo;
+ struct delta_frameinfo frameinfo;
+ u32 nb_of_frames;
+ struct delta_frame *frames[DELTA_MAX_FRAMES];
+ u32 decoded_frames;
+ u32 output_frames;
+ u32 dropped_frames;
+ u32 stream_errors;
+ u32 decode_errors;
+ u32 sys_errors;
+ struct list_head dts;
+ char name[100];
+ struct work_struct run_work;
+ struct mutex lock;
+ bool aborting;
+ void *priv;
+};
+
+#define DELTA_FLAG_STREAMINFO 0x0001
+#define DELTA_FLAG_FRAMEINFO 0x0002
+
+#define DELTA_MAX_FORMATS DELTA_MAX_DECODERS
+
+/*
+ * struct delta_dev - device structure, one per probe (so a single one
+ * for the whole platform lifetime)
+ *
+ * @v4l2_dev: v4l2 device
+ * @vdev: v4l2 video device
+ * @pdev: platform device
+ * @dev: device
+ * @m2m_dev: memory-to-memory V4L2 device
+ * @lock: device lock, for critical section & V4L2 ops serialization.
+ * @clk_delta: delta main clock
+ * @clk_st231: st231 coprocessor main clock
+ * @clk_flash_promip: flash promip clock
+ * @decoders: list of registered decoders
+ * @nb_of_decoders: nb of registered decoders
+ * @pixelformats: supported uncompressed video formats
+ * @nb_of_pixelformats: number of supported uncompressed video formats
+ * @streamformats: supported compressed video formats
+ * @nb_of_streamformats: number of supported compressed video formats
+ * @instance_id: rolling counter identifying an instance (debug purpose)
+ * @work_queue: decoding job work queue
+ * @rpmsg_driver: rpmsg IPC driver
+ * @rpmsg_device: rpmsg IPC device
+ */
+struct delta_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device *vdev;
+ struct platform_device *pdev;
+ struct device *dev;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct mutex lock;
+ struct clk *clk_delta;
+ struct clk *clk_st231;
+ struct clk *clk_flash_promip;
+ const struct delta_dec *decoders[DELTA_MAX_DECODERS];
+ u32 nb_of_decoders;
+ u32 pixelformats[DELTA_MAX_FORMATS];
+ u32 nb_of_pixelformats;
+ u32 streamformats[DELTA_MAX_FORMATS];
+ u32 nb_of_streamformats;
+ u8 instance_id;
+ struct workqueue_struct *work_queue;
+ struct rpmsg_driver rpmsg_driver;
+ struct rpmsg_device *rpmsg_device;
+};
+
+static inline char *frame_type_str(u32 flags)
+{
+ if (flags & V4L2_BUF_FLAG_KEYFRAME)
+ return "I";
+ if (flags & V4L2_BUF_FLAG_PFRAME)
+ return "P";
+ if (flags & V4L2_BUF_FLAG_BFRAME)
+ return "B";
+ if (flags & V4L2_BUF_FLAG_LAST)
+ return "EOS";
+ return "?";
+}
+
+static inline char *frame_field_str(enum v4l2_field field)
+{
+ if (field == V4L2_FIELD_NONE)
+ return "-";
+ if (field == V4L2_FIELD_TOP)
+ return "T";
+ if (field == V4L2_FIELD_BOTTOM)
+ return "B";
+ if (field == V4L2_FIELD_INTERLACED)
+ return "I";
+ if (field == V4L2_FIELD_INTERLACED_TB)
+ return "TB";
+ if (field == V4L2_FIELD_INTERLACED_BT)
+ return "BT";
+ return "?";
+}
+
+static inline char *frame_state_str(u32 state, char *str, unsigned int len)
+{
+ snprintf(str, len, "%s %s %s %s %s %s",
+ (state & DELTA_FRAME_REF) ? "ref" : " ",
+ (state & DELTA_FRAME_BSY) ? "bsy" : " ",
+ (state & DELTA_FRAME_DEC) ? "dec" : " ",
+ (state & DELTA_FRAME_OUT) ? "out" : " ",
+ (state & DELTA_FRAME_M2M) ? "m2m" : " ",
+ (state & DELTA_FRAME_RDY) ? "rdy" : " ");
+ return str;
+}
+
+int delta_get_frameinfo_default(struct delta_ctx *ctx,
+ struct delta_frameinfo *frameinfo);
+int delta_recycle_default(struct delta_ctx *pctx,
+ struct delta_frame *frame);
+
+int delta_get_free_frame(struct delta_ctx *ctx,
+ struct delta_frame **pframe);
+
+int delta_get_sync(struct delta_ctx *ctx);
+void delta_put_autosuspend(struct delta_ctx *ctx);
+
+#endif /* DELTA_H */
diff --git a/drivers/media/platform/st/sti/hva/Kconfig b/drivers/media/platform/st/sti/hva/Kconfig
new file mode 100644
index 000000000..46d6f82f6
--- /dev/null
+++ b/drivers/media/platform/st/sti/hva/Kconfig
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VIDEO_STI_HVA
+ tristate "STMicroelectronics HVA multi-format video encoder V4L2 driver"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on VIDEO_DEV
+ depends on ARCH_STI || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ This V4L2 driver enables the HVA (Hardware Video Accelerator) multi-format
+ video encoder of STMicroelectronics SoCs, allowing hardware encoding of
+ raw uncompressed formats into various compressed video bitstream formats.
+
+ To compile this driver as a module, choose M here:
+ the module will be called st-hva.
+
+config VIDEO_STI_HVA_DEBUGFS
+ bool "Export STMicroelectronics HVA internals in debugfs"
+ depends on VIDEO_STI_HVA
+ depends on DEBUG_FS
+ help
+ Select this to see information about the internal state and the last
+ operation of STMicroelectronics HVA multi-format video encoder in
+ debugfs.
+
+ Choose N unless you know you need this.
diff --git a/drivers/media/platform/st/sti/hva/Makefile b/drivers/media/platform/st/sti/hva/Makefile
new file mode 100644
index 000000000..b5a5478bd
--- /dev/null
+++ b/drivers/media/platform/st/sti/hva/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_VIDEO_STI_HVA) += st-hva.o
+st-hva-y := hva-v4l2.o hva-hw.o hva-mem.o hva-h264.o
+st-hva-$(CONFIG_VIDEO_STI_HVA_DEBUGFS) += hva-debugfs.o
diff --git a/drivers/media/platform/st/sti/hva/hva-debugfs.c b/drivers/media/platform/st/sti/hva/hva-debugfs.c
new file mode 100644
index 000000000..a86a07b6f
--- /dev/null
+++ b/drivers/media/platform/st/sti/hva/hva-debugfs.c
@@ -0,0 +1,396 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Yannick Fertre <yannick.fertre@st.com>
+ * Hugues Fruchet <hugues.fruchet@st.com>
+ */
+
+#include <linux/debugfs.h>
+
+#include "hva.h"
+#include "hva-hw.h"
+
+static void format_ctx(struct seq_file *s, struct hva_ctx *ctx)
+{
+ struct hva_streaminfo *stream = &ctx->streaminfo;
+ struct hva_frameinfo *frame = &ctx->frameinfo;
+ struct hva_controls *ctrls = &ctx->ctrls;
+ struct hva_ctx_dbg *dbg = &ctx->dbg;
+ u32 bitrate_mode, aspect, entropy, vui_sar, sei_fp;
+
+ seq_printf(s, "|-%s\n |\n", ctx->name);
+
+ seq_printf(s, " |-[%sframe info]\n",
+ ctx->flags & HVA_FLAG_FRAMEINFO ? "" : "default ");
+ seq_printf(s, " | |- pixel format=%4.4s\n"
+ " | |- wxh=%dx%d\n"
+ " | |- wxh (w/ encoder alignment constraint)=%dx%d\n"
+ " |\n",
+ (char *)&frame->pixelformat,
+ frame->width, frame->height,
+ frame->aligned_width, frame->aligned_height);
+
+ seq_printf(s, " |-[%sstream info]\n",
+ ctx->flags & HVA_FLAG_STREAMINFO ? "" : "default ");
+ seq_printf(s, " | |- stream format=%4.4s\n"
+ " | |- wxh=%dx%d\n"
+ " | |- %s\n"
+ " | |- %s\n"
+ " |\n",
+ (char *)&stream->streamformat,
+ stream->width, stream->height,
+ stream->profile, stream->level);
+
+ bitrate_mode = V4L2_CID_MPEG_VIDEO_BITRATE_MODE;
+ aspect = V4L2_CID_MPEG_VIDEO_ASPECT;
+ seq_puts(s, " |-[parameters]\n");
+ seq_printf(s, " | |- %s\n"
+ " | |- bitrate=%d bps\n"
+ " | |- GOP size=%d\n"
+ " | |- video aspect=%s\n"
+ " | |- framerate=%d/%d\n",
+ v4l2_ctrl_get_menu(bitrate_mode)[ctrls->bitrate_mode],
+ ctrls->bitrate,
+ ctrls->gop_size,
+ v4l2_ctrl_get_menu(aspect)[ctrls->aspect],
+ ctrls->time_per_frame.denominator,
+ ctrls->time_per_frame.numerator);
+
+ entropy = V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE;
+ vui_sar = V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC;
+ sei_fp = V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE;
+ if (stream->streamformat == V4L2_PIX_FMT_H264) {
+ seq_printf(s, " | |- %s entropy mode\n"
+ " | |- CPB size=%d kB\n"
+ " | |- DCT8x8 enable=%s\n"
+ " | |- qpmin=%d\n"
+ " | |- qpmax=%d\n"
+ " | |- PAR enable=%s\n"
+ " | |- PAR id=%s\n"
+ " | |- SEI frame packing enable=%s\n"
+ " | |- SEI frame packing type=%s\n",
+ v4l2_ctrl_get_menu(entropy)[ctrls->entropy_mode],
+ ctrls->cpb_size,
+ ctrls->dct8x8 ? "true" : "false",
+ ctrls->qpmin,
+ ctrls->qpmax,
+ ctrls->vui_sar ? "true" : "false",
+ v4l2_ctrl_get_menu(vui_sar)[ctrls->vui_sar_idc],
+ ctrls->sei_fp ? "true" : "false",
+ v4l2_ctrl_get_menu(sei_fp)[ctrls->sei_fp_type]);
+ }
+
+ if (ctx->sys_errors || ctx->encode_errors || ctx->frame_errors) {
+ seq_puts(s, " |\n |-[errors]\n");
+ seq_printf(s, " | |- system=%d\n"
+ " | |- encoding=%d\n"
+ " | |- frame=%d\n",
+ ctx->sys_errors,
+ ctx->encode_errors,
+ ctx->frame_errors);
+ }
+
+ seq_puts(s, " |\n |-[performances]\n");
+ seq_printf(s, " | |- frames encoded=%d\n"
+ " | |- avg HW processing duration (0.1ms)=%d [min=%d, max=%d]\n"
+ " | |- avg encoding period (0.1ms)=%d [min=%d, max=%d]\n"
+ " | |- avg fps (0.1Hz)=%d\n"
+ " | |- max reachable fps (0.1Hz)=%d\n"
+ " | |- avg bitrate (kbps)=%d [min=%d, max=%d]\n"
+ " | |- last bitrate (kbps)=%d\n",
+ dbg->cnt_duration,
+ dbg->avg_duration,
+ dbg->min_duration,
+ dbg->max_duration,
+ dbg->avg_period,
+ dbg->min_period,
+ dbg->max_period,
+ dbg->avg_fps,
+ dbg->max_fps,
+ dbg->avg_bitrate,
+ dbg->min_bitrate,
+ dbg->max_bitrate,
+ dbg->last_bitrate);
+}
+
+/*
+ * performance debug info
+ */
+void hva_dbg_perf_begin(struct hva_ctx *ctx)
+{
+ u64 div;
+ u32 period;
+ u32 bitrate;
+ struct hva_ctx_dbg *dbg = &ctx->dbg;
+ ktime_t prev = dbg->begin;
+
+ dbg->begin = ktime_get();
+
+ if (dbg->is_valid_period) {
+ /* encoding period */
+ div = (u64)ktime_us_delta(dbg->begin, prev);
+ do_div(div, 100);
+ period = (u32)div;
+ dbg->min_period = min(period, dbg->min_period);
+ dbg->max_period = max(period, dbg->max_period);
+ dbg->total_period += period;
+ dbg->cnt_period++;
+
+ /*
+ * minimum and maximum bitrates are based on the
+ * encoding period values upon a window of 32 samples
+ */
+ dbg->window_duration += period;
+ dbg->cnt_window++;
+ if (dbg->cnt_window >= 32) {
+ /*
+ * bitrate in kbps = (size * 8 / 1000) /
+ * (duration / 10000)
+ * = size * 80 / duration
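+ * e.g. 100000 bytes over a window of 10000 (1 s)
+ * give 100000 * 80 / 10000 = 800 kbps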
+ */
+ if (dbg->window_duration > 0) {
+ div = (u64)dbg->window_stream_size * 80;
+ do_div(div, dbg->window_duration);
+ bitrate = (u32)div;
+ dbg->last_bitrate = bitrate;
+ dbg->min_bitrate = min(bitrate,
+ dbg->min_bitrate);
+ dbg->max_bitrate = max(bitrate,
+ dbg->max_bitrate);
+ }
+ dbg->window_stream_size = 0;
+ dbg->window_duration = 0;
+ dbg->cnt_window = 0;
+ }
+ }
+
+ /*
+ * filter sequences valid for performance:
+ * - begin/begin (no stream available) is an invalid sequence
+ * - begin/end is a valid sequence
+ */
+ dbg->is_valid_period = false;
+}
+
+void hva_dbg_perf_end(struct hva_ctx *ctx, struct hva_stream *stream)
+{
+ struct device *dev = ctx_to_dev(ctx);
+ u64 div;
+ u32 duration;
+ u32 bytesused;
+ u32 timestamp;
+ struct hva_ctx_dbg *dbg = &ctx->dbg;
+ ktime_t end = ktime_get();
+
+ /* stream bytesused and timestamp in us */
+ bytesused = vb2_get_plane_payload(&stream->vbuf.vb2_buf, 0);
+ div = stream->vbuf.vb2_buf.timestamp;
+ do_div(div, 1000);
+ timestamp = (u32)div;
+
+ /* encoding duration */
+ div = (u64)ktime_us_delta(end, dbg->begin);
+
+ dev_dbg(dev,
+ "%s perf stream[%d] dts=%d encoded using %d bytes in %d us",
+ ctx->name,
+ stream->vbuf.sequence,
+ timestamp,
+ bytesused, (u32)div);
+
+ do_div(div, 100);
+ duration = (u32)div;
+
+ dbg->min_duration = min(duration, dbg->min_duration);
+ dbg->max_duration = max(duration, dbg->max_duration);
+ dbg->total_duration += duration;
+ dbg->cnt_duration++;
+
+ /*
+ * the average bitrate is based on the total stream size
+ * and the total encoding periods
+ */
+ dbg->total_stream_size += bytesused;
+ dbg->window_stream_size += bytesused;
+
+ dbg->is_valid_period = true;
+}
+
+static void hva_dbg_perf_compute(struct hva_ctx *ctx)
+{
+ u64 div;
+ struct hva_ctx_dbg *dbg = &ctx->dbg;
+
+ if (dbg->cnt_duration > 0) {
+ div = (u64)dbg->total_duration;
+ do_div(div, dbg->cnt_duration);
+ dbg->avg_duration = (u32)div;
+ } else {
+ dbg->avg_duration = 0;
+ }
+
+ if (dbg->total_duration > 0) {
+ div = (u64)dbg->cnt_duration * 100000;
+ do_div(div, dbg->total_duration);
+ dbg->max_fps = (u32)div;
+ } else {
+ dbg->max_fps = 0;
+ }
+
+ if (dbg->cnt_period > 0) {
+ div = (u64)dbg->total_period;
+ do_div(div, dbg->cnt_period);
+ dbg->avg_period = (u32)div;
+ } else {
+ dbg->avg_period = 0;
+ }
+
+ if (dbg->total_period > 0) {
+ div = (u64)dbg->cnt_period * 100000;
+ do_div(div, dbg->total_period);
+ dbg->avg_fps = (u32)div;
+ } else {
+ dbg->avg_fps = 0;
+ }
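+ /*
+ * the fps values above are in 0.1 Hz units: e.g. 300 periods
+ * totalling 100000 (10 s) give avg_fps = 300, i.e. 30.0 fps
+ */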
+
+ if (dbg->total_period > 0) {
+ /*
+ * bitrate in kbps = (video size * 8 / 1000) /
+ * (video duration / 10000)
+ * = video size * 80 / video duration
+ */
+ div = (u64)dbg->total_stream_size * 80;
+ do_div(div, dbg->total_period);
+ dbg->avg_bitrate = (u32)div;
+ } else {
+ dbg->avg_bitrate = 0;
+ }
+}
+
+/*
+ * device debug info
+ */
+
+static int device_show(struct seq_file *s, void *data)
+{
+ struct hva_dev *hva = s->private;
+
+ seq_printf(s, "[%s]\n", hva->v4l2_dev.name);
+ seq_printf(s, "registered as /dev/video%d\n", hva->vdev->num);
+
+ return 0;
+}
+
+static int encoders_show(struct seq_file *s, void *data)
+{
+ struct hva_dev *hva = s->private;
+ unsigned int i = 0;
+
+ seq_printf(s, "[encoders]\n|- %d registered encoders:\n",
+ hva->nb_of_encoders);
+
+ while (hva->encoders[i]) {
+ seq_printf(s, "|- %s: %4.4s => %4.4s\n", hva->encoders[i]->name,
+ (char *)&hva->encoders[i]->pixelformat,
+ (char *)&hva->encoders[i]->streamformat);
+ i++;
+ }
+
+ return 0;
+}
+
+static int last_show(struct seq_file *s, void *data)
+{
+ struct hva_dev *hva = s->private;
+ struct hva_ctx *last_ctx = &hva->dbg.last_ctx;
+
+ if (last_ctx->flags & HVA_FLAG_STREAMINFO) {
+ seq_puts(s, "[last encoding]\n");
+
+ hva_dbg_perf_compute(last_ctx);
+ format_ctx(s, last_ctx);
+ } else {
+ seq_puts(s, "[no information recorded about last encoding]\n");
+ }
+
+ return 0;
+}
+
+static int regs_show(struct seq_file *s, void *data)
+{
+ struct hva_dev *hva = s->private;
+
+ hva_hw_dump_regs(hva, s);
+
+ return 0;
+}
+
+#define hva_dbg_create_entry(name) \
+ debugfs_create_file(#name, 0444, hva->dbg.debugfs_entry, hva, \
+ &name##_fops)
+
+DEFINE_SHOW_ATTRIBUTE(device);
+DEFINE_SHOW_ATTRIBUTE(encoders);
+DEFINE_SHOW_ATTRIBUTE(last);
+DEFINE_SHOW_ATTRIBUTE(regs);
+
+void hva_debugfs_create(struct hva_dev *hva)
+{
+ hva->dbg.debugfs_entry = debugfs_create_dir(HVA_NAME, NULL);
+
+ hva_dbg_create_entry(device);
+ hva_dbg_create_entry(encoders);
+ hva_dbg_create_entry(last);
+ hva_dbg_create_entry(regs);
+}
+
+void hva_debugfs_remove(struct hva_dev *hva)
+{
+ debugfs_remove_recursive(hva->dbg.debugfs_entry);
+ hva->dbg.debugfs_entry = NULL;
+}
+
+/*
+ * context (instance) debug info
+ */
+
+static int ctx_show(struct seq_file *s, void *data)
+{
+ struct hva_ctx *ctx = s->private;
+
+ seq_printf(s, "[running encoding %d]\n", ctx->id);
+
+ hva_dbg_perf_compute(ctx);
+ format_ctx(s, ctx);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(ctx);
+
+void hva_dbg_ctx_create(struct hva_ctx *ctx)
+{
+ struct hva_dev *hva = ctx->hva_dev;
+ char name[4] = "";
+
+ ctx->dbg.min_duration = UINT_MAX;
+ ctx->dbg.min_period = UINT_MAX;
+ ctx->dbg.min_bitrate = UINT_MAX;
+
+ snprintf(name, sizeof(name), "%d", hva->instance_id);
+
+ ctx->dbg.debugfs_entry = debugfs_create_file(name, 0444,
+ hva->dbg.debugfs_entry,
+ ctx, &ctx_fops);
+}
+
+void hva_dbg_ctx_remove(struct hva_ctx *ctx)
+{
+ struct hva_dev *hva = ctx->hva_dev;
+
+ if (ctx->flags & HVA_FLAG_STREAMINFO)
+ /* save context before removing */
+ memcpy(&hva->dbg.last_ctx, ctx, sizeof(*ctx));
+
+ debugfs_remove(ctx->dbg.debugfs_entry);
+}
diff --git a/drivers/media/platform/st/sti/hva/hva-h264.c b/drivers/media/platform/st/sti/hva/hva-h264.c
new file mode 100644
index 000000000..98cb00d2d
--- /dev/null
+++ b/drivers/media/platform/st/sti/hva/hva-h264.c
@@ -0,0 +1,1063 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Yannick Fertre <yannick.fertre@st.com>
+ * Hugues Fruchet <hugues.fruchet@st.com>
+ */
+
+#include "hva.h"
+#include "hva-hw.h"
+
+#define MAX_SPS_PPS_SIZE 128
+
+#define BITSTREAM_OFFSET_MASK 0x7F
+
+/* video max size */
+#define H264_MAX_SIZE_W 1920
+#define H264_MAX_SIZE_H 1920
+
+/* number of macroblocks (width & height) */
+#define MB_W(w) ((w + 0xF) / 0x10)
+#define MB_H(h) ((h + 0xF) / 0x10)
+
+/* formula to get temporal or spatial data size */
+#define DATA_SIZE(w, h) (MB_W(w) * MB_H(h) * 16)
+
+#define SEARCH_WINDOW_BUFFER_MAX_SIZE(w) ((4 * MB_W(w) + 42) * 256 * 3 / 2)
+#define CABAC_CONTEXT_BUFFER_MAX_SIZE(w) (MB_W(w) * 16)
+#define CTX_MB_BUFFER_MAX_SIZE(w) (MB_W(w) * 16 * 8)
+#define SLICE_HEADER_SIZE (4 * 16)
+#define BRC_DATA_SIZE (5 * 16)
+
+/* source buffer copy in YUV 420 MB-tiled format with size=16*256*3/2 */
+#define CURRENT_WINDOW_BUFFER_MAX_SIZE (16 * 256 * 3 / 2)
+
+/*
+ * 4 lines of pixels (in Luma, Chroma blue and Chroma red) of top MB
+ * for deblocking with size=4*16*MBx*2
+ */
+#define LOCAL_RECONSTRUCTED_BUFFER_MAX_SIZE(w) (4 * 16 * MB_W(w) * 2)
+
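+/*
+ * For illustration, with a 1920x1080 frame (MB_W = 120, MB_H = 68) the
+ * macros above give:
+ *    DATA_SIZE                           = 120 * 68 * 16 = 130560 bytes
+ *    SEARCH_WINDOW_BUFFER_MAX_SIZE       = 200448 bytes
+ *    CABAC_CONTEXT_BUFFER_MAX_SIZE       = 1920 bytes
+ *    CTX_MB_BUFFER_MAX_SIZE              = 15360 bytes
+ *    LOCAL_RECONSTRUCTED_BUFFER_MAX_SIZE = 15360 bytes
+ * so the esram amount checked in hva_h264_open() (search window + local
+ * reconstructed + ctx mb + cabac context) is 233088 bytes (~228 KiB).
+ */
+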
+/* factor for bitrate and cpb buffer size max values if profile >= high */
+#define H264_FACTOR_HIGH 1200
+
+/* factor for bitrate and cpb buffer size max values if profile < high */
+#define H264_FACTOR_BASELINE 1000
+
+/* number of bytes for NALU_TYPE_FILLER_DATA header and footer */
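+/* (4-byte start code + 1-byte NAL header + 1-byte rbsp trailing 0x80) */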
+#define H264_FILLER_DATA_SIZE 6
+
+struct h264_profile {
+ enum v4l2_mpeg_video_h264_level level;
+ u32 max_mb_per_seconds;
+ u32 max_frame_size;
+ u32 max_bitrate;
+ u32 max_cpb_size;
+ u32 min_comp_ratio;
+};
+
+static const struct h264_profile h264_infos_list[] = {
+ {V4L2_MPEG_VIDEO_H264_LEVEL_1_0, 1485, 99, 64, 175, 2},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_1B, 1485, 99, 128, 350, 2},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_1_1, 3000, 396, 192, 500, 2},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_1_2, 6000, 396, 384, 1000, 2},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_1_3, 11880, 396, 768, 2000, 2},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_2_0, 11880, 396, 2000, 2000, 2},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_2_1, 19800, 792, 4000, 4000, 2},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_2_2, 20250, 1620, 4000, 4000, 2},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_3_0, 40500, 1620, 10000, 10000, 2},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_3_1, 108000, 3600, 14000, 14000, 4},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_3_2, 216000, 5120, 20000, 20000, 4},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_4_0, 245760, 8192, 20000, 25000, 4},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_4_1, 245760, 8192, 50000, 62500, 2},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_4_2, 522240, 8704, 50000, 62500, 2},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_5_0, 589824, 22080, 135000, 135000, 2},
+ {V4L2_MPEG_VIDEO_H264_LEVEL_5_1, 983040, 36864, 240000, 240000, 2}
+};
+
+enum hva_brc_type {
+ BRC_TYPE_NONE = 0,
+ BRC_TYPE_CBR = 1,
+ BRC_TYPE_VBR = 2,
+ BRC_TYPE_VBR_LOW_DELAY = 3
+};
+
+enum hva_entropy_coding_mode {
+ CAVLC = 0,
+ CABAC = 1
+};
+
+enum hva_picture_coding_type {
+ PICTURE_CODING_TYPE_I = 0,
+ PICTURE_CODING_TYPE_P = 1,
+ PICTURE_CODING_TYPE_B = 2
+};
+
+enum hva_h264_sampling_mode {
+ SAMPLING_MODE_NV12 = 0,
+ SAMPLING_MODE_UYVY = 1,
+ SAMPLING_MODE_RGB3 = 3,
+ SAMPLING_MODE_XRGB4 = 4,
+ SAMPLING_MODE_NV21 = 8,
+ SAMPLING_MODE_VYUY = 9,
+ SAMPLING_MODE_BGR3 = 11,
+ SAMPLING_MODE_XBGR4 = 12,
+ SAMPLING_MODE_RGBX4 = 20,
+ SAMPLING_MODE_BGRX4 = 28
+};
+
+enum hva_h264_nalu_type {
+ NALU_TYPE_UNKNOWN = 0,
+ NALU_TYPE_SLICE = 1,
+ NALU_TYPE_SLICE_DPA = 2,
+ NALU_TYPE_SLICE_DPB = 3,
+ NALU_TYPE_SLICE_DPC = 4,
+ NALU_TYPE_SLICE_IDR = 5,
+ NALU_TYPE_SEI = 6,
+ NALU_TYPE_SPS = 7,
+ NALU_TYPE_PPS = 8,
+ NALU_TYPE_AU_DELIMITER = 9,
+ NALU_TYPE_SEQ_END = 10,
+ NALU_TYPE_STREAM_END = 11,
+ NALU_TYPE_FILLER_DATA = 12,
+ NALU_TYPE_SPS_EXT = 13,
+ NALU_TYPE_PREFIX_UNIT = 14,
+ NALU_TYPE_SUBSET_SPS = 15,
+ NALU_TYPE_SLICE_AUX = 19,
+ NALU_TYPE_SLICE_EXT = 20
+};
+
+enum hva_h264_sei_payload_type {
+ SEI_BUFFERING_PERIOD = 0,
+ SEI_PICTURE_TIMING = 1,
+ SEI_STEREO_VIDEO_INFO = 21,
+ SEI_FRAME_PACKING_ARRANGEMENT = 45
+};
+
+/*
+ * stereo Video Info struct
+ */
+struct hva_h264_stereo_video_sei {
+ u8 field_views_flag;
+ u8 top_field_is_left_view_flag;
+ u8 current_frame_is_left_view_flag;
+ u8 next_frame_is_second_view_flag;
+ u8 left_view_self_contained_flag;
+ u8 right_view_self_contained_flag;
+};
+
+/*
+ * struct hva_h264_td
+ *
+ * @frame_width: width in pixels of the buffer containing the input frame
+ * @frame_height: height in pixels of the buffer containing the input frame
+ * @frame_num: the parameter to be written in the slice header
+ * @picture_coding_type: type I, P or B
+ * @pic_order_cnt_type: POC mode, as defined in the H264 standard: can be 0, 1 or 2
+ * @first_picture_in_sequence: flag telling the encoder that this is the
+ * first picture in a video sequence.
+ * Used for VBR
+ * @slice_size_type: 0 = no constraint to close the slice
+ * 1 = a slice is closed as soon as the slice_mb_size limit
+ * is reached
+ * 2 = a slice is closed as soon as the slice_byte_size limit
+ * is reached
+ * 3 = a slice is closed as soon as either the slice_byte_size
+ * limit or the slice_mb_size limit is reached
+ * @slice_mb_size: defines the slice size in number of macroblocks
+ * (used when slice_size_type=1 or slice_size_type=3)
+ * @ir_param_option: defines the number of macroblocks per frame to be
+ * refreshed by AIR algorithm OR the refresh period
+ * by CIR algorithm
+ * @intra_refresh_type: enables the intra refresh algorithm.
+ * Disable=0 / Adaptive=1 / Cycle=2 as intra refresh
+ * @use_constrained_intra_flag: constrained_intra_pred_flag from PPS
+ * @transform_mode: controls the use of 4x4/8x8 transform mode
+ * @disable_deblocking_filter_idc:
+ * 0: specifies that all luma and chroma block edges of
+ * the slice are filtered.
+ * 1: specifies that deblocking is disabled for all block
+ * edges of the slice.
+ * 2: specifies that all luma and chroma block edges of
+ * the slice are filtered with exception of the block edges
+ * that coincide with slice boundaries
+ * @slice_alpha_c0_offset_div2: to be written in slice header,
+ * controls deblocking
+ * @slice_beta_offset_div2: to be written in slice header,
+ * controls deblocking
+ * @encoder_complexity: encoder complexity control (IME).
+ * 0 = I_16x16, P_16x16, Full ME Complexity
+ * 1 = I_16x16, I_NxN, P_16x16, Full ME Complexity
+ * 2 = I_16x16, I_NXN, P_16x16, P_WxH, Full ME Complexity
+ * 4 = I_16x16, P_16x16, Reduced ME Complexity
+ * 5 = I_16x16, I_NxN, P_16x16, Reduced ME Complexity
+ * 6 = I_16x16, I_NXN, P_16x16, P_WxH, Reduced ME Complexity
+ * @chroma_qp_index_offset: coming from picture parameter set
+ * (PPS see [H.264 STD] 7.4.2.2)
+ * @entropy_coding_mode: entropy coding mode.
+ * 0 = CAVLC
+ * 1 = CABAC
+ * @brc_type: selects the bit-rate control algorithm
+ * 0 = constant Qp, (no BRC)
+ * 1 = CBR
+ * 2 = VBR
+ * @quant: quantization parameter used in case of fixed QP encoding (no BRC)
+ * @non_VCL_NALU_Size: size of non-VCL NALUs (SPS, PPS, filler),
+ * used by BRC
+ * @cpb_buffer_size: size of Coded Picture Buffer, used by BRC
+ * @bit_rate: target bitrate, for BRC
+ * @qp_min: min QP threshold
+ * @qp_max: max QP threshold
+ * @framerate_num: target framerate numerator, used by BRC
+ * @framerate_den: target framerate denominator, used by BRC
+ * @delay: End-to-End Initial Delay
+ * @strict_HRD_compliancy: flag for strict HRD compliance (1).
+ * May impact encoding quality
+ * @addr_source_buffer: address of input frame buffer for current frame
+ * @addr_fwd_Ref_Buffer: address of reference frame buffer
+ * @addr_rec_buffer: address of reconstructed frame buffer
+ * @addr_output_bitstream_start: output bitstream start address
+ * @addr_output_bitstream_end: output bitstream end address
+ * @addr_external_sw: address of external search window
+ * @addr_lctx: address of context picture buffer
+ * @addr_local_rec_buffer: address of local reconstructed buffer
+ * @addr_spatial_context: address of spatial context buffer
+ * @bitstream_offset: offset in bits between aligned bitstream start
+ * address and first bit to be written by HVA.
+ * Range value is [0..63]
+ * @sampling_mode: input picture format.
+ * 0: YUV420 semi-planar interleaved
+ * 1: YUV422 raster interleaved
+ * @addr_param_out: address of output parameters structure
+ * @addr_scaling_matrix: address to the coefficient of
+ * the inverse scaling matrix
+ * @addr_scaling_matrix_dir: address to the coefficient of
+ * the direct scaling matrix
+ * @addr_cabac_context_buffer: address of cabac context buffer
+ * @GmvX: Input information about the horizontal global displacement of
+ * the encoded frame versus the previous one
+ * @GmvY: Input information about the vertical global displacement of
+ * the encoded frame versus the previous one
+ * @window_width: width in pixels of the window to be encoded inside
+ * the input frame
+ * @window_height: height in pixels of the window to be encoded inside
+ * the input frame
+ * @window_horizontal_offset: horizontal offset in pels for input window
+ * within input frame
+ * @window_vertical_offset: vertical offset in pels for input window
+ * within input frame
+ * @addr_roi: Map of QP offset for the Region of Interest algorithm and
+ * also used for Error map.
+ * Bit 0-6 used for qp offset (value -64 to 63).
+ * Bit 7 used to force intra
+ * @addr_slice_header: address to slice header
+ * @slice_header_size_in_bits: size in bits of the Slice header
+ * @slice_header_offset0: Slice header offset where to insert
+ * first_Mb_in_slice
+ * @slice_header_offset1: Slice header offset where to insert
+ * slice_qp_delta
+ * @slice_header_offset2: Slice header offset where to insert
+ * num_MBs_in_slice
+ * @slice_synchro_enable: enable "slice ready" interrupt after each slice
+ * @max_slice_number: Maximum number of slice in a frame
+ * (0 is strictly forbidden)
+ * @rgb2_yuv_y_coeff: Four coefficients (C0C1C2C3) to convert from RGB to
+ * YUV for the Y component.
+ * Y = C0*R + C1*G + C2*B + C3 (C0 is on byte 0)
+ * @rgb2_yuv_u_coeff: Four coefficients (C0C1C2C3) to convert from RGB to
+ * YUV for the U (Cb) component.
+ * U = C0*R + C1*G + C2*B + C3 (C0 is on byte 0)
+ * @rgb2_yuv_v_coeff: Four coefficients (C0C1C2C3) to convert from RGB to
+ * YUV for the V (Cr) component.
+ * V = C0*R + C1*G + C2*B + C3 (C0 is on byte 0)
+ * @slice_byte_size: maximum slice size in bytes
+ * (used when slice_size_type=2 or slice_size_type=3)
+ * @max_air_intra_mb_nb: Maximum number of intra macroblock in a frame
+ * for the AIR algorithm
+ * @brc_no_skip: Disable skipping in the Bitrate Controller
+ * @addr_brc_in_out_parameter: address of static buffer for BRC parameters
+ */
+struct hva_h264_td {
+ u16 frame_width;
+ u16 frame_height;
+ u32 frame_num;
+ u16 picture_coding_type;
+ u16 reserved1;
+ u16 pic_order_cnt_type;
+ u16 first_picture_in_sequence;
+ u16 slice_size_type;
+ u16 reserved2;
+ u32 slice_mb_size;
+ u16 ir_param_option;
+ u16 intra_refresh_type;
+ u16 use_constrained_intra_flag;
+ u16 transform_mode;
+ u16 disable_deblocking_filter_idc;
+ s16 slice_alpha_c0_offset_div2;
+ s16 slice_beta_offset_div2;
+ u16 encoder_complexity;
+ s16 chroma_qp_index_offset;
+ u16 entropy_coding_mode;
+ u16 brc_type;
+ u16 quant;
+ u32 non_vcl_nalu_size;
+ u32 cpb_buffer_size;
+ u32 bit_rate;
+ u16 qp_min;
+ u16 qp_max;
+ u16 framerate_num;
+ u16 framerate_den;
+ u16 delay;
+ u16 strict_hrd_compliancy;
+ u32 addr_source_buffer;
+ u32 addr_fwd_ref_buffer;
+ u32 addr_rec_buffer;
+ u32 addr_output_bitstream_start;
+ u32 addr_output_bitstream_end;
+ u32 addr_external_sw;
+ u32 addr_lctx;
+ u32 addr_local_rec_buffer;
+ u32 addr_spatial_context;
+ u16 bitstream_offset;
+ u16 sampling_mode;
+ u32 addr_param_out;
+ u32 addr_scaling_matrix;
+ u32 addr_scaling_matrix_dir;
+ u32 addr_cabac_context_buffer;
+ u32 reserved3;
+ u32 reserved4;
+ s16 gmv_x;
+ s16 gmv_y;
+ u16 window_width;
+ u16 window_height;
+ u16 window_horizontal_offset;
+ u16 window_vertical_offset;
+ u32 addr_roi;
+ u32 addr_slice_header;
+ u16 slice_header_size_in_bits;
+ u16 slice_header_offset0;
+ u16 slice_header_offset1;
+ u16 slice_header_offset2;
+ u32 reserved5;
+ u32 reserved6;
+ u16 reserved7;
+ u16 reserved8;
+ u16 slice_synchro_enable;
+ u16 max_slice_number;
+ u32 rgb2_yuv_y_coeff;
+ u32 rgb2_yuv_u_coeff;
+ u32 rgb2_yuv_v_coeff;
+ u32 slice_byte_size;
+ u16 max_air_intra_mb_nb;
+ u16 brc_no_skip;
+ u32 addr_temporal_context;
+ u32 addr_brc_in_out_parameter;
+};
+
+/*
+ * struct hva_h264_slice_po
+ *
+ * @slice_size: slice size
+ * @slice_start_time: start time
+ * @slice_end_time: end time
+ * @slice_num: slice number
+ */
+struct hva_h264_slice_po {
+ u32 slice_size;
+ u32 slice_start_time;
+ u32 slice_end_time;
+ u32 slice_num;
+};
+
+/*
+ * struct hva_h264_po
+ *
+ * @bitstream_size: bitstream size
+ * @dct_bitstream_size: dct bitstream size
+ * @stuffing_bits: number of stuffing bits inserted by the encoder
+ * @removal_time: removal time of current frame (nb of ticks 1/framerate)
+ * @hvc_start_time: hvc start time
+ * @hvc_stop_time: hvc stop time
+ * @slice_count: slice count
+ */
+struct hva_h264_po {
+ u32 bitstream_size;
+ u32 dct_bitstream_size;
+ u32 stuffing_bits;
+ u32 removal_time;
+ u32 hvc_start_time;
+ u32 hvc_stop_time;
+ u32 slice_count;
+ u32 reserved0;
+ struct hva_h264_slice_po slice_params[16];
+};
+
+struct hva_h264_task {
+ struct hva_h264_td td;
+ struct hva_h264_po po;
+};
+
+/*
+ * struct hva_h264_ctx
+ *
+ * @seq_info: sequence information buffer
+ * @ref_frame: reference frame buffer
+ * @rec_frame: reconstructed frame buffer
+ * @task: task descriptor
+ */
+struct hva_h264_ctx {
+ struct hva_buffer *seq_info;
+ struct hva_buffer *ref_frame;
+ struct hva_buffer *rec_frame;
+ struct hva_buffer *task;
+};
+
+static int hva_h264_fill_slice_header(struct hva_ctx *pctx,
+ u8 *slice_header_addr,
+ struct hva_controls *ctrls,
+ int frame_num,
+ u16 *header_size,
+ u16 *header_offset0,
+ u16 *header_offset1,
+ u16 *header_offset2)
+{
+ /*
+ * with this HVA hardware version, part of the slice header is computed
+ * by the host and part by the hardware.
+ * The host part is precomputed and available through this array.
+ */
+ struct device *dev = ctx_to_dev(pctx);
+ int cabac = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC;
+ static const unsigned char slice_header[] = {
+ 0x00, 0x00, 0x00, 0x01,
+ 0x41, 0x34, 0x07, 0x00
+ };
+ int idr_pic_id = frame_num % 2;
+ enum hva_picture_coding_type type;
+ u32 frame_order = frame_num % ctrls->gop_size;
+
+ if (!(frame_num % ctrls->gop_size))
+ type = PICTURE_CODING_TYPE_I;
+ else
+ type = PICTURE_CODING_TYPE_P;
+
+ memcpy(slice_header_addr, slice_header, sizeof(slice_header));
+
+ *header_size = 56;
+ *header_offset0 = 40;
+ *header_offset1 = 13;
+ *header_offset2 = 0;
+
+ if (type == PICTURE_CODING_TYPE_I) {
+ slice_header_addr[4] = 0x65;
+ slice_header_addr[5] = 0x11;
+
+ /* toggle the I frame */
+ if ((frame_num / ctrls->gop_size) % 2) {
+ *header_size += 4;
+ *header_offset1 += 4;
+ slice_header_addr[6] = 0x04;
+ slice_header_addr[7] = 0x70;
+
+ } else {
+ *header_size += 2;
+ *header_offset1 += 2;
+ slice_header_addr[6] = 0x09;
+ slice_header_addr[7] = 0xC0;
+ }
+ } else {
+ if (ctrls->entropy_mode == cabac) {
+ *header_size += 1;
+ *header_offset1 += 1;
+ slice_header_addr[7] = 0x80;
+ }
+ /*
+ * update slice header with P frame order
+ * frame order is limited to 16 (coded on 4bits only)
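+ * e.g. frame_order = 5 adds 1 to byte 5 and 0x40 to byte 6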
+ */
+ slice_header_addr[5] += ((frame_order & 0x0C) >> 2);
+ slice_header_addr[6] += ((frame_order & 0x03) << 6);
+ }
+
+ dev_dbg(dev,
+ "%s %s slice header order %d idrPicId %d header size %d\n",
+ pctx->name, __func__, frame_order, idr_pic_id, *header_size);
+ return 0;
+}
+
+static int hva_h264_fill_data_nal(struct hva_ctx *pctx,
+ unsigned int stuffing_bytes, u8 *addr,
+ unsigned int stream_size, unsigned int *size)
+{
+ struct device *dev = ctx_to_dev(pctx);
+ static const u8 start[] = { 0x00, 0x00, 0x00, 0x01 };
+
+ dev_dbg(dev, "%s %s stuffing bytes %d\n", pctx->name, __func__,
+ stuffing_bytes);
+
+ if ((*size + stuffing_bytes + H264_FILLER_DATA_SIZE) > stream_size) {
+ dev_dbg(dev, "%s %s too many stuffing bytes %d\n",
+ pctx->name, __func__, stuffing_bytes);
+ return 0;
+ }
+
+ /* start code */
+ memcpy(addr + *size, start, sizeof(start));
+ *size += sizeof(start);
+
+ /* nal_unit_type */
+ addr[*size] = NALU_TYPE_FILLER_DATA;
+ *size += 1;
+
+ memset(addr + *size, 0xff, stuffing_bytes);
+ *size += stuffing_bytes;
+
+ addr[*size] = 0x80;
+ *size += 1;
+
+ return 0;
+}
+
+static int hva_h264_fill_sei_nal(struct hva_ctx *pctx,
+ enum hva_h264_sei_payload_type type,
+ u8 *addr, u32 *size)
+{
+ struct device *dev = ctx_to_dev(pctx);
+ static const u8 start[] = { 0x00, 0x00, 0x00, 0x01 };
+ struct hva_h264_stereo_video_sei info;
+ u8 offset = 7;
+ u8 msg = 0;
+
+ /* start code */
+ memcpy(addr + *size, start, sizeof(start));
+ *size += sizeof(start);
+
+ /* nal_unit_type */
+ addr[*size] = NALU_TYPE_SEI;
+ *size += 1;
+
+ /* payload type */
+ addr[*size] = type;
+ *size += 1;
+
+ switch (type) {
+ case SEI_STEREO_VIDEO_INFO:
+ memset(&info, 0, sizeof(info));
+
+ /* set to top/bottom frame packing arrangement */
+ info.field_views_flag = 1;
+ info.top_field_is_left_view_flag = 1;
+
+ /* payload size */
+ addr[*size] = 1;
+ *size += 1;
+
+ /* payload */
+ msg = info.field_views_flag << offset--;
+
+ if (info.field_views_flag) {
+ msg |= info.top_field_is_left_view_flag <<
+ offset--;
+ } else {
+ msg |= info.current_frame_is_left_view_flag <<
+ offset--;
+ msg |= info.next_frame_is_second_view_flag <<
+ offset--;
+ }
+ msg |= info.left_view_self_contained_flag << offset--;
+ msg |= info.right_view_self_contained_flag << offset--;
+
+ addr[*size] = msg;
+ *size += 1;
+
+ addr[*size] = 0x80;
+ *size += 1;
+
+ return 0;
+ case SEI_BUFFERING_PERIOD:
+ case SEI_PICTURE_TIMING:
+ case SEI_FRAME_PACKING_ARRANGEMENT:
+ default:
+ dev_err(dev, "%s sei nal type not supported %d\n",
+ pctx->name, type);
+ return -EINVAL;
+ }
+}
+
+static int hva_h264_prepare_task(struct hva_ctx *pctx,
+ struct hva_h264_task *task,
+ struct hva_frame *frame,
+ struct hva_stream *stream)
+{
+ struct hva_dev *hva = ctx_to_hdev(pctx);
+ struct device *dev = ctx_to_dev(pctx);
+ struct hva_h264_ctx *ctx = (struct hva_h264_ctx *)pctx->priv;
+ struct hva_buffer *seq_info = ctx->seq_info;
+ struct hva_buffer *fwd_ref_frame = ctx->ref_frame;
+ struct hva_buffer *loc_rec_frame = ctx->rec_frame;
+ struct hva_h264_td *td = &task->td;
+ struct hva_controls *ctrls = &pctx->ctrls;
+ struct v4l2_fract *time_per_frame = &pctx->ctrls.time_per_frame;
+ int cavlc = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC;
+ u32 frame_num = pctx->stream_num;
+ u32 addr_esram = hva->esram_addr;
+ enum v4l2_mpeg_video_h264_level level;
+ dma_addr_t paddr = 0;
+ u8 *slice_header_vaddr;
+ u32 frame_width = frame->info.aligned_width;
+ u32 frame_height = frame->info.aligned_height;
+ u32 max_cpb_buffer_size;
+ unsigned int payload = stream->bytesused;
+ u32 max_bitrate;
+
+ /* check width and height parameters */
+ if ((frame_width > max(H264_MAX_SIZE_W, H264_MAX_SIZE_H)) ||
+ (frame_height > max(H264_MAX_SIZE_W, H264_MAX_SIZE_H))) {
+ dev_err(dev,
+ "%s width(%d) or height(%d) exceeds limits (%dx%d)\n",
+ pctx->name, frame_width, frame_height,
+ H264_MAX_SIZE_W, H264_MAX_SIZE_H);
+ pctx->frame_errors++;
+ return -EINVAL;
+ }
+
+ level = ctrls->level;
+
+ memset(td, 0, sizeof(struct hva_h264_td));
+
+ td->frame_width = frame_width;
+ td->frame_height = frame_height;
+
+ /* set frame alignment */
+ td->window_width = frame_width;
+ td->window_height = frame_height;
+ td->window_horizontal_offset = 0;
+ td->window_vertical_offset = 0;
+
+ td->first_picture_in_sequence = (!frame_num) ? 1 : 0;
+
+ /* pic_order_cnt_type hard coded to '2' as only I & P frames */
+ td->pic_order_cnt_type = 2;
+
+ /* useConstrainedIntraFlag set to false for better coding efficiency */
+ td->use_constrained_intra_flag = false;
+ td->brc_type = (ctrls->bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR)
+ ? BRC_TYPE_CBR : BRC_TYPE_VBR;
+
+ td->entropy_coding_mode = (ctrls->entropy_mode == cavlc) ? CAVLC :
+ CABAC;
+
+ td->bit_rate = ctrls->bitrate;
+
+ /* set framerate, framerate = 1 / time_per_frame */
+ if (time_per_frame->numerator >= 536) {
+ /*
+ * due to a hardware bug, framerate denominator can't exceed
+ * 536 (BRC overflow). Compute nearest framerate
+ */
+ td->framerate_den = 1;
+ td->framerate_num = (time_per_frame->denominator +
+ (time_per_frame->numerator >> 1) - 1) /
+ time_per_frame->numerator;
+
+ /*
+ * update bitrate to introduce a correction due to
+ * the new framerate
+ * new bitrate = (old bitrate * new framerate) / old framerate
+ */
+ td->bit_rate /= time_per_frame->numerator;
+ td->bit_rate *= time_per_frame->denominator;
+ td->bit_rate /= td->framerate_num;
+ } else {
+ td->framerate_den = time_per_frame->numerator;
+ td->framerate_num = time_per_frame->denominator;
+ }
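+ /*
+ * e.g. the conversion above rounds 29.97 fps (time_per_frame =
+ * 1001/30000, numerator above the 536 limit) to 30/1 and rescales
+ * the target bitrate accordingly
+ */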
+
+ /* compute maximum bitrate depending on profile */
+ if (ctrls->profile >= V4L2_MPEG_VIDEO_H264_PROFILE_HIGH)
+ max_bitrate = h264_infos_list[level].max_bitrate *
+ H264_FACTOR_HIGH;
+ else
+ max_bitrate = h264_infos_list[level].max_bitrate *
+ H264_FACTOR_BASELINE;
+
+ /* check if bitrate doesn't exceed max size */
+ if (td->bit_rate > max_bitrate) {
+ dev_dbg(dev,
+ "%s bitrate (%d) larger than level and profile allow, clip to %d\n",
+ pctx->name, td->bit_rate, max_bitrate);
+ td->bit_rate = max_bitrate;
+ }
+
+ /* convert cpb_buffer_size from kB to bits */
+ td->cpb_buffer_size = ctrls->cpb_size * 8000;
+
+ /* compute maximum cpb buffer size depending on profile */
+ if (ctrls->profile >= V4L2_MPEG_VIDEO_H264_PROFILE_HIGH)
+ max_cpb_buffer_size =
+ h264_infos_list[level].max_cpb_size * H264_FACTOR_HIGH;
+ else
+ max_cpb_buffer_size =
+ h264_infos_list[level].max_cpb_size * H264_FACTOR_BASELINE;
+
+ /* check if cpb buffer size doesn't exceed max size */
+ if (td->cpb_buffer_size > max_cpb_buffer_size) {
+ dev_dbg(dev,
+ "%s cpb size (%d) larger than level allows, clip to %d\n",
+ pctx->name, td->cpb_buffer_size, max_cpb_buffer_size);
+ td->cpb_buffer_size = max_cpb_buffer_size;
+ }
+
+ /* enable skipping in the Bitrate Controller */
+ td->brc_no_skip = 0;
+
+ /* initial delay */
+ if ((ctrls->bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR) &&
+ td->bit_rate)
+ td->delay = 1000 * (td->cpb_buffer_size / td->bit_rate);
+ else
+ td->delay = 0;
+
+ switch (frame->info.pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ td->sampling_mode = SAMPLING_MODE_NV12;
+ break;
+ case V4L2_PIX_FMT_NV21:
+ td->sampling_mode = SAMPLING_MODE_NV21;
+ break;
+ default:
+ dev_err(dev, "%s invalid source pixel format\n",
+ pctx->name);
+ pctx->frame_errors++;
+ return -EINVAL;
+ }
+
+ /*
+ * fill matrix color converter (RGB to YUV)
+ * Y  =  0.299 R + 0.587 G + 0.114 B
+ * Cb = -0.1687 R - 0.3313 G + 0.5 B + 128
+ * Cr =  0.5 R - 0.4187 G - 0.0813 B + 128
+ */
+ td->rgb2_yuv_y_coeff = 0x12031008;
+ td->rgb2_yuv_u_coeff = 0x800EF7FB;
+ td->rgb2_yuv_v_coeff = 0x80FEF40E;
+
+ /* enable/disable transform mode */
+ td->transform_mode = ctrls->dct8x8;
+
+ /* encoder complexity fixed to 2, ENCODE_I_16x16_I_NxN_P_16x16_P_WxH */
+ td->encoder_complexity = 2;
+
+ /* quant fixed to 28, default VBR value */
+ td->quant = 28;
+
+ if (td->framerate_den == 0) {
+ dev_err(dev, "%s invalid framerate\n", pctx->name);
+ pctx->frame_errors++;
+ return -EINVAL;
+ }
+
+ /* if automatic framerate, deactivate bitrate controller */
+ if (td->framerate_num == 0)
+ td->brc_type = 0;
+
+ /* strict HRD compliance fixed to true */
+ td->strict_hrd_compliancy = 1;
+
+ /* set minimum & maximum quantizers */
+ td->qp_min = clamp_val(ctrls->qpmin, 0, 51);
+ td->qp_max = clamp_val(ctrls->qpmax, 0, 51);
+
+ td->addr_source_buffer = frame->paddr;
+ td->addr_fwd_ref_buffer = fwd_ref_frame->paddr;
+ td->addr_rec_buffer = loc_rec_frame->paddr;
+
+ td->addr_output_bitstream_end = (u32)stream->paddr + stream->size;
+
+ td->addr_output_bitstream_start = (u32)stream->paddr;
+ td->bitstream_offset = (((u32)stream->paddr & 0xF) << 3) &
+ BITSTREAM_OFFSET_MASK;
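+ /*
+ * i.e. the bit offset of paddr within its 16-byte aligned base,
+ * e.g. 48 bits for a paddr ending in 0x6
+ */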
+
+ td->addr_param_out = (u32)ctx->task->paddr +
+ offsetof(struct hva_h264_task, po);
+
+ /* swap spatial and temporal context */
+ if (frame_num % 2) {
+ paddr = seq_info->paddr;
+ td->addr_spatial_context = ALIGN(paddr, 0x100);
+ paddr = seq_info->paddr + DATA_SIZE(frame_width,
+ frame_height);
+ td->addr_temporal_context = ALIGN(paddr, 0x100);
+ } else {
+ paddr = seq_info->paddr;
+ td->addr_temporal_context = ALIGN(paddr, 0x100);
+ paddr = seq_info->paddr + DATA_SIZE(frame_width,
+ frame_height);
+ td->addr_spatial_context = ALIGN(paddr, 0x100);
+ }
+
+ paddr = seq_info->paddr + 2 * DATA_SIZE(frame_width, frame_height);
+
+ td->addr_brc_in_out_parameter = ALIGN(paddr, 0x100);
+
+ paddr = td->addr_brc_in_out_parameter + BRC_DATA_SIZE;
+ td->addr_slice_header = ALIGN(paddr, 0x100);
+ td->addr_external_sw = ALIGN(addr_esram, 0x100);
+
+ addr_esram += SEARCH_WINDOW_BUFFER_MAX_SIZE(frame_width);
+ td->addr_local_rec_buffer = ALIGN(addr_esram, 0x100);
+
+ addr_esram += LOCAL_RECONSTRUCTED_BUFFER_MAX_SIZE(frame_width);
+ td->addr_lctx = ALIGN(addr_esram, 0x100);
+
+ addr_esram += CTX_MB_BUFFER_MAX_SIZE(max(frame_width, frame_height));
+ td->addr_cabac_context_buffer = ALIGN(addr_esram, 0x100);
+
+ if (!(frame_num % ctrls->gop_size)) {
+ td->picture_coding_type = PICTURE_CODING_TYPE_I;
+ stream->vbuf.flags |= V4L2_BUF_FLAG_KEYFRAME;
+ } else {
+ td->picture_coding_type = PICTURE_CODING_TYPE_P;
+ stream->vbuf.flags &= ~V4L2_BUF_FLAG_KEYFRAME;
+ }
+
+ /* fill the slice header part */
+ slice_header_vaddr = seq_info->vaddr + (td->addr_slice_header -
+ seq_info->paddr);
+
+ hva_h264_fill_slice_header(pctx, slice_header_vaddr, ctrls, frame_num,
+ &td->slice_header_size_in_bits,
+ &td->slice_header_offset0,
+ &td->slice_header_offset1,
+ &td->slice_header_offset2);
+
+ td->chroma_qp_index_offset = 2;
+ td->slice_synchro_enable = 0;
+ td->max_slice_number = 1;
+
+ /*
+ * check the sps/pps header size for key frame only
+ * sps/pps header was previously filled by libv4l
+ * during qbuf of stream buffer
+ */
+ if ((stream->vbuf.flags == V4L2_BUF_FLAG_KEYFRAME) &&
+ (payload > MAX_SPS_PPS_SIZE)) {
+ dev_err(dev, "%s invalid sps/pps size %d\n", pctx->name,
+ payload);
+ pctx->frame_errors++;
+ return -EINVAL;
+ }
+
+ if (stream->vbuf.flags != V4L2_BUF_FLAG_KEYFRAME)
+ payload = 0;
+
+ /* add SEI nal (video stereo info) */
+ if (ctrls->sei_fp && hva_h264_fill_sei_nal(pctx, SEI_STEREO_VIDEO_INFO,
+ (u8 *)stream->vaddr,
+ &payload)) {
+ dev_err(dev, "%s fail to get SEI nal\n", pctx->name);
+ pctx->frame_errors++;
+ return -EINVAL;
+ }
+
+ /* fill size of non-VCL NAL units (SPS, PPS, filler and SEI) */
+ td->non_vcl_nalu_size = payload * 8;
+
+ /* compute bitstream offset & new start address of bitstream */
+ td->addr_output_bitstream_start += ((payload >> 4) << 4);
+ td->bitstream_offset += (payload - ((payload >> 4) << 4)) * 8;
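+ /*
+ * e.g. a 37-byte header advances the 16-byte aligned start address
+ * by 32 bytes and adds the remaining 5 bytes (40 bits) to the offset
+ */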
+
+ stream->bytesused = payload;
+
+ return 0;
+}
+
+static unsigned int hva_h264_get_stream_size(struct hva_h264_task *task)
+{
+ struct hva_h264_po *po = &task->po;
+
+ return po->bitstream_size;
+}
+
+static u32 hva_h264_get_stuffing_bytes(struct hva_h264_task *task)
+{
+ struct hva_h264_po *po = &task->po;
+
+ return po->stuffing_bits >> 3;
+}
+
+static int hva_h264_open(struct hva_ctx *pctx)
+{
+ struct device *dev = ctx_to_dev(pctx);
+ struct hva_h264_ctx *ctx;
+ struct hva_dev *hva = ctx_to_hdev(pctx);
+ u32 frame_width = pctx->frameinfo.aligned_width;
+ u32 frame_height = pctx->frameinfo.aligned_height;
+ u32 size;
+ int ret;
+
+ /* check esram size necessary to encode a frame */
+ size = SEARCH_WINDOW_BUFFER_MAX_SIZE(frame_width) +
+ LOCAL_RECONSTRUCTED_BUFFER_MAX_SIZE(frame_width) +
+ CTX_MB_BUFFER_MAX_SIZE(max(frame_width, frame_height)) +
+ CABAC_CONTEXT_BUFFER_MAX_SIZE(frame_width);
+
+ if (hva->esram_size < size) {
+ dev_err(dev, "%s not enough esram (max:%d request:%d)\n",
+ pctx->name, hva->esram_size, size);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* allocate context for codec */
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /* allocate sequence info buffer */
+ ret = hva_mem_alloc(pctx,
+ 2 * DATA_SIZE(frame_width, frame_height) +
+ SLICE_HEADER_SIZE +
+ BRC_DATA_SIZE,
+ "hva sequence info",
+ &ctx->seq_info);
+ if (ret) {
+ dev_err(dev,
+ "%s failed to allocate sequence info buffer\n",
+ pctx->name);
+ goto err_ctx;
+ }
+
+ /* allocate reference frame buffer */
+ ret = hva_mem_alloc(pctx,
+ frame_width * frame_height * 3 / 2,
+ "hva reference frame",
+ &ctx->ref_frame);
+ if (ret) {
+ dev_err(dev, "%s failed to allocate reference frame buffer\n",
+ pctx->name);
+ goto err_seq_info;
+ }
+
+ /* allocate reconstructed frame buffer */
+ ret = hva_mem_alloc(pctx,
+ frame_width * frame_height * 3 / 2,
+ "hva reconstructed frame",
+ &ctx->rec_frame);
+ if (ret) {
+ dev_err(dev,
+ "%s failed to allocate reconstructed frame buffer\n",
+ pctx->name);
+ goto err_ref_frame;
+ }
+
+ /* allocate task descriptor */
+ ret = hva_mem_alloc(pctx,
+ sizeof(struct hva_h264_task),
+ "hva task descriptor",
+ &ctx->task);
+ if (ret) {
+ dev_err(dev,
+ "%s failed to allocate task descriptor\n",
+ pctx->name);
+ goto err_rec_frame;
+ }
+
+ pctx->priv = (void *)ctx;
+
+ return 0;
+
+err_rec_frame:
+ hva_mem_free(pctx, ctx->rec_frame);
+err_ref_frame:
+ hva_mem_free(pctx, ctx->ref_frame);
+err_seq_info:
+ hva_mem_free(pctx, ctx->seq_info);
+err_ctx:
+ devm_kfree(dev, ctx);
+err:
+ pctx->sys_errors++;
+ return ret;
+}
+
+static int hva_h264_close(struct hva_ctx *pctx)
+{
+ struct hva_h264_ctx *ctx = (struct hva_h264_ctx *)pctx->priv;
+ struct device *dev = ctx_to_dev(pctx);
+
+ if (ctx->seq_info)
+ hva_mem_free(pctx, ctx->seq_info);
+
+ if (ctx->ref_frame)
+ hva_mem_free(pctx, ctx->ref_frame);
+
+ if (ctx->rec_frame)
+ hva_mem_free(pctx, ctx->rec_frame);
+
+ if (ctx->task)
+ hva_mem_free(pctx, ctx->task);
+
+ devm_kfree(dev, ctx);
+
+ return 0;
+}
+
+static int hva_h264_encode(struct hva_ctx *pctx, struct hva_frame *frame,
+ struct hva_stream *stream)
+{
+ struct hva_h264_ctx *ctx = (struct hva_h264_ctx *)pctx->priv;
+ struct hva_h264_task *task = (struct hva_h264_task *)ctx->task->vaddr;
+ u32 stuffing_bytes = 0;
+ int ret = 0;
+
+ ret = hva_h264_prepare_task(pctx, task, frame, stream);
+ if (ret)
+ goto err;
+
+ ret = hva_hw_execute_task(pctx, H264_ENC, ctx->task);
+ if (ret)
+ goto err;
+
+ pctx->stream_num++;
+ stream->bytesused += hva_h264_get_stream_size(task);
+
+ stuffing_bytes = hva_h264_get_stuffing_bytes(task);
+
+ if (stuffing_bytes)
+ hva_h264_fill_data_nal(pctx, stuffing_bytes,
+ (u8 *)stream->vaddr,
+ stream->size,
+ &stream->bytesused);
+
+ /* switch reference & reconstructed frame */
+ swap(ctx->ref_frame, ctx->rec_frame);
+
+ return 0;
+err:
+ stream->bytesused = 0;
+ return ret;
+}
+
+const struct hva_enc nv12h264enc = {
+ .name = "H264(NV12)",
+ .pixelformat = V4L2_PIX_FMT_NV12,
+ .streamformat = V4L2_PIX_FMT_H264,
+ .max_width = H264_MAX_SIZE_W,
+ .max_height = H264_MAX_SIZE_H,
+ .open = hva_h264_open,
+ .close = hva_h264_close,
+ .encode = hva_h264_encode,
+};
+
+const struct hva_enc nv21h264enc = {
+ .name = "H264(NV21)",
+ .pixelformat = V4L2_PIX_FMT_NV21,
+ .streamformat = V4L2_PIX_FMT_H264,
+ .max_width = H264_MAX_SIZE_W,
+ .max_height = H264_MAX_SIZE_H,
+ .open = hva_h264_open,
+ .close = hva_h264_close,
+ .encode = hva_h264_encode,
+};
diff --git a/drivers/media/platform/st/sti/hva/hva-hw.c b/drivers/media/platform/st/sti/hva/hva-hw.c
new file mode 100644
index 000000000..fe4ea2e7f
--- /dev/null
+++ b/drivers/media/platform/st/sti/hva/hva-hw.c
@@ -0,0 +1,585 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Yannick Fertre <yannick.fertre@st.com>
+ * Hugues Fruchet <hugues.fruchet@st.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+#include <linux/seq_file.h>
+#endif
+
+#include "hva.h"
+#include "hva-hw.h"
+
+/* HVA register offsets */
+#define HVA_HIF_REG_RST 0x0100U
+#define HVA_HIF_REG_RST_ACK 0x0104U
+#define HVA_HIF_REG_MIF_CFG 0x0108U
+#define HVA_HIF_REG_HEC_MIF_CFG 0x010CU
+#define HVA_HIF_REG_CFL 0x0110U
+#define HVA_HIF_FIFO_CMD 0x0114U
+#define HVA_HIF_FIFO_STS 0x0118U
+#define HVA_HIF_REG_SFL 0x011CU
+#define HVA_HIF_REG_IT_ACK 0x0120U
+#define HVA_HIF_REG_ERR_IT_ACK 0x0124U
+#define HVA_HIF_REG_LMI_ERR 0x0128U
+#define HVA_HIF_REG_EMI_ERR 0x012CU
+#define HVA_HIF_REG_HEC_MIF_ERR 0x0130U
+#define HVA_HIF_REG_HEC_STS 0x0134U
+#define HVA_HIF_REG_HVC_STS 0x0138U
+#define HVA_HIF_REG_HJE_STS 0x013CU
+#define HVA_HIF_REG_CNT 0x0140U
+#define HVA_HIF_REG_HEC_CHKSYN_DIS 0x0144U
+#define HVA_HIF_REG_CLK_GATING 0x0148U
+#define HVA_HIF_REG_VERSION 0x014CU
+#define HVA_HIF_REG_BSM 0x0150U
+
+/* define value for version id register (HVA_HIF_REG_VERSION) */
+#define VERSION_ID_MASK 0x0000FFFF
+
+/* define values for BSM register (HVA_HIF_REG_BSM) */
+#define BSM_CFG_VAL1 0x0003F000
+#define BSM_CFG_VAL2 0x003F0000
+
+/* define values for memory interface register (HVA_HIF_REG_MIF_CFG) */
+#define MIF_CFG_VAL1 0x04460446
+#define MIF_CFG_VAL2 0x04460806
+#define MIF_CFG_VAL3 0x00000000
+
+/* define value for HEC memory interface register (HVA_HIF_REG_MIF_CFG) */
+#define HEC_MIF_CFG_VAL 0x000000C4
+
+/* Bits definition for clock gating register (HVA_HIF_REG_CLK_GATING) */
+#define CLK_GATING_HVC BIT(0)
+#define CLK_GATING_HEC BIT(1)
+#define CLK_GATING_HJE BIT(2)
+
+/* fixed hva clock rate */
+#define CLK_RATE 300000000
+
+/* autosuspend delay for PM runtime */
+#define AUTOSUSPEND_DELAY_MS 3
+
+/*
+ * hw encode error values
+ * NO_ERROR: Success, Task OK
+ * H264_BITSTREAM_OVERSIZE: VECH264 Bitstream size > bitstream buffer
+ * H264_FRAME_SKIPPED: VECH264 Frame skipped (refers to CPB Buffer Size)
+ * H264_SLICE_LIMIT_SIZE: VECH264 MB > slice limit size
+ * H264_MAX_SLICE_NUMBER: VECH264 max slice number reached
+ * H264_SLICE_READY: VECH264 Slice ready
+ * TASK_LIST_FULL: HVA/FPC task list full
+ * (discard latest transform command)
+ * UNKNOWN_COMMAND: Transform command not known by HVA/FPC
+ * WRONG_CODEC_OR_RESOLUTION: Wrong Codec or Resolution Selection
+ * NO_INT_COMPLETION: Time-out on interrupt completion
+ * LMI_ERR: Local Memory Interface Error
+ * EMI_ERR: External Memory Interface Error
+ * HECMI_ERR: HEC Memory Interface Error
+ */
+enum hva_hw_error {
+ NO_ERROR = 0x0,
+ H264_BITSTREAM_OVERSIZE = 0x2,
+ H264_FRAME_SKIPPED = 0x4,
+ H264_SLICE_LIMIT_SIZE = 0x5,
+ H264_MAX_SLICE_NUMBER = 0x7,
+ H264_SLICE_READY = 0x8,
+ TASK_LIST_FULL = 0xF0,
+ UNKNOWN_COMMAND = 0xF1,
+ WRONG_CODEC_OR_RESOLUTION = 0xF4,
+ NO_INT_COMPLETION = 0x100,
+ LMI_ERR = 0x101,
+ EMI_ERR = 0x102,
+ HECMI_ERR = 0x103,
+};
+
+static irqreturn_t hva_hw_its_interrupt(int irq, void *data)
+{
+ struct hva_dev *hva = data;
+
+ /* read status registers */
+ hva->sts_reg = readl_relaxed(hva->regs + HVA_HIF_FIFO_STS);
+ hva->sfl_reg = readl_relaxed(hva->regs + HVA_HIF_REG_SFL);
+
+ /* acknowledge interrupt */
+ writel_relaxed(0x1, hva->regs + HVA_HIF_REG_IT_ACK);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t hva_hw_its_irq_thread(int irq, void *arg)
+{
+ struct hva_dev *hva = arg;
+ struct device *dev = hva_to_dev(hva);
+ u32 status = hva->sts_reg & 0xFF;
+ u8 ctx_id = 0;
+ struct hva_ctx *ctx = NULL;
+
+ dev_dbg(dev, "%s %s: status: 0x%02x fifo level: 0x%02x\n",
+ HVA_PREFIX, __func__, hva->sts_reg & 0xFF, hva->sfl_reg & 0xF);
+
+ /*
+ * status: task_id[31:16] client_id[15:8] status[7:0]
+ * the context identifier is retrieved from the client identifier
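+ * e.g. sts_reg = 0x00020105 means task 2, context 1, status 0x05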
+ */
+ ctx_id = (hva->sts_reg & 0xFF00) >> 8;
+ if (ctx_id >= HVA_MAX_INSTANCES) {
+ dev_err(dev, "%s %s: bad context identifier: %d\n",
+ HVA_PREFIX, __func__, ctx_id);
+ goto out;
+ }
+
+ ctx = hva->instances[ctx_id];
+ if (!ctx)
+ goto out;
+
+ switch (status) {
+ case NO_ERROR:
+ dev_dbg(dev, "%s %s: no error\n",
+ ctx->name, __func__);
+ ctx->hw_err = false;
+ break;
+ case H264_SLICE_READY:
+ dev_dbg(dev, "%s %s: h264 slice ready\n",
+ ctx->name, __func__);
+ ctx->hw_err = false;
+ break;
+ case H264_FRAME_SKIPPED:
+ dev_dbg(dev, "%s %s: h264 frame skipped\n",
+ ctx->name, __func__);
+ ctx->hw_err = false;
+ break;
+ case H264_BITSTREAM_OVERSIZE:
+ dev_err(dev, "%s %s:h264 bitstream oversize\n",
+ ctx->name, __func__);
+ ctx->hw_err = true;
+ break;
+ case H264_SLICE_LIMIT_SIZE:
+ dev_err(dev, "%s %s: h264 slice limit size is reached\n",
+ ctx->name, __func__);
+ ctx->hw_err = true;
+ break;
+ case H264_MAX_SLICE_NUMBER:
+ dev_err(dev, "%s %s: h264 max slice number is reached\n",
+ ctx->name, __func__);
+ ctx->hw_err = true;
+ break;
+ case TASK_LIST_FULL:
+ dev_err(dev, "%s %s:task list full\n",
+ ctx->name, __func__);
+ ctx->hw_err = true;
+ break;
+ case UNKNOWN_COMMAND:
+ dev_err(dev, "%s %s: command not known\n",
+ ctx->name, __func__);
+ ctx->hw_err = true;
+ break;
+ case WRONG_CODEC_OR_RESOLUTION:
+ dev_err(dev, "%s %s: wrong codec or resolution\n",
+ ctx->name, __func__);
+ ctx->hw_err = true;
+ break;
+ default:
+ dev_err(dev, "%s %s: status not recognized\n",
+ ctx->name, __func__);
+ ctx->hw_err = true;
+ break;
+ }
+out:
+ complete(&hva->interrupt);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t hva_hw_err_interrupt(int irq, void *data)
+{
+ struct hva_dev *hva = data;
+
+ /* read status registers */
+ hva->sts_reg = readl_relaxed(hva->regs + HVA_HIF_FIFO_STS);
+ hva->sfl_reg = readl_relaxed(hva->regs + HVA_HIF_REG_SFL);
+
+ /* read error registers */
+ hva->lmi_err_reg = readl_relaxed(hva->regs + HVA_HIF_REG_LMI_ERR);
+ hva->emi_err_reg = readl_relaxed(hva->regs + HVA_HIF_REG_EMI_ERR);
+ hva->hec_mif_err_reg = readl_relaxed(hva->regs +
+ HVA_HIF_REG_HEC_MIF_ERR);
+
+ /* acknowledge interrupt */
+ writel_relaxed(0x1, hva->regs + HVA_HIF_REG_IT_ACK);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t hva_hw_err_irq_thread(int irq, void *arg)
+{
+ struct hva_dev *hva = arg;
+ struct device *dev = hva_to_dev(hva);
+ u8 ctx_id = 0;
+ struct hva_ctx *ctx;
+
+ dev_dbg(dev, "%s status: 0x%02x fifo level: 0x%02x\n",
+ HVA_PREFIX, hva->sts_reg & 0xFF, hva->sfl_reg & 0xF);
+
+ /*
+ * status: task_id[31:16] client_id[15:8] status[7:0]
+ * the context identifier is retrieved from the client identifier
+ */
+ ctx_id = (hva->sts_reg & 0xFF00) >> 8;
+ if (ctx_id >= HVA_MAX_INSTANCES) {
+ dev_err(dev, "%s bad context identifier: %d\n", HVA_PREFIX,
+ ctx_id);
+ goto out;
+ }
+
+ ctx = hva->instances[ctx_id];
+ if (!ctx)
+ goto out;
+
+ if (hva->lmi_err_reg) {
+ dev_err(dev, "%s local memory interface error: 0x%08x\n",
+ ctx->name, hva->lmi_err_reg);
+ ctx->hw_err = true;
+ }
+
+ if (hva->emi_err_reg) {
+ dev_err(dev, "%s external memory interface error: 0x%08x\n",
+ ctx->name, hva->emi_err_reg);
+ ctx->hw_err = true;
+ }
+
+ if (hva->hec_mif_err_reg) {
+ dev_err(dev, "%s hec memory interface error: 0x%08x\n",
+ ctx->name, hva->hec_mif_err_reg);
+ ctx->hw_err = true;
+ }
+out:
+ complete(&hva->interrupt);
+
+ return IRQ_HANDLED;
+}
+
+static unsigned long int hva_hw_get_ip_version(struct hva_dev *hva)
+{
+ struct device *dev = hva_to_dev(hva);
+ unsigned long int version;
+
+ if (pm_runtime_resume_and_get(dev) < 0) {
+ dev_err(dev, "%s failed to get pm_runtime\n", HVA_PREFIX);
+ mutex_unlock(&hva->protect_mutex);
+ return -EFAULT;
+ }
+
+ version = readl_relaxed(hva->regs + HVA_HIF_REG_VERSION) &
+ VERSION_ID_MASK;
+
+ pm_runtime_put_autosuspend(dev);
+
+ switch (version) {
+ case HVA_VERSION_V400:
+ dev_dbg(dev, "%s IP hardware version 0x%lx\n",
+ HVA_PREFIX, version);
+ break;
+ default:
+ dev_err(dev, "%s unknown IP hardware version 0x%lx\n",
+ HVA_PREFIX, version);
+ version = HVA_VERSION_UNKNOWN;
+ break;
+ }
+
+ return version;
+}
+
+int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *esram;
+ int ret;
+
+ WARN_ON(!hva);
+
+ /* get memory for registers */
+ hva->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(hva->regs)) {
+ dev_err(dev, "%s failed to get regs\n", HVA_PREFIX);
+ return PTR_ERR(hva->regs);
+ }
+
+ /* get memory for esram */
+ esram = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!esram) {
+ dev_err(dev, "%s failed to get esram\n", HVA_PREFIX);
+ return -ENODEV;
+ }
+ hva->esram_addr = esram->start;
+ hva->esram_size = resource_size(esram);
+
+ dev_info(dev, "%s esram reserved for address: 0x%x size:%d\n",
+ HVA_PREFIX, hva->esram_addr, hva->esram_size);
+
+ /* get clock resource */
+ hva->clk = devm_clk_get(dev, "clk_hva");
+ if (IS_ERR(hva->clk)) {
+ dev_err(dev, "%s failed to get clock\n", HVA_PREFIX);
+ return PTR_ERR(hva->clk);
+ }
+
+ ret = clk_prepare(hva->clk);
+ if (ret < 0) {
+ dev_err(dev, "%s failed to prepare clock\n", HVA_PREFIX);
+ hva->clk = ERR_PTR(-EINVAL);
+ return ret;
+ }
+
+ /* get status interrupt resource */
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto err_clk;
+ hva->irq_its = ret;
+
+ ret = devm_request_threaded_irq(dev, hva->irq_its, hva_hw_its_interrupt,
+ hva_hw_its_irq_thread,
+ IRQF_ONESHOT,
+ "hva_its_irq", hva);
+ if (ret) {
+ dev_err(dev, "%s failed to install status IRQ 0x%x\n",
+ HVA_PREFIX, hva->irq_its);
+ goto err_clk;
+ }
+ disable_irq(hva->irq_its);
+
+ /* get error interrupt resource */
+ ret = platform_get_irq(pdev, 1);
+ if (ret < 0)
+ goto err_clk;
+ hva->irq_err = ret;
+
+ ret = devm_request_threaded_irq(dev, hva->irq_err, hva_hw_err_interrupt,
+ hva_hw_err_irq_thread,
+ IRQF_ONESHOT,
+ "hva_err_irq", hva);
+ if (ret) {
+ dev_err(dev, "%s failed to install error IRQ 0x%x\n",
+ HVA_PREFIX, hva->irq_err);
+ goto err_clk;
+ }
+ disable_irq(hva->irq_err);
+
+ /* initialise protection mutex */
+ mutex_init(&hva->protect_mutex);
+
+ /* initialise completion signal */
+ init_completion(&hva->interrupt);
+
+ /* initialise runtime power management */
+ pm_runtime_set_autosuspend_delay(dev, AUTOSUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_enable(dev);
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ dev_err(dev, "%s failed to set PM\n", HVA_PREFIX);
+ goto err_disable;
+ }
+
+ /* check IP hardware version */
+ hva->ip_version = hva_hw_get_ip_version(hva);
+
+ if (hva->ip_version == HVA_VERSION_UNKNOWN) {
+ ret = -EINVAL;
+ goto err_pm;
+ }
+
+ dev_info(dev, "%s found hva device (version 0x%lx)\n", HVA_PREFIX,
+ hva->ip_version);
+
+ return 0;
+
+err_pm:
+ pm_runtime_put(dev);
+err_disable:
+ pm_runtime_disable(dev);
+err_clk:
+ if (hva->clk)
+ clk_unprepare(hva->clk);
+
+ return ret;
+}
+
+void hva_hw_remove(struct hva_dev *hva)
+{
+ struct device *dev = hva_to_dev(hva);
+
+ disable_irq(hva->irq_its);
+ disable_irq(hva->irq_err);
+
+ pm_runtime_put_autosuspend(dev);
+ pm_runtime_disable(dev);
+}
+
+int hva_hw_runtime_suspend(struct device *dev)
+{
+ struct hva_dev *hva = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(hva->clk);
+
+ return 0;
+}
+
+int hva_hw_runtime_resume(struct device *dev)
+{
+ struct hva_dev *hva = dev_get_drvdata(dev);
+
+ if (clk_prepare_enable(hva->clk)) {
+ dev_err(hva->dev, "%s failed to prepare hva clk\n",
+ HVA_PREFIX);
+ return -EINVAL;
+ }
+
+ if (clk_set_rate(hva->clk, CLK_RATE)) {
+ dev_err(dev, "%s failed to set clock frequency\n",
+ HVA_PREFIX);
+ clk_disable_unprepare(hva->clk);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hva_hw_execute_task(struct hva_ctx *ctx, enum hva_hw_cmd_type cmd,
+ struct hva_buffer *task)
+{
+ struct hva_dev *hva = ctx_to_hdev(ctx);
+ struct device *dev = hva_to_dev(hva);
+ u8 client_id = ctx->id;
+ int ret;
+ u32 reg = 0;
+ bool got_pm = false;
+
+ mutex_lock(&hva->protect_mutex);
+
+ /* enable irqs */
+ enable_irq(hva->irq_its);
+ enable_irq(hva->irq_err);
+
+ if (pm_runtime_resume_and_get(dev) < 0) {
+ dev_err(dev, "%s failed to get pm_runtime\n", ctx->name);
+ ctx->sys_errors++;
+ ret = -EFAULT;
+ goto out;
+ }
+ got_pm = true;
+
+ reg = readl_relaxed(hva->regs + HVA_HIF_REG_CLK_GATING);
+ switch (cmd) {
+ case H264_ENC:
+ reg |= CLK_GATING_HVC;
+ break;
+ default:
+ dev_dbg(dev, "%s unknown command 0x%x\n", ctx->name, cmd);
+ ctx->encode_errors++;
+ ret = -EFAULT;
+ goto out;
+ }
+ writel_relaxed(reg, hva->regs + HVA_HIF_REG_CLK_GATING);
+
+ dev_dbg(dev, "%s %s: write configuration registers\n", ctx->name,
+ __func__);
+
+ /* byte swap config */
+ writel_relaxed(BSM_CFG_VAL1, hva->regs + HVA_HIF_REG_BSM);
+
+ /* define Max Opcode Size and Max Message Size for LMI and EMI */
+ writel_relaxed(MIF_CFG_VAL3, hva->regs + HVA_HIF_REG_MIF_CFG);
+ writel_relaxed(HEC_MIF_CFG_VAL, hva->regs + HVA_HIF_REG_HEC_MIF_CFG);
+
+ /*
+ * command FIFO: task_id[31:16] client_id[15:8] command_type[7:0]
+ * the context identifier is provided as client identifier to the
+ * hardware, and is retrieved in the interrupt functions from the
+ * status register
+ */
+ dev_dbg(dev, "%s %s: send task (cmd: %d, task_desc: %pad)\n",
+ ctx->name, __func__, cmd + (client_id << 8), &task->paddr);
+ writel_relaxed(cmd + (client_id << 8), hva->regs + HVA_HIF_FIFO_CMD);
+ writel_relaxed(task->paddr, hva->regs + HVA_HIF_FIFO_CMD);
+
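+ /* wait for the status or error interrupt thread to signal completion (2 s timeout) */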
+ if (!wait_for_completion_timeout(&hva->interrupt,
+ msecs_to_jiffies(2000))) {
+ dev_err(dev, "%s %s: time out on completion\n", ctx->name,
+ __func__);
+ ctx->encode_errors++;
+ ret = -EFAULT;
+ goto out;
+ }
+
+ /* get encoding status */
+ ret = ctx->hw_err ? -EFAULT : 0;
+
+ ctx->encode_errors += ctx->hw_err ? 1 : 0;
+
+out:
+ disable_irq(hva->irq_its);
+ disable_irq(hva->irq_err);
+
+ switch (cmd) {
+ case H264_ENC:
+ reg &= ~CLK_GATING_HVC;
+ writel_relaxed(reg, hva->regs + HVA_HIF_REG_CLK_GATING);
+ break;
+ default:
+ dev_dbg(dev, "%s unknown command 0x%x\n", ctx->name, cmd);
+ }
+
+ if (got_pm)
+ pm_runtime_put_autosuspend(dev);
+ mutex_unlock(&hva->protect_mutex);
+
+ return ret;
+}
+
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+#define DUMP(reg) seq_printf(s, "%-30s: 0x%08X\n",\
+ #reg, readl_relaxed(hva->regs + reg))
+
+void hva_hw_dump_regs(struct hva_dev *hva, struct seq_file *s)
+{
+ struct device *dev = hva_to_dev(hva);
+
+ mutex_lock(&hva->protect_mutex);
+
+ if (pm_runtime_resume_and_get(dev) < 0) {
+ seq_puts(s, "Cannot wake up IP\n");
+ mutex_unlock(&hva->protect_mutex);
+ return;
+ }
+
+ seq_printf(s, "Registers:\nReg @ = 0x%p\n", hva->regs);
+
+ DUMP(HVA_HIF_REG_RST);
+ DUMP(HVA_HIF_REG_RST_ACK);
+ DUMP(HVA_HIF_REG_MIF_CFG);
+ DUMP(HVA_HIF_REG_HEC_MIF_CFG);
+ DUMP(HVA_HIF_REG_CFL);
+ DUMP(HVA_HIF_REG_SFL);
+ DUMP(HVA_HIF_REG_LMI_ERR);
+ DUMP(HVA_HIF_REG_EMI_ERR);
+ DUMP(HVA_HIF_REG_HEC_MIF_ERR);
+ DUMP(HVA_HIF_REG_HEC_STS);
+ DUMP(HVA_HIF_REG_HVC_STS);
+ DUMP(HVA_HIF_REG_HJE_STS);
+ DUMP(HVA_HIF_REG_CNT);
+ DUMP(HVA_HIF_REG_HEC_CHKSYN_DIS);
+ DUMP(HVA_HIF_REG_CLK_GATING);
+ DUMP(HVA_HIF_REG_VERSION);
+
+ pm_runtime_put_autosuspend(dev);
+ mutex_unlock(&hva->protect_mutex);
+}
+#endif
diff --git a/drivers/media/platform/st/sti/hva/hva-hw.h b/drivers/media/platform/st/sti/hva/hva-hw.h
new file mode 100644
index 000000000..b29899026
--- /dev/null
+++ b/drivers/media/platform/st/sti/hva/hva-hw.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Yannick Fertre <yannick.fertre@st.com>
+ * Hugues Fruchet <hugues.fruchet@st.com>
+ */
+
+#ifndef HVA_HW_H
+#define HVA_HW_H
+
+#include "hva-mem.h"
+
+/* HVA Versions */
+#define HVA_VERSION_UNKNOWN 0x000
+#define HVA_VERSION_V400 0x400
+
+/* HVA command types */
+enum hva_hw_cmd_type {
+ /* RESERVED = 0x00 */
+ /* RESERVED = 0x01 */
+ H264_ENC = 0x02,
+ /* RESERVED = 0x03 */
+ /* RESERVED = 0x04 */
+ /* RESERVED = 0x05 */
+ /* RESERVED = 0x06 */
+ /* RESERVED = 0x07 */
+ REMOVE_CLIENT = 0x08,
+ FREEZE_CLIENT = 0x09,
+ START_CLIENT = 0x0A,
+ FREEZE_ALL = 0x0B,
+ START_ALL = 0x0C,
+ REMOVE_ALL = 0x0D
+};
+
+int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva);
+void hva_hw_remove(struct hva_dev *hva);
+int hva_hw_runtime_suspend(struct device *dev);
+int hva_hw_runtime_resume(struct device *dev);
+int hva_hw_execute_task(struct hva_ctx *ctx, enum hva_hw_cmd_type cmd,
+ struct hva_buffer *task);
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+void hva_hw_dump_regs(struct hva_dev *hva, struct seq_file *s);
+#endif
+
+#endif /* HVA_HW_H */
diff --git a/drivers/media/platform/st/sti/hva/hva-mem.c b/drivers/media/platform/st/sti/hva/hva-mem.c
new file mode 100644
index 000000000..68047b60b
--- /dev/null
+++ b/drivers/media/platform/st/sti/hva/hva-mem.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Yannick Fertre <yannick.fertre@st.com>
+ * Hugues Fruchet <hugues.fruchet@st.com>
+ */
+
+#include "hva.h"
+#include "hva-mem.h"
+
+int hva_mem_alloc(struct hva_ctx *ctx, u32 size, const char *name,
+ struct hva_buffer **buf)
+{
+ struct device *dev = ctx_to_dev(ctx);
+ struct hva_buffer *b;
+ dma_addr_t paddr;
+ void *base;
+
+ b = devm_kzalloc(dev, sizeof(*b), GFP_KERNEL);
+ if (!b) {
+ ctx->sys_errors++;
+ return -ENOMEM;
+ }
+
+ base = dma_alloc_attrs(dev, size, &paddr, GFP_KERNEL,
+ DMA_ATTR_WRITE_COMBINE);
+ if (!base) {
+ dev_err(dev, "%s %s : dma_alloc_attrs failed for %s (size=%d)\n",
+ ctx->name, __func__, name, size);
+ ctx->sys_errors++;
+ devm_kfree(dev, b);
+ return -ENOMEM;
+ }
+
+ b->size = size;
+ b->paddr = paddr;
+ b->vaddr = base;
+ b->name = name;
+
+ dev_dbg(dev,
+ "%s allocate %d bytes of HW memory @(virt=%p, phy=%pad): %s\n",
+ ctx->name, size, b->vaddr, &b->paddr, b->name);
+
+ /* return hva buffer to user */
+ *buf = b;
+
+ return 0;
+}
+
+void hva_mem_free(struct hva_ctx *ctx, struct hva_buffer *buf)
+{
+ struct device *dev = ctx_to_dev(ctx);
+
+ dev_dbg(dev,
+ "%s free %d bytes of HW memory @(virt=%p, phy=%pad): %s\n",
+ ctx->name, buf->size, buf->vaddr, &buf->paddr, buf->name);
+
+ dma_free_attrs(dev, buf->size, buf->vaddr, buf->paddr,
+ DMA_ATTR_WRITE_COMBINE);
+
+ devm_kfree(dev, buf);
+}
diff --git a/drivers/media/platform/st/sti/hva/hva-mem.h b/drivers/media/platform/st/sti/hva/hva-mem.h
new file mode 100644
index 000000000..fec549dff
--- /dev/null
+++ b/drivers/media/platform/st/sti/hva/hva-mem.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Yannick Fertre <yannick.fertre@st.com>
+ * Hugues Fruchet <hugues.fruchet@st.com>
+ */
+
+#ifndef HVA_MEM_H
+#define HVA_MEM_H
+
+/**
+ * struct hva_buffer - hva buffer
+ *
+ * @name: name of requester
+ * @paddr: physical address (for hardware)
+ * @vaddr: virtual address (kernel can read/write)
+ * @size: size of buffer
+ */
+struct hva_buffer {
+ const char *name;
+ dma_addr_t paddr;
+ void *vaddr;
+ u32 size;
+};
+
+int hva_mem_alloc(struct hva_ctx *ctx,
+ __u32 size,
+ const char *name,
+ struct hva_buffer **buf);
+
+void hva_mem_free(struct hva_ctx *ctx,
+ struct hva_buffer *buf);
+
+#endif /* HVA_MEM_H */
diff --git a/drivers/media/platform/st/sti/hva/hva-v4l2.c b/drivers/media/platform/st/sti/hva/hva-v4l2.c
new file mode 100644
index 000000000..bb34d6997
--- /dev/null
+++ b/drivers/media/platform/st/sti/hva/hva-v4l2.c
@@ -0,0 +1,1476 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Yannick Fertre <yannick.fertre@st.com>
+ * Hugues Fruchet <hugues.fruchet@st.com>
+ */
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "hva.h"
+#include "hva-hw.h"
+
+#define MIN_FRAMES 1
+#define MIN_STREAMS 1
+
+#define HVA_MIN_WIDTH 32
+#define HVA_MAX_WIDTH 1920
+#define HVA_MIN_HEIGHT 32
+#define HVA_MAX_HEIGHT 1920
+
+/* HVA requires a 16x16 pixels alignment for frames */
+#define HVA_WIDTH_ALIGNMENT 16
+#define HVA_HEIGHT_ALIGNMENT 16
+
+#define HVA_DEFAULT_WIDTH HVA_MIN_WIDTH
+#define HVA_DEFAULT_HEIGHT HVA_MIN_HEIGHT
+#define HVA_DEFAULT_FRAME_NUM 1
+#define HVA_DEFAULT_FRAME_DEN 30
+
+#define to_type_str(type) (type == V4L2_BUF_TYPE_VIDEO_OUTPUT ? \
+ "frame" : "stream")
+
+#define fh_to_ctx(f) (container_of(f, struct hva_ctx, fh))
+
+/* registry of available encoders */
+static const struct hva_enc *hva_encoders[] = {
+ &nv12h264enc,
+ &nv21h264enc,
+};
+
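+/* NV12/NV21 are 4:2:0 semi-planar formats, hence 3/2 bytes per pixel */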
+static inline int frame_size(u32 w, u32 h, u32 fmt)
+{
+ switch (fmt) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ return (w * h * 3) / 2;
+ default:
+ return 0;
+ }
+}
+
+static inline int frame_stride(u32 w, u32 fmt)
+{
+ switch (fmt) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ return w;
+ default:
+ return 0;
+ }
+}
+
+static inline int frame_alignment(u32 fmt)
+{
+ switch (fmt) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ /* multiple of 2 */
+ return 2;
+ default:
+ return 1;
+ }
+}
+
+static inline int estimated_stream_size(u32 w, u32 h)
+{
+ /*
+ * HVA only encodes in YUV420 format, regardless of the input
+ * frame format. A compression ratio of 2 is assumed: the maximum
+ * size of a stream is thus estimated as ((width x height x 3 / 2) / 2).
+ */
+ return (w * h * 3) / 4;
+}
+
+static void set_default_params(struct hva_ctx *ctx)
+{
+ struct hva_frameinfo *frameinfo = &ctx->frameinfo;
+ struct hva_streaminfo *streaminfo = &ctx->streaminfo;
+
+ frameinfo->pixelformat = V4L2_PIX_FMT_NV12;
+ frameinfo->width = HVA_DEFAULT_WIDTH;
+ frameinfo->height = HVA_DEFAULT_HEIGHT;
+ frameinfo->aligned_width = ALIGN(frameinfo->width,
+ HVA_WIDTH_ALIGNMENT);
+ frameinfo->aligned_height = ALIGN(frameinfo->height,
+ HVA_HEIGHT_ALIGNMENT);
+ frameinfo->size = frame_size(frameinfo->aligned_width,
+ frameinfo->aligned_height,
+ frameinfo->pixelformat);
+
+ streaminfo->streamformat = V4L2_PIX_FMT_H264;
+ streaminfo->width = HVA_DEFAULT_WIDTH;
+ streaminfo->height = HVA_DEFAULT_HEIGHT;
+
+ ctx->colorspace = V4L2_COLORSPACE_REC709;
+ ctx->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ ctx->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ ctx->quantization = V4L2_QUANTIZATION_DEFAULT;
+
+ ctx->max_stream_size = estimated_stream_size(streaminfo->width,
+ streaminfo->height);
+}
+
+static const struct hva_enc *hva_find_encoder(struct hva_ctx *ctx,
+ u32 pixelformat,
+ u32 streamformat)
+{
+ struct hva_dev *hva = ctx_to_hdev(ctx);
+ const struct hva_enc *enc;
+ unsigned int i;
+
+ for (i = 0; i < hva->nb_of_encoders; i++) {
+ enc = hva->encoders[i];
+ if ((enc->pixelformat == pixelformat) &&
+ (enc->streamformat == streamformat))
+ return enc;
+ }
+
+ return NULL;
+}
+
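+/* add a format to the given list unless it is already registered */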
+static void register_format(u32 format, u32 formats[], u32 *nb_of_formats)
+{
+ u32 i;
+ bool found = false;
+
+ for (i = 0; i < *nb_of_formats; i++) {
+ if (format == formats[i]) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ formats[(*nb_of_formats)++] = format;
+}
+
+static void register_formats(struct hva_dev *hva)
+{
+ unsigned int i;
+
+ for (i = 0; i < hva->nb_of_encoders; i++) {
+ register_format(hva->encoders[i]->pixelformat,
+ hva->pixelformats,
+ &hva->nb_of_pixelformats);
+
+ register_format(hva->encoders[i]->streamformat,
+ hva->streamformats,
+ &hva->nb_of_streamformats);
+ }
+}
+
+static void register_encoders(struct hva_dev *hva)
+{
+ struct device *dev = hva_to_dev(hva);
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(hva_encoders); i++) {
+ if (hva->nb_of_encoders >= HVA_MAX_ENCODERS) {
+ dev_dbg(dev,
+ "%s failed to register %s encoder (%d maximum reached)\n",
+ HVA_PREFIX, hva_encoders[i]->name,
+ HVA_MAX_ENCODERS);
+ return;
+ }
+
+ hva->encoders[hva->nb_of_encoders++] = hva_encoders[i];
+ dev_info(dev, "%s %s encoder registered\n", HVA_PREFIX,
+ hva_encoders[i]->name);
+ }
+}
+
+static int hva_open_encoder(struct hva_ctx *ctx, u32 streamformat,
+ u32 pixelformat, struct hva_enc **penc)
+{
+ struct hva_dev *hva = ctx_to_hdev(ctx);
+ struct device *dev = ctx_to_dev(ctx);
+ struct hva_enc *enc;
+ int ret;
+
+ /* find an encoder which can deal with these formats */
+ enc = (struct hva_enc *)hva_find_encoder(ctx, pixelformat,
+ streamformat);
+ if (!enc) {
+ dev_err(dev, "%s no encoder found matching %4.4s => %4.4s\n",
+ ctx->name, (char *)&pixelformat, (char *)&streamformat);
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "%s one encoder matching %4.4s => %4.4s\n",
+ ctx->name, (char *)&pixelformat, (char *)&streamformat);
+
+ /* update instance name */
+ snprintf(ctx->name, sizeof(ctx->name), "[%3d:%4.4s]",
+ hva->instance_id, (char *)&streamformat);
+
+ /* open encoder instance */
+ ret = enc->open(ctx);
+ if (ret) {
+ dev_err(dev, "%s failed to open encoder instance (%d)\n",
+ ctx->name, ret);
+ return ret;
+ }
+
+ dev_dbg(dev, "%s %s encoder opened\n", ctx->name, enc->name);
+
+ *penc = enc;
+
+ return ret;
+}
+
+static void hva_dbg_summary(struct hva_ctx *ctx)
+{
+ struct device *dev = ctx_to_dev(ctx);
+ struct hva_streaminfo *stream = &ctx->streaminfo;
+ struct hva_frameinfo *frame = &ctx->frameinfo;
+
+ if (!(ctx->flags & HVA_FLAG_STREAMINFO))
+ return;
+
+ dev_dbg(dev, "%s %4.4s %dx%d > %4.4s %dx%d %s %s: %d frames encoded, %d system errors, %d encoding errors, %d frame errors\n",
+ ctx->name,
+ (char *)&frame->pixelformat,
+ frame->aligned_width, frame->aligned_height,
+ (char *)&stream->streamformat,
+ stream->width, stream->height,
+ stream->profile, stream->level,
+ ctx->encoded_frames,
+ ctx->sys_errors,
+ ctx->encode_errors,
+ ctx->frame_errors);
+}
+
+/*
+ * V4L2 ioctl operations
+ */
+
+static int hva_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_dev *hva = ctx_to_hdev(ctx);
+
+ strscpy(cap->driver, HVA_NAME, sizeof(cap->driver));
+ strscpy(cap->card, hva->vdev->name, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ hva->pdev->name);
+
+ return 0;
+}
+
+static int hva_enum_fmt_stream(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_dev *hva = ctx_to_hdev(ctx);
+
+ if (unlikely(f->index >= hva->nb_of_streamformats))
+ return -EINVAL;
+
+ f->pixelformat = hva->streamformats[f->index];
+
+ return 0;
+}
+
+static int hva_enum_fmt_frame(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_dev *hva = ctx_to_hdev(ctx);
+
+ if (unlikely(f->index >= hva->nb_of_pixelformats))
+ return -EINVAL;
+
+ f->pixelformat = hva->pixelformats[f->index];
+
+ return 0;
+}
+
+static int hva_g_fmt_stream(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_streaminfo *streaminfo = &ctx->streaminfo;
+
+ f->fmt.pix.width = streaminfo->width;
+ f->fmt.pix.height = streaminfo->height;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.colorspace = ctx->colorspace;
+ f->fmt.pix.xfer_func = ctx->xfer_func;
+ f->fmt.pix.ycbcr_enc = ctx->ycbcr_enc;
+ f->fmt.pix.quantization = ctx->quantization;
+ f->fmt.pix.pixelformat = streaminfo->streamformat;
+ f->fmt.pix.bytesperline = 0;
+ f->fmt.pix.sizeimage = ctx->max_stream_size;
+
+ return 0;
+}
+
+static int hva_g_fmt_frame(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_frameinfo *frameinfo = &ctx->frameinfo;
+
+ f->fmt.pix.width = frameinfo->width;
+ f->fmt.pix.height = frameinfo->height;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.colorspace = ctx->colorspace;
+ f->fmt.pix.xfer_func = ctx->xfer_func;
+ f->fmt.pix.ycbcr_enc = ctx->ycbcr_enc;
+ f->fmt.pix.quantization = ctx->quantization;
+ f->fmt.pix.pixelformat = frameinfo->pixelformat;
+ f->fmt.pix.bytesperline = frame_stride(frameinfo->aligned_width,
+ frameinfo->pixelformat);
+ f->fmt.pix.sizeimage = frameinfo->size;
+
+ return 0;
+}
+
+static int hva_try_fmt_stream(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct device *dev = ctx_to_dev(ctx);
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ u32 streamformat = pix->pixelformat;
+ const struct hva_enc *enc;
+ u32 width, height;
+ u32 stream_size;
+
+ enc = hva_find_encoder(ctx, ctx->frameinfo.pixelformat, streamformat);
+ if (!enc) {
+ dev_dbg(dev,
+ "%s V4L2 TRY_FMT (CAPTURE): unsupported format %.4s\n",
+ ctx->name, (char *)&pix->pixelformat);
+ return -EINVAL;
+ }
+
+ width = pix->width;
+ height = pix->height;
+ if (ctx->flags & HVA_FLAG_FRAMEINFO) {
+ /*
+ * if the frame resolution is already fixed, only allow the
+ * same stream resolution
+ */
+ pix->width = ctx->frameinfo.width;
+ pix->height = ctx->frameinfo.height;
+ if ((pix->width != width) || (pix->height != height))
+ dev_dbg(dev,
+ "%s V4L2 TRY_FMT (CAPTURE): resolution updated %dx%d -> %dx%d to fit frame resolution\n",
+ ctx->name, width, height,
+ pix->width, pix->height);
+ } else {
+ /* adjust width & height */
+ v4l_bound_align_image(&pix->width,
+ HVA_MIN_WIDTH, enc->max_width,
+ 0,
+ &pix->height,
+ HVA_MIN_HEIGHT, enc->max_height,
+ 0,
+ 0);
+
+ if ((pix->width != width) || (pix->height != height))
+ dev_dbg(dev,
+ "%s V4L2 TRY_FMT (CAPTURE): resolution updated %dx%d -> %dx%d to fit min/max/alignment\n",
+ ctx->name, width, height,
+ pix->width, pix->height);
+ }
+
+ stream_size = estimated_stream_size(pix->width, pix->height);
+ if (pix->sizeimage < stream_size)
+ pix->sizeimage = stream_size;
+
+ pix->bytesperline = 0;
+ pix->colorspace = ctx->colorspace;
+ pix->xfer_func = ctx->xfer_func;
+ pix->ycbcr_enc = ctx->ycbcr_enc;
+ pix->quantization = ctx->quantization;
+ pix->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static int hva_try_fmt_frame(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct device *dev = ctx_to_dev(ctx);
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ u32 pixelformat = pix->pixelformat;
+ const struct hva_enc *enc;
+ u32 width, height;
+
+ enc = hva_find_encoder(ctx, pixelformat, ctx->streaminfo.streamformat);
+ if (!enc) {
+ dev_dbg(dev,
+ "%s V4L2 TRY_FMT (OUTPUT): unsupported format %.4s\n",
+ ctx->name, (char *)&pixelformat);
+ return -EINVAL;
+ }
+
+ /* adjust width & height */
+ width = pix->width;
+ height = pix->height;
+ v4l_bound_align_image(&pix->width,
+ HVA_MIN_WIDTH, HVA_MAX_WIDTH,
+ frame_alignment(pixelformat) - 1,
+ &pix->height,
+ HVA_MIN_HEIGHT, HVA_MAX_HEIGHT,
+ frame_alignment(pixelformat) - 1,
+ 0);
+
+ if ((pix->width != width) || (pix->height != height))
+ dev_dbg(dev,
+ "%s V4L2 TRY_FMT (OUTPUT): resolution updated %dx%d -> %dx%d to fit min/max/alignment\n",
+ ctx->name, width, height, pix->width, pix->height);
+
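+ /* stride and image size are derived from the 16x16-aligned dimensions */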
+ width = ALIGN(pix->width, HVA_WIDTH_ALIGNMENT);
+ height = ALIGN(pix->height, HVA_HEIGHT_ALIGNMENT);
+
+ if (!pix->colorspace) {
+ pix->colorspace = V4L2_COLORSPACE_REC709;
+ pix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ pix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ pix->quantization = V4L2_QUANTIZATION_DEFAULT;
+ }
+
+ pix->bytesperline = frame_stride(width, pixelformat);
+ pix->sizeimage = frame_size(width, height, pixelformat);
+ pix->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static int hva_s_fmt_stream(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct device *dev = ctx_to_dev(ctx);
+ struct vb2_queue *vq;
+ int ret;
+
+ ret = hva_try_fmt_stream(file, fh, f);
+ if (ret) {
+ dev_dbg(dev, "%s V4L2 S_FMT (CAPTURE): unsupported format %.4s\n",
+ ctx->name, (char *)&f->fmt.pix.pixelformat);
+ return ret;
+ }
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_streaming(vq)) {
+ dev_dbg(dev, "%s V4L2 S_FMT (CAPTURE): queue busy\n",
+ ctx->name);
+ return -EBUSY;
+ }
+
+ ctx->max_stream_size = f->fmt.pix.sizeimage;
+ ctx->streaminfo.width = f->fmt.pix.width;
+ ctx->streaminfo.height = f->fmt.pix.height;
+ ctx->streaminfo.streamformat = f->fmt.pix.pixelformat;
+ ctx->flags |= HVA_FLAG_STREAMINFO;
+
+ return 0;
+}
+
+static int hva_s_fmt_frame(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct device *dev = ctx_to_dev(ctx);
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct vb2_queue *vq;
+ int ret;
+
+ ret = hva_try_fmt_frame(file, fh, f);
+ if (ret) {
+ dev_dbg(dev, "%s V4L2 S_FMT (OUTPUT): unsupported format %.4s\n",
+ ctx->name, (char *)&pix->pixelformat);
+ return ret;
+ }
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_streaming(vq)) {
+ dev_dbg(dev, "%s V4L2 S_FMT (OUTPUT): queue busy\n", ctx->name);
+ return -EBUSY;
+ }
+
+ ctx->colorspace = pix->colorspace;
+ ctx->xfer_func = pix->xfer_func;
+ ctx->ycbcr_enc = pix->ycbcr_enc;
+ ctx->quantization = pix->quantization;
+
+ ctx->frameinfo.aligned_width = ALIGN(pix->width, HVA_WIDTH_ALIGNMENT);
+ ctx->frameinfo.aligned_height = ALIGN(pix->height,
+ HVA_HEIGHT_ALIGNMENT);
+ ctx->frameinfo.size = pix->sizeimage;
+ ctx->frameinfo.pixelformat = pix->pixelformat;
+ ctx->frameinfo.width = pix->width;
+ ctx->frameinfo.height = pix->height;
+ ctx->flags |= HVA_FLAG_FRAMEINFO;
+
+ return 0;
+}
+
+static int hva_g_parm(struct file *file, void *fh, struct v4l2_streamparm *sp)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct v4l2_fract *time_per_frame = &ctx->ctrls.time_per_frame;
+
+ if (sp->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ sp->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+ sp->parm.output.timeperframe.numerator = time_per_frame->numerator;
+ sp->parm.output.timeperframe.denominator =
+ time_per_frame->denominator;
+
+ return 0;
+}
+
+static int hva_s_parm(struct file *file, void *fh, struct v4l2_streamparm *sp)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct v4l2_fract *time_per_frame = &ctx->ctrls.time_per_frame;
+
+ if (sp->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ if (!sp->parm.output.timeperframe.numerator ||
+ !sp->parm.output.timeperframe.denominator)
+ return hva_g_parm(file, fh, sp);
+
+ sp->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+ time_per_frame->numerator = sp->parm.output.timeperframe.numerator;
+ time_per_frame->denominator =
+ sp->parm.output.timeperframe.denominator;
+
+ return 0;
+}
+
+static int hva_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct device *dev = ctx_to_dev(ctx);
+
+ if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ /*
+ * depending on the targeted compressed video format, the
+ * capture buffer might contain headers (e.g. H.264 SPS/PPS)
+ * filled in by the driver client; the size of this data is
+ * copied from the bytesused field of the V4L2 buffer into the
+ * payload field of the hva stream buffer
+ */
+ struct vb2_queue *vq;
+ struct hva_stream *stream;
+ struct vb2_buffer *vb2_buf;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, buf->type);
+
+ if (buf->index >= vq->num_buffers) {
+ dev_dbg(dev, "%s buffer index %d out of range (%d)\n",
+ ctx->name, buf->index, vq->num_buffers);
+ return -EINVAL;
+ }
+
+ vb2_buf = vb2_get_buffer(vq, buf->index);
+ stream = to_hva_stream(to_vb2_v4l2_buffer(vb2_buf));
+ stream->bytesused = buf->bytesused;
+ }
+
+ return v4l2_m2m_qbuf(file, ctx->fh.m2m_ctx, buf);
+}
+
+/* V4L2 ioctl ops */
+static const struct v4l2_ioctl_ops hva_ioctl_ops = {
+ .vidioc_querycap = hva_querycap,
+ .vidioc_enum_fmt_vid_cap = hva_enum_fmt_stream,
+ .vidioc_enum_fmt_vid_out = hva_enum_fmt_frame,
+ .vidioc_g_fmt_vid_cap = hva_g_fmt_stream,
+ .vidioc_g_fmt_vid_out = hva_g_fmt_frame,
+ .vidioc_try_fmt_vid_cap = hva_try_fmt_stream,
+ .vidioc_try_fmt_vid_out = hva_try_fmt_frame,
+ .vidioc_s_fmt_vid_cap = hva_s_fmt_stream,
+ .vidioc_s_fmt_vid_out = hva_s_fmt_frame,
+ .vidioc_g_parm = hva_g_parm,
+ .vidioc_s_parm = hva_s_parm,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_qbuf = hva_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/*
+ * V4L2 control operations
+ */
+
+static int hva_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct hva_ctx *ctx = container_of(ctrl->handler, struct hva_ctx,
+ ctrl_handler);
+ struct device *dev = ctx_to_dev(ctx);
+
+ dev_dbg(dev, "%s S_CTRL: id = %d, val = %d\n", ctx->name,
+ ctrl->id, ctrl->val);
+
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+ ctx->ctrls.bitrate_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ ctx->ctrls.gop_size = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE:
+ ctx->ctrls.bitrate = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_ASPECT:
+ ctx->ctrls.aspect = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ ctx->ctrls.profile = ctrl->val;
+ snprintf(ctx->streaminfo.profile,
+ sizeof(ctx->streaminfo.profile),
+ "%s profile",
+ v4l2_ctrl_get_menu(ctrl->id)[ctrl->val]);
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ ctx->ctrls.level = ctrl->val;
+ snprintf(ctx->streaminfo.level,
+ sizeof(ctx->streaminfo.level),
+ "level %s",
+ v4l2_ctrl_get_menu(ctrl->id)[ctrl->val]);
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
+ ctx->ctrls.entropy_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE:
+ ctx->ctrls.cpb_size = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM:
+ ctx->ctrls.dct8x8 = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_MIN_QP:
+ ctx->ctrls.qpmin = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_MAX_QP:
+ ctx->ctrls.qpmax = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE:
+ ctx->ctrls.vui_sar = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:
+ ctx->ctrls.vui_sar_idc = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_SEI_FRAME_PACKING:
+ ctx->ctrls.sei_fp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE:
+ ctx->ctrls.sei_fp_type = ctrl->val;
+ break;
+ default:
+ dev_dbg(dev, "%s S_CTRL: invalid control (id = %d)\n",
+ ctx->name, ctrl->id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* V4L2 control ops */
+static const struct v4l2_ctrl_ops hva_ctrl_ops = {
+ .s_ctrl = hva_s_ctrl,
+};
+
+static int hva_ctrls_setup(struct hva_ctx *ctx)
+{
+ struct device *dev = ctx_to_dev(ctx);
+ u64 mask;
+ enum v4l2_mpeg_video_h264_sei_fp_arrangement_type sei_fp_type =
+ V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_TOP_BOTTOM;
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, 15);
+
+ v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
+ V4L2_MPEG_VIDEO_BITRATE_MODE_CBR,
+ 0,
+ V4L2_MPEG_VIDEO_BITRATE_MODE_CBR);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_GOP_SIZE,
+ 1, 60, 1, 16);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_BITRATE,
+ 1000, 60000000, 1000, 20000000);
+
+ mask = ~(1 << V4L2_MPEG_VIDEO_ASPECT_1x1);
+ v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_ASPECT,
+ V4L2_MPEG_VIDEO_ASPECT_1x1,
+ mask,
+ V4L2_MPEG_VIDEO_ASPECT_1x1);
+
+ mask = ~((1 << V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_HIGH) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH));
+ v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH,
+ mask,
+ V4L2_MPEG_VIDEO_H264_PROFILE_HIGH);
+
+ v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+ V4L2_MPEG_VIDEO_H264_LEVEL_4_2,
+ 0,
+ V4L2_MPEG_VIDEO_H264_LEVEL_4_0);
+
+ v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE,
+ V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC,
+ 0,
+ V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE,
+ 1, 10000, 1, 3000);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM,
+ 0, 1, 1, 0);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_MIN_QP,
+ 0, 51, 1, 5);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_MAX_QP,
+ 0, 51, 1, 51);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE,
+ 0, 1, 1, 1);
+
+ mask = ~(1 << V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_1x1);
+ v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC,
+ V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_1x1,
+ mask,
+ V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_1x1);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_SEI_FRAME_PACKING,
+ 0, 1, 1, 0);
+
+ mask = ~(1 << sei_fp_type);
+ v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &hva_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE,
+ sei_fp_type,
+ mask,
+ sei_fp_type);
+
+ if (ctx->ctrl_handler.error) {
+ int err = ctx->ctrl_handler.error;
+
+ dev_dbg(dev, "%s controls setup failed (%d)\n",
+ ctx->name, err);
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ return err;
+ }
+
+ v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+
+ /* set default time per frame */
+ ctx->ctrls.time_per_frame.numerator = HVA_DEFAULT_FRAME_NUM;
+ ctx->ctrls.time_per_frame.denominator = HVA_DEFAULT_FRAME_DEN;
+
+ return 0;
+}
+
+/*
+ * mem-to-mem operations
+ */
+
+static void hva_run_work(struct work_struct *work)
+{
+ struct hva_ctx *ctx = container_of(work, struct hva_ctx, run_work);
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ const struct hva_enc *enc = ctx->enc;
+ struct hva_frame *frame;
+ struct hva_stream *stream;
+ int ret;
+
+ /* protect instance against reentrancy */
+ mutex_lock(&ctx->lock);
+
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+ hva_dbg_perf_begin(ctx);
+#endif
+
+ src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ frame = to_hva_frame(src_buf);
+ stream = to_hva_stream(dst_buf);
+ frame->vbuf.sequence = ctx->frame_num++;
+
+ ret = enc->encode(ctx, frame, stream);
+
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, stream->bytesused);
+ if (ret) {
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
+ } else {
+ /* propagate frame timestamp */
+ dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp;
+ dst_buf->field = V4L2_FIELD_NONE;
+ dst_buf->sequence = ctx->stream_num - 1;
+
+ ctx->encoded_frames++;
+
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+ hva_dbg_perf_end(ctx, stream);
+#endif
+
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
+ }
+
+ mutex_unlock(&ctx->lock);
+
+ v4l2_m2m_job_finish(ctx->hva_dev->m2m_dev, ctx->fh.m2m_ctx);
+}
+
+static void hva_device_run(void *priv)
+{
+ struct hva_ctx *ctx = priv;
+ struct hva_dev *hva = ctx_to_hdev(ctx);
+
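+ /* defer the encode to the work queue: hva_run_work() blocks until the hardware completes */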
+ queue_work(hva->work_queue, &ctx->run_work);
+}
+
+static void hva_job_abort(void *priv)
+{
+ struct hva_ctx *ctx = priv;
+ struct device *dev = ctx_to_dev(ctx);
+
+ dev_dbg(dev, "%s aborting job\n", ctx->name);
+
+ ctx->aborting = true;
+}
+
+static int hva_job_ready(void *priv)
+{
+ struct hva_ctx *ctx = priv;
+ struct device *dev = ctx_to_dev(ctx);
+
+ if (!v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx)) {
+ dev_dbg(dev, "%s job not ready: no frame buffers\n",
+ ctx->name);
+ return 0;
+ }
+
+ if (!v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx)) {
+ dev_dbg(dev, "%s job not ready: no stream buffers\n",
+ ctx->name);
+ return 0;
+ }
+
+ if (ctx->aborting) {
+ dev_dbg(dev, "%s job not ready: aborting\n", ctx->name);
+ return 0;
+ }
+
+ return 1;
+}
+
+/* mem-to-mem ops */
+static const struct v4l2_m2m_ops hva_m2m_ops = {
+ .device_run = hva_device_run,
+ .job_abort = hva_job_abort,
+ .job_ready = hva_job_ready,
+};
+
+/*
+ * VB2 queue operations
+ */
+
+static int hva_queue_setup(struct vb2_queue *vq,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct hva_ctx *ctx = vb2_get_drv_priv(vq);
+ struct device *dev = ctx_to_dev(ctx);
+ unsigned int size;
+
+ dev_dbg(dev, "%s %s queue setup: num_buffers %d\n", ctx->name,
+ to_type_str(vq->type), *num_buffers);
+
+ size = vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT ?
+ ctx->frameinfo.size : ctx->max_stream_size;
+
+ if (*num_planes)
+ return sizes[0] < size ? -EINVAL : 0;
+
+ /* only one plane supported */
+ *num_planes = 1;
+ sizes[0] = size;
+
+ return 0;
+}
+
+static int hva_buf_prepare(struct vb2_buffer *vb)
+{
+ struct hva_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct device *dev = ctx_to_dev(ctx);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ struct hva_frame *frame = to_hva_frame(vbuf);
+
+ if (vbuf->field == V4L2_FIELD_ANY)
+ vbuf->field = V4L2_FIELD_NONE;
+ if (vbuf->field != V4L2_FIELD_NONE) {
+ dev_dbg(dev,
+ "%s frame[%d] prepare: %d field not supported\n",
+ ctx->name, vb->index, vbuf->field);
+ return -EINVAL;
+ }
+
+ if (!frame->prepared) {
+ /* get memory addresses */
+ frame->vaddr = vb2_plane_vaddr(&vbuf->vb2_buf, 0);
+ frame->paddr = vb2_dma_contig_plane_dma_addr(
+ &vbuf->vb2_buf, 0);
+ frame->info = ctx->frameinfo;
+ frame->prepared = true;
+
+ dev_dbg(dev,
+ "%s frame[%d] prepared; virt=%p, phy=%pad\n",
+ ctx->name, vb->index,
+ frame->vaddr, &frame->paddr);
+ }
+ } else {
+ struct hva_stream *stream = to_hva_stream(vbuf);
+
+ if (!stream->prepared) {
+ /* get memory addresses */
+ stream->vaddr = vb2_plane_vaddr(&vbuf->vb2_buf, 0);
+ stream->paddr = vb2_dma_contig_plane_dma_addr(
+ &vbuf->vb2_buf, 0);
+ stream->size = vb2_plane_size(&vbuf->vb2_buf, 0);
+ stream->prepared = true;
+
+ dev_dbg(dev,
+ "%s stream[%d] prepared; virt=%p, phy=%pad\n",
+ ctx->name, vb->index,
+ stream->vaddr, &stream->paddr);
+ }
+ }
+
+ return 0;
+}
+
+static void hva_buf_queue(struct vb2_buffer *vb)
+{
+ struct hva_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ if (ctx->fh.m2m_ctx)
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static int hva_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct hva_ctx *ctx = vb2_get_drv_priv(vq);
+ struct hva_dev *hva = ctx_to_hdev(ctx);
+ struct device *dev = ctx_to_dev(ctx);
+ struct vb2_v4l2_buffer *vbuf;
+ int ret;
+ unsigned int i;
+ bool found = false;
+
+ dev_dbg(dev, "%s %s start streaming\n", ctx->name,
+ to_type_str(vq->type));
+
+ /* open the encoder once start_streaming has been called on both queues */
+ if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
+ if (!vb2_start_streaming_called(&ctx->fh.m2m_ctx->cap_q_ctx.q))
+ return 0;
+ } else {
+ if (!vb2_start_streaming_called(&ctx->fh.m2m_ctx->out_q_ctx.q))
+ return 0;
+ }
+
+ /* store the instance context in the instances array */
+ for (i = 0; i < HVA_MAX_INSTANCES; i++) {
+ if (!hva->instances[i]) {
+ hva->instances[i] = ctx;
+ /* save the context identifier in the context */
+ ctx->id = i;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ dev_err(dev, "%s maximum instances reached\n", ctx->name);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ hva->nb_of_instances++;
+
+ if (!ctx->enc) {
+ ret = hva_open_encoder(ctx,
+ ctx->streaminfo.streamformat,
+ ctx->frameinfo.pixelformat,
+ &ctx->enc);
+ if (ret < 0)
+ goto err_ctx;
+ }
+
+ return 0;
+
+err_ctx:
+ hva->instances[ctx->id] = NULL;
+ hva->nb_of_instances--;
+err:
+ if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ /* return all pending buffers to vb2 (in queued state) */
+ while ((vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_QUEUED);
+ } else {
+ /* return all pending buffers to vb2 (in queued state) */
+ while ((vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_QUEUED);
+ }
+
+ ctx->sys_errors++;
+
+ return ret;
+}
+
+static void hva_stop_streaming(struct vb2_queue *vq)
+{
+ struct hva_ctx *ctx = vb2_get_drv_priv(vq);
+ struct hva_dev *hva = ctx_to_hdev(ctx);
+ struct device *dev = ctx_to_dev(ctx);
+ const struct hva_enc *enc = ctx->enc;
+ struct vb2_v4l2_buffer *vbuf;
+
+ dev_dbg(dev, "%s %s stop streaming\n", ctx->name,
+ to_type_str(vq->type));
+
+ if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ /* return all pending buffers to vb2 (in error state) */
+ ctx->frame_num = 0;
+ while ((vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ } else {
+ /* return all pending buffers to vb2 (in error state) */
+ ctx->stream_num = 0;
+ while ((vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ }
+
+ if ((V4L2_TYPE_IS_OUTPUT(vq->type) &&
+ vb2_is_streaming(&ctx->fh.m2m_ctx->cap_q_ctx.q)) ||
+ (V4L2_TYPE_IS_CAPTURE(vq->type) &&
+ vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q))) {
+ dev_dbg(dev, "%s %s out=%d cap=%d\n",
+ ctx->name, to_type_str(vq->type),
+ vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q),
+ vb2_is_streaming(&ctx->fh.m2m_ctx->cap_q_ctx.q));
+ return;
+ }
+
+ /* close the encoder once stop_streaming has been called on both queues */
+ if (enc) {
+ dev_dbg(dev, "%s %s encoder closed\n", ctx->name, enc->name);
+ enc->close(ctx);
+ ctx->enc = NULL;
+
+ /* clear instance context in instances array */
+ hva->instances[ctx->id] = NULL;
+ hva->nb_of_instances--;
+ }
+
+ ctx->aborting = false;
+}
+
+/* VB2 queue ops */
+static const struct vb2_ops hva_qops = {
+ .queue_setup = hva_queue_setup,
+ .buf_prepare = hva_buf_prepare,
+ .buf_queue = hva_buf_queue,
+ .start_streaming = hva_start_streaming,
+ .stop_streaming = hva_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+/*
+ * V4L2 file operations
+ */
+
+static int queue_init(struct hva_ctx *ctx, struct vb2_queue *vq)
+{
+ vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ vq->drv_priv = ctx;
+ vq->ops = &hva_qops;
+ vq->mem_ops = &vb2_dma_contig_memops;
+ vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ vq->lock = &ctx->hva_dev->lock;
+
+ return vb2_queue_init(vq);
+}
+
+static int hva_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct hva_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->buf_struct_size = sizeof(struct hva_frame);
+ src_vq->min_buffers_needed = MIN_FRAMES;
+ src_vq->dev = ctx->hva_dev->dev;
+
+ ret = queue_init(ctx, src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->buf_struct_size = sizeof(struct hva_stream);
+ dst_vq->min_buffers_needed = MIN_STREAMS;
+ dst_vq->dev = ctx->hva_dev->dev;
+
+ return queue_init(ctx, dst_vq);
+}
+
+static int hva_open(struct file *file)
+{
+ struct hva_dev *hva = video_drvdata(file);
+ struct device *dev = hva_to_dev(hva);
+ struct hva_ctx *ctx;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ctx->hva_dev = hva;
+
+ INIT_WORK(&ctx->run_work, hva_run_work);
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ ret = hva_ctrls_setup(ctx);
+ if (ret) {
+ dev_err(dev, "%s [x:x] failed to setup controls\n",
+ HVA_PREFIX);
+ ctx->sys_errors++;
+ goto err_fh;
+ }
+ ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+
+ mutex_init(&ctx->lock);
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(hva->m2m_dev, ctx,
+ &hva_queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ dev_err(dev, "%s failed to initialize m2m context (%d)\n",
+ HVA_PREFIX, ret);
+ ctx->sys_errors++;
+ goto err_ctrls;
+ }
+
+ /* set the instance name */
+ mutex_lock(&hva->lock);
+ hva->instance_id++;
+ snprintf(ctx->name, sizeof(ctx->name), "[%3d:----]",
+ hva->instance_id);
+ mutex_unlock(&hva->lock);
+
+ /* default parameters for frame and stream */
+ set_default_params(ctx);
+
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+ hva_dbg_ctx_create(ctx);
+#endif
+
+ dev_info(dev, "%s encoder instance created\n", ctx->name);
+
+ return 0;
+
+err_ctrls:
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+err_fh:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+out:
+ return ret;
+}
+
+static int hva_release(struct file *file)
+{
+ struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_dev *hva = ctx_to_hdev(ctx);
+ struct device *dev = ctx_to_dev(ctx);
+ const struct hva_enc *enc = ctx->enc;
+
+ if (enc) {
+ dev_dbg(dev, "%s %s encoder closed\n", ctx->name, enc->name);
+ enc->close(ctx);
+ ctx->enc = NULL;
+
+ /* clear instance context in instances array */
+ hva->instances[ctx->id] = NULL;
+ hva->nb_of_instances--;
+ }
+
+ /* trace a summary of the instance before closing (for debug purposes) */
+ hva_dbg_summary(ctx);
+
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+ hva_dbg_ctx_remove(ctx);
+#endif
+
+ dev_info(dev, "%s encoder instance released\n", ctx->name);
+
+ kfree(ctx);
+
+ return 0;
+}
+
+/* V4L2 file ops */
+static const struct v4l2_file_operations hva_fops = {
+ .owner = THIS_MODULE,
+ .open = hva_open,
+ .release = hva_release,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+ .poll = v4l2_m2m_fop_poll,
+};
+
+/*
+ * Platform device operations
+ */
+
+static int hva_register_device(struct hva_dev *hva)
+{
+ int ret;
+ struct video_device *vdev;
+ struct device *dev;
+
+ if (!hva)
+ return -ENODEV;
+ dev = hva_to_dev(hva);
+
+ hva->m2m_dev = v4l2_m2m_init(&hva_m2m_ops);
+ if (IS_ERR(hva->m2m_dev)) {
+ dev_err(dev, "%s failed to initialize v4l2-m2m device\n",
+ HVA_PREFIX);
+ ret = PTR_ERR(hva->m2m_dev);
+ goto err;
+ }
+
+ vdev = video_device_alloc();
+ if (!vdev) {
+ dev_err(dev, "%s failed to allocate video device\n",
+ HVA_PREFIX);
+ ret = -ENOMEM;
+ goto err_m2m_release;
+ }
+
+ vdev->fops = &hva_fops;
+ vdev->ioctl_ops = &hva_ioctl_ops;
+ vdev->release = video_device_release;
+ vdev->lock = &hva->lock;
+ vdev->vfl_dir = VFL_DIR_M2M;
+ vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M;
+ vdev->v4l2_dev = &hva->v4l2_dev;
+ snprintf(vdev->name, sizeof(vdev->name), "%s%lx", HVA_NAME,
+ hva->ip_version);
+
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
+ if (ret) {
+ dev_err(dev, "%s failed to register video device\n",
+ HVA_PREFIX);
+ goto err_vdev_release;
+ }
+
+ hva->vdev = vdev;
+ video_set_drvdata(vdev, hva);
+ return 0;
+
+err_vdev_release:
+ video_device_release(vdev);
+err_m2m_release:
+ v4l2_m2m_release(hva->m2m_dev);
+err:
+ return ret;
+}
+
+static void hva_unregister_device(struct hva_dev *hva)
+{
+ if (!hva)
+ return;
+
+ if (hva->m2m_dev)
+ v4l2_m2m_release(hva->m2m_dev);
+
+ video_unregister_device(hva->vdev);
+}
+
+static int hva_probe(struct platform_device *pdev)
+{
+ struct hva_dev *hva;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ hva = devm_kzalloc(dev, sizeof(*hva), GFP_KERNEL);
+ if (!hva) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ hva->dev = dev;
+ hva->pdev = pdev;
+ platform_set_drvdata(pdev, hva);
+
+ mutex_init(&hva->lock);
+
+ /* probe hardware */
+ ret = hva_hw_probe(pdev, hva);
+ if (ret)
+ goto err;
+
+ /* register all available encoders */
+ register_encoders(hva);
+
+ /* register all supported formats */
+ register_formats(hva);
+
+ /* register on V4L2 */
+ ret = v4l2_device_register(dev, &hva->v4l2_dev);
+ if (ret) {
+ dev_err(dev, "%s %s failed to register V4L2 device\n",
+ HVA_PREFIX, HVA_NAME);
+ goto err_hw;
+ }
+
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+ hva_debugfs_create(hva);
+#endif
+
+ hva->work_queue = create_workqueue(HVA_NAME);
+ if (!hva->work_queue) {
+ dev_err(dev, "%s %s failed to allocate work queue\n",
+ HVA_PREFIX, HVA_NAME);
+ ret = -ENOMEM;
+ goto err_v4l2;
+ }
+
+ /* register device */
+ ret = hva_register_device(hva);
+ if (ret)
+ goto err_work_queue;
+
+ dev_info(dev, "%s %s registered as /dev/video%d\n", HVA_PREFIX,
+ HVA_NAME, hva->vdev->num);
+
+ return 0;
+
+err_work_queue:
+ destroy_workqueue(hva->work_queue);
+err_v4l2:
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+ hva_debugfs_remove(hva);
+#endif
+ v4l2_device_unregister(&hva->v4l2_dev);
+err_hw:
+ hva_hw_remove(hva);
+err:
+ return ret;
+}
+
+static int hva_remove(struct platform_device *pdev)
+{
+ struct hva_dev *hva = platform_get_drvdata(pdev);
+ struct device *dev = hva_to_dev(hva);
+
+ hva_unregister_device(hva);
+
+ destroy_workqueue(hva->work_queue);
+
+ hva_hw_remove(hva);
+
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+ hva_debugfs_remove(hva);
+#endif
+
+ v4l2_device_unregister(&hva->v4l2_dev);
+
+ dev_info(dev, "%s %s removed\n", HVA_PREFIX, pdev->name);
+
+ return 0;
+}
+
+/* PM ops */
+static const struct dev_pm_ops hva_pm_ops = {
+ .runtime_suspend = hva_hw_runtime_suspend,
+ .runtime_resume = hva_hw_runtime_resume,
+};
+
+static const struct of_device_id hva_match_types[] = {
+ {
+ .compatible = "st,st-hva",
+ },
+ { /* end node */ }
+};
+
+MODULE_DEVICE_TABLE(of, hva_match_types);
+
+static struct platform_driver hva_driver = {
+ .probe = hva_probe,
+ .remove = hva_remove,
+ .driver = {
+ .name = HVA_NAME,
+ .of_match_table = hva_match_types,
+ .pm = &hva_pm_ops,
+ },
+};
+
+module_platform_driver(hva_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Yannick Fertre <yannick.fertre@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics HVA video encoder V4L2 driver");
diff --git a/drivers/media/platform/st/sti/hva/hva.h b/drivers/media/platform/st/sti/hva/hva.h
new file mode 100644
index 000000000..ba6b89341
--- /dev/null
+++ b/drivers/media/platform/st/sti/hva/hva.h
@@ -0,0 +1,409 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Yannick Fertre <yannick.fertre@st.com>
+ * Hugues Fruchet <hugues.fruchet@st.com>
+ */
+
+#ifndef HVA_H
+#define HVA_H
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/v4l2-mem2mem.h>
+
+#define fh_to_ctx(f) (container_of(f, struct hva_ctx, fh))
+
+#define hva_to_dev(h) (h->dev)
+
+#define ctx_to_dev(c) (c->hva_dev->dev)
+
+#define ctx_to_hdev(c) (c->hva_dev)
+
+#define HVA_NAME "st-hva"
+#define HVA_PREFIX "[---:----]"
+
+extern const struct hva_enc nv12h264enc;
+extern const struct hva_enc nv21h264enc;
+
+/**
+ * struct hva_frameinfo - information about hva frame
+ *
+ * @pixelformat: fourcc code for uncompressed video format
+ * @width: width of frame
+ * @height: height of frame
+ * @aligned_width: width of frame (with encoder alignment constraint)
+ * @aligned_height: height of frame (with encoder alignment constraint)
+ * @size: maximum size in bytes required for data
+ */
+struct hva_frameinfo {
+ u32 pixelformat;
+ u32 width;
+ u32 height;
+ u32 aligned_width;
+ u32 aligned_height;
+ u32 size;
+};
+
+/**
+ * struct hva_streaminfo - information about hva stream
+ *
+ * @streamformat: fourcc code of compressed video format (H.264...)
+ * @width: width of stream
+ * @height: height of stream
+ * @profile: profile string
+ * @level: level string
+ */
+struct hva_streaminfo {
+ u32 streamformat;
+ u32 width;
+ u32 height;
+ u8 profile[32];
+ u8 level[32];
+};
+
+/**
+ * struct hva_controls - hva controls set
+ *
+ * @time_per_frame: time per frame in seconds
+ * @bitrate_mode: bitrate mode (constant bitrate or variable bitrate)
+ * @gop_size: group of pictures size
+ * @bitrate: bitrate (in bps)
+ * @aspect: video aspect
+ * @profile: H.264 profile
+ * @level: H.264 level
+ * @entropy_mode: H.264 entropy mode (CABAC or CAVLC)
+ * @cpb_size: coded picture buffer size (in kB)
+ * @dct8x8: transform mode 8x8 enable
+ * @qpmin: minimum quantizer
+ * @qpmax: maximum quantizer
+ * @vui_sar: pixel aspect ratio enable
+ * @vui_sar_idc: pixel aspect ratio identifier
+ * @sei_fp: SEI frame packing arrangement enable
+ * @sei_fp_type: SEI frame packing arrangement type
+ */
+struct hva_controls {
+ struct v4l2_fract time_per_frame;
+ enum v4l2_mpeg_video_bitrate_mode bitrate_mode;
+ u32 gop_size;
+ u32 bitrate;
+ enum v4l2_mpeg_video_aspect aspect;
+ enum v4l2_mpeg_video_h264_profile profile;
+ enum v4l2_mpeg_video_h264_level level;
+ enum v4l2_mpeg_video_h264_entropy_mode entropy_mode;
+ u32 cpb_size;
+ bool dct8x8;
+ u32 qpmin;
+ u32 qpmax;
+ bool vui_sar;
+ enum v4l2_mpeg_video_h264_vui_sar_idc vui_sar_idc;
+ bool sei_fp;
+ enum v4l2_mpeg_video_h264_sei_fp_arrangement_type sei_fp_type;
+};
+
+/**
+ * struct hva_frame - hva frame buffer (output)
+ *
+ * @vbuf: video buffer information for V4L2
+ * @list: V4L2 m2m list that the frame belongs to
+ * @info: frame information (width, height, format, alignment...)
+ * @paddr: physical address (for hardware)
+ * @vaddr: virtual address (kernel can read/write)
+ * @prepared: true if vaddr/paddr are resolved
+ */
+struct hva_frame {
+ struct vb2_v4l2_buffer vbuf;
+ struct list_head list;
+ struct hva_frameinfo info;
+ dma_addr_t paddr;
+ void *vaddr;
+ bool prepared;
+};
+
+/*
+ * to_hva_frame() - cast struct vb2_v4l2_buffer * to struct hva_frame *
+ */
+#define to_hva_frame(vb) \
+ container_of(vb, struct hva_frame, vbuf)
+
+/**
+ * struct hva_stream - hva stream buffer (capture)
+ *
+ * @vbuf: video buffer information for V4L2
+ * @list: V4L2 m2m list that the stream belongs to
+ * @paddr: physical address (for hardware)
+ * @vaddr: virtual address (kernel can read/write)
+ * @prepared: true if vaddr/paddr are resolved
+ * @size: size of the buffer in bytes
+ * @bytesused: number of bytes occupied by data in the buffer
+ */
+struct hva_stream {
+ struct vb2_v4l2_buffer vbuf;
+ struct list_head list;
+ dma_addr_t paddr;
+ void *vaddr;
+ bool prepared;
+ unsigned int size;
+ unsigned int bytesused;
+};
+
+/*
+ * to_hva_stream() - cast struct vb2_v4l2_buffer * to struct hva_stream *
+ */
+#define to_hva_stream(vb) \
+ container_of(vb, struct hva_stream, vbuf)
+
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+/**
+ * struct hva_ctx_dbg - instance context debug info
+ *
+ * @debugfs_entry: debugfs entry
+ * @is_valid_period: true if the sequence is valid for performance measurement
+ * @begin: start time of last HW task
+ * @total_duration: total HW processing durations in 0.1ms
+ * @cnt_duration: number of HW processings
+ * @min_duration: minimum HW processing duration in 0.1ms
+ * @max_duration: maximum HW processing duration in 0.1ms
+ * @avg_duration: average HW processing duration in 0.1ms
+ * @max_fps: maximum frames encoded per second (in 0.1Hz)
+ * @total_period: total encoding periods in 0.1ms
+ * @cnt_period: number of periods
+ * @min_period: minimum encoding period in 0.1ms
+ * @max_period: maximum encoding period in 0.1ms
+ * @avg_period: average encoding period in 0.1ms
+ * @total_stream_size: total number of encoded bytes
+ * @avg_fps: average frames encoded per second (in 0.1Hz)
+ * @window_duration: duration of the sampling window in 0.1ms
+ * @cnt_window: number of samples in the window
+ * @window_stream_size: number of encoded bytes over the sampling window
+ * @last_bitrate: bitrate over the last sampling window
+ * @min_bitrate: minimum bitrate in kbps
+ * @max_bitrate: maximum bitrate in kbps
+ * @avg_bitrate: average bitrate in kbps
+ */
+struct hva_ctx_dbg {
+ struct dentry *debugfs_entry;
+ bool is_valid_period;
+ ktime_t begin;
+ u32 total_duration;
+ u32 cnt_duration;
+ u32 min_duration;
+ u32 max_duration;
+ u32 avg_duration;
+ u32 max_fps;
+ u32 total_period;
+ u32 cnt_period;
+ u32 min_period;
+ u32 max_period;
+ u32 avg_period;
+ u32 total_stream_size;
+ u32 avg_fps;
+ u32 window_duration;
+ u32 cnt_window;
+ u32 window_stream_size;
+ u32 last_bitrate;
+ u32 min_bitrate;
+ u32 max_bitrate;
+ u32 avg_bitrate;
+};
+#endif
+
+struct hva_dev;
+struct hva_enc;
+
+/**
+ * struct hva_ctx - context of hva instance
+ *
+ * @hva_dev: the device that this instance is associated with
+ * @fh: V4L2 file handle
+ * @ctrl_handler: V4L2 controls handler
+ * @ctrls: hva controls set
+ * @id: instance identifier
+ * @aborting: true if current job aborted
+ * @name: instance name (debug purpose)
+ * @run_work: encode work
+ * @lock: mutex used to lock access of this context
+ * @flags: validity of streaminfo and frameinfo fields
+ * @frame_num: frame number
+ * @stream_num: stream number
+ * @max_stream_size: maximum size in bytes required for stream data
+ * @colorspace: colorspace identifier
+ * @xfer_func: transfer function identifier
+ * @ycbcr_enc: Y'CbCr encoding identifier
+ * @quantization: quantization identifier
+ * @streaminfo: stream properties
+ * @frameinfo: frame properties
+ * @enc: current encoder
+ * @priv: private codec data for this instance, allocated
+ * by the encoder's @open callback
+ * @hw_err: true if hardware error detected
+ * @encoded_frames: number of encoded frames
+ * @sys_errors: number of system errors (memory, resource, pm...)
+ * @encode_errors: number of encoding errors (hw/driver errors)
+ * @frame_errors: number of frame errors (format, size, header...)
+ * @dbg: context debug info
+ */
+struct hva_ctx {
+ struct hva_dev *hva_dev;
+ struct v4l2_fh fh;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct hva_controls ctrls;
+ u8 id;
+ bool aborting;
+ char name[100];
+ struct work_struct run_work;
+ /* mutex protecting this data structure */
+ struct mutex lock;
+ u32 flags;
+ u32 frame_num;
+ u32 stream_num;
+ u32 max_stream_size;
+ enum v4l2_colorspace colorspace;
+ enum v4l2_xfer_func xfer_func;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_quantization quantization;
+ struct hva_streaminfo streaminfo;
+ struct hva_frameinfo frameinfo;
+ struct hva_enc *enc;
+ void *priv;
+ bool hw_err;
+ u32 encoded_frames;
+ u32 sys_errors;
+ u32 encode_errors;
+ u32 frame_errors;
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+ struct hva_ctx_dbg dbg;
+#endif
+};
+
+#define HVA_FLAG_STREAMINFO 0x0001
+#define HVA_FLAG_FRAMEINFO 0x0002
+
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+/**
+ * struct hva_dev_dbg - device debug info
+ *
+ * @debugfs_entry: debugfs entry
+ * @last_ctx: debug information about last running instance context
+ */
+struct hva_dev_dbg {
+ struct dentry *debugfs_entry;
+ struct hva_ctx last_ctx;
+};
+#endif
+
+#define HVA_MAX_INSTANCES 16
+#define HVA_MAX_ENCODERS 10
+#define HVA_MAX_FORMATS HVA_MAX_ENCODERS
+
+/**
+ * struct hva_dev - abstraction for hva entity
+ *
+ * @v4l2_dev: V4L2 device
+ * @vdev: video device
+ * @pdev: platform device
+ * @dev: device
+ * @lock: mutex used for critical sections & V4L2 ops
+ * serialization
+ * @m2m_dev: memory-to-memory V4L2 device information
+ * @instances: opened instances
+ * @nb_of_instances: number of opened instances
+ * @instance_id: rolling counter identifying an instance (debug purpose)
+ * @regs: register io memory access
+ * @esram_addr: esram address
+ * @esram_size: esram size
+ * @clk: hva clock
+ * @irq_its: status interrupt
+ * @irq_err: error interrupt
+ * @work_queue: work queue to handle the encode jobs
+ * @protect_mutex: mutex used to serialize access to the hardware
+ * @interrupt: completion interrupt
+ * @ip_version: IP hardware version
+ * @encoders: registered encoders
+ * @nb_of_encoders: number of registered encoders
+ * @pixelformats: supported uncompressed video formats
+ * @nb_of_pixelformats: number of supported uncompressed video formats
+ * @streamformats: supported compressed video formats
+ * @nb_of_streamformats: number of supported compressed video formats
+ * @sfl_reg: status fifo level register value
+ * @sts_reg: status register value
+ * @lmi_err_reg: local memory interface error register value
+ * @emi_err_reg: external memory interface error register value
+ * @hec_mif_err_reg: HEC memory interface error register value
+ * @dbg: device debug info
+ */
+struct hva_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device *vdev;
+ struct platform_device *pdev;
+ struct device *dev;
+ /* mutex protecting vb2_queue structure */
+ struct mutex lock;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct hva_ctx *instances[HVA_MAX_INSTANCES];
+ unsigned int nb_of_instances;
+ unsigned int instance_id;
+ void __iomem *regs;
+ u32 esram_addr;
+ u32 esram_size;
+ struct clk *clk;
+ int irq_its;
+ int irq_err;
+ struct workqueue_struct *work_queue;
+ /* mutex protecting hardware access */
+ struct mutex protect_mutex;
+ struct completion interrupt;
+ unsigned long ip_version;
+ const struct hva_enc *encoders[HVA_MAX_ENCODERS];
+ u32 nb_of_encoders;
+ u32 pixelformats[HVA_MAX_FORMATS];
+ u32 nb_of_pixelformats;
+ u32 streamformats[HVA_MAX_FORMATS];
+ u32 nb_of_streamformats;
+ u32 sfl_reg;
+ u32 sts_reg;
+ u32 lmi_err_reg;
+ u32 emi_err_reg;
+ u32 hec_mif_err_reg;
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+ struct hva_dev_dbg dbg;
+#endif
+};
+
+/**
+ * struct hva_enc - hva encoder
+ *
+ * @name: encoder name
+ * @streamformat: fourcc code for compressed video format (H.264...)
+ * @pixelformat: fourcc code for uncompressed video format
+ * @max_width: maximum width of frame for this encoder
+ * @max_height: maximum height of frame for this encoder
+ * @open: open encoder
+ * @close: close encoder
+ * @encode: encode a frame (struct hva_frame) in a stream
+ * (struct hva_stream)
+ */
+struct hva_enc {
+ const char *name;
+ u32 streamformat;
+ u32 pixelformat;
+ u32 max_width;
+ u32 max_height;
+ int (*open)(struct hva_ctx *ctx);
+ int (*close)(struct hva_ctx *ctx);
+ int (*encode)(struct hva_ctx *ctx, struct hva_frame *frame,
+ struct hva_stream *stream);
+};
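+
+/*
+ * Illustrative sketch only (not part of this header): an encoder back-end
+ * would fill in a struct hva_enc and hand it to the core, for instance with
+ * hypothetical my_h264_open/my_h264_close/my_h264_encode callbacks and
+ * hypothetical frame size limits:
+ *
+ *	static const struct hva_enc my_h264_encoder = {
+ *		.name         = "H264(NV12)",
+ *		.streamformat = V4L2_PIX_FMT_H264,
+ *		.pixelformat  = V4L2_PIX_FMT_NV12,
+ *		.max_width    = 1920,
+ *		.max_height   = 1088,
+ *		.open         = my_h264_open,
+ *		.close        = my_h264_close,
+ *		.encode       = my_h264_encode,
+ *	};
+ */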
+
+#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
+void hva_debugfs_create(struct hva_dev *hva);
+void hva_debugfs_remove(struct hva_dev *hva);
+void hva_dbg_ctx_create(struct hva_ctx *ctx);
+void hva_dbg_ctx_remove(struct hva_ctx *ctx);
+void hva_dbg_perf_begin(struct hva_ctx *ctx);
+void hva_dbg_perf_end(struct hva_ctx *ctx, struct hva_stream *stream);
+#endif
+
+#endif /* HVA_H */
diff --git a/drivers/media/platform/st/stm32/Kconfig b/drivers/media/platform/st/stm32/Kconfig
new file mode 100644
index 000000000..b22dd4753
--- /dev/null
+++ b/drivers/media/platform/st/stm32/Kconfig
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+# V4L drivers
+config VIDEO_STM32_DCMI
+ tristate "STM32 Digital Camera Memory Interface (DCMI) support"
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV && OF
+ depends on ARCH_STM32 || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select MEDIA_CONTROLLER
+ select V4L2_FWNODE
+ help
+ This module makes the STM32 Digital Camera Memory Interface (DCMI)
+ available as a v4l2 device.
+
+ To compile this driver as a module, choose M here: the module
+ will be called stm32-dcmi.
+
+# Mem2mem drivers
+config VIDEO_STM32_DMA2D
+ tristate "STM32 Chrom-Art Accelerator (DMA2D)"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on VIDEO_DEV
+ depends on ARCH_STM32 || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ Enables DMA2D hardware support on STM32.
+
+ The STM32 DMA2D is a memory-to-memory engine for pixel conversion
+ and specialized DMA dedicated to image manipulation.
diff --git a/drivers/media/platform/st/stm32/Makefile b/drivers/media/platform/st/stm32/Makefile
new file mode 100644
index 000000000..896ef98a7
--- /dev/null
+++ b/drivers/media/platform/st/stm32/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_VIDEO_STM32_DCMI) += stm32-dcmi.o
+stm32-dma2d-objs := dma2d/dma2d.o dma2d/dma2d-hw.o
+obj-$(CONFIG_VIDEO_STM32_DMA2D) += stm32-dma2d.o
diff --git a/drivers/media/platform/st/stm32/dma2d/dma2d-hw.c b/drivers/media/platform/st/stm32/dma2d/dma2d-hw.c
new file mode 100644
index 000000000..ea4cc84d8
--- /dev/null
+++ b/drivers/media/platform/st/stm32/dma2d/dma2d-hw.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * ST stm32 Chrom-Art - 2D Graphics Accelerator Driver
+ *
+ * Copyright (c) 2021 Dillon Min
+ * Dillon Min, <dillon.minfei@gmail.com>
+ *
+ * based on s5p-g2d
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Kamil Debski, <k.debski@samsung.com>
+ */
+
+#include <linux/io.h>
+
+#include "dma2d.h"
+#include "dma2d-regs.h"
+
+static inline u32 reg_read(void __iomem *base, u32 reg)
+{
+ return readl_relaxed(base + reg);
+}
+
+static inline void reg_write(void __iomem *base, u32 reg, u32 val)
+{
+ writel_relaxed(val, base + reg);
+}
+
+static inline void reg_update_bits(void __iomem *base, u32 reg, u32 mask,
+ u32 val)
+{
+ reg_write(base, reg, (reg_read(base, reg) & ~mask) | val);
+}
+
+void dma2d_start(struct dma2d_dev *d)
+{
+ reg_update_bits(d->regs, DMA2D_CR_REG, CR_START, CR_START);
+}
+
+u32 dma2d_get_int(struct dma2d_dev *d)
+{
+ return reg_read(d->regs, DMA2D_ISR_REG);
+}
+
+void dma2d_clear_int(struct dma2d_dev *d)
+{
+ u32 isr_val = reg_read(d->regs, DMA2D_ISR_REG);
+
+ reg_write(d->regs, DMA2D_IFCR_REG, isr_val & 0x003f);
+}
+
+void dma2d_config_common(struct dma2d_dev *d, enum dma2d_op_mode op_mode,
+ u16 width, u16 height)
+{
+ reg_update_bits(d->regs, DMA2D_CR_REG, CR_MODE_MASK,
+ op_mode << CR_MODE_SHIFT);
+
+ reg_write(d->regs, DMA2D_NLR_REG, (width << 16) | height);
+}
+
+void dma2d_config_out(struct dma2d_dev *d, struct dma2d_frame *frm,
+ dma_addr_t o_addr)
+{
+ reg_update_bits(d->regs, DMA2D_CR_REG, CR_CEIE, CR_CEIE);
+ reg_update_bits(d->regs, DMA2D_CR_REG, CR_CTCIE, CR_CTCIE);
+ reg_update_bits(d->regs, DMA2D_CR_REG, CR_CAEIE, CR_CAEIE);
+ reg_update_bits(d->regs, DMA2D_CR_REG, CR_TCIE, CR_TCIE);
+ reg_update_bits(d->regs, DMA2D_CR_REG, CR_TEIE, CR_TEIE);
+
+ if (frm->fmt->cmode >= CM_MODE_ARGB8888 &&
+ frm->fmt->cmode <= CM_MODE_ARGB4444)
+ reg_update_bits(d->regs, DMA2D_OPFCCR_REG, OPFCCR_CM_MASK,
+ frm->fmt->cmode);
+
+ reg_write(d->regs, DMA2D_OMAR_REG, o_addr);
+
+ reg_write(d->regs, DMA2D_OCOLR_REG,
+ (frm->a_rgb[3] << 24) |
+ (frm->a_rgb[2] << 16) |
+ (frm->a_rgb[1] << 8) |
+ frm->a_rgb[0]);
+
+ reg_update_bits(d->regs, DMA2D_OOR_REG, OOR_LO_MASK,
+ frm->line_offset & 0x3fff);
+}
+
+void dma2d_config_fg(struct dma2d_dev *d, struct dma2d_frame *frm,
+ dma_addr_t f_addr)
+{
+ reg_write(d->regs, DMA2D_FGMAR_REG, f_addr);
+ reg_update_bits(d->regs, DMA2D_FGOR_REG, FGOR_LO_MASK,
+ frm->line_offset);
+
+ if (frm->fmt->cmode >= CM_MODE_ARGB8888 &&
+ frm->fmt->cmode <= CM_MODE_A4)
+ reg_update_bits(d->regs, DMA2D_FGPFCCR_REG, FGPFCCR_CM_MASK,
+ frm->fmt->cmode);
+
+ reg_update_bits(d->regs, DMA2D_FGPFCCR_REG, FGPFCCR_AM_MASK,
+ (frm->a_mode << 16) & FGPFCCR_AM_MASK);
+
+ reg_update_bits(d->regs, DMA2D_FGPFCCR_REG, FGPFCCR_ALPHA_MASK,
+ frm->a_rgb[3] << 24);
+
+ reg_write(d->regs, DMA2D_FGCOLR_REG,
+ (frm->a_rgb[2] << 16) |
+ (frm->a_rgb[1] << 8) |
+ frm->a_rgb[0]);
+}
+
+void dma2d_config_bg(struct dma2d_dev *d, struct dma2d_frame *frm,
+ dma_addr_t b_addr)
+{
+ reg_write(d->regs, DMA2D_BGMAR_REG, b_addr);
+ reg_update_bits(d->regs, DMA2D_BGOR_REG, BGOR_LO_MASK,
+ frm->line_offset);
+
+ if (frm->fmt->cmode >= CM_MODE_ARGB8888 &&
+ frm->fmt->cmode <= CM_MODE_A4)
+ reg_update_bits(d->regs, DMA2D_BGPFCCR_REG, BGPFCCR_CM_MASK,
+ frm->fmt->cmode);
+
+ reg_update_bits(d->regs, DMA2D_BGPFCCR_REG, BGPFCCR_AM_MASK,
+ (frm->a_mode << 16) & BGPFCCR_AM_MASK);
+
+ reg_update_bits(d->regs, DMA2D_BGPFCCR_REG, BGPFCCR_ALPHA_MASK,
+ frm->a_rgb[3] << 24);
+
+ reg_write(d->regs, DMA2D_BGCOLR_REG,
+ (frm->a_rgb[2] << 16) |
+ (frm->a_rgb[1] << 8) |
+ frm->a_rgb[0]);
+}
diff --git a/drivers/media/platform/st/stm32/dma2d/dma2d-regs.h b/drivers/media/platform/st/stm32/dma2d/dma2d-regs.h
new file mode 100644
index 000000000..6444592d4
--- /dev/null
+++ b/drivers/media/platform/st/stm32/dma2d/dma2d-regs.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * ST stm32 Chrom-Art - 2D Graphics Accelerator Driver
+ *
+ * Copyright (c) 2021 Dillon Min
+ * Dillon Min, <dillon.minfei@gmail.com>
+ *
+ * based on s5p-g2d
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Kamil Debski, <k.debski@samsung.com>
+ */
+
+#ifndef __DMA2D_REGS_H__
+#define __DMA2D_REGS_H__
+
+#define DMA2D_CR_REG 0x0000
+#define CR_MODE_MASK GENMASK(17, 16)
+#define CR_MODE_SHIFT 16
+#define CR_M2M 0x0000
+#define CR_M2M_PFC BIT(16)
+#define CR_M2M_BLEND BIT(17)
+#define CR_R2M (BIT(17) | BIT(16))
+#define CR_CEIE BIT(13)
+#define CR_CTCIE BIT(12)
+#define CR_CAEIE BIT(11)
+#define CR_TWIE BIT(10)
+#define CR_TCIE BIT(9)
+#define CR_TEIE BIT(8)
+#define CR_ABORT BIT(2)
+#define CR_SUSP BIT(1)
+#define CR_START BIT(0)
+
+#define DMA2D_ISR_REG 0x0004
+#define ISR_CEIF BIT(5)
+#define ISR_CTCIF BIT(4)
+#define ISR_CAEIF BIT(3)
+#define ISR_TWIF BIT(2)
+#define ISR_TCIF BIT(1)
+#define ISR_TEIF BIT(0)
+
+#define DMA2D_IFCR_REG 0x0008
+#define IFCR_CCEIF BIT(5)
+#define IFCR_CCTCIF BIT(4)
+#define IFCR_CAECIF BIT(3)
+#define IFCR_CTWIF BIT(2)
+#define IFCR_CTCIF BIT(1)
+#define IFCR_CTEIF BIT(0)
+
+#define DMA2D_FGMAR_REG 0x000c
+#define DMA2D_FGOR_REG 0x0010
+#define FGOR_LO_MASK GENMASK(13, 0)
+
+#define DMA2D_BGMAR_REG 0x0014
+#define DMA2D_BGOR_REG 0x0018
+#define BGOR_LO_MASK GENMASK(13, 0)
+
+#define DMA2D_FGPFCCR_REG 0x001c
+#define FGPFCCR_ALPHA_MASK GENMASK(31, 24)
+#define FGPFCCR_AM_MASK GENMASK(17, 16)
+#define FGPFCCR_CS_MASK GENMASK(15, 8)
+#define FGPFCCR_START BIT(5)
+#define FGPFCCR_CCM_RGB888 BIT(4)
+#define FGPFCCR_CM_MASK GENMASK(3, 0)
+
+#define DMA2D_FGCOLR_REG 0x0020
+#define FGCOLR_REG_MASK GENMASK(23, 16)
+#define FGCOLR_GREEN_MASK GENMASK(15, 8)
+#define FGCOLR_BLUE_MASK GENMASK(7, 0)
+
+#define DMA2D_BGPFCCR_REG 0x0024
+#define BGPFCCR_ALPHA_MASK GENMASK(31, 24)
+#define BGPFCCR_AM_MASK GENMASK(17, 16)
+#define BGPFCCR_CS_MASK GENMASK(15, 8)
+#define BGPFCCR_START BIT(5)
+#define BGPFCCR_CCM_RGB888 BIT(4)
+#define BGPFCCR_CM_MASK GENMASK(3, 0)
+
+#define DMA2D_BGCOLR_REG 0x0028
+#define BGCOLR_REG_MASK GENMASK(23, 16)
+#define BGCOLR_GREEN_MASK GENMASK(15, 8)
+#define BGCOLR_BLUE_MASK GENMASK(7, 0)
+
+#define DMA2D_OPFCCR_REG 0x0034
+#define OPFCCR_CM_MASK GENMASK(2, 0)
+
+#define DMA2D_OCOLR_REG 0x0038
+#define OCOLR_ALPHA_MASK GENMASK(31, 24)
+#define OCOLR_RED_MASK GENMASK(23, 16)
+#define OCOLR_GREEN_MASK GENMASK(15, 8)
+#define OCOLR_BLUE_MASK GENMASK(7, 0)
+
+#define DMA2D_OMAR_REG 0x003c
+
+#define DMA2D_OOR_REG 0x0040
+#define OOR_LO_MASK GENMASK(13, 0)
+
+#define DMA2D_NLR_REG 0x0044
+#define NLR_PL_MASK GENMASK(29, 16)
+#define NLR_NL_MASK GENMASK(15, 0)
+
+/* Hardware limits */
+#define MAX_WIDTH 2592
+#define MAX_HEIGHT 2592
+
+#define DEFAULT_WIDTH 240
+#define DEFAULT_HEIGHT 320
+#define DEFAULT_SIZE 307200
+
+#define CM_MODE_ARGB8888 0x00
+#define CM_MODE_ARGB4444 0x04
+#define CM_MODE_A4 0x0a
+#endif /* __DMA2D_REGS_H__ */
diff --git a/drivers/media/platform/st/stm32/dma2d/dma2d.c b/drivers/media/platform/st/stm32/dma2d/dma2d.c
new file mode 100644
index 000000000..9706aa41b
--- /dev/null
+++ b/drivers/media/platform/st/stm32/dma2d/dma2d.c
@@ -0,0 +1,736 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * STM32 DMA2D - 2D Graphics Accelerator Driver
+ *
+ * Copyright (c) 2021 Dillon Min
+ * Dillon Min, <dillon.minfei@gmail.com>
+ *
+ * based on s5p-g2d
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Kamil Debski, <k.debski@samsung.com>
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+
+#include <linux/platform_device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "dma2d.h"
+#include "dma2d-regs.h"
+
+/*
+ * This V4L2 subdev m2m driver enables Chrom-Art Accelerator unit
+ * of STMicroelectronics STM32 SoC series.
+ *
+ * Currently r2m, m2m and m2m_pfc are supported (see the sketch below).
+ *
+ * - r2m, Filling a part or the whole of a destination image with a specific
+ * color.
+ * - m2m, Copying a part or the whole of a source image into a part or the
+ * whole of a destination.
+ * - m2m_pfc, Copying a part or the whole of a source image into a part or the
+ * whole of a destination image with a pixel format conversion.
+ */
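+
+/*
+ * Illustrative userspace sketch (not part of this driver): the operating
+ * mode is selected through the standard V4L2 color-effect controls handled
+ * further below. For example, to request r2m (solid color fill):
+ *
+ *	struct v4l2_control ctrl = {
+ *		.id    = V4L2_CID_COLORFX,
+ *		.value = V4L2_COLORFX_SET_RGB,
+ *	};
+ *	ioctl(fd, VIDIOC_S_CTRL, &ctrl);
+ *	ctrl.id    = V4L2_CID_COLORFX_RGB;
+ *	ctrl.value = 0x00ff0000;	(0xRRGGBB, here red)
+ *	ioctl(fd, VIDIOC_S_CTRL, &ctrl);
+ *
+ * Setting V4L2_COLORFX_NONE switches back to a plain m2m copy; fd is
+ * assumed to be an open file descriptor on the dma2d video device.
+ */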
+
+#define fh2ctx(__fh) container_of(__fh, struct dma2d_ctx, fh)
+
+static const struct dma2d_fmt formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB32,
+ .cmode = DMA2D_CMODE_ARGB8888,
+ .depth = 32,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB24,
+ .cmode = DMA2D_CMODE_RGB888,
+ .depth = 24,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .cmode = DMA2D_CMODE_RGB565,
+ .depth = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB555,
+ .cmode = DMA2D_CMODE_ARGB1555,
+ .depth = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB444,
+ .cmode = DMA2D_CMODE_ARGB4444,
+ .depth = 16,
+ },
+};
+
+#define NUM_FORMATS ARRAY_SIZE(formats)
+
+static const struct dma2d_frame def_frame = {
+ .width = DEFAULT_WIDTH,
+ .height = DEFAULT_HEIGHT,
+ .line_offset = 0,
+ .a_rgb = {0x00, 0x00, 0x00, 0xff},
+ .a_mode = DMA2D_ALPHA_MODE_NO_MODIF,
+ .fmt = (struct dma2d_fmt *)&formats[0],
+ .size = DEFAULT_SIZE,
+};
+
+static struct dma2d_fmt *find_fmt(int pixelformat)
+{
+ unsigned int i;
+
+ for (i = 0; i < NUM_FORMATS; i++) {
+ if (formats[i].fourcc == pixelformat)
+ return (struct dma2d_fmt *)&formats[i];
+ }
+
+ return NULL;
+}
+
+static struct dma2d_frame *get_frame(struct dma2d_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ return V4L2_TYPE_IS_OUTPUT(type) ? &ctx->cap : &ctx->out;
+}
+
+static int dma2d_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct dma2d_ctx *ctx = vb2_get_drv_priv(vq);
+ struct dma2d_frame *f = get_frame(ctx, vq->type);
+
+ if (*nplanes)
+ return sizes[0] < f->size ? -EINVAL : 0;
+
+ sizes[0] = f->size;
+ *nplanes = 1;
+
+ return 0;
+}
+
+static int dma2d_buf_out_validate(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ if (vbuf->field == V4L2_FIELD_ANY)
+ vbuf->field = V4L2_FIELD_NONE;
+ if (vbuf->field != V4L2_FIELD_NONE)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int dma2d_buf_prepare(struct vb2_buffer *vb)
+{
+ struct dma2d_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct dma2d_frame *f = get_frame(ctx, vb->vb2_queue->type);
+
+ if (vb2_plane_size(vb, 0) < f->size)
+ return -EINVAL;
+
+ vb2_set_plane_payload(vb, 0, f->size);
+
+ return 0;
+}
+
+static void dma2d_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct dma2d_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static int dma2d_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct dma2d_ctx *ctx = vb2_get_drv_priv(q);
+ struct dma2d_frame *f = get_frame(ctx, q->type);
+
+ f->sequence = 0;
+ return 0;
+}
+
+static void dma2d_stop_streaming(struct vb2_queue *q)
+{
+ struct dma2d_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_v4l2_buffer *vbuf;
+
+ for (;;) {
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ if (!vbuf)
+ return;
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ }
+}
+
+static const struct vb2_ops dma2d_qops = {
+ .queue_setup = dma2d_queue_setup,
+ .buf_out_validate = dma2d_buf_out_validate,
+ .buf_prepare = dma2d_buf_prepare,
+ .buf_queue = dma2d_buf_queue,
+ .start_streaming = dma2d_start_streaming,
+ .stop_streaming = dma2d_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct dma2d_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->ops = &dma2d_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->dev->mutex;
+ src_vq->dev = ctx->dev->v4l2_dev.dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->ops = &dma2d_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->dev->mutex;
+ dst_vq->dev = ctx->dev->v4l2_dev.dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+static int dma2d_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct dma2d_frame *frm;
+ struct dma2d_ctx *ctx = container_of(ctrl->handler, struct dma2d_ctx,
+ ctrl_handler);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->dev->ctrl_lock, flags);
+ switch (ctrl->id) {
+ case V4L2_CID_COLORFX:
+ if (ctrl->val == V4L2_COLORFX_SET_RGB)
+ ctx->op_mode = DMA2D_MODE_R2M;
+ else if (ctrl->val == V4L2_COLORFX_NONE)
+ ctx->op_mode = DMA2D_MODE_M2M;
+ break;
+ case V4L2_CID_COLORFX_RGB:
+ frm = get_frame(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ frm->a_rgb[2] = (ctrl->val >> 16) & 0xff;
+ frm->a_rgb[1] = (ctrl->val >> 8) & 0xff;
+ frm->a_rgb[0] = (ctrl->val >> 0) & 0xff;
+ break;
+ default:
+ spin_unlock_irqrestore(&ctx->dev->ctrl_lock, flags);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&ctx->dev->ctrl_lock, flags);
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops dma2d_ctrl_ops = {
+ .s_ctrl = dma2d_s_ctrl,
+};
+
+static int dma2d_setup_ctrls(struct dma2d_ctx *ctx)
+{
+ struct v4l2_ctrl_handler *handler = &ctx->ctrl_handler;
+
+ v4l2_ctrl_handler_init(handler, 2);
+
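+ /*
+ * The skip mask ~0x10001 leaves only V4L2_COLORFX_NONE (0) and
+ * V4L2_COLORFX_SET_RGB (16) selectable in the COLORFX menu
+ */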
+ v4l2_ctrl_new_std_menu(handler, &dma2d_ctrl_ops, V4L2_CID_COLORFX,
+ V4L2_COLORFX_SET_RGB, ~0x10001,
+ V4L2_COLORFX_NONE);
+
+ v4l2_ctrl_new_std(handler, &dma2d_ctrl_ops, V4L2_CID_COLORFX_RGB, 0,
+ 0xffffff, 1, 0);
+
+ return 0;
+}
+
+static int dma2d_open(struct file *file)
+{
+ struct dma2d_dev *dev = video_drvdata(file);
+ struct dma2d_ctx *ctx = NULL;
+ int ret = 0;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ ctx->dev = dev;
+ /* Set default formats */
+ ctx->cap = def_frame;
+ ctx->bg = def_frame;
+ ctx->out = def_frame;
+ ctx->op_mode = DMA2D_MODE_M2M_FPC;
+ ctx->colorspace = V4L2_COLORSPACE_REC709;
+ if (mutex_lock_interruptible(&dev->mutex)) {
+ kfree(ctx);
+ return -ERESTARTSYS;
+ }
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ mutex_unlock(&dev->mutex);
+ kfree(ctx);
+ return ret;
+ }
+
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ dma2d_setup_ctrls(ctx);
+
+ /* Write the default values to the ctx struct */
+ v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+
+ ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+ mutex_unlock(&dev->mutex);
+
+ return 0;
+}
+
+static int dma2d_release(struct file *file)
+{
+ struct dma2d_dev *dev = video_drvdata(file);
+ struct dma2d_ctx *ctx = fh2ctx(file->private_data);
+
+ mutex_lock(&dev->mutex);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ mutex_unlock(&dev->mutex);
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+
+ return 0;
+}
+
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, DMA2D_NAME, sizeof(cap->driver));
+ strscpy(cap->card, DMA2D_NAME, sizeof(cap->card));
+ strscpy(cap->bus_info, BUS_INFO, sizeof(cap->bus_info));
+
+ return 0;
+}
+
+static int vidioc_enum_fmt(struct file *file, void *prv, struct v4l2_fmtdesc *f)
+{
+ if (f->index >= NUM_FORMATS)
+ return -EINVAL;
+
+ f->pixelformat = formats[f->index].fourcc;
+ return 0;
+}
+
+static int vidioc_g_fmt(struct file *file, void *prv, struct v4l2_format *f)
+{
+ struct dma2d_ctx *ctx = prv;
+ struct vb2_queue *vq;
+ struct dma2d_frame *frm;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ frm = get_frame(ctx, f->type);
+ f->fmt.pix.width = frm->width;
+ f->fmt.pix.height = frm->height;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.pixelformat = frm->fmt->fourcc;
+ f->fmt.pix.bytesperline = (frm->width * frm->fmt->depth) >> 3;
+ f->fmt.pix.sizeimage = frm->size;
+ f->fmt.pix.colorspace = ctx->colorspace;
+ f->fmt.pix.xfer_func = ctx->xfer_func;
+ f->fmt.pix.ycbcr_enc = ctx->ycbcr_enc;
+ f->fmt.pix.quantization = ctx->quant;
+
+ return 0;
+}
+
+static int vidioc_try_fmt(struct file *file, void *prv, struct v4l2_format *f)
+{
+ struct dma2d_ctx *ctx = prv;
+ struct dma2d_fmt *fmt;
+ enum v4l2_field *field;
+ u32 fourcc = f->fmt.pix.pixelformat;
+
+ fmt = find_fmt(fourcc);
+ if (!fmt) {
+ f->fmt.pix.pixelformat = formats[0].fourcc;
+ fmt = find_fmt(f->fmt.pix.pixelformat);
+ }
+
+ field = &f->fmt.pix.field;
+ if (*field == V4L2_FIELD_ANY)
+ *field = V4L2_FIELD_NONE;
+ else if (*field != V4L2_FIELD_NONE)
+ return -EINVAL;
+
+ if (f->fmt.pix.width > MAX_WIDTH)
+ f->fmt.pix.width = MAX_WIDTH;
+ if (f->fmt.pix.height > MAX_HEIGHT)
+ f->fmt.pix.height = MAX_HEIGHT;
+
+ if (f->fmt.pix.width < 1)
+ f->fmt.pix.width = 1;
+ if (f->fmt.pix.height < 1)
+ f->fmt.pix.height = 1;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT && !f->fmt.pix.colorspace) {
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ f->fmt.pix.colorspace = ctx->colorspace;
+ f->fmt.pix.xfer_func = ctx->xfer_func;
+ f->fmt.pix.ycbcr_enc = ctx->ycbcr_enc;
+ f->fmt.pix.quantization = ctx->quant;
+ }
+ f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3;
+ f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
+
+ return 0;
+}
+
+static int vidioc_s_fmt(struct file *file, void *prv, struct v4l2_format *f)
+{
+ struct dma2d_ctx *ctx = prv;
+ struct vb2_queue *vq;
+ struct dma2d_frame *frm;
+ struct dma2d_fmt *fmt;
+ int ret = 0;
+
+ /* Adjust all values according to the hardware capabilities
+ * and the chosen format.
+ */
+ ret = vidioc_try_fmt(file, prv, f);
+ if (ret)
+ return ret;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_busy(vq))
+ return -EBUSY;
+
+ fmt = find_fmt(f->fmt.pix.pixelformat);
+ if (!fmt)
+ return -EINVAL;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ ctx->colorspace = f->fmt.pix.colorspace;
+ ctx->xfer_func = f->fmt.pix.xfer_func;
+ ctx->ycbcr_enc = f->fmt.pix.ycbcr_enc;
+ ctx->quant = f->fmt.pix.quantization;
+ }
+
+ frm = get_frame(ctx, f->type);
+ frm->width = f->fmt.pix.width;
+ frm->height = f->fmt.pix.height;
+ frm->size = f->fmt.pix.sizeimage;
+ /* Reset crop settings */
+ frm->o_width = 0;
+ frm->o_height = 0;
+ frm->c_width = frm->width;
+ frm->c_height = frm->height;
+ frm->right = frm->width;
+ frm->bottom = frm->height;
+ frm->fmt = fmt;
+ frm->line_offset = 0;
+
+ return 0;
+}
+
+static void device_run(void *prv)
+{
+ struct dma2d_ctx *ctx = prv;
+ struct dma2d_dev *dev = ctx->dev;
+ struct dma2d_frame *frm_out, *frm_cap;
+ struct vb2_v4l2_buffer *src, *dst;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->ctrl_lock, flags);
+ dev->curr = ctx;
+
+ src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ if (!dst || !src)
+ goto end;
+
+ frm_cap = get_frame(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ frm_out = get_frame(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ if (!frm_cap || !frm_out)
+ goto end;
+
+ src->sequence = frm_out->sequence++;
+ dst->sequence = frm_cap->sequence++;
+ v4l2_m2m_buf_copy_metadata(src, dst, true);
+
+ clk_enable(dev->gate);
+
+ dma2d_config_fg(dev, frm_out,
+ vb2_dma_contig_plane_dma_addr(&src->vb2_buf, 0));
+
+ /* TODO: add M2M_BLEND handler here */
+
+ if (ctx->op_mode != DMA2D_MODE_R2M) {
+ if (frm_out->fmt->fourcc == frm_cap->fmt->fourcc)
+ ctx->op_mode = DMA2D_MODE_M2M;
+ else
+ ctx->op_mode = DMA2D_MODE_M2M_FPC;
+ }
+
+ dma2d_config_out(dev, frm_cap,
+ vb2_dma_contig_plane_dma_addr(&dst->vb2_buf, 0));
+ dma2d_config_common(dev, ctx->op_mode, frm_cap->width, frm_cap->height);
+
+ dma2d_start(dev);
+end:
+ spin_unlock_irqrestore(&dev->ctrl_lock, flags);
+}
+
+static irqreturn_t dma2d_isr(int irq, void *prv)
+{
+ struct dma2d_dev *dev = prv;
+ struct dma2d_ctx *ctx = dev->curr;
+ struct vb2_v4l2_buffer *src, *dst;
+ u32 s = dma2d_get_int(dev);
+
+ dma2d_clear_int(dev);
+ if (s & ISR_TCIF || s == 0) {
+ clk_disable(dev->gate);
+
+ WARN_ON(!ctx);
+
+ src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ WARN_ON(!dst);
+ WARN_ON(!src);
+
+ v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
+ v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
+
+ dev->curr = NULL;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static const struct v4l2_file_operations dma2d_fops = {
+ .owner = THIS_MODULE,
+ .open = dma2d_open,
+ .release = dma2d_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+#ifndef CONFIG_MMU
+ .get_unmapped_area = v4l2_m2m_get_unmapped_area,
+#endif
+};
+
+static const struct v4l2_ioctl_ops dma2d_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt,
+
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt,
+ .vidioc_g_fmt_vid_out = vidioc_g_fmt,
+ .vidioc_try_fmt_vid_out = vidioc_try_fmt,
+ .vidioc_s_fmt_vid_out = vidioc_s_fmt,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static const struct video_device dma2d_videodev = {
+ .name = DMA2D_NAME,
+ .fops = &dma2d_fops,
+ .ioctl_ops = &dma2d_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release,
+ .vfl_dir = VFL_DIR_M2M,
+};
+
+static const struct v4l2_m2m_ops dma2d_m2m_ops = {
+ .device_run = device_run,
+};
+
+static const struct of_device_id stm32_dma2d_match[];
+
+static int dma2d_probe(struct platform_device *pdev)
+{
+ struct dma2d_dev *dev;
+ struct video_device *vfd;
+ struct resource *res;
+ int ret = 0;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->ctrl_lock);
+ mutex_init(&dev->mutex);
+ atomic_set(&dev->num_inst, 0);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ dev->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dev->regs))
+ return PTR_ERR(dev->regs);
+
+ dev->gate = clk_get(&pdev->dev, "dma2d");
+ if (IS_ERR(dev->gate)) {
+ dev_err(&pdev->dev, "failed to get dma2d clock gate\n");
+ ret = -ENXIO;
+ return ret;
+ }
+
+ ret = clk_prepare(dev->gate);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to prepare dma2d clock gate\n");
+ goto put_clk_gate;
+ }
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto unprep_clk_gate;
+
+ dev->irq = ret;
+
+ ret = devm_request_irq(&pdev->dev, dev->irq, dma2d_isr,
+ 0, pdev->name, dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to install IRQ\n");
+ goto unprep_clk_gate;
+ }
+
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret)
+ goto unprep_clk_gate;
+
+ vfd = video_device_alloc();
+ if (!vfd) {
+ v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
+ ret = -ENOMEM;
+ goto unreg_v4l2_dev;
+ }
+
+ *vfd = dma2d_videodev;
+ vfd->lock = &dev->mutex;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
+
+ platform_set_drvdata(pdev, dev);
+ dev->m2m_dev = v4l2_m2m_init(&dma2d_m2m_ops);
+ if (IS_ERR(dev->m2m_dev)) {
+ v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(dev->m2m_dev);
+ goto rel_vdev;
+ }
+
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+ goto free_m2m;
+ }
+
+ video_set_drvdata(vfd, dev);
+ dev->vfd = vfd;
+ v4l2_info(&dev->v4l2_dev, "device registered as /dev/video%d\n",
+ vfd->num);
+ return 0;
+
+free_m2m:
+ v4l2_m2m_release(dev->m2m_dev);
+rel_vdev:
+ video_device_release(vfd);
+unreg_v4l2_dev:
+ v4l2_device_unregister(&dev->v4l2_dev);
+unprep_clk_gate:
+ clk_unprepare(dev->gate);
+put_clk_gate:
+ clk_put(dev->gate);
+
+ return ret;
+}
+
+static int dma2d_remove(struct platform_device *pdev)
+{
+ struct dma2d_dev *dev = platform_get_drvdata(pdev);
+
+ v4l2_info(&dev->v4l2_dev, "Removing " DMA2D_NAME "\n");
+ v4l2_m2m_release(dev->m2m_dev);
+ video_unregister_device(dev->vfd);
+ v4l2_device_unregister(&dev->v4l2_dev);
+ vb2_dma_contig_clear_max_seg_size(&pdev->dev);
+ clk_unprepare(dev->gate);
+ clk_put(dev->gate);
+
+ return 0;
+}
+
+static const struct of_device_id stm32_dma2d_match[] = {
+ {
+ .compatible = "st,stm32-dma2d",
+ .data = NULL,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_dma2d_match);
+
+static struct platform_driver dma2d_pdrv = {
+ .probe = dma2d_probe,
+ .remove = dma2d_remove,
+ .driver = {
+ .name = DMA2D_NAME,
+ .of_match_table = stm32_dma2d_match,
+ },
+};
+
+module_platform_driver(dma2d_pdrv);
+
+MODULE_AUTHOR("Dillon Min <dillon.minfei@gmail.com>");
+MODULE_DESCRIPTION("STM32 Chrom-Art Accelerator DMA2D driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/st/stm32/dma2d/dma2d.h b/drivers/media/platform/st/stm32/dma2d/dma2d.h
new file mode 100644
index 000000000..3f03a7ca9
--- /dev/null
+++ b/drivers/media/platform/st/stm32/dma2d/dma2d.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * ST stm32 DMA2D - 2D Graphics Accelerator Driver
+ *
+ * Copyright (c) 2021 Dillon Min
+ * Dillon Min, <dillon.minfei@gmail.com>
+ *
+ * based on s5p-g2d
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Kamil Debski, <k.debski@samsung.com>
+ */
+
+#ifndef __DMA2D_H__
+#define __DMA2D_H__
+
+#include <linux/platform_device.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+
+#define DMA2D_NAME "stm-dma2d"
+#define BUS_INFO "platform:stm-dma2d"
+enum dma2d_op_mode {
+ DMA2D_MODE_M2M,
+ DMA2D_MODE_M2M_FPC,
+ DMA2D_MODE_M2M_BLEND,
+ DMA2D_MODE_R2M
+};
+
+enum dma2d_cmode {
+ /* output pfc cmode from ARGB8888 to ARGB4444 */
+ DMA2D_CMODE_ARGB8888,
+ DMA2D_CMODE_RGB888,
+ DMA2D_CMODE_RGB565,
+ DMA2D_CMODE_ARGB1555,
+ DMA2D_CMODE_ARGB4444,
+ /* bg or fg pfc cmode from L8 to A4 */
+ DMA2D_CMODE_L8,
+ DMA2D_CMODE_AL44,
+ DMA2D_CMODE_AL88,
+ DMA2D_CMODE_L4,
+ DMA2D_CMODE_A8,
+ DMA2D_CMODE_A4
+};
+
+enum dma2d_alpha_mode {
+ DMA2D_ALPHA_MODE_NO_MODIF,
+ DMA2D_ALPHA_MODE_REPLACE,
+ DMA2D_ALPHA_MODE_COMBINE
+};
+
+struct dma2d_fmt {
+ u32 fourcc;
+ int depth;
+ enum dma2d_cmode cmode;
+};
+
+struct dma2d_frame {
+ /* Original dimensions */
+ u32 width;
+ u32 height;
+ /* Crop size */
+ u32 c_width;
+ u32 c_height;
+ /* Offset */
+ u32 o_width;
+ u32 o_height;
+ u32 bottom;
+ u32 right;
+ u16 line_offset;
+ /* Image format */
+ struct dma2d_fmt *fmt;
+ /* [0]: blue
+ * [1]: green
+ * [2]: red
+ * [3]: alpha
+ */
+ u8 a_rgb[4];
+ /*
+ * AM[1:0] of DMA2D_FGPFCCR
+ */
+ enum dma2d_alpha_mode a_mode;
+ u32 size;
+ unsigned int sequence;
+};
+
+struct dma2d_ctx {
+ struct v4l2_fh fh;
+ struct dma2d_dev *dev;
+ struct dma2d_frame cap;
+ struct dma2d_frame out;
+ struct dma2d_frame bg;
+ /* fb_buf always points to the bg address */
+ struct v4l2_framebuffer fb_buf;
+ /*
+ * MODE[17:16] of DMA2D_CR
+ */
+ enum dma2d_op_mode op_mode;
+ struct v4l2_ctrl_handler ctrl_handler;
+ enum v4l2_colorspace colorspace;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_xfer_func xfer_func;
+ enum v4l2_quantization quant;
+};
+
+struct dma2d_dev {
+ struct v4l2_device v4l2_dev;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct video_device *vfd;
+ /* for device open/close etc */
+ struct mutex mutex;
+ /* avoid conflicts between a running device operation and
+ * user settings applied at the same time
+ */
+ spinlock_t ctrl_lock;
+ atomic_t num_inst;
+ void __iomem *regs;
+ struct clk *gate;
+ struct dma2d_ctx *curr;
+ int irq;
+};
+
+void dma2d_start(struct dma2d_dev *d);
+u32 dma2d_get_int(struct dma2d_dev *d);
+void dma2d_clear_int(struct dma2d_dev *d);
+void dma2d_config_out(struct dma2d_dev *d, struct dma2d_frame *frm,
+ dma_addr_t o_addr);
+void dma2d_config_fg(struct dma2d_dev *d, struct dma2d_frame *frm,
+ dma_addr_t f_addr);
+void dma2d_config_bg(struct dma2d_dev *d, struct dma2d_frame *frm,
+ dma_addr_t b_addr);
+void dma2d_config_common(struct dma2d_dev *d, enum dma2d_op_mode op_mode,
+ u16 width, u16 height);
+
+#endif /* __DMA2D_H__ */
diff --git a/drivers/media/platform/st/stm32/stm32-dcmi.c b/drivers/media/platform/st/stm32/stm32-dcmi.c
new file mode 100644
index 000000000..37458d4d9
--- /dev/null
+++ b/drivers/media/platform/st/stm32/stm32-dcmi.c
@@ -0,0 +1,2229 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for STM32 Digital Camera Memory Interface
+ *
+ * Copyright (C) STMicroelectronics SA 2017
+ * Authors: Yannick Fertre <yannick.fertre@st.com>
+ * Hugues Fruchet <hugues.fruchet@st.com>
+ * for STMicroelectronics.
+ *
+ * This driver is based on atmel_isi.c
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-image-sizes.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-rect.h>
+#include <media/videobuf2-dma-contig.h>
+
+#define DRV_NAME "stm32-dcmi"
+
+/* Registers offset for DCMI */
+#define DCMI_CR 0x00 /* Control Register */
+#define DCMI_SR 0x04 /* Status Register */
+#define DCMI_RIS 0x08 /* Raw Interrupt Status register */
+#define DCMI_IER 0x0C /* Interrupt Enable Register */
+#define DCMI_MIS 0x10 /* Masked Interrupt Status register */
+#define DCMI_ICR 0x14 /* Interrupt Clear Register */
+#define DCMI_ESCR 0x18 /* Embedded Synchronization Code Register */
+#define DCMI_ESUR 0x1C /* Embedded Synchronization Unmask Register */
+#define DCMI_CWSTRT 0x20 /* Crop Window STaRT */
+#define DCMI_CWSIZE 0x24 /* Crop Window SIZE */
+#define DCMI_DR 0x28 /* Data Register */
+#define DCMI_IDR 0x2C /* IDentifier Register */
+
+/* Bits definition for control register (DCMI_CR) */
+#define CR_CAPTURE BIT(0)
+#define CR_CM BIT(1)
+#define CR_CROP BIT(2)
+#define CR_JPEG BIT(3)
+#define CR_ESS BIT(4)
+#define CR_PCKPOL BIT(5)
+#define CR_HSPOL BIT(6)
+#define CR_VSPOL BIT(7)
+#define CR_FCRC_0 BIT(8)
+#define CR_FCRC_1 BIT(9)
+#define CR_EDM_0 BIT(10)
+#define CR_EDM_1 BIT(11)
+#define CR_ENABLE BIT(14)
+
+/* Bits definition for status register (DCMI_SR) */
+#define SR_HSYNC BIT(0)
+#define SR_VSYNC BIT(1)
+#define SR_FNE BIT(2)
+
+/*
+ * Bits definition for interrupt registers
+ * (DCMI_RIS, DCMI_IER, DCMI_MIS, DCMI_ICR)
+ */
+#define IT_FRAME BIT(0)
+#define IT_OVR BIT(1)
+#define IT_ERR BIT(2)
+#define IT_VSYNC BIT(3)
+#define IT_LINE BIT(4)
+
+enum state {
+ STOPPED = 0,
+ WAIT_FOR_BUFFER,
+ RUNNING,
+};
+
+#define MIN_WIDTH 16U
+#define MAX_WIDTH 2592U
+#define MIN_HEIGHT 16U
+#define MAX_HEIGHT 2592U
+
+#define TIMEOUT_MS 1000
+
+#define OVERRUN_ERROR_THRESHOLD 3
+
+struct dcmi_format {
+ u32 fourcc;
+ u32 mbus_code;
+ u8 bpp;
+};
+
+struct dcmi_framesize {
+ u32 width;
+ u32 height;
+};
+
+struct dcmi_buf {
+ struct vb2_v4l2_buffer vb;
+ bool prepared;
+ struct sg_table sgt;
+ size_t size;
+ struct list_head list;
+};
+
+struct stm32_dcmi {
+ /* Protects variables shared with the interrupt handler */
+ spinlock_t irqlock;
+ struct device *dev;
+ void __iomem *regs;
+ struct resource *res;
+ struct reset_control *rstc;
+ int sequence;
+ struct list_head buffers;
+ struct dcmi_buf *active;
+ int irq;
+
+ struct v4l2_device v4l2_dev;
+ struct video_device *vdev;
+ struct v4l2_async_notifier notifier;
+ struct v4l2_subdev *source;
+ struct v4l2_format fmt;
+ struct v4l2_rect crop;
+ bool do_crop;
+
+ const struct dcmi_format **sd_formats;
+ unsigned int num_of_sd_formats;
+ const struct dcmi_format *sd_format;
+ struct dcmi_framesize *sd_framesizes;
+ unsigned int num_of_sd_framesizes;
+ struct dcmi_framesize sd_framesize;
+ struct v4l2_rect sd_bounds;
+
+ /* Protect this data structure */
+ struct mutex lock;
+ struct vb2_queue queue;
+
+ struct v4l2_mbus_config_parallel bus;
+ enum v4l2_mbus_type bus_type;
+ struct completion complete;
+ struct clk *mclk;
+ enum state state;
+ struct dma_chan *dma_chan;
+ dma_cookie_t dma_cookie;
+ u32 dma_max_burst;
+ u32 misr;
+ int errors_count;
+ int overrun_count;
+ int buffers_count;
+
+ /* Ensure DMA operations atomicity */
+ struct mutex dma_lock;
+
+ struct media_device mdev;
+ struct media_pad vid_cap_pad;
+ struct media_pipeline pipeline;
+};
+
+static inline struct stm32_dcmi *notifier_to_dcmi(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct stm32_dcmi, notifier);
+}
+
+static inline u32 reg_read(void __iomem *base, u32 reg)
+{
+ return readl_relaxed(base + reg);
+}
+
+static inline void reg_write(void __iomem *base, u32 reg, u32 val)
+{
+ writel_relaxed(val, base + reg);
+}
+
+static inline void reg_set(void __iomem *base, u32 reg, u32 mask)
+{
+ reg_write(base, reg, reg_read(base, reg) | mask);
+}
+
+static inline void reg_clear(void __iomem *base, u32 reg, u32 mask)
+{
+ reg_write(base, reg, reg_read(base, reg) & ~mask);
+}
+
+static int dcmi_start_capture(struct stm32_dcmi *dcmi, struct dcmi_buf *buf);
+
+static void dcmi_buffer_done(struct stm32_dcmi *dcmi,
+ struct dcmi_buf *buf,
+ size_t bytesused,
+ int err)
+{
+ struct vb2_v4l2_buffer *vbuf;
+
+ if (!buf)
+ return;
+
+ list_del_init(&buf->list);
+
+ vbuf = &buf->vb;
+
+ vbuf->sequence = dcmi->sequence++;
+ vbuf->field = V4L2_FIELD_NONE;
+ vbuf->vb2_buf.timestamp = ktime_get_ns();
+ vb2_set_plane_payload(&vbuf->vb2_buf, 0, bytesused);
+ vb2_buffer_done(&vbuf->vb2_buf,
+ err ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+ dev_dbg(dcmi->dev, "buffer[%d] done seq=%d, bytesused=%zu\n",
+ vbuf->vb2_buf.index, vbuf->sequence, bytesused);
+
+ dcmi->buffers_count++;
+ dcmi->active = NULL;
+}
+
+static int dcmi_restart_capture(struct stm32_dcmi *dcmi)
+{
+ struct dcmi_buf *buf;
+
+ spin_lock_irq(&dcmi->irqlock);
+
+ if (dcmi->state != RUNNING) {
+ spin_unlock_irq(&dcmi->irqlock);
+ return -EINVAL;
+ }
+
+ /* Restart capture with a new DMA transfer on the next buffer */
+ if (list_empty(&dcmi->buffers)) {
+ dev_dbg(dcmi->dev, "Capture restart is deferred to next buffer queueing\n");
+ dcmi->state = WAIT_FOR_BUFFER;
+ spin_unlock_irq(&dcmi->irqlock);
+ return 0;
+ }
+ buf = list_entry(dcmi->buffers.next, struct dcmi_buf, list);
+ dcmi->active = buf;
+
+ spin_unlock_irq(&dcmi->irqlock);
+
+ return dcmi_start_capture(dcmi, buf);
+}
+
+static void dcmi_dma_callback(void *param)
+{
+ struct stm32_dcmi *dcmi = (struct stm32_dcmi *)param;
+ struct dma_tx_state state;
+ enum dma_status status;
+ struct dcmi_buf *buf = dcmi->active;
+
+ spin_lock_irq(&dcmi->irqlock);
+
+ /* Check DMA status */
+ status = dmaengine_tx_status(dcmi->dma_chan, dcmi->dma_cookie, &state);
+
+ switch (status) {
+ case DMA_IN_PROGRESS:
+ dev_dbg(dcmi->dev, "%s: Received DMA_IN_PROGRESS\n", __func__);
+ break;
+ case DMA_PAUSED:
+ dev_err(dcmi->dev, "%s: Received DMA_PAUSED\n", __func__);
+ break;
+ case DMA_ERROR:
+ dev_err(dcmi->dev, "%s: Received DMA_ERROR\n", __func__);
+
+ /* Return buffer to V4L2 in error state */
+ dcmi_buffer_done(dcmi, buf, 0, -EIO);
+ break;
+ case DMA_COMPLETE:
+ dev_dbg(dcmi->dev, "%s: Received DMA_COMPLETE\n", __func__);
+
+ /* Return buffer to V4L2 */
+ dcmi_buffer_done(dcmi, buf, buf->size, 0);
+
+ spin_unlock_irq(&dcmi->irqlock);
+
+ /* Restart capture */
+ if (dcmi_restart_capture(dcmi))
+ dev_err(dcmi->dev, "%s: Cannot restart capture on DMA complete\n",
+ __func__);
+ return;
+ default:
+ dev_err(dcmi->dev, "%s: Received unknown status\n", __func__);
+ break;
+ }
+
+ spin_unlock_irq(&dcmi->irqlock);
+}
+
+static int dcmi_start_dma(struct stm32_dcmi *dcmi,
+ struct dcmi_buf *buf)
+{
+ struct dma_async_tx_descriptor *desc = NULL;
+ struct dma_slave_config config;
+ int ret;
+
+ memset(&config, 0, sizeof(config));
+
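+ /*
+ * The DMA source is the DCMI data register (32-bit FIFO reads) and
+ * the destination is the capture buffer in system memory
+ */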
+ config.src_addr = (dma_addr_t)dcmi->res->start + DCMI_DR;
+ config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ config.dst_maxburst = 4;
+
+ /* Configure DMA channel */
+ ret = dmaengine_slave_config(dcmi->dma_chan, &config);
+ if (ret < 0) {
+ dev_err(dcmi->dev, "%s: DMA channel config failed (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ /*
+ * Avoid call of dmaengine_terminate_sync() between
+ * dmaengine_prep_slave_single() and dmaengine_submit()
+ * by locking the whole DMA submission sequence
+ */
+ mutex_lock(&dcmi->dma_lock);
+
+ /* Prepare a DMA transaction */
+ desc = dmaengine_prep_slave_sg(dcmi->dma_chan, buf->sgt.sgl, buf->sgt.nents,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(dcmi->dev, "%s: DMA dmaengine_prep_slave_sg failed\n", __func__);
+ mutex_unlock(&dcmi->dma_lock);
+ return -EINVAL;
+ }
+
+ /* Set completion callback routine for notification */
+ desc->callback = dcmi_dma_callback;
+ desc->callback_param = dcmi;
+
+ /* Push current DMA transaction in the pending queue */
+ dcmi->dma_cookie = dmaengine_submit(desc);
+ if (dma_submit_error(dcmi->dma_cookie)) {
+ dev_err(dcmi->dev, "%s: DMA submission failed\n", __func__);
+ mutex_unlock(&dcmi->dma_lock);
+ return -ENXIO;
+ }
+
+ mutex_unlock(&dcmi->dma_lock);
+
+ dma_async_issue_pending(dcmi->dma_chan);
+
+ return 0;
+}
+
+static int dcmi_start_capture(struct stm32_dcmi *dcmi, struct dcmi_buf *buf)
+{
+ int ret;
+
+ if (!buf)
+ return -EINVAL;
+
+ ret = dcmi_start_dma(dcmi, buf);
+ if (ret) {
+ dcmi->errors_count++;
+ return ret;
+ }
+
+ /* Enable capture */
+ reg_set(dcmi->regs, DCMI_CR, CR_CAPTURE);
+
+ return 0;
+}
+
+static void dcmi_set_crop(struct stm32_dcmi *dcmi)
+{
+ u32 size, start;
+
+ /* Crop resolution */
+ size = ((dcmi->crop.height - 1) << 16) |
+ ((dcmi->crop.width << 1) - 1);
+ reg_write(dcmi->regs, DCMI_CWSIZE, size);
+
+ /* Crop start point */
+ start = ((dcmi->crop.top) << 16) |
+ ((dcmi->crop.left << 1));
+ reg_write(dcmi->regs, DCMI_CWSTRT, start);
+
+ dev_dbg(dcmi->dev, "Cropping to %ux%u@%u:%u\n",
+ dcmi->crop.width, dcmi->crop.height,
+ dcmi->crop.left, dcmi->crop.top);
+
+ /* Enable crop */
+ reg_set(dcmi->regs, DCMI_CR, CR_CROP);
+}
+
+static void dcmi_process_jpeg(struct stm32_dcmi *dcmi)
+{
+ struct dma_tx_state state;
+ enum dma_status status;
+ struct dcmi_buf *buf = dcmi->active;
+
+ if (!buf)
+ return;
+
+ /*
+ * The sensor sends JPEG buffers of variable size, so the DMA
+ * transfer never completes: the programmed transfer size is never
+ * reached. To ensure that all JPEG data has been transferred into
+ * the active buffer memory, the DMA is drained. The DMA tx status
+ * then gives the amount of data actually transferred to memory,
+ * which is returned to V4L2 through the active buffer payload.
+ */
+
+ /* Drain DMA */
+ dmaengine_synchronize(dcmi->dma_chan);
+
+ /* Get DMA residue to get JPEG size */
+ status = dmaengine_tx_status(dcmi->dma_chan, dcmi->dma_cookie, &state);
+ if (status != DMA_ERROR && state.residue < buf->size) {
+ /* Return JPEG buffer to V4L2 with received JPEG buffer size */
+ dcmi_buffer_done(dcmi, buf, buf->size - state.residue, 0);
+ } else {
+ dcmi->errors_count++;
+ dev_err(dcmi->dev, "%s: Cannot get JPEG size from DMA\n",
+ __func__);
+ /* Return JPEG buffer to V4L2 in ERROR state */
+ dcmi_buffer_done(dcmi, buf, 0, -EIO);
+ }
+
+ /* Abort DMA operation */
+ dmaengine_terminate_sync(dcmi->dma_chan);
+
+ /* Restart capture */
+ if (dcmi_restart_capture(dcmi))
+ dev_err(dcmi->dev, "%s: Cannot restart capture on JPEG received\n",
+ __func__);
+}
+
+static irqreturn_t dcmi_irq_thread(int irq, void *arg)
+{
+ struct stm32_dcmi *dcmi = arg;
+
+ spin_lock_irq(&dcmi->irqlock);
+
+ if (dcmi->misr & IT_OVR) {
+ dcmi->overrun_count++;
+ if (dcmi->overrun_count > OVERRUN_ERROR_THRESHOLD)
+ dcmi->errors_count++;
+ }
+ if (dcmi->misr & IT_ERR)
+ dcmi->errors_count++;
+
+ if (dcmi->sd_format->fourcc == V4L2_PIX_FMT_JPEG &&
+ dcmi->misr & IT_FRAME) {
+ /* JPEG received */
+ spin_unlock_irq(&dcmi->irqlock);
+ dcmi_process_jpeg(dcmi);
+ return IRQ_HANDLED;
+ }
+
+ spin_unlock_irq(&dcmi->irqlock);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t dcmi_irq_callback(int irq, void *arg)
+{
+ struct stm32_dcmi *dcmi = arg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dcmi->irqlock, flags);
+
+ dcmi->misr = reg_read(dcmi->regs, DCMI_MIS);
+
+ /* Clear interrupt */
+ reg_set(dcmi->regs, DCMI_ICR, IT_FRAME | IT_OVR | IT_ERR);
+
+ spin_unlock_irqrestore(&dcmi->irqlock, flags);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static int dcmi_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers,
+ unsigned int *nplanes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct stm32_dcmi *dcmi = vb2_get_drv_priv(vq);
+ unsigned int size;
+
+ size = dcmi->fmt.fmt.pix.sizeimage;
+
+ /* Make sure the image size is large enough */
+ if (*nplanes)
+ return sizes[0] < size ? -EINVAL : 0;
+
+ *nplanes = 1;
+ sizes[0] = size;
+
+ dev_dbg(dcmi->dev, "Setup queue, count=%d, size=%d\n",
+ *nbuffers, size);
+
+ return 0;
+}
+
+static int dcmi_buf_init(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct dcmi_buf *buf = container_of(vbuf, struct dcmi_buf, vb);
+
+ INIT_LIST_HEAD(&buf->list);
+
+ return 0;
+}
+
+static int dcmi_buf_prepare(struct vb2_buffer *vb)
+{
+ struct stm32_dcmi *dcmi = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct dcmi_buf *buf = container_of(vbuf, struct dcmi_buf, vb);
+ unsigned long size;
+ unsigned int num_sgs = 1;
+ dma_addr_t dma_buf;
+ struct scatterlist *sg;
+ int i, ret;
+
+ size = dcmi->fmt.fmt.pix.sizeimage;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ dev_err(dcmi->dev, "%s data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, size);
+
+ if (!buf->prepared) {
+ /* Get memory addresses */
+ buf->size = vb2_plane_size(&buf->vb.vb2_buf, 0);
+ if (buf->size > dcmi->dma_max_burst)
+ num_sgs = DIV_ROUND_UP(buf->size, dcmi->dma_max_burst);
+
+ ret = sg_alloc_table(&buf->sgt, num_sgs, GFP_ATOMIC);
+ if (ret) {
+ dev_err(dcmi->dev, "sg table alloc failed\n");
+ return ret;
+ }
+
+ dma_buf = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
+
+ dev_dbg(dcmi->dev, "buffer[%d] phy=%pad size=%zu\n",
+ vb->index, &dma_buf, buf->size);
+
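+ /*
+ * Split the contiguous plane into scatterlist entries of at
+ * most dma_max_burst bytes each, so that no DMA segment
+ * exceeds what the DMA engine can transfer at once
+ */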
+ for_each_sg(buf->sgt.sgl, sg, num_sgs, i) {
+ size_t bytes = min_t(size_t, size, dcmi->dma_max_burst);
+
+ sg_dma_address(sg) = dma_buf;
+ sg_dma_len(sg) = bytes;
+ dma_buf += bytes;
+ size -= bytes;
+ }
+
+ buf->prepared = true;
+
+ vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->size);
+ }
+
+ return 0;
+}
+
+static void dcmi_buf_queue(struct vb2_buffer *vb)
+{
+ struct stm32_dcmi *dcmi = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct dcmi_buf *buf = container_of(vbuf, struct dcmi_buf, vb);
+
+ spin_lock_irq(&dcmi->irqlock);
+
+ /* Enqueue to video buffers list */
+ list_add_tail(&buf->list, &dcmi->buffers);
+
+ if (dcmi->state == WAIT_FOR_BUFFER) {
+ dcmi->state = RUNNING;
+ dcmi->active = buf;
+
+ dev_dbg(dcmi->dev, "Starting capture on buffer[%d] queued\n",
+ buf->vb.vb2_buf.index);
+
+ spin_unlock_irq(&dcmi->irqlock);
+ if (dcmi_start_capture(dcmi, buf))
+ dev_err(dcmi->dev, "%s: Cannot restart capture on overflow or error\n",
+ __func__);
+ return;
+ }
+
+ spin_unlock_irq(&dcmi->irqlock);
+}
+
+static struct media_entity *dcmi_find_source(struct stm32_dcmi *dcmi)
+{
+ struct media_entity *entity = &dcmi->vdev->entity;
+ struct media_pad *pad;
+
+ /* Walk up the pipeline looking for the entity that has no sink pad */
+ while (1) {
+ pad = &entity->pads[0];
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ break;
+
+ pad = media_pad_remote_pad_first(pad);
+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
+ break;
+
+ entity = pad->entity;
+ }
+
+ return entity;
+}
+
+static int dcmi_pipeline_s_fmt(struct stm32_dcmi *dcmi,
+ struct v4l2_subdev_format *format)
+{
+ struct media_entity *entity = &dcmi->source->entity;
+ struct v4l2_subdev *subdev;
+ struct media_pad *sink_pad = NULL;
+ struct media_pad *src_pad = NULL;
+ struct media_pad *pad = NULL;
+ struct v4l2_subdev_format fmt = *format;
+ bool found = false;
+ int ret;
+
+ /*
+ * Starting from the sensor subdevice, walk the
+ * pipeline and set the format on each subdevice
+ */
+ while (1) {
+ unsigned int i;
+
+ /* Search if current entity has a source pad */
+ for (i = 0; i < entity->num_pads; i++) {
+ pad = &entity->pads[i];
+ if (pad->flags & MEDIA_PAD_FL_SOURCE) {
+ src_pad = pad;
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ break;
+
+ subdev = media_entity_to_v4l2_subdev(entity);
+
+ /* Propagate format on sink pad if any, otherwise source pad */
+ if (sink_pad)
+ pad = sink_pad;
+
+ dev_dbg(dcmi->dev, "\"%s\":%d pad format set to 0x%x %ux%u\n",
+ subdev->name, pad->index, format->format.code,
+ format->format.width, format->format.height);
+
+ fmt.pad = pad->index;
+ ret = v4l2_subdev_call(subdev, pad, set_fmt, NULL, &fmt);
+ if (ret < 0) {
+ dev_err(dcmi->dev, "%s: Failed to set format 0x%x %ux%u on \"%s\":%d pad (%d)\n",
+ __func__, format->format.code,
+ format->format.width, format->format.height,
+ subdev->name, pad->index, ret);
+ return ret;
+ }
+
+ if (fmt.format.code != format->format.code ||
+ fmt.format.width != format->format.width ||
+ fmt.format.height != format->format.height) {
+ dev_dbg(dcmi->dev, "\"%s\":%d pad format has been changed to 0x%x %ux%u\n",
+ subdev->name, pad->index, fmt.format.code,
+ fmt.format.width, fmt.format.height);
+ }
+
+ /* Walk to next entity */
+ sink_pad = media_pad_remote_pad_first(src_pad);
+ if (!sink_pad || !is_media_entity_v4l2_subdev(sink_pad->entity))
+ break;
+
+ entity = sink_pad->entity;
+ }
+ *format = fmt;
+
+ return 0;
+}
+
+static int dcmi_pipeline_s_stream(struct stm32_dcmi *dcmi, int state)
+{
+ struct media_entity *entity = &dcmi->vdev->entity;
+ struct v4l2_subdev *subdev;
+ struct media_pad *pad;
+ int ret;
+
+ /* Start/stop all entities within pipeline */
+ while (1) {
+ pad = &entity->pads[0];
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ break;
+
+ pad = media_pad_remote_pad_first(pad);
+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
+ break;
+
+ entity = pad->entity;
+ subdev = media_entity_to_v4l2_subdev(entity);
+
+ ret = v4l2_subdev_call(subdev, video, s_stream, state);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ dev_err(dcmi->dev, "%s: \"%s\" failed to %s streaming (%d)\n",
+ __func__, subdev->name,
+ state ? "start" : "stop", ret);
+ return ret;
+ }
+
+ dev_dbg(dcmi->dev, "\"%s\" is %s\n",
+ subdev->name, state ? "started" : "stopped");
+ }
+
+ return 0;
+}
+
+static int dcmi_pipeline_start(struct stm32_dcmi *dcmi)
+{
+ return dcmi_pipeline_s_stream(dcmi, 1);
+}
+
+static void dcmi_pipeline_stop(struct stm32_dcmi *dcmi)
+{
+ dcmi_pipeline_s_stream(dcmi, 0);
+}
+
+static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct stm32_dcmi *dcmi = vb2_get_drv_priv(vq);
+ struct dcmi_buf *buf, *node;
+ u32 val = 0;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dcmi->dev);
+ if (ret < 0) {
+ dev_err(dcmi->dev, "%s: Failed to start streaming, cannot get sync (%d)\n",
+ __func__, ret);
+ goto err_unlocked;
+ }
+
+ ret = video_device_pipeline_start(dcmi->vdev, &dcmi->pipeline);
+ if (ret < 0) {
+ dev_err(dcmi->dev, "%s: Failed to start streaming, media pipeline start error (%d)\n",
+ __func__, ret);
+ goto err_pm_put;
+ }
+
+ ret = dcmi_pipeline_start(dcmi);
+ if (ret)
+ goto err_media_pipeline_stop;
+
+ spin_lock_irq(&dcmi->irqlock);
+
+ /* Set bus width */
+ switch (dcmi->bus.bus_width) {
+ case 14:
+ val |= CR_EDM_0 | CR_EDM_1;
+ break;
+ case 12:
+ val |= CR_EDM_1;
+ break;
+ case 10:
+ val |= CR_EDM_0;
+ break;
+ default:
+ /* Set bus width to 8 bits by default */
+ break;
+ }
+
+ /* Set vertical synchronization polarity */
+ if (dcmi->bus.flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
+ val |= CR_VSPOL;
+
+ /* Set horizontal synchronization polarity */
+ if (dcmi->bus.flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
+ val |= CR_HSPOL;
+
+ /* Set pixel clock polarity */
+ if (dcmi->bus.flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
+ val |= CR_PCKPOL;
+
+ /*
+ * BT656 embedded synchronisation bus mode.
+ *
+ * Default SAV/EAV mode is supported here with default codes
+ * SAV=0xff000080 & EAV=0xff00009d.
+ * With DCMI this means LSC=SAV=0x80 & LEC=EAV=0x9d.
+ */
+ if (dcmi->bus_type == V4L2_MBUS_BT656) {
+ val |= CR_ESS;
+
+ /* Unmask all codes */
+ reg_write(dcmi->regs, DCMI_ESUR, 0xffffffff);/* FEC:LEC:LSC:FSC */
+
+ /* Trig on LSC=0x80 & LEC=0x9d codes, ignore FSC and FEC */
+ reg_write(dcmi->regs, DCMI_ESCR, 0xff9d80ff);/* FEC:LEC:LSC:FSC */
+ }
+
+ reg_write(dcmi->regs, DCMI_CR, val);
+
+ /* Set crop */
+ if (dcmi->do_crop)
+ dcmi_set_crop(dcmi);
+
+ /* Enable jpeg capture */
+ if (dcmi->sd_format->fourcc == V4L2_PIX_FMT_JPEG)
+ reg_set(dcmi->regs, DCMI_CR, CR_CM);/* Snapshot mode */
+
+ /* Enable dcmi */
+ reg_set(dcmi->regs, DCMI_CR, CR_ENABLE);
+
+ dcmi->sequence = 0;
+ dcmi->errors_count = 0;
+ dcmi->overrun_count = 0;
+ dcmi->buffers_count = 0;
+
+ /*
+ * Start the transfer if at least one buffer has been queued,
+ * otherwise the transfer is deferred until a buffer is queued
+ */
+ if (list_empty(&dcmi->buffers)) {
+ dev_dbg(dcmi->dev, "Start streaming is deferred to next buffer queueing\n");
+ dcmi->state = WAIT_FOR_BUFFER;
+ spin_unlock_irq(&dcmi->irqlock);
+ return 0;
+ }
+
+ buf = list_entry(dcmi->buffers.next, struct dcmi_buf, list);
+ dcmi->active = buf;
+
+ dcmi->state = RUNNING;
+
+ dev_dbg(dcmi->dev, "Start streaming, starting capture\n");
+
+ spin_unlock_irq(&dcmi->irqlock);
+ ret = dcmi_start_capture(dcmi, buf);
+ if (ret) {
+ dev_err(dcmi->dev, "%s: Start streaming failed, cannot start capture\n",
+ __func__);
+ goto err_pipeline_stop;
+ }
+
+ /* Enable interrupts */
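+ /* For JPEG the frame length is not known in advance, so the end-of-frame interrupt is also enabled */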
+ if (dcmi->sd_format->fourcc == V4L2_PIX_FMT_JPEG)
+ reg_set(dcmi->regs, DCMI_IER, IT_FRAME | IT_OVR | IT_ERR);
+ else
+ reg_set(dcmi->regs, DCMI_IER, IT_OVR | IT_ERR);
+
+ return 0;
+
+err_pipeline_stop:
+ dcmi_pipeline_stop(dcmi);
+
+err_media_pipeline_stop:
+ video_device_pipeline_stop(dcmi->vdev);
+
+err_pm_put:
+ pm_runtime_put(dcmi->dev);
+err_unlocked:
+ spin_lock_irq(&dcmi->irqlock);
+ /*
+ * Return all buffers to vb2 in QUEUED state.
+ * This will give ownership back to userspace
+ */
+ list_for_each_entry_safe(buf, node, &dcmi->buffers, list) {
+ list_del_init(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
+ }
+ dcmi->active = NULL;
+ spin_unlock_irq(&dcmi->irqlock);
+
+ return ret;
+}
+
+static void dcmi_stop_streaming(struct vb2_queue *vq)
+{
+ struct stm32_dcmi *dcmi = vb2_get_drv_priv(vq);
+ struct dcmi_buf *buf, *node;
+
+ dcmi_pipeline_stop(dcmi);
+
+ video_device_pipeline_stop(dcmi->vdev);
+
+ spin_lock_irq(&dcmi->irqlock);
+
+ /* Disable interrupts */
+ reg_clear(dcmi->regs, DCMI_IER, IT_FRAME | IT_OVR | IT_ERR);
+
+ /* Disable DCMI */
+ reg_clear(dcmi->regs, DCMI_CR, CR_ENABLE);
+
+ /* Return all queued buffers to vb2 in ERROR state */
+ list_for_each_entry_safe(buf, node, &dcmi->buffers, list) {
+ list_del_init(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+
+ dcmi->active = NULL;
+ dcmi->state = STOPPED;
+
+ spin_unlock_irq(&dcmi->irqlock);
+
+ /* Stop all pending DMA operations */
+ mutex_lock(&dcmi->dma_lock);
+ dmaengine_terminate_sync(dcmi->dma_chan);
+ mutex_unlock(&dcmi->dma_lock);
+
+ pm_runtime_put(dcmi->dev);
+
+ if (dcmi->errors_count)
+ dev_warn(dcmi->dev, "Some errors found while streaming: errors=%d (overrun=%d), buffers=%d\n",
+ dcmi->errors_count, dcmi->overrun_count,
+ dcmi->buffers_count);
+ dev_dbg(dcmi->dev, "Stop streaming, errors=%d (overrun=%d), buffers=%d\n",
+ dcmi->errors_count, dcmi->overrun_count,
+ dcmi->buffers_count);
+}
+
+static const struct vb2_ops dcmi_video_qops = {
+ .queue_setup = dcmi_queue_setup,
+ .buf_init = dcmi_buf_init,
+ .buf_prepare = dcmi_buf_prepare,
+ .buf_queue = dcmi_buf_queue,
+ .start_streaming = dcmi_start_streaming,
+ .stop_streaming = dcmi_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int dcmi_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct stm32_dcmi *dcmi = video_drvdata(file);
+
+ *fmt = dcmi->fmt;
+
+ return 0;
+}
+
+static const struct dcmi_format *find_format_by_fourcc(struct stm32_dcmi *dcmi,
+ unsigned int fourcc)
+{
+ unsigned int num_formats = dcmi->num_of_sd_formats;
+ const struct dcmi_format *fmt;
+ unsigned int i;
+
+ for (i = 0; i < num_formats; i++) {
+ fmt = dcmi->sd_formats[i];
+ if (fmt->fourcc == fourcc)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+static void __find_outer_frame_size(struct stm32_dcmi *dcmi,
+ struct v4l2_pix_format *pix,
+ struct dcmi_framesize *framesize)
+{
+ struct dcmi_framesize *match = NULL;
+ unsigned int i;
+ unsigned int min_err = UINT_MAX;
+
+ for (i = 0; i < dcmi->num_of_sd_framesizes; i++) {
+ struct dcmi_framesize *fsize = &dcmi->sd_framesizes[i];
+ int w_err = (fsize->width - pix->width);
+ int h_err = (fsize->height - pix->height);
+ int err = w_err + h_err;
+
+ if (w_err >= 0 && h_err >= 0 && err < min_err) {
+ min_err = err;
+ match = fsize;
+ }
+ }
+ if (!match)
+ match = &dcmi->sd_framesizes[0];
+
+ *framesize = *match;
+}
+
+static int dcmi_try_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f,
+ const struct dcmi_format **sd_format,
+ struct dcmi_framesize *sd_framesize)
+{
+ const struct dcmi_format *sd_fmt;
+ struct dcmi_framesize sd_fsize;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ };
+ bool do_crop;
+ int ret;
+
+ sd_fmt = find_format_by_fourcc(dcmi, pix->pixelformat);
+ if (!sd_fmt) {
+ if (!dcmi->num_of_sd_formats)
+ return -ENODATA;
+
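+ /* Fall back to the last enumerated format when the requested fourcc is not supported */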
+ sd_fmt = dcmi->sd_formats[dcmi->num_of_sd_formats - 1];
+ pix->pixelformat = sd_fmt->fourcc;
+ }
+
+ /* Limit to hardware capabilities */
+ pix->width = clamp(pix->width, MIN_WIDTH, MAX_WIDTH);
+ pix->height = clamp(pix->height, MIN_HEIGHT, MAX_HEIGHT);
+
+ /* No crop if JPEG is requested */
+ do_crop = dcmi->do_crop && (pix->pixelformat != V4L2_PIX_FMT_JPEG);
+
+ if (do_crop && dcmi->num_of_sd_framesizes) {
+ struct dcmi_framesize outer_sd_fsize;
+ /*
+ * If crop is requested and the sensor has discrete frame sizes,
+ * select the frame size that is just larger than the request
+ */
+ __find_outer_frame_size(dcmi, pix, &outer_sd_fsize);
+ pix->width = outer_sd_fsize.width;
+ pix->height = outer_sd_fsize.height;
+ }
+
+ v4l2_fill_mbus_format(&format.format, pix, sd_fmt->mbus_code);
+ ret = v4l2_subdev_call_state_try(dcmi->source, pad, set_fmt, &format);
+ if (ret < 0)
+ return ret;
+
+ /* Update pix regarding to what sensor can do */
+ v4l2_fill_pix_format(pix, &format.format);
+
+ /* Save resolution that sensor can actually do */
+ sd_fsize.width = pix->width;
+ sd_fsize.height = pix->height;
+
+ if (do_crop) {
+ struct v4l2_rect c = dcmi->crop;
+ struct v4l2_rect max_rect;
+
+ /*
+ * Adjust the crop rectangle by intersecting it with the
+ * requested format resolution
+ */
+ max_rect.top = 0;
+ max_rect.left = 0;
+ max_rect.width = pix->width;
+ max_rect.height = pix->height;
+ v4l2_rect_map_inside(&c, &max_rect);
+ c.top = clamp_t(s32, c.top, 0, pix->height - c.height);
+ c.left = clamp_t(s32, c.left, 0, pix->width - c.width);
+ dcmi->crop = c;
+
+ /* Adjust format resolution request to crop */
+ pix->width = dcmi->crop.width;
+ pix->height = dcmi->crop.height;
+ }
+
+ pix->field = V4L2_FIELD_NONE;
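+ /* Derive the line pitch and image size from the per-format bytes-per-pixel */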
+ pix->bytesperline = pix->width * sd_fmt->bpp;
+ pix->sizeimage = pix->bytesperline * pix->height;
+
+ if (sd_format)
+ *sd_format = sd_fmt;
+ if (sd_framesize)
+ *sd_framesize = sd_fsize;
+
+ return 0;
+}
+
+static int dcmi_set_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f)
+{
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ const struct dcmi_format *sd_format;
+ struct dcmi_framesize sd_framesize;
+ struct v4l2_mbus_framefmt *mf = &format.format;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ int ret;
+
+ /*
+ * Try the format: fmt.width/height may have been changed to match
+ * the sensor capabilities or the crop request; sd_format &
+ * sd_framesize will contain what the subdev can do for this request.
+ */
+ ret = dcmi_try_fmt(dcmi, f, &sd_format, &sd_framesize);
+ if (ret)
+ return ret;
+
+ /* Disable crop if JPEG is requested, unless the BT656 bus is selected */
+ if (pix->pixelformat == V4L2_PIX_FMT_JPEG &&
+ dcmi->bus_type != V4L2_MBUS_BT656)
+ dcmi->do_crop = false;
+
+ /* pix to mbus format */
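+ /* The subdev is asked for the full (pre-crop) frame size stored in sd_framesize */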
+ v4l2_fill_mbus_format(mf, pix,
+ sd_format->mbus_code);
+ mf->width = sd_framesize.width;
+ mf->height = sd_framesize.height;
+
+ ret = dcmi_pipeline_s_fmt(dcmi, &format);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(dcmi->dev, "Sensor format set to 0x%x %ux%u\n",
+ mf->code, mf->width, mf->height);
+ dev_dbg(dcmi->dev, "Buffer format set to %4.4s %ux%u\n",
+ (char *)&pix->pixelformat,
+ pix->width, pix->height);
+
+ dcmi->fmt = *f;
+ dcmi->sd_format = sd_format;
+ dcmi->sd_framesize = sd_framesize;
+
+ return 0;
+}
+
+static int dcmi_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct stm32_dcmi *dcmi = video_drvdata(file);
+
+ if (vb2_is_streaming(&dcmi->queue))
+ return -EBUSY;
+
+ return dcmi_set_fmt(dcmi, f);
+}
+
+static int dcmi_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct stm32_dcmi *dcmi = video_drvdata(file);
+
+ return dcmi_try_fmt(dcmi, f, NULL, NULL);
+}
+
+static int dcmi_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct stm32_dcmi *dcmi = video_drvdata(file);
+
+ if (f->index >= dcmi->num_of_sd_formats)
+ return -EINVAL;
+
+ f->pixelformat = dcmi->sd_formats[f->index]->fourcc;
+ return 0;
+}
+
+static int dcmi_get_sensor_format(struct stm32_dcmi *dcmi,
+ struct v4l2_pix_format *pix)
+{
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ ret = v4l2_subdev_call(dcmi->source, pad, get_fmt, NULL, &fmt);
+ if (ret)
+ return ret;
+
+ v4l2_fill_pix_format(pix, &fmt.format);
+
+ return 0;
+}
+
+static int dcmi_set_sensor_format(struct stm32_dcmi *dcmi,
+ struct v4l2_pix_format *pix)
+{
+ const struct dcmi_format *sd_fmt;
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ };
+ int ret;
+
+ sd_fmt = find_format_by_fourcc(dcmi, pix->pixelformat);
+ if (!sd_fmt) {
+ if (!dcmi->num_of_sd_formats)
+ return -ENODATA;
+
+ sd_fmt = dcmi->sd_formats[dcmi->num_of_sd_formats - 1];
+ pix->pixelformat = sd_fmt->fourcc;
+ }
+
+ v4l2_fill_mbus_format(&format.format, pix, sd_fmt->mbus_code);
+ ret = v4l2_subdev_call_state_try(dcmi->source, pad, set_fmt, &format);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int dcmi_get_sensor_bounds(struct stm32_dcmi *dcmi,
+ struct v4l2_rect *r)
+{
+ struct v4l2_subdev_selection bounds = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .target = V4L2_SEL_TGT_CROP_BOUNDS,
+ };
+ unsigned int max_width, max_height, max_pixsize;
+ struct v4l2_pix_format pix;
+ unsigned int i;
+ int ret;
+
+ /*
+ * Get sensor bounds first
+ */
+ ret = v4l2_subdev_call(dcmi->source, pad, get_selection,
+ NULL, &bounds);
+ if (!ret)
+ *r = bounds.r;
+ if (ret != -ENOIOCTLCMD)
+ return ret;
+
+ /*
+ * If selection is not implemented, fall back to enumerating
+ * the sensor frame sizes and take the largest one
+ */
+ max_width = 0;
+ max_height = 0;
+ max_pixsize = 0;
+ for (i = 0; i < dcmi->num_of_sd_framesizes; i++) {
+ struct dcmi_framesize *fsize = &dcmi->sd_framesizes[i];
+ unsigned int pixsize = fsize->width * fsize->height;
+
+ if (pixsize > max_pixsize) {
+ max_pixsize = pixsize;
+ max_width = fsize->width;
+ max_height = fsize->height;
+ }
+ }
+ if (max_pixsize > 0) {
+ r->top = 0;
+ r->left = 0;
+ r->width = max_width;
+ r->height = max_height;
+ return 0;
+ }
+
+ /*
+ * If frame size enumeration is not implemented, fall back
+ * to the current sensor frame size
+ */
+ ret = dcmi_get_sensor_format(dcmi, &pix);
+ if (ret)
+ return ret;
+
+ r->top = 0;
+ r->left = 0;
+ r->width = pix.width;
+ r->height = pix.height;
+
+ return 0;
+}
+
+static int dcmi_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct stm32_dcmi *dcmi = video_drvdata(file);
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ s->r = dcmi->sd_bounds;
+ return 0;
+ case V4L2_SEL_TGT_CROP:
+ if (dcmi->do_crop) {
+ s->r = dcmi->crop;
+ } else {
+ s->r.top = 0;
+ s->r.left = 0;
+ s->r.width = dcmi->fmt.fmt.pix.width;
+ s->r.height = dcmi->fmt.fmt.pix.height;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dcmi_s_selection(struct file *file, void *priv,
+ struct v4l2_selection *s)
+{
+ struct stm32_dcmi *dcmi = video_drvdata(file);
+ struct v4l2_rect r = s->r;
+ struct v4l2_rect max_rect;
+ struct v4l2_pix_format pix;
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ s->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ /* Reset sensor resolution to max resolution */
+ pix.pixelformat = dcmi->fmt.fmt.pix.pixelformat;
+ pix.width = dcmi->sd_bounds.width;
+ pix.height = dcmi->sd_bounds.height;
+ dcmi_set_sensor_format(dcmi, &pix);
+
+ /*
+ * Make the intersection between the sensor resolution
+ * and the crop request
+ */
+ max_rect.top = 0;
+ max_rect.left = 0;
+ max_rect.width = pix.width;
+ max_rect.height = pix.height;
+ v4l2_rect_map_inside(&r, &max_rect);
+ r.top = clamp_t(s32, r.top, 0, pix.height - r.height);
+ r.left = clamp_t(s32, r.left, 0, pix.width - r.width);
+
+ if (!(r.top == dcmi->sd_bounds.top &&
+ r.left == dcmi->sd_bounds.left &&
+ r.width == dcmi->sd_bounds.width &&
+ r.height == dcmi->sd_bounds.height)) {
+ /* Crop if the request differs from the sensor resolution */
+ dcmi->do_crop = true;
+ dcmi->crop = r;
+ dev_dbg(dcmi->dev, "s_selection: crop %ux%u@(%u,%u) from %ux%u\n",
+ r.width, r.height, r.left, r.top,
+ pix.width, pix.height);
+ } else {
+ /* Disable crop */
+ dcmi->do_crop = false;
+ dev_dbg(dcmi->dev, "s_selection: crop is disabled\n");
+ }
+
+ s->r = r;
+ return 0;
+}
+
+static int dcmi_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, DRV_NAME, sizeof(cap->driver));
+ strscpy(cap->card, "STM32 Camera Memory Interface",
+ sizeof(cap->card));
+ strscpy(cap->bus_info, "platform:dcmi", sizeof(cap->bus_info));
+ return 0;
+}
+
+static int dcmi_enum_input(struct file *file, void *priv,
+ struct v4l2_input *i)
+{
+ if (i->index != 0)
+ return -EINVAL;
+
+ i->type = V4L2_INPUT_TYPE_CAMERA;
+ strscpy(i->name, "Camera", sizeof(i->name));
+ return 0;
+}
+
+static int dcmi_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int dcmi_s_input(struct file *file, void *priv, unsigned int i)
+{
+ if (i > 0)
+ return -EINVAL;
+ return 0;
+}
+
+static int dcmi_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct stm32_dcmi *dcmi = video_drvdata(file);
+ const struct dcmi_format *sd_fmt;
+ struct v4l2_subdev_frame_size_enum fse = {
+ .index = fsize->index,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ sd_fmt = find_format_by_fourcc(dcmi, fsize->pixel_format);
+ if (!sd_fmt)
+ return -EINVAL;
+
+ fse.code = sd_fmt->mbus_code;
+
+ ret = v4l2_subdev_call(dcmi->source, pad, enum_frame_size,
+ NULL, &fse);
+ if (ret)
+ return ret;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete.width = fse.max_width;
+ fsize->discrete.height = fse.max_height;
+
+ return 0;
+}
+
+static int dcmi_g_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *p)
+{
+ struct stm32_dcmi *dcmi = video_drvdata(file);
+
+ return v4l2_g_parm_cap(video_devdata(file), dcmi->source, p);
+}
+
+static int dcmi_s_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *p)
+{
+ struct stm32_dcmi *dcmi = video_drvdata(file);
+
+ return v4l2_s_parm_cap(video_devdata(file), dcmi->source, p);
+}
+
+static int dcmi_enum_frameintervals(struct file *file, void *fh,
+ struct v4l2_frmivalenum *fival)
+{
+ struct stm32_dcmi *dcmi = video_drvdata(file);
+ const struct dcmi_format *sd_fmt;
+ struct v4l2_subdev_frame_interval_enum fie = {
+ .index = fival->index,
+ .width = fival->width,
+ .height = fival->height,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ sd_fmt = find_format_by_fourcc(dcmi, fival->pixel_format);
+ if (!sd_fmt)
+ return -EINVAL;
+
+ fie.code = sd_fmt->mbus_code;
+
+ ret = v4l2_subdev_call(dcmi->source, pad,
+ enum_frame_interval, NULL, &fie);
+ if (ret)
+ return ret;
+
+ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ fival->discrete = fie.interval;
+
+ return 0;
+}
+
+static const struct of_device_id stm32_dcmi_of_match[] = {
+ { .compatible = "st,stm32-dcmi"},
+ { /* end node */ },
+};
+MODULE_DEVICE_TABLE(of, stm32_dcmi_of_match);
+
+static int dcmi_open(struct file *file)
+{
+ struct stm32_dcmi *dcmi = video_drvdata(file);
+ struct v4l2_subdev *sd = dcmi->source;
+ int ret;
+
+ if (mutex_lock_interruptible(&dcmi->lock))
+ return -ERESTARTSYS;
+
+ ret = v4l2_fh_open(file);
+ if (ret < 0)
+ goto unlock;
+
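+ /* Only the first open of the device node powers up the sensor and restores the format */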
+ if (!v4l2_fh_is_singular_file(file))
+ goto fh_rel;
+
+ ret = v4l2_subdev_call(sd, core, s_power, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ goto fh_rel;
+
+ ret = dcmi_set_fmt(dcmi, &dcmi->fmt);
+ if (ret)
+ v4l2_subdev_call(sd, core, s_power, 0);
+fh_rel:
+ if (ret)
+ v4l2_fh_release(file);
+unlock:
+ mutex_unlock(&dcmi->lock);
+ return ret;
+}
+
+static int dcmi_release(struct file *file)
+{
+ struct stm32_dcmi *dcmi = video_drvdata(file);
+ struct v4l2_subdev *sd = dcmi->source;
+ bool fh_singular;
+ int ret;
+
+ mutex_lock(&dcmi->lock);
+
+ fh_singular = v4l2_fh_is_singular_file(file);
+
+ ret = _vb2_fop_release(file, NULL);
+
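+ /* Power the sensor down when the last file handle is released */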
+ if (fh_singular)
+ v4l2_subdev_call(sd, core, s_power, 0);
+
+ mutex_unlock(&dcmi->lock);
+
+ return ret;
+}
+
+static const struct v4l2_ioctl_ops dcmi_ioctl_ops = {
+ .vidioc_querycap = dcmi_querycap,
+
+ .vidioc_try_fmt_vid_cap = dcmi_try_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = dcmi_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = dcmi_s_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_cap = dcmi_enum_fmt_vid_cap,
+ .vidioc_g_selection = dcmi_g_selection,
+ .vidioc_s_selection = dcmi_s_selection,
+
+ .vidioc_enum_input = dcmi_enum_input,
+ .vidioc_g_input = dcmi_g_input,
+ .vidioc_s_input = dcmi_s_input,
+
+ .vidioc_g_parm = dcmi_g_parm,
+ .vidioc_s_parm = dcmi_s_parm,
+
+ .vidioc_enum_framesizes = dcmi_enum_framesizes,
+ .vidioc_enum_frameintervals = dcmi_enum_frameintervals,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static const struct v4l2_file_operations dcmi_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = video_ioctl2,
+ .open = dcmi_open,
+ .release = dcmi_release,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+#ifndef CONFIG_MMU
+ .get_unmapped_area = vb2_fop_get_unmapped_area,
+#endif
+ .read = vb2_fop_read,
+};
+
+static int dcmi_set_default_fmt(struct stm32_dcmi *dcmi)
+{
+ struct v4l2_format f = {
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .fmt.pix = {
+ .width = CIF_WIDTH,
+ .height = CIF_HEIGHT,
+ .field = V4L2_FIELD_NONE,
+ .pixelformat = dcmi->sd_formats[0]->fourcc,
+ },
+ };
+ int ret;
+
+ ret = dcmi_try_fmt(dcmi, &f, NULL, NULL);
+ if (ret)
+ return ret;
+ dcmi->sd_format = dcmi->sd_formats[0];
+ dcmi->fmt = f;
+ return 0;
+}
+
+static const struct dcmi_format dcmi_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .mbus_code = MEDIA_BUS_FMT_RGB565_2X8_LE,
+ .bpp = 2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .mbus_code = MEDIA_BUS_FMT_RGB565_1X16,
+ .bpp = 2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .bpp = 2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_1X16,
+ .bpp = 2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .mbus_code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .bpp = 2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .mbus_code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .bpp = 2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_JPEG,
+ .mbus_code = MEDIA_BUS_FMT_JPEG_1X8,
+ .bpp = 1,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .bpp = 1,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .bpp = 1,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .bpp = 1,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .bpp = 1,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .bpp = 2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG10,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .bpp = 2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG10,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .bpp = 2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB10,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .bpp = 2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR12,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .bpp = 2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG12,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .bpp = 2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG12,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .bpp = 2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB12,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .bpp = 2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR14,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR14_1X14,
+ .bpp = 2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG14,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG14_1X14,
+ .bpp = 2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG14,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG14_1X14,
+ .bpp = 2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB14,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB14_1X14,
+ .bpp = 2,
+ },
+};
+
+static int dcmi_formats_init(struct stm32_dcmi *dcmi)
+{
+ const struct dcmi_format *sd_fmts[ARRAY_SIZE(dcmi_formats)];
+ unsigned int num_fmts = 0, i, j;
+ struct v4l2_subdev *subdev = dcmi->source;
+ struct v4l2_subdev_mbus_code_enum mbus_code = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ while (!v4l2_subdev_call(subdev, pad, enum_mbus_code,
+ NULL, &mbus_code)) {
+ for (i = 0; i < ARRAY_SIZE(dcmi_formats); i++) {
+ if (dcmi_formats[i].mbus_code != mbus_code.code)
+ continue;
+
+ /* Exclude JPEG if BT656 bus is selected */
+ if (dcmi_formats[i].fourcc == V4L2_PIX_FMT_JPEG &&
+ dcmi->bus_type == V4L2_MBUS_BT656)
+ continue;
+
+ /* Code supported, have we got this fourcc yet? */
+ for (j = 0; j < num_fmts; j++)
+ if (sd_fmts[j]->fourcc ==
+ dcmi_formats[i].fourcc) {
+ /* Already available */
+ dev_dbg(dcmi->dev, "Skipping fourcc/code: %4.4s/0x%x\n",
+ (char *)&sd_fmts[j]->fourcc,
+ mbus_code.code);
+ break;
+ }
+ if (j == num_fmts) {
+ /* New */
+ sd_fmts[num_fmts++] = dcmi_formats + i;
+ dev_dbg(dcmi->dev, "Supported fourcc/code: %4.4s/0x%x\n",
+ (char *)&sd_fmts[num_fmts - 1]->fourcc,
+ sd_fmts[num_fmts - 1]->mbus_code);
+ }
+ }
+ mbus_code.index++;
+ }
+
+ if (!num_fmts)
+ return -ENXIO;
+
+ dcmi->num_of_sd_formats = num_fmts;
+ dcmi->sd_formats = devm_kcalloc(dcmi->dev,
+ num_fmts, sizeof(struct dcmi_format *),
+ GFP_KERNEL);
+ if (!dcmi->sd_formats) {
+ dev_err(dcmi->dev, "Could not allocate memory\n");
+ return -ENOMEM;
+ }
+
+ memcpy(dcmi->sd_formats, sd_fmts,
+ num_fmts * sizeof(struct dcmi_format *));
+ dcmi->sd_format = dcmi->sd_formats[0];
+
+ return 0;
+}
+
+static int dcmi_framesizes_init(struct stm32_dcmi *dcmi)
+{
+ unsigned int num_fsize = 0;
+ struct v4l2_subdev *subdev = dcmi->source;
+ struct v4l2_subdev_frame_size_enum fse = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .code = dcmi->sd_format->mbus_code,
+ };
+ int ret;
+ unsigned int i;
+
+ /* Count the discrete frame sizes supported by the sensor */
+ while (!v4l2_subdev_call(subdev, pad, enum_frame_size,
+ NULL, &fse))
+ fse.index++;
+
+ num_fsize = fse.index;
+ if (!num_fsize)
+ return 0;
+
+ dcmi->num_of_sd_framesizes = num_fsize;
+ dcmi->sd_framesizes = devm_kcalloc(dcmi->dev, num_fsize,
+ sizeof(struct dcmi_framesize),
+ GFP_KERNEL);
+ if (!dcmi->sd_framesizes) {
+ dev_err(dcmi->dev, "Could not allocate memory\n");
+ return -ENOMEM;
+ }
+
+ /* Fill array with sensor supported framesizes */
+ dev_dbg(dcmi->dev, "Sensor supports %u frame sizes:\n", num_fsize);
+ for (i = 0; i < dcmi->num_of_sd_framesizes; i++) {
+ fse.index = i;
+ ret = v4l2_subdev_call(subdev, pad, enum_frame_size,
+ NULL, &fse);
+ if (ret)
+ return ret;
+ dcmi->sd_framesizes[fse.index].width = fse.max_width;
+ dcmi->sd_framesizes[fse.index].height = fse.max_height;
+ dev_dbg(dcmi->dev, "%ux%u\n", fse.max_width, fse.max_height);
+ }
+
+ return 0;
+}
+
+static int dcmi_graph_notify_complete(struct v4l2_async_notifier *notifier)
+{
+ struct stm32_dcmi *dcmi = notifier_to_dcmi(notifier);
+ int ret;
+
+ /*
+ * Now that the graph is complete, search for the source
+ * subdevice in order to expose it through the V4L2 interface
+ */
+ dcmi->source = media_entity_to_v4l2_subdev(dcmi_find_source(dcmi));
+ if (!dcmi->source) {
+ dev_err(dcmi->dev, "Source subdevice not found\n");
+ return -ENODEV;
+ }
+
+ dcmi->vdev->ctrl_handler = dcmi->source->ctrl_handler;
+
+ ret = dcmi_formats_init(dcmi);
+ if (ret) {
+ dev_err(dcmi->dev, "No supported mediabus format found\n");
+ return ret;
+ }
+
+ ret = dcmi_framesizes_init(dcmi);
+ if (ret) {
+ dev_err(dcmi->dev, "Could not initialize framesizes\n");
+ return ret;
+ }
+
+ ret = dcmi_get_sensor_bounds(dcmi, &dcmi->sd_bounds);
+ if (ret) {
+ dev_err(dcmi->dev, "Could not get sensor bounds\n");
+ return ret;
+ }
+
+ ret = dcmi_set_default_fmt(dcmi);
+ if (ret) {
+ dev_err(dcmi->dev, "Could not set default format\n");
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(dcmi->dev, dcmi->irq, dcmi_irq_callback,
+ dcmi_irq_thread, IRQF_ONESHOT,
+ dev_name(dcmi->dev), dcmi);
+ if (ret) {
+ dev_err(dcmi->dev, "Unable to request irq %d\n", dcmi->irq);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void dcmi_graph_notify_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
+{
+ struct stm32_dcmi *dcmi = notifier_to_dcmi(notifier);
+
+ dev_dbg(dcmi->dev, "Removing %s\n", video_device_node_name(dcmi->vdev));
+
+ /* Checks internally whether vdev has been initialized */
+ video_unregister_device(dcmi->vdev);
+}
+
+static int dcmi_graph_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct stm32_dcmi *dcmi = notifier_to_dcmi(notifier);
+ int ret;
+ int src_pad;
+
+ dev_dbg(dcmi->dev, "Subdev \"%s\" bound\n", subdev->name);
+
+ /*
+ * Link this sub-device to the DCMI; it could be
+ * a parallel camera sensor or a bridge
+ */
+ src_pad = media_entity_get_fwnode_pad(&subdev->entity,
+ subdev->fwnode,
+ MEDIA_PAD_FL_SOURCE);
+
+ ret = media_create_pad_link(&subdev->entity, src_pad,
+ &dcmi->vdev->entity, 0,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ dev_err(dcmi->dev, "Failed to create media pad link with subdev \"%s\"\n",
+ subdev->name);
+ else
+ dev_dbg(dcmi->dev, "DCMI is now linked to \"%s\"\n",
+ subdev->name);
+
+ return ret;
+}
+
+static const struct v4l2_async_notifier_operations dcmi_graph_notify_ops = {
+ .bound = dcmi_graph_notify_bound,
+ .unbind = dcmi_graph_notify_unbind,
+ .complete = dcmi_graph_notify_complete,
+};
+
+static int dcmi_graph_init(struct stm32_dcmi *dcmi)
+{
+ struct v4l2_async_subdev *asd;
+ struct device_node *ep;
+ int ret;
+
+ ep = of_graph_get_next_endpoint(dcmi->dev->of_node, NULL);
+ if (!ep) {
+ dev_err(dcmi->dev, "Failed to get next endpoint\n");
+ return -EINVAL;
+ }
+
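+ /* Register an async notifier to wait for the remote subdev connected to this endpoint */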
+ v4l2_async_nf_init(&dcmi->notifier);
+
+ asd = v4l2_async_nf_add_fwnode_remote(&dcmi->notifier,
+ of_fwnode_handle(ep),
+ struct v4l2_async_subdev);
+
+ of_node_put(ep);
+
+ if (IS_ERR(asd)) {
+ dev_err(dcmi->dev, "Failed to add subdev notifier\n");
+ return PTR_ERR(asd);
+ }
+
+ dcmi->notifier.ops = &dcmi_graph_notify_ops;
+
+ ret = v4l2_async_nf_register(&dcmi->v4l2_dev, &dcmi->notifier);
+ if (ret < 0) {
+ dev_err(dcmi->dev, "Failed to register notifier\n");
+ v4l2_async_nf_cleanup(&dcmi->notifier);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dcmi_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *match = NULL;
+ struct v4l2_fwnode_endpoint ep = { .bus_type = 0 };
+ struct stm32_dcmi *dcmi;
+ struct vb2_queue *q;
+ struct dma_chan *chan;
+ struct dma_slave_caps caps;
+ struct clk *mclk;
+ int irq;
+ int ret = 0;
+
+ match = of_match_device(of_match_ptr(stm32_dcmi_of_match), &pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "Could not find a match in devicetree\n");
+ return -ENODEV;
+ }
+
+ dcmi = devm_kzalloc(&pdev->dev, sizeof(struct stm32_dcmi), GFP_KERNEL);
+ if (!dcmi)
+ return -ENOMEM;
+
+ dcmi->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(dcmi->rstc)) {
+ if (PTR_ERR(dcmi->rstc) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Could not get reset control\n");
+
+ return PTR_ERR(dcmi->rstc);
+ }
+
+ /* Get bus characteristics from devicetree */
+ np = of_graph_get_next_endpoint(np, NULL);
+ if (!np) {
+ dev_err(&pdev->dev, "Could not find the endpoint\n");
+ return -ENODEV;
+ }
+
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(np), &ep);
+ of_node_put(np);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not parse the endpoint\n");
+ return ret;
+ }
+
+ if (ep.bus_type == V4L2_MBUS_CSI2_DPHY) {
+ dev_err(&pdev->dev, "CSI bus not supported\n");
+ return -ENODEV;
+ }
+
+ if (ep.bus_type == V4L2_MBUS_BT656 &&
+ ep.bus.parallel.bus_width != 8) {
+ dev_err(&pdev->dev, "BT656 bus conflicts with %u bits bus width (8 bits required)\n",
+ ep.bus.parallel.bus_width);
+ return -ENODEV;
+ }
+
+ dcmi->bus.flags = ep.bus.parallel.flags;
+ dcmi->bus.bus_width = ep.bus.parallel.bus_width;
+ dcmi->bus.data_shift = ep.bus.parallel.data_shift;
+ dcmi->bus_type = ep.bus_type;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return irq ? irq : -ENXIO;
+
+ dcmi->irq = irq;
+
+ dcmi->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!dcmi->res) {
+ dev_err(&pdev->dev, "Could not get resource\n");
+ return -ENODEV;
+ }
+
+ dcmi->regs = devm_ioremap_resource(&pdev->dev, dcmi->res);
+ if (IS_ERR(dcmi->regs)) {
+ dev_err(&pdev->dev, "Could not map registers\n");
+ return PTR_ERR(dcmi->regs);
+ }
+
+ mclk = devm_clk_get(&pdev->dev, "mclk");
+ if (IS_ERR(mclk)) {
+ if (PTR_ERR(mclk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Unable to get mclk\n");
+ return PTR_ERR(mclk);
+ }
+
+ chan = dma_request_chan(&pdev->dev, "tx");
+ if (IS_ERR(chan)) {
+ ret = PTR_ERR(chan);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Failed to request DMA channel: %d\n", ret);
+ return ret;
+ }
+
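+ /*
+ * Compute the maximum DMA burst size in bytes from the channel
+ * capabilities; default to "no limit" if max_sg_burst is not reported.
+ */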
+ dcmi->dma_max_burst = UINT_MAX;
+ ret = dma_get_slave_caps(chan, &caps);
+ if (!ret && caps.max_sg_burst)
+ dcmi->dma_max_burst = caps.max_sg_burst * DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+ spin_lock_init(&dcmi->irqlock);
+ mutex_init(&dcmi->lock);
+ mutex_init(&dcmi->dma_lock);
+ init_completion(&dcmi->complete);
+ INIT_LIST_HEAD(&dcmi->buffers);
+
+ dcmi->dev = &pdev->dev;
+ dcmi->mclk = mclk;
+ dcmi->state = STOPPED;
+ dcmi->dma_chan = chan;
+
+ q = &dcmi->queue;
+
+ dcmi->v4l2_dev.mdev = &dcmi->mdev;
+
+ /* Initialize media device */
+ strscpy(dcmi->mdev.model, DRV_NAME, sizeof(dcmi->mdev.model));
+ dcmi->mdev.dev = &pdev->dev;
+ media_device_init(&dcmi->mdev);
+
+ /* Initialize the top-level structure */
+ ret = v4l2_device_register(&pdev->dev, &dcmi->v4l2_dev);
+ if (ret)
+ goto err_media_device_cleanup;
+
+ dcmi->vdev = video_device_alloc();
+ if (!dcmi->vdev) {
+ ret = -ENOMEM;
+ goto err_device_unregister;
+ }
+
+ /* Video node */
+ dcmi->vdev->fops = &dcmi_fops;
+ dcmi->vdev->v4l2_dev = &dcmi->v4l2_dev;
+ dcmi->vdev->queue = &dcmi->queue;
+ strscpy(dcmi->vdev->name, KBUILD_MODNAME, sizeof(dcmi->vdev->name));
+ dcmi->vdev->release = video_device_release;
+ dcmi->vdev->ioctl_ops = &dcmi_ioctl_ops;
+ dcmi->vdev->lock = &dcmi->lock;
+ dcmi->vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE;
+ video_set_drvdata(dcmi->vdev, dcmi);
+
+ /* Media entity pads */
+ dcmi->vid_cap_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&dcmi->vdev->entity,
+ 1, &dcmi->vid_cap_pad);
+ if (ret) {
+ dev_err(dcmi->dev, "Failed to init media entity pad\n");
+ goto err_device_release;
+ }
+ dcmi->vdev->entity.flags |= MEDIA_ENT_FL_DEFAULT;
+
+ ret = video_register_device(dcmi->vdev, VFL_TYPE_VIDEO, -1);
+ if (ret) {
+ dev_err(dcmi->dev, "Failed to register video device\n");
+ goto err_media_entity_cleanup;
+ }
+
+ dev_dbg(dcmi->dev, "Device registered as %s\n",
+ video_device_node_name(dcmi->vdev));
+
+ /* Buffer queue */
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_READ | VB2_DMABUF;
+ q->lock = &dcmi->lock;
+ q->drv_priv = dcmi;
+ q->buf_struct_size = sizeof(struct dcmi_buf);
+ q->ops = &dcmi_video_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 2;
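+ /* vb2 will not call start_streaming() until at least two buffers have been queued */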
+ q->dev = &pdev->dev;
+
+ ret = vb2_queue_init(q);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to initialize vb2 queue\n");
+ goto err_media_entity_cleanup;
+ }
+
+ ret = dcmi_graph_init(dcmi);
+ if (ret < 0)
+ goto err_media_entity_cleanup;
+
+ /* Reset device */
+ ret = reset_control_assert(dcmi->rstc);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to assert the reset line\n");
+ goto err_cleanup;
+ }
+
+ usleep_range(3000, 5000);
+
+ ret = reset_control_deassert(dcmi->rstc);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to deassert the reset line\n");
+ goto err_cleanup;
+ }
+
+ dev_info(&pdev->dev, "Probe done\n");
+
+ platform_set_drvdata(pdev, dcmi);
+
+ pm_runtime_enable(&pdev->dev);
+
+ return 0;
+
+err_cleanup:
+ v4l2_async_nf_cleanup(&dcmi->notifier);
+err_media_entity_cleanup:
+ media_entity_cleanup(&dcmi->vdev->entity);
+err_device_release:
+ video_device_release(dcmi->vdev);
+err_device_unregister:
+ v4l2_device_unregister(&dcmi->v4l2_dev);
+err_media_device_cleanup:
+ media_device_cleanup(&dcmi->mdev);
+ dma_release_channel(dcmi->dma_chan);
+
+ return ret;
+}
+
+static int dcmi_remove(struct platform_device *pdev)
+{
+ struct stm32_dcmi *dcmi = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+
+ v4l2_async_nf_unregister(&dcmi->notifier);
+ v4l2_async_nf_cleanup(&dcmi->notifier);
+ media_entity_cleanup(&dcmi->vdev->entity);
+ v4l2_device_unregister(&dcmi->v4l2_dev);
+ media_device_cleanup(&dcmi->mdev);
+
+ dma_release_channel(dcmi->dma_chan);
+
+ return 0;
+}
+
+static __maybe_unused int dcmi_runtime_suspend(struct device *dev)
+{
+ struct stm32_dcmi *dcmi = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(dcmi->mclk);
+
+ return 0;
+}
+
+static __maybe_unused int dcmi_runtime_resume(struct device *dev)
+{
+ struct stm32_dcmi *dcmi = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(dcmi->mclk);
+ if (ret)
+ dev_err(dev, "%s: Failed to prepare_enable clock\n", __func__);
+
+ return ret;
+}
+
+static __maybe_unused int dcmi_suspend(struct device *dev)
+{
+ /* disable clock */
+ pm_runtime_force_suspend(dev);
+
+ /* change pinctrl state */
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static __maybe_unused int dcmi_resume(struct device *dev)
+{
+ /* restore pinctrl default state */
+ pinctrl_pm_select_default_state(dev);
+
+ /* enable clock */
+ pm_runtime_force_resume(dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops dcmi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dcmi_suspend, dcmi_resume)
+ SET_RUNTIME_PM_OPS(dcmi_runtime_suspend,
+ dcmi_runtime_resume, NULL)
+};
+
+static struct platform_driver stm32_dcmi_driver = {
+ .probe = dcmi_probe,
+ .remove = dcmi_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(stm32_dcmi_of_match),
+ .pm = &dcmi_pm_ops,
+ },
+};
+
+module_platform_driver(stm32_dcmi_driver);
+
+MODULE_AUTHOR("Yannick Fertre <yannick.fertre@st.com>");
+MODULE_AUTHOR("Hugues Fruchet <hugues.fruchet@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 Digital Camera Memory Interface driver");
+MODULE_LICENSE("GPL");