Diffstat (limited to 'drivers/media/v4l2-core')
-rw-r--r-- drivers/media/v4l2-core/Kconfig | 100
-rw-r--r-- drivers/media/v4l2-core/Makefile | 42
-rw-r--r-- drivers/media/v4l2-core/tuner-core.c | 1424
-rw-r--r-- drivers/media/v4l2-core/v4l2-async.c | 971
-rw-r--r-- drivers/media/v4l2-core/v4l2-cci.c | 194
-rw-r--r-- drivers/media/v4l2-core/v4l2-common.c | 584
-rw-r--r-- drivers/media/v4l2-core/v4l2-compat-ioctl32.c | 1202
-rw-r--r-- drivers/media/v4l2-core/v4l2-ctrls-api.c | 1305
-rw-r--r-- drivers/media/v4l2-core/v4l2-ctrls-core.c | 2591
-rw-r--r-- drivers/media/v4l2-core/v4l2-ctrls-defs.c | 1680
-rw-r--r-- drivers/media/v4l2-core/v4l2-ctrls-priv.h | 95
-rw-r--r-- drivers/media/v4l2-core/v4l2-ctrls-request.c | 501
-rw-r--r-- drivers/media/v4l2-core/v4l2-dev.c | 1211
-rw-r--r-- drivers/media/v4l2-core/v4l2-device.c | 293
-rw-r--r-- drivers/media/v4l2-core/v4l2-dv-timings.c | 1156
-rw-r--r-- drivers/media/v4l2-core/v4l2-event.c | 373
-rw-r--r-- drivers/media/v4l2-core/v4l2-fh.c | 117
-rw-r--r-- drivers/media/v4l2-core/v4l2-flash-led-class.c | 746
-rw-r--r-- drivers/media/v4l2-core/v4l2-fwnode.c | 1255
-rw-r--r-- drivers/media/v4l2-core/v4l2-h264.c | 453
-rw-r--r-- drivers/media/v4l2-core/v4l2-i2c.c | 184
-rw-r--r-- drivers/media/v4l2-core/v4l2-ioctl.c | 3445
-rw-r--r-- drivers/media/v4l2-core/v4l2-jpeg.c | 677
-rw-r--r-- drivers/media/v4l2-core/v4l2-mc.c | 599
-rw-r--r-- drivers/media/v4l2-core/v4l2-mem2mem.c | 1619
-rw-r--r-- drivers/media/v4l2-core/v4l2-spi.c | 78
-rw-r--r-- drivers/media/v4l2-core/v4l2-subdev-priv.h | 14
-rw-r--r-- drivers/media/v4l2-core/v4l2-subdev.c | 2247
-rw-r--r-- drivers/media/v4l2-core/v4l2-trace.c | 12
-rw-r--r-- drivers/media/v4l2-core/v4l2-vp9.c | 1850
-rw-r--r-- drivers/media/v4l2-core/videobuf-core.c | 1198
-rw-r--r-- drivers/media/v4l2-core/videobuf-dma-contig.c | 402
-rw-r--r-- drivers/media/v4l2-core/videobuf-dma-sg.c | 681
-rw-r--r-- drivers/media/v4l2-core/videobuf-vmalloc.c | 326
34 files changed, 29625 insertions(+), 0 deletions(-)
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
new file mode 100644
index 0000000000..f77ebd688c
--- /dev/null
+++ b/drivers/media/v4l2-core/Kconfig
@@ -0,0 +1,100 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Generic video config states
+#
+
+config VIDEO_V4L2_I2C
+ bool
+ depends on I2C && VIDEO_DEV
+ default y
+
+config VIDEO_V4L2_SUBDEV_API
+ bool
+ depends on VIDEO_DEV && MEDIA_CONTROLLER
+ help
+ Enables the V4L2 sub-device pad-level userspace API used to configure
+ video format, size and frame rate between hardware blocks.
+
+ This API is mostly used by camera interfaces in embedded platforms.
+
+config VIDEO_ADV_DEBUG
+ bool "Enable advanced debug functionality on V4L2 drivers"
+ help
+ Say Y here to enable advanced debugging functionality on some
+ V4L devices.
+ When in doubt, say N.
+
+config VIDEO_FIXED_MINOR_RANGES
+ bool "Enable old-style fixed minor ranges on drivers/video devices"
+ help
+ Say Y here to enable the old-style fixed-range minor assignments.
+ Only useful if you rely on the old behavior and use mknod instead of udev.
+
+ When in doubt, say N.
+
+# Used by drivers that need tuner.ko
+config VIDEO_TUNER
+ tristate
+
+# Used by drivers that need v4l2-jpeg.ko
+config V4L2_JPEG_HELPER
+ tristate
+
+# Used by drivers that need v4l2-h264.ko
+config V4L2_H264
+ tristate
+
+# Used by drivers that need v4l2-vp9.ko
+config V4L2_VP9
+ tristate
+
+# Used by drivers that need v4l2-mem2mem.ko
+config V4L2_MEM2MEM_DEV
+ tristate
+ depends on VIDEOBUF2_CORE
+
+# Used by LED subsystem flash drivers
+config V4L2_FLASH_LED_CLASS
+ tristate "V4L2 flash API for LED flash class devices"
+ depends on VIDEO_DEV
+ depends on LEDS_CLASS_FLASH
+ select MEDIA_CONTROLLER
+ select V4L2_ASYNC
+ select VIDEO_V4L2_SUBDEV_API
+ help
+ Say Y here to enable V4L2 flash API support for LED flash
+ class drivers.
+
+ When in doubt, say N.
+
+config V4L2_FWNODE
+ tristate
+ select V4L2_ASYNC
+
+config V4L2_ASYNC
+ tristate
+
+config V4L2_CCI
+ tristate
+
+config V4L2_CCI_I2C
+ tristate
+ depends on I2C
+ select REGMAP_I2C
+ select V4L2_CCI
+
+# Used by drivers that need Videobuf modules
+config VIDEOBUF_GEN
+ tristate
+
+config VIDEOBUF_DMA_SG
+ tristate
+ select VIDEOBUF_GEN
+
+config VIDEOBUF_VMALLOC
+ tristate
+ select VIDEOBUF_GEN
+
+config VIDEOBUF_DMA_CONTIG
+ tristate
+ select VIDEOBUF_GEN
diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile
new file mode 100644
index 0000000000..be25517057
--- /dev/null
+++ b/drivers/media/v4l2-core/Makefile
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the V4L2 core
+#
+
+ccflags-y += -I$(srctree)/drivers/media/dvb-frontends
+ccflags-y += -I$(srctree)/drivers/media/tuners
+
+tuner-objs := tuner-core.o
+
+videodev-objs := v4l2-dev.o v4l2-ioctl.o v4l2-device.o v4l2-fh.o \
+ v4l2-event.o v4l2-subdev.o v4l2-common.o \
+ v4l2-ctrls-core.o v4l2-ctrls-api.o \
+ v4l2-ctrls-request.o v4l2-ctrls-defs.o
+
+# Please keep it alphabetically sorted by Kconfig name
+# (e. g. LC_ALL=C sort Makefile)
+videodev-$(CONFIG_COMPAT) += v4l2-compat-ioctl32.o
+videodev-$(CONFIG_MEDIA_CONTROLLER) += v4l2-mc.o
+videodev-$(CONFIG_SPI) += v4l2-spi.o
+videodev-$(CONFIG_TRACEPOINTS) += v4l2-trace.o
+videodev-$(CONFIG_VIDEO_V4L2_I2C) += v4l2-i2c.o
+
+# Please keep it alphabetically sorted by Kconfig name
+# (e. g. LC_ALL=C sort Makefile)
+
+obj-$(CONFIG_V4L2_ASYNC) += v4l2-async.o
+obj-$(CONFIG_V4L2_CCI) += v4l2-cci.o
+obj-$(CONFIG_V4L2_FLASH_LED_CLASS) += v4l2-flash-led-class.o
+obj-$(CONFIG_V4L2_FWNODE) += v4l2-fwnode.o
+obj-$(CONFIG_V4L2_H264) += v4l2-h264.o
+obj-$(CONFIG_V4L2_JPEG_HELPER) += v4l2-jpeg.o
+obj-$(CONFIG_V4L2_MEM2MEM_DEV) += v4l2-mem2mem.o
+obj-$(CONFIG_V4L2_VP9) += v4l2-vp9.o
+
+obj-$(CONFIG_VIDEOBUF_DMA_CONTIG) += videobuf-dma-contig.o
+obj-$(CONFIG_VIDEOBUF_DMA_SG) += videobuf-dma-sg.o
+obj-$(CONFIG_VIDEOBUF_GEN) += videobuf-core.o
+obj-$(CONFIG_VIDEOBUF_VMALLOC) += videobuf-vmalloc.o
+
+obj-$(CONFIG_VIDEO_TUNER) += tuner.o
+obj-$(CONFIG_VIDEO_DEV) += v4l2-dv-timings.o videodev.o
diff --git a/drivers/media/v4l2-core/tuner-core.c b/drivers/media/v4l2-core/tuner-core.c
new file mode 100644
index 0000000000..5687089bea
--- /dev/null
+++ b/drivers/media/v4l2-core/tuner-core.c
@@ -0,0 +1,1424 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * i2c tv tuner chip device driver
+ * core code, i.e. kernel interfaces, registering and so on
+ *
+ * Copyright(c) by Ralph Metzler, Gerd Knorr, Gunther Mayer
+ *
+ * Copyright(c) 2005-2011 by Mauro Carvalho Chehab
+ * - Added support for a separate Radio tuner
+ * - Major rework and cleanups at the code
+ *
+ * This driver supports many devices and the idea is to let the driver
+ * detect which device is present. So rather than listing all supported
+ * devices here, we pretend to support a single, fake device type that will
+ * handle both radio and analog TV tuning.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/i2c.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/videodev2.h>
+#include <media/tuner.h>
+#include <media/tuner-types.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include "mt20xx.h"
+#include "tda8290.h"
+#include "tea5761.h"
+#include "tea5767.h"
+#include "xc2028.h"
+#include "tuner-simple.h"
+#include "tda9887.h"
+#include "xc5000.h"
+#include "tda18271.h"
+#include "xc4000.h"
+
+#define UNSET (-1U)
+
+/*
+ * Driver modprobe parameters
+ */
+
+/* insmod options used at init time => read/only */
+static unsigned int addr;
+static unsigned int no_autodetect;
+static unsigned int show_i2c;
+
+module_param(addr, int, 0444);
+module_param(no_autodetect, int, 0444);
+module_param(show_i2c, int, 0444);
+
+/* insmod options used at runtime => read/write */
+static int tuner_debug;
+static unsigned int tv_range[2] = { 44, 958 };
+static unsigned int radio_range[2] = { 65, 108 };
+static char pal[] = "--";
+static char secam[] = "--";
+static char ntsc[] = "-";
+
+module_param_named(debug, tuner_debug, int, 0644);
+module_param_array(tv_range, int, NULL, 0644);
+module_param_array(radio_range, int, NULL, 0644);
+module_param_string(pal, pal, sizeof(pal), 0644);
+module_param_string(secam, secam, sizeof(secam), 0644);
+module_param_string(ntsc, ntsc, sizeof(ntsc), 0644);
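+
+/*
+ * Illustrative usage (the module is built as tuner.ko, see the Makefile
+ * above): loading it with "modprobe tuner tv_range=48,862 pal=B" limits
+ * TV tuning to 48-862 MHz and forces PAL-B/G whenever a generic PAL
+ * standard is requested (see tuner_fixup_std() below).
+ */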
+
+/*
+ * Static vars
+ */
+
+static LIST_HEAD(tuner_list);
+static const struct v4l2_subdev_ops tuner_ops;
+
+/*
+ * Debug macros
+ */
+
+#undef pr_fmt
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": %d-%04x: " fmt, \
+ i2c_adapter_id(t->i2c->adapter), t->i2c->addr
+
+
+#define dprintk(fmt, arg...) do { \
+ if (tuner_debug) \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), __func__, ##arg); \
+} while (0)
+
+/*
+ * Internal enums/struct used inside the driver
+ */
+
+/**
+ * enum tuner_pad_index - tuner pad index for MEDIA_ENT_F_TUNER
+ *
+ * @TUNER_PAD_RF_INPUT:
+ * Radiofrequency (RF) sink pad, usually linked to a RF connector entity.
+ * @TUNER_PAD_OUTPUT:
+ * tuner video output source pad. Contains the video chrominance
+ * and luminance or the whole bandwidth of the signal converted to
+ * an Intermediate Frequency (IF) or to baseband (on zero-IF tuners).
+ * @TUNER_PAD_AUD_OUT:
+ * Tuner audio output source pad. Tuners used to decode analog TV
+ * signals have an extra pad for audio output. Old tuners use an
+ * analog stage with a saw filter for the audio IF frequency. The
+ * output of the pad is, in this case, the audio IF, which should be
+ * decoded either by the bridge chipset (as on cx2388x chipsets) or
+ * by an external IF sound processor, like the msp34xx. On modern
+ * silicon tuners, the audio IF decoder is usually incorporated into
+ * the tuner; in that case, the output of this pad is sampled audio data.
+ * @TUNER_NUM_PADS:
+ * Number of pads of the tuner.
+ */
+enum tuner_pad_index {
+ TUNER_PAD_RF_INPUT,
+ TUNER_PAD_OUTPUT,
+ TUNER_PAD_AUD_OUT,
+ TUNER_NUM_PADS
+};
+
+/**
+ * enum if_vid_dec_pad_index - video IF-PLL pad index
+ * for MEDIA_ENT_F_IF_VID_DECODER
+ *
+ * @IF_VID_DEC_PAD_IF_INPUT:
+ * video Intermediate Frequency (IF) sink pad
+ * @IF_VID_DEC_PAD_OUT:
+ * IF-PLL video output source pad. Contains the video chrominance
+ * and luminance IF signals.
+ * @IF_VID_DEC_PAD_NUM_PADS:
+ * Number of pads of the video IF-PLL.
+ */
+enum if_vid_dec_pad_index {
+ IF_VID_DEC_PAD_IF_INPUT,
+ IF_VID_DEC_PAD_OUT,
+ IF_VID_DEC_PAD_NUM_PADS
+};
+
+struct tuner {
+ /* device */
+ struct dvb_frontend fe;
+ struct i2c_client *i2c;
+ struct v4l2_subdev sd;
+ struct list_head list;
+
+ /* keep track of the current settings */
+ v4l2_std_id std;
+ unsigned int tv_freq;
+ unsigned int radio_freq;
+ unsigned int audmode;
+
+ enum v4l2_tuner_type mode;
+ unsigned int mode_mask; /* Combination of allowable modes */
+
+ bool standby; /* Standby mode */
+
+ unsigned int type; /* chip type id */
+ void *config;
+ const char *name;
+
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ struct media_pad pad[TUNER_NUM_PADS];
+#endif
+};
+
+/*
+ * Function prototypes
+ */
+
+static void set_tv_freq(struct i2c_client *c, unsigned int freq);
+static void set_radio_freq(struct i2c_client *c, unsigned int freq);
+
+/*
+ * tuner attach/detach logic
+ */
+
+/* This macro allows us to probe dynamically, avoiding static links */
+#ifdef CONFIG_MEDIA_ATTACH
+#define tuner_symbol_probe(FUNCTION, ARGS...) ({ \
+ int __r = -EINVAL; \
+ typeof(&FUNCTION) __a = symbol_request(FUNCTION); \
+ if (__a) { \
+ __r = (int) __a(ARGS); \
+ symbol_put(FUNCTION); \
+ } else { \
+ printk(KERN_ERR "TUNER: Unable to find " \
+ "symbol "#FUNCTION"()\n"); \
+ } \
+ __r; \
+})
+
+static void tuner_detach(struct dvb_frontend *fe)
+{
+ if (fe->ops.tuner_ops.release) {
+ fe->ops.tuner_ops.release(fe);
+ symbol_put_addr(fe->ops.tuner_ops.release);
+ }
+ if (fe->ops.analog_ops.release) {
+ fe->ops.analog_ops.release(fe);
+ symbol_put_addr(fe->ops.analog_ops.release);
+ }
+}
+#else
+#define tuner_symbol_probe(FUNCTION, ARGS...) ({ \
+ FUNCTION(ARGS); \
+})
+
+static void tuner_detach(struct dvb_frontend *fe)
+{
+ if (fe->ops.tuner_ops.release)
+ fe->ops.tuner_ops.release(fe);
+ if (fe->ops.analog_ops.release)
+ fe->ops.analog_ops.release(fe);
+}
+#endif
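+
+/*
+ * Illustrative expansion: tuner_symbol_probe(tea5767_autodetection, adap,
+ * addr) resolves tea5767_autodetection() at run time via symbol_request()
+ * when CONFIG_MEDIA_ATTACH is set, and compiles down to a direct call
+ * otherwise.
+ */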
+
+
+static inline struct tuner *to_tuner(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct tuner, sd);
+}
+
+/*
+ * struct analog_demod_ops callbacks
+ */
+
+static void fe_set_params(struct dvb_frontend *fe,
+ struct analog_parameters *params)
+{
+ struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops;
+ struct tuner *t = fe->analog_demod_priv;
+
+ if (NULL == fe_tuner_ops->set_analog_params) {
+ pr_warn("Tuner frontend module has no way to set freq\n");
+ return;
+ }
+ fe_tuner_ops->set_analog_params(fe, params);
+}
+
+static void fe_standby(struct dvb_frontend *fe)
+{
+ struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops;
+
+ if (fe_tuner_ops->sleep)
+ fe_tuner_ops->sleep(fe);
+}
+
+static int fe_set_config(struct dvb_frontend *fe, void *priv_cfg)
+{
+ struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops;
+ struct tuner *t = fe->analog_demod_priv;
+
+ if (fe_tuner_ops->set_config)
+ return fe_tuner_ops->set_config(fe, priv_cfg);
+
+ pr_warn("Tuner frontend module has no way to set config\n");
+
+ return 0;
+}
+
+static void tuner_status(struct dvb_frontend *fe);
+
+static const struct analog_demod_ops tuner_analog_ops = {
+ .set_params = fe_set_params,
+ .standby = fe_standby,
+ .set_config = fe_set_config,
+ .tuner_status = tuner_status
+};
+
+/*
+ * Functions to select between radio and TV and tuner probe/remove functions
+ */
+
+/**
+ * set_type - Sets the tuner type for a given device
+ *
+ * @c: i2c_client descriptor
+ * @type: type of the tuner (e. g. tuner number)
+ * @new_mode_mask: Indicates if tuner supports TV and/or Radio
+ * @new_config: an optional parameter used by a few tuners to adjust
+ * internal parameters, like LNA mode
+ * @tuner_callback: an optional function to be called when switching
+ * to analog mode
+ *
+ * This function applies the tuner config to the specified tuner. It also
+ * contains several per-tuner initialization "magic" sequences.
+ */
+static void set_type(struct i2c_client *c, unsigned int type,
+ unsigned int new_mode_mask, void *new_config,
+ int (*tuner_callback) (void *dev, int component, int cmd, int arg))
+{
+ struct tuner *t = to_tuner(i2c_get_clientdata(c));
+ struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops;
+ struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
+ unsigned char buffer[4];
+ int tune_now = 1;
+
+ if (type == UNSET || type == TUNER_ABSENT) {
+ dprintk("tuner 0x%02x: Tuner type absent\n", c->addr);
+ return;
+ }
+
+ t->type = type;
+ t->config = new_config;
+ if (tuner_callback != NULL) {
+ dprintk("defining GPIO callback\n");
+ t->fe.callback = tuner_callback;
+ }
+
+ /* discard private data, in case set_type() was previously called */
+ tuner_detach(&t->fe);
+ t->fe.analog_demod_priv = NULL;
+
+ switch (t->type) {
+ case TUNER_MT2032:
+ if (!dvb_attach(microtune_attach,
+ &t->fe, t->i2c->adapter, t->i2c->addr))
+ goto attach_failed;
+ break;
+ case TUNER_PHILIPS_TDA8290:
+ {
+ if (!dvb_attach(tda829x_attach, &t->fe, t->i2c->adapter,
+ t->i2c->addr, t->config))
+ goto attach_failed;
+ break;
+ }
+ case TUNER_TEA5767:
+ if (!dvb_attach(tea5767_attach, &t->fe,
+ t->i2c->adapter, t->i2c->addr))
+ goto attach_failed;
+ t->mode_mask = T_RADIO;
+ break;
+ case TUNER_TEA5761:
+ if (!dvb_attach(tea5761_attach, &t->fe,
+ t->i2c->adapter, t->i2c->addr))
+ goto attach_failed;
+ t->mode_mask = T_RADIO;
+ break;
+ case TUNER_PHILIPS_FMD1216ME_MK3:
+ case TUNER_PHILIPS_FMD1216MEX_MK3:
+ buffer[0] = 0x0b;
+ buffer[1] = 0xdc;
+ buffer[2] = 0x9c;
+ buffer[3] = 0x60;
+ i2c_master_send(c, buffer, 4);
+ mdelay(1);
+ buffer[2] = 0x86;
+ buffer[3] = 0x54;
+ i2c_master_send(c, buffer, 4);
+ if (!dvb_attach(simple_tuner_attach, &t->fe,
+ t->i2c->adapter, t->i2c->addr, t->type))
+ goto attach_failed;
+ break;
+ case TUNER_PHILIPS_TD1316:
+ buffer[0] = 0x0b;
+ buffer[1] = 0xdc;
+ buffer[2] = 0x86;
+ buffer[3] = 0xa4;
+ i2c_master_send(c, buffer, 4);
+ if (!dvb_attach(simple_tuner_attach, &t->fe,
+ t->i2c->adapter, t->i2c->addr, t->type))
+ goto attach_failed;
+ break;
+ case TUNER_XC2028:
+ {
+ struct xc2028_config cfg = {
+ .i2c_adap = t->i2c->adapter,
+ .i2c_addr = t->i2c->addr,
+ };
+ if (!dvb_attach(xc2028_attach, &t->fe, &cfg))
+ goto attach_failed;
+ tune_now = 0;
+ break;
+ }
+ case TUNER_TDA9887:
+ if (!dvb_attach(tda9887_attach,
+ &t->fe, t->i2c->adapter, t->i2c->addr))
+ goto attach_failed;
+ break;
+ case TUNER_XC5000:
+ {
+ struct xc5000_config xc5000_cfg = {
+ .i2c_address = t->i2c->addr,
+ /* if_khz will be set at dvb_attach() */
+ .if_khz = 0,
+ };
+
+ if (!dvb_attach(xc5000_attach,
+ &t->fe, t->i2c->adapter, &xc5000_cfg))
+ goto attach_failed;
+ tune_now = 0;
+ break;
+ }
+ case TUNER_XC5000C:
+ {
+ struct xc5000_config xc5000c_cfg = {
+ .i2c_address = t->i2c->addr,
+ /* if_khz will be set at dvb_attach() */
+ .if_khz = 0,
+ .chip_id = XC5000C,
+ };
+
+ if (!dvb_attach(xc5000_attach,
+ &t->fe, t->i2c->adapter, &xc5000c_cfg))
+ goto attach_failed;
+ tune_now = 0;
+ break;
+ }
+ case TUNER_NXP_TDA18271:
+ {
+ struct tda18271_config cfg = {
+ .small_i2c = TDA18271_03_BYTE_CHUNK_INIT,
+ };
+
+ if (!dvb_attach(tda18271_attach, &t->fe, t->i2c->addr,
+ t->i2c->adapter, &cfg))
+ goto attach_failed;
+ tune_now = 0;
+ break;
+ }
+ case TUNER_XC4000:
+ {
+ struct xc4000_config xc4000_cfg = {
+ .i2c_address = t->i2c->addr,
+ /* FIXME: the correct parameters will be set */
+ /* only when the digital dvb_attach() occurs */
+ .default_pm = 0,
+ .dvb_amplitude = 0,
+ .set_smoothedcvbs = 0,
+ .if_khz = 0
+ };
+ if (!dvb_attach(xc4000_attach,
+ &t->fe, t->i2c->adapter, &xc4000_cfg))
+ goto attach_failed;
+ tune_now = 0;
+ break;
+ }
+ default:
+ if (!dvb_attach(simple_tuner_attach, &t->fe,
+ t->i2c->adapter, t->i2c->addr, t->type))
+ goto attach_failed;
+
+ break;
+ }
+
+ if ((NULL == analog_ops->set_params) &&
+ (fe_tuner_ops->set_analog_params)) {
+
+ t->name = fe_tuner_ops->info.name;
+
+ t->fe.analog_demod_priv = t;
+ memcpy(analog_ops, &tuner_analog_ops,
+ sizeof(struct analog_demod_ops));
+
+ if (fe_tuner_ops->get_rf_strength)
+ analog_ops->has_signal = fe_tuner_ops->get_rf_strength;
+ if (fe_tuner_ops->get_afc)
+ analog_ops->get_afc = fe_tuner_ops->get_afc;
+
+ } else {
+ t->name = analog_ops->info.name;
+ }
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+ t->sd.entity.name = t->name;
+#endif
+
+ dprintk("type set to %s\n", t->name);
+
+ t->mode_mask = new_mode_mask;
+
+ /* Some tuners require more initialization setup before use,
+ such as firmware download or device calibration.
+ Trying to set a frequency here would just fail.
+ FIXME: better to move set_freq to the tuner code. This is needed
+ on analog tuners for the PLL to work properly.
+ */
+ if (tune_now) {
+ if (V4L2_TUNER_RADIO == t->mode)
+ set_radio_freq(c, t->radio_freq);
+ else
+ set_tv_freq(c, t->tv_freq);
+ }
+
+ dprintk("%s %s I2C addr 0x%02x with type %d used for 0x%02x\n",
+ c->adapter->name, c->dev.driver->name, c->addr << 1, type,
+ t->mode_mask);
+ return;
+
+attach_failed:
+ dprintk("Tuner attach for type = %d failed.\n", t->type);
+ t->type = TUNER_ABSENT;
+
+ return;
+}
+
+/**
+ * tuner_s_type_addr - Sets the tuner type for a device
+ *
+ * @sd: subdev descriptor
+ * @tun_setup: type to be associated to a given tuner i2c address
+ *
+ * This function applies the tuner config to tuner specified
+ * by tun_setup structure.
+ * If the tuner I2C address is ADDR_UNSET, the type is only applied if
+ * the tuner supports the mode specified in the call.
+ * If an address is specified, the change is applied only if the
+ * tuner I2C address matches.
+ * The call can change the tuner number and the tuner mode.
+ */
+static int tuner_s_type_addr(struct v4l2_subdev *sd,
+ struct tuner_setup *tun_setup)
+{
+ struct tuner *t = to_tuner(sd);
+ struct i2c_client *c = v4l2_get_subdevdata(sd);
+
+ dprintk("Calling set_type_addr for type=%d, addr=0x%02x, mode=0x%02x, config=%p\n",
+ tun_setup->type,
+ tun_setup->addr,
+ tun_setup->mode_mask,
+ tun_setup->config);
+
+ if ((t->type == UNSET && ((tun_setup->addr == ADDR_UNSET) &&
+ (t->mode_mask & tun_setup->mode_mask))) ||
+ (tun_setup->addr == c->addr)) {
+ set_type(c, tun_setup->type, tun_setup->mode_mask,
+ tun_setup->config, tun_setup->tuner_callback);
+ } else
+ dprintk("set addr discarded for type %i, mask %x. Asked to change tuner at addr 0x%02x, with mask %x\n",
+ t->type, t->mode_mask,
+ tun_setup->addr, tun_setup->mode_mask);
+
+ return 0;
+}
+
+/**
+ * tuner_s_config - Sets tuner configuration
+ *
+ * @sd: subdev descriptor
+ * @cfg: tuner configuration
+ *
+ * Calls tuner set_config() private function to set some tuner-internal
+ * parameters
+ */
+static int tuner_s_config(struct v4l2_subdev *sd,
+ const struct v4l2_priv_tun_config *cfg)
+{
+ struct tuner *t = to_tuner(sd);
+ struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
+
+ if (t->type != cfg->tuner)
+ return 0;
+
+ if (analog_ops->set_config) {
+ analog_ops->set_config(&t->fe, cfg->priv);
+ return 0;
+ }
+
+ dprintk("Tuner frontend module has no way to set config\n");
+ return 0;
+}
+
+/**
+ * tuner_lookup - Search for tuner adapters
+ *
+ * @adap: i2c_adapter struct
+ * @radio: pointer to be filled if the adapter is radio
+ * @tv: pointer to be filled if the adapter is TV
+ *
+ * Search for existing radio and/or TV tuners on the given I2C adapter,
+ * discarding demod-only adapters (tda9887).
+ *
+ * Note that when this function is called from tuner_probe you can be
+ * certain no other devices will be added/deleted at the same time; the
+ * I2C core protects against that.
+ */
+static void tuner_lookup(struct i2c_adapter *adap,
+ struct tuner **radio, struct tuner **tv)
+{
+ struct tuner *pos;
+
+ *radio = NULL;
+ *tv = NULL;
+
+ list_for_each_entry(pos, &tuner_list, list) {
+ int mode_mask;
+
+ if (pos->i2c->adapter != adap ||
+ strcmp(pos->i2c->dev.driver->name, "tuner"))
+ continue;
+
+ mode_mask = pos->mode_mask;
+ if (*radio == NULL && mode_mask == T_RADIO)
+ *radio = pos;
+ /* Note: currently TDA9887 is the only demod-only
+ device. If other devices appear then we need to
+ make this test more general. */
+ else if (*tv == NULL && pos->type != TUNER_TDA9887 &&
+ (pos->mode_mask & T_ANALOG_TV))
+ *tv = pos;
+ }
+}
+
+/**
+ * tuner_probe - Probes the existing tuners on an I2C bus
+ *
+ * @client: i2c_client descriptor
+ *
+ * This routine probes for tuners at the expected I2C addresses. In most
+ * cases, if a device answers to a given I2C address, it is assumed to be
+ * a tuner. In a few cases, however, additional logic is needed to double
+ * check if the device is really a tuner, or to identify the tuner type,
+ * as on tea5767/5761 devices.
+ *
+ * During client attach, set_type is called by the adapter's attach_inform
+ * callback. set_type must then be completed by tuner_probe.
+ */
+static int tuner_probe(struct i2c_client *client)
+{
+ struct tuner *t;
+ struct tuner *radio;
+ struct tuner *tv;
+#ifdef CONFIG_MEDIA_CONTROLLER
+ int ret;
+#endif
+
+ t = kzalloc(sizeof(struct tuner), GFP_KERNEL);
+ if (NULL == t)
+ return -ENOMEM;
+ v4l2_i2c_subdev_init(&t->sd, client, &tuner_ops);
+ t->i2c = client;
+ t->name = "(tuner unset)";
+ t->type = UNSET;
+ t->audmode = V4L2_TUNER_MODE_STEREO;
+ t->standby = true;
+ t->radio_freq = 87.5 * 16000; /* Initial freq range */
+ t->tv_freq = 400 * 16; /* Sets freq to VHF High - needed for some PLLs to start properly */
+
+ if (show_i2c) {
+ unsigned char buffer[16];
+ int rc;
+
+ memset(buffer, 0, sizeof(buffer));
+ rc = i2c_master_recv(client, buffer, sizeof(buffer));
+ if (rc >= 0)
+ pr_info("I2C RECV = %*ph\n", rc, buffer);
+ }
+
+ /* autodetection code based on the i2c addr */
+ if (!no_autodetect) {
+ switch (client->addr) {
+ case 0x10:
+ if (tuner_symbol_probe(tea5761_autodetection,
+ t->i2c->adapter,
+ t->i2c->addr) >= 0) {
+ t->type = TUNER_TEA5761;
+ t->mode_mask = T_RADIO;
+ tuner_lookup(t->i2c->adapter, &radio, &tv);
+ if (tv)
+ tv->mode_mask &= ~T_RADIO;
+
+ goto register_client;
+ }
+ kfree(t);
+ return -ENODEV;
+ case 0x42:
+ case 0x43:
+ case 0x4a:
+ case 0x4b:
+ /* If the chip is not a tda8290, don't register it yet,
+ since it may be a tda9887 */
+ if (tuner_symbol_probe(tda829x_probe, t->i2c->adapter,
+ t->i2c->addr) >= 0) {
+ dprintk("tda829x detected\n");
+ } else {
+ /* Default to tda9887 */
+ t->type = TUNER_TDA9887;
+ t->mode_mask = T_RADIO | T_ANALOG_TV;
+ goto register_client;
+ }
+ break;
+ case 0x60:
+ if (tuner_symbol_probe(tea5767_autodetection,
+ t->i2c->adapter, t->i2c->addr)
+ >= 0) {
+ t->type = TUNER_TEA5767;
+ t->mode_mask = T_RADIO;
+ /* Sets freq to FM range */
+ tuner_lookup(t->i2c->adapter, &radio, &tv);
+ if (tv)
+ tv->mode_mask &= ~T_RADIO;
+
+ goto register_client;
+ }
+ break;
+ }
+ }
+
+ /* Initializes only the first TV tuner on this adapter. Why only the
+ first? Because there are some devices (notably the ones with TI
+ tuners) that have more than one i2c address for the *same* device.
+ Experience shows that, except for just one case, the first
+ address is the right one. The exception is a Russian tuner
+ (ACORP_Y878F). So, the desired behavior is just to enable the
+ first found TV tuner. */
+ tuner_lookup(t->i2c->adapter, &radio, &tv);
+ if (tv == NULL) {
+ t->mode_mask = T_ANALOG_TV;
+ if (radio == NULL)
+ t->mode_mask |= T_RADIO;
+ dprintk("Setting mode_mask to 0x%02x\n", t->mode_mask);
+ }
+
+ /* Should be just before return */
+register_client:
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ t->sd.entity.name = t->name;
+ /*
+ * Handle the special case where the tuner has actually
+ * two stages: the PLL to tune into a frequency and the
+ * IF-PLL demodulator (tda988x).
+ */
+ if (t->type == TUNER_TDA9887) {
+ t->pad[IF_VID_DEC_PAD_IF_INPUT].flags = MEDIA_PAD_FL_SINK;
+ t->pad[IF_VID_DEC_PAD_IF_INPUT].sig_type = PAD_SIGNAL_ANALOG;
+ t->pad[IF_VID_DEC_PAD_OUT].flags = MEDIA_PAD_FL_SOURCE;
+ t->pad[IF_VID_DEC_PAD_OUT].sig_type = PAD_SIGNAL_ANALOG;
+ ret = media_entity_pads_init(&t->sd.entity,
+ IF_VID_DEC_PAD_NUM_PADS,
+ &t->pad[0]);
+ t->sd.entity.function = MEDIA_ENT_F_IF_VID_DECODER;
+ } else {
+ t->pad[TUNER_PAD_RF_INPUT].flags = MEDIA_PAD_FL_SINK;
+ t->pad[TUNER_PAD_RF_INPUT].sig_type = PAD_SIGNAL_ANALOG;
+ t->pad[TUNER_PAD_OUTPUT].flags = MEDIA_PAD_FL_SOURCE;
+ t->pad[TUNER_PAD_OUTPUT].sig_type = PAD_SIGNAL_ANALOG;
+ t->pad[TUNER_PAD_AUD_OUT].flags = MEDIA_PAD_FL_SOURCE;
+ t->pad[TUNER_PAD_AUD_OUT].sig_type = PAD_SIGNAL_AUDIO;
+ ret = media_entity_pads_init(&t->sd.entity, TUNER_NUM_PADS,
+ &t->pad[0]);
+ t->sd.entity.function = MEDIA_ENT_F_TUNER;
+ }
+
+ if (ret < 0) {
+ pr_err("failed to initialize media entity!\n");
+ kfree(t);
+ return ret;
+ }
+#endif
+ /* Sets a default mode */
+ if (t->mode_mask & T_ANALOG_TV)
+ t->mode = V4L2_TUNER_ANALOG_TV;
+ else
+ t->mode = V4L2_TUNER_RADIO;
+ set_type(client, t->type, t->mode_mask, t->config, t->fe.callback);
+ list_add_tail(&t->list, &tuner_list);
+
+ pr_info("Tuner %d found with type(s)%s%s.\n",
+ t->type,
+ t->mode_mask & T_RADIO ? " Radio" : "",
+ t->mode_mask & T_ANALOG_TV ? " TV" : "");
+ return 0;
+}
+
+/**
+ * tuner_remove - detaches a tuner
+ *
+ * @client: i2c_client descriptor
+ */
+
+static void tuner_remove(struct i2c_client *client)
+{
+ struct tuner *t = to_tuner(i2c_get_clientdata(client));
+
+ v4l2_device_unregister_subdev(&t->sd);
+ tuner_detach(&t->fe);
+ t->fe.analog_demod_priv = NULL;
+
+ list_del(&t->list);
+ kfree(t);
+}
+
+/*
+ * Functions to switch between Radio and TV
+ *
+ * A few cards have a separate I2C tuner for radio. Those routines
+ * take care of switching between TV/Radio mode, filtering only the
+ * commands that apply to the Radio or TV tuner.
+ */
+
+/**
+ * check_mode - Verify if tuner supports the requested mode
+ * @t: a pointer to the module's internal struct tuner
+ * @mode: mode of the tuner, as defined by &enum v4l2_tuner_type.
+ *
+ * This function checks if the tuner is capable of tuning analog TV,
+ * digital TV or radio, depending on what the caller wants. If the
+ * tuner can't support that mode, it returns -EINVAL. Otherwise, it
+ * returns 0.
+ * This function is needed for boards that have a separate tuner for
+ * radio (like devices with the tea5767).
+ *
+ * NOTE: mt20xx uses V4L2_TUNER_DIGITAL_TV and calls set_tv_freq to
+ * select a TV frequency. So t_mode == T_ANALOG_TV may actually
+ * represent digital TV as well.
+ */
+static inline int check_mode(struct tuner *t, enum v4l2_tuner_type mode)
+{
+ int t_mode;
+ if (mode == V4L2_TUNER_RADIO)
+ t_mode = T_RADIO;
+ else
+ t_mode = T_ANALOG_TV;
+
+ if ((t_mode & t->mode_mask) == 0)
+ return -EINVAL;
+
+ return 0;
+}
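+
+/*
+ * Example: a TEA5767 detected in tuner_probe() gets mode_mask = T_RADIO,
+ * so check_mode(t, V4L2_TUNER_ANALOG_TV) returns -EINVAL for that tuner.
+ */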
+
+/**
+ * set_mode - Switch tuner to another mode.
+ * @t: a pointer to the module's internal struct tuner
+ * @mode: enum v4l2_tuner_type (radio or TV)
+ *
+ * If the tuner doesn't support the requested mode (radio or TV), puts
+ * the tuner into standby, prints a debug message and returns -EINVAL.
+ * Otherwise, changes the mode and returns 0.
+ */
+static int set_mode(struct tuner *t, enum v4l2_tuner_type mode)
+{
+ struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
+
+ if (mode != t->mode) {
+ if (check_mode(t, mode) == -EINVAL) {
+ dprintk("Tuner doesn't support mode %d. Putting tuner to sleep\n",
+ mode);
+ t->standby = true;
+ if (analog_ops->standby)
+ analog_ops->standby(&t->fe);
+ return -EINVAL;
+ }
+ t->mode = mode;
+ dprintk("Changing to mode %d\n", mode);
+ }
+ return 0;
+}
+
+/**
+ * set_freq - Set the tuner to the desired frequency.
+ * @t: a pointer to the module's internal struct tuner
+ * @freq: frequency to set (0 means to use the current frequency)
+ */
+static void set_freq(struct tuner *t, unsigned int freq)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&t->sd);
+
+ if (t->mode == V4L2_TUNER_RADIO) {
+ if (!freq)
+ freq = t->radio_freq;
+ set_radio_freq(client, freq);
+ } else {
+ if (!freq)
+ freq = t->tv_freq;
+ set_tv_freq(client, freq);
+ }
+}
+
+/*
+ * Functions that are specific for TV mode
+ */
+
+/**
+ * set_tv_freq - Set tuner frequency, freq in units of 62.5 kHz (1/16 MHz)
+ *
+ * @c: i2c_client descriptor
+ * @freq: frequency
+ */
+static void set_tv_freq(struct i2c_client *c, unsigned int freq)
+{
+ struct tuner *t = to_tuner(i2c_get_clientdata(c));
+ struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
+
+ struct analog_parameters params = {
+ .mode = t->mode,
+ .audmode = t->audmode,
+ .std = t->std
+ };
+
+ if (t->type == UNSET) {
+ pr_warn("tuner type not set\n");
+ return;
+ }
+ if (NULL == analog_ops->set_params) {
+ pr_warn("Tuner has no way to set tv freq\n");
+ return;
+ }
+ if (freq < tv_range[0] * 16 || freq > tv_range[1] * 16) {
+ dprintk("TV freq (%d.%02d) out of range (%d-%d)\n",
+ freq / 16, freq % 16 * 100 / 16, tv_range[0],
+ tv_range[1]);
+ /* V4L2 spec: if the freq is not possible then the closest
+ possible value should be selected */
+ if (freq < tv_range[0] * 16)
+ freq = tv_range[0] * 16;
+ else
+ freq = tv_range[1] * 16;
+ }
+ params.frequency = freq;
+ dprintk("tv freq set to %d.%02d\n",
+ freq / 16, freq % 16 * 100 / 16);
+ t->tv_freq = freq;
+ t->standby = false;
+
+ analog_ops->set_params(&t->fe, &params);
+}
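+
+/*
+ * Worked example: a PAL channel at 471.25 MHz is requested as
+ * 471.25 * 16 = 7540 (in units of 1/16 MHz); the default tv_range
+ * of 44-958 MHz corresponds to 704-15328 in the same units.
+ */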
+
+/**
+ * tuner_fixup_std - force a given video standard variant
+ *
+ * @t: tuner internal struct
+ * @std: TV standard
+ *
+ * A few devices or drivers have problems detecting some standard variations.
+ * On other operating systems, drivers generally have a per-country code and
+ * some logic to apply per-country hacks. The V4L2 API doesn't provide such
+ * hacks. Instead, it relies on a proper video standard selection by the
+ * userspace application. However, as some apps are buggy and don't allow
+ * distinguishing all video standard variations, a modprobe parameter can
+ * be used to force a video standard match (e.g. pal=B forces PAL-B/G).
+ */
+static v4l2_std_id tuner_fixup_std(struct tuner *t, v4l2_std_id std)
+{
+ if (pal[0] != '-' && (std & V4L2_STD_PAL) == V4L2_STD_PAL) {
+ switch (pal[0]) {
+ case '6':
+ return V4L2_STD_PAL_60;
+ case 'b':
+ case 'B':
+ case 'g':
+ case 'G':
+ return V4L2_STD_PAL_BG;
+ case 'i':
+ case 'I':
+ return V4L2_STD_PAL_I;
+ case 'd':
+ case 'D':
+ case 'k':
+ case 'K':
+ return V4L2_STD_PAL_DK;
+ case 'M':
+ case 'm':
+ return V4L2_STD_PAL_M;
+ case 'N':
+ case 'n':
+ if (pal[1] == 'c' || pal[1] == 'C')
+ return V4L2_STD_PAL_Nc;
+ return V4L2_STD_PAL_N;
+ default:
+ pr_warn("pal= argument not recognised\n");
+ break;
+ }
+ }
+ if (secam[0] != '-' && (std & V4L2_STD_SECAM) == V4L2_STD_SECAM) {
+ switch (secam[0]) {
+ case 'b':
+ case 'B':
+ case 'g':
+ case 'G':
+ case 'h':
+ case 'H':
+ return V4L2_STD_SECAM_B |
+ V4L2_STD_SECAM_G |
+ V4L2_STD_SECAM_H;
+ case 'd':
+ case 'D':
+ case 'k':
+ case 'K':
+ return V4L2_STD_SECAM_DK;
+ case 'l':
+ case 'L':
+ if ((secam[1] == 'C') || (secam[1] == 'c'))
+ return V4L2_STD_SECAM_LC;
+ return V4L2_STD_SECAM_L;
+ default:
+ pr_warn("secam= argument not recognised\n");
+ break;
+ }
+ }
+
+ if (ntsc[0] != '-' && (std & V4L2_STD_NTSC) == V4L2_STD_NTSC) {
+ switch (ntsc[0]) {
+ case 'm':
+ case 'M':
+ return V4L2_STD_NTSC_M;
+ case 'j':
+ case 'J':
+ return V4L2_STD_NTSC_M_JP;
+ case 'k':
+ case 'K':
+ return V4L2_STD_NTSC_M_KR;
+ default:
+ pr_info("ntsc= argument not recognised\n");
+ break;
+ }
+ }
+ return std;
+}
+
+/*
+ * Functions that are specific for Radio mode
+ */
+
+/**
+ * set_radio_freq - Set tuner frequency, freq in units of 62.5 Hz (1/16 kHz)
+ *
+ * @c: i2c_client descriptor
+ * @freq: frequency
+ */
+static void set_radio_freq(struct i2c_client *c, unsigned int freq)
+{
+ struct tuner *t = to_tuner(i2c_get_clientdata(c));
+ struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
+
+ struct analog_parameters params = {
+ .mode = t->mode,
+ .audmode = t->audmode,
+ .std = t->std
+ };
+
+ if (t->type == UNSET) {
+ pr_warn("tuner type not set\n");
+ return;
+ }
+ if (NULL == analog_ops->set_params) {
+ pr_warn("tuner has no way to set radio frequency\n");
+ return;
+ }
+ if (freq < radio_range[0] * 16000 || freq > radio_range[1] * 16000) {
+ dprintk("radio freq (%d.%02d) out of range (%d-%d)\n",
+ freq / 16000, freq % 16000 * 100 / 16000,
+ radio_range[0], radio_range[1]);
+ /* V4L2 spec: if the freq is not possible then the closest
+ possible value should be selected */
+ if (freq < radio_range[0] * 16000)
+ freq = radio_range[0] * 16000;
+ else
+ freq = radio_range[1] * 16000;
+ }
+ params.frequency = freq;
+ dprintk("radio freq set to %d.%02d\n",
+ freq / 16000, freq % 16000 * 100 / 16000);
+ t->radio_freq = freq;
+ t->standby = false;
+
+ analog_ops->set_params(&t->fe, &params);
+ /*
+ * The tuner driver might decide to change the audmode if it only
+ * supports stereo, so update t->audmode.
+ */
+ t->audmode = params.audmode;
+}
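+
+/*
+ * Worked example: an FM station at 98.5 MHz is requested as
+ * 98.5 MHz * 16000 = 1576000 (in units of 1/16 kHz); the default
+ * radio_range of 65-108 MHz maps to 1040000-1728000 in the same units.
+ */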
+
+/*
+ * Debug function for reporting tuner status to userspace
+ */
+
+/**
+ * tuner_status - Dumps the current tuner status at dmesg
+ * @fe: pointer to struct dvb_frontend
+ *
+ * This callback is used only for driver debug purposes, answering to
+ * VIDIOC_LOG_STATUS. No changes should happen on this call.
+ */
+static void tuner_status(struct dvb_frontend *fe)
+{
+ struct tuner *t = fe->analog_demod_priv;
+ unsigned long freq, freq_fraction;
+ struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops;
+ struct analog_demod_ops *analog_ops = &fe->ops.analog_ops;
+ const char *p;
+
+ switch (t->mode) {
+ case V4L2_TUNER_RADIO:
+ p = "radio";
+ break;
+ case V4L2_TUNER_DIGITAL_TV: /* Used by mt20xx */
+ p = "digital TV";
+ break;
+ case V4L2_TUNER_ANALOG_TV:
+ default:
+ p = "analog TV";
+ break;
+ }
+ if (t->mode == V4L2_TUNER_RADIO) {
+ freq = t->radio_freq / 16000;
+ freq_fraction = (t->radio_freq % 16000) * 100 / 16000;
+ } else {
+ freq = t->tv_freq / 16;
+ freq_fraction = (t->tv_freq % 16) * 100 / 16;
+ }
+ pr_info("Tuner mode: %s%s\n", p,
+ t->standby ? " on standby mode" : "");
+ pr_info("Frequency: %lu.%02lu MHz\n", freq, freq_fraction);
+ pr_info("Standard: 0x%08lx\n", (unsigned long)t->std);
+ if (t->mode != V4L2_TUNER_RADIO)
+ return;
+ if (fe_tuner_ops->get_status) {
+ u32 tuner_status = 0;
+
+ fe_tuner_ops->get_status(&t->fe, &tuner_status);
+ if (tuner_status & TUNER_STATUS_LOCKED)
+ pr_info("Tuner is locked.\n");
+ if (tuner_status & TUNER_STATUS_STEREO)
+ pr_info("Stereo: yes\n");
+ }
+ if (analog_ops->has_signal) {
+ u16 signal;
+
+ if (!analog_ops->has_signal(fe, &signal))
+ pr_info("Signal strength: %hu\n", signal);
+ }
+}
+
+/*
+ * Function to explicitly change the mode to radio. Probably not needed anymore.
+ */
+
+static int tuner_s_radio(struct v4l2_subdev *sd)
+{
+ struct tuner *t = to_tuner(sd);
+
+ if (set_mode(t, V4L2_TUNER_RADIO) == 0)
+ set_freq(t, 0);
+ return 0;
+}
+
+/*
+ * Tuner callbacks to handle userspace ioctl's
+ */
+
+/**
+ * tuner_standby - places the tuner in standby mode
+ * @sd: pointer to struct v4l2_subdev
+ */
+static int tuner_standby(struct v4l2_subdev *sd)
+{
+ struct tuner *t = to_tuner(sd);
+ struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
+
+ dprintk("Putting tuner to sleep\n");
+ t->standby = true;
+ if (analog_ops->standby)
+ analog_ops->standby(&t->fe);
+ return 0;
+}
+
+static int tuner_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
+{
+ struct tuner *t = to_tuner(sd);
+
+ if (set_mode(t, V4L2_TUNER_ANALOG_TV))
+ return 0;
+
+ t->std = tuner_fixup_std(t, std);
+ if (t->std != std)
+ dprintk("Fixup standard %llx to %llx\n", std, t->std);
+ set_freq(t, 0);
+ return 0;
+}
+
+static int tuner_s_frequency(struct v4l2_subdev *sd, const struct v4l2_frequency *f)
+{
+ struct tuner *t = to_tuner(sd);
+
+ if (set_mode(t, f->type) == 0)
+ set_freq(t, f->frequency);
+ return 0;
+}
+
+/**
+ * tuner_g_frequency - Get the tuned frequency for the tuner
+ * @sd: pointer to struct v4l2_subdev
+ * @f: pointer to struct v4l2_frequency
+ *
+ * On return, the structure f will be filled with the tuner frequency
+ * if the tuner matches f->type.
+ * Note: f->type should be initialized before calling this function.
+ * This is done by either video_ioctl2 or by the bridge driver.
+ */
+static int tuner_g_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f)
+{
+ struct tuner *t = to_tuner(sd);
+ struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops;
+
+ if (check_mode(t, f->type) == -EINVAL)
+ return 0;
+ if (f->type == t->mode && fe_tuner_ops->get_frequency && !t->standby) {
+ u32 abs_freq;
+
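+ /*
+ * get_frequency() reports the tuned frequency in Hz; convert it
+ * to V4L2 units: 1/16 kHz (62.5 Hz) steps for radio, hence
+ * abs_freq * 2 / 125, and 1/16 MHz (62500 Hz) steps for TV.
+ */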
+ fe_tuner_ops->get_frequency(&t->fe, &abs_freq);
+ f->frequency = (V4L2_TUNER_RADIO == t->mode) ?
+ DIV_ROUND_CLOSEST(abs_freq * 2, 125) :
+ DIV_ROUND_CLOSEST(abs_freq, 62500);
+ } else {
+ f->frequency = (V4L2_TUNER_RADIO == f->type) ?
+ t->radio_freq : t->tv_freq;
+ }
+ return 0;
+}
+
+/**
+ * tuner_g_tuner - Fill in tuner information
+ * @sd: pointer to struct v4l2_subdev
+ * @vt: pointer to struct v4l2_tuner
+ *
+ * On return, the structure vt will be filled with tuner information
+ * if the tuner matches vt->type.
+ * Note: vt->type should be initialized before calling this function.
+ * This is done by either video_ioctl2 or by the bridge driver.
+ */
+static int tuner_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
+{
+ struct tuner *t = to_tuner(sd);
+ struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
+ struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops;
+
+ if (check_mode(t, vt->type) == -EINVAL)
+ return 0;
+ if (vt->type == t->mode && analog_ops->get_afc)
+ analog_ops->get_afc(&t->fe, &vt->afc);
+ if (vt->type == t->mode && analog_ops->has_signal) {
+ u16 signal = (u16)vt->signal;
+
+ if (!analog_ops->has_signal(&t->fe, &signal))
+ vt->signal = signal;
+ }
+ if (vt->type != V4L2_TUNER_RADIO) {
+ vt->capability |= V4L2_TUNER_CAP_NORM;
+ vt->rangelow = tv_range[0] * 16;
+ vt->rangehigh = tv_range[1] * 16;
+ return 0;
+ }
+
+ /* radio mode */
+ if (vt->type == t->mode) {
+ vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
+ if (fe_tuner_ops->get_status) {
+ u32 tuner_status = 0;
+
+ fe_tuner_ops->get_status(&t->fe, &tuner_status);
+ vt->rxsubchans =
+ (tuner_status & TUNER_STATUS_STEREO) ?
+ V4L2_TUNER_SUB_STEREO :
+ V4L2_TUNER_SUB_MONO;
+ }
+ vt->audmode = t->audmode;
+ }
+ vt->capability |= V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
+ vt->rangelow = radio_range[0] * 16000;
+ vt->rangehigh = radio_range[1] * 16000;
+
+ return 0;
+}
+
+/**
+ * tuner_s_tuner - Set the tuner's audio mode
+ * @sd: pointer to struct v4l2_subdev
+ * @vt: pointer to struct v4l2_tuner
+ *
+ * Sets the audio mode if the tuner matches vt->type.
+ * Note: vt->type should be initialized before calling this function.
+ * This is done by either video_ioctl2 or by the bridge driver.
+ */
+static int tuner_s_tuner(struct v4l2_subdev *sd, const struct v4l2_tuner *vt)
+{
+ struct tuner *t = to_tuner(sd);
+
+ if (set_mode(t, vt->type))
+ return 0;
+
+ if (t->mode == V4L2_TUNER_RADIO) {
+ t->audmode = vt->audmode;
+ /*
+ * For radio audmode can only be mono or stereo. Map any
+ * other values to stereo. The actual tuner driver that is
+ * called in set_radio_freq can decide to limit the audmode to
+ * mono if only mono is supported.
+ */
+ if (t->audmode != V4L2_TUNER_MODE_MONO &&
+ t->audmode != V4L2_TUNER_MODE_STEREO)
+ t->audmode = V4L2_TUNER_MODE_STEREO;
+ }
+ set_freq(t, 0);
+
+ return 0;
+}
+
+static int tuner_log_status(struct v4l2_subdev *sd)
+{
+ struct tuner *t = to_tuner(sd);
+ struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
+
+ if (analog_ops->tuner_status)
+ analog_ops->tuner_status(&t->fe);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tuner_suspend(struct device *dev)
+{
+ struct i2c_client *c = to_i2c_client(dev);
+ struct tuner *t = to_tuner(i2c_get_clientdata(c));
+ struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
+
+ dprintk("suspend\n");
+
+ if (t->fe.ops.tuner_ops.suspend)
+ t->fe.ops.tuner_ops.suspend(&t->fe);
+ else if (!t->standby && analog_ops->standby)
+ analog_ops->standby(&t->fe);
+
+ return 0;
+}
+
+static int tuner_resume(struct device *dev)
+{
+ struct i2c_client *c = to_i2c_client(dev);
+ struct tuner *t = to_tuner(i2c_get_clientdata(c));
+
+ dprintk("resume\n");
+
+ if (t->fe.ops.tuner_ops.resume)
+ t->fe.ops.tuner_ops.resume(&t->fe);
+ else if (!t->standby)
+ if (set_mode(t, t->mode) == 0)
+ set_freq(t, 0);
+
+ return 0;
+}
+#endif
+
+static int tuner_command(struct i2c_client *client, unsigned cmd, void *arg)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+
+ /* TUNER_SET_CONFIG is still called by tuner-simple.c, so we have
+ to handle it here.
+ There must be a better way of doing this... */
+ switch (cmd) {
+ case TUNER_SET_CONFIG:
+ return tuner_s_config(sd, arg);
+ }
+ return -ENOIOCTLCMD;
+}
+
+/*
+ * Callback structs
+ */
+
+static const struct v4l2_subdev_core_ops tuner_core_ops = {
+ .log_status = tuner_log_status,
+};
+
+static const struct v4l2_subdev_tuner_ops tuner_tuner_ops = {
+ .standby = tuner_standby,
+ .s_radio = tuner_s_radio,
+ .g_tuner = tuner_g_tuner,
+ .s_tuner = tuner_s_tuner,
+ .s_frequency = tuner_s_frequency,
+ .g_frequency = tuner_g_frequency,
+ .s_type_addr = tuner_s_type_addr,
+ .s_config = tuner_s_config,
+};
+
+static const struct v4l2_subdev_video_ops tuner_video_ops = {
+ .s_std = tuner_s_std,
+};
+
+static const struct v4l2_subdev_ops tuner_ops = {
+ .core = &tuner_core_ops,
+ .tuner = &tuner_tuner_ops,
+ .video = &tuner_video_ops,
+};
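+
+/*
+ * Bridge drivers reach the handlers above through the subdev call macros,
+ * e.g. v4l2_subdev_call(sd, tuner, s_frequency, &f) ends up in
+ * tuner_s_frequency().
+ */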
+
+/*
+ * I2C structs and module init functions
+ */
+
+static const struct dev_pm_ops tuner_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tuner_suspend, tuner_resume)
+};
+
+static const struct i2c_device_id tuner_id[] = {
+ { "tuner", }, /* autodetect */
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tuner_id);
+
+static struct i2c_driver tuner_driver = {
+ .driver = {
+ .name = "tuner",
+ .pm = &tuner_pm_ops,
+ },
+ .probe = tuner_probe,
+ .remove = tuner_remove,
+ .command = tuner_command,
+ .id_table = tuner_id,
+};
+
+module_i2c_driver(tuner_driver);
+
+MODULE_DESCRIPTION("device driver for various TV and TV+FM radio tuners");
+MODULE_AUTHOR("Ralph Metzler, Gerd Knorr, Gunther Mayer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
new file mode 100644
index 0000000000..8cfd593d29
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-async.c
@@ -0,0 +1,971 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * V4L2 asynchronous subdevice registration API
+ *
+ * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+
+#include "v4l2-subdev-priv.h"
+
+static int v4l2_async_nf_call_bound(struct v4l2_async_notifier *n,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_connection *asc)
+{
+ if (!n->ops || !n->ops->bound)
+ return 0;
+
+ return n->ops->bound(n, subdev, asc);
+}
+
+static void v4l2_async_nf_call_unbind(struct v4l2_async_notifier *n,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_connection *asc)
+{
+ if (!n->ops || !n->ops->unbind)
+ return;
+
+ n->ops->unbind(n, subdev, asc);
+}
+
+static int v4l2_async_nf_call_complete(struct v4l2_async_notifier *n)
+{
+ if (!n->ops || !n->ops->complete)
+ return 0;
+
+ return n->ops->complete(n);
+}
+
+static void v4l2_async_nf_call_destroy(struct v4l2_async_notifier *n,
+ struct v4l2_async_connection *asc)
+{
+ if (!n->ops || !n->ops->destroy)
+ return;
+
+ n->ops->destroy(asc);
+}
+
+static bool match_i2c(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_match_desc *match)
+{
+#if IS_ENABLED(CONFIG_I2C)
+ struct i2c_client *client = i2c_verify_client(sd->dev);
+
+ return client &&
+ match->i2c.adapter_id == client->adapter->nr &&
+ match->i2c.address == client->addr;
+#else
+ return false;
+#endif
+}
+
+static struct device *notifier_dev(struct v4l2_async_notifier *notifier)
+{
+ if (notifier->sd)
+ return notifier->sd->dev;
+
+ if (notifier->v4l2_dev)
+ return notifier->v4l2_dev->dev;
+
+ return NULL;
+}
+
+static bool
+match_fwnode_one(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd, struct fwnode_handle *sd_fwnode,
+ struct v4l2_async_match_desc *match)
+{
+ struct fwnode_handle *asd_dev_fwnode;
+ bool ret;
+
+ dev_dbg(notifier_dev(notifier),
+ "v4l2-async: fwnode match: need %pfw, trying %pfw\n",
+ sd_fwnode, match->fwnode);
+
+ if (sd_fwnode == match->fwnode) {
+ dev_dbg(notifier_dev(notifier),
+ "v4l2-async: direct match found\n");
+ return true;
+ }
+
+ if (!fwnode_graph_is_endpoint(match->fwnode)) {
+ dev_dbg(notifier_dev(notifier),
+ "v4l2-async: direct match not found\n");
+ return false;
+ }
+
+ asd_dev_fwnode = fwnode_graph_get_port_parent(match->fwnode);
+
+ ret = sd_fwnode == asd_dev_fwnode;
+
+ fwnode_handle_put(asd_dev_fwnode);
+
+ dev_dbg(notifier_dev(notifier),
+ "v4l2-async: device--endpoint match %sfound\n",
+ ret ? "" : "not ");
+
+ return ret;
+}
+
+static bool match_fwnode(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_match_desc *match)
+{
+ dev_dbg(notifier_dev(notifier),
+ "v4l2-async: matching for notifier %pfw, sd fwnode %pfw\n",
+ dev_fwnode(notifier_dev(notifier)), sd->fwnode);
+
+ if (!list_empty(&sd->async_subdev_endpoint_list)) {
+ struct v4l2_async_subdev_endpoint *ase;
+
+ dev_dbg(sd->dev,
+ "v4l2-async: endpoint fwnode list available, looking for %pfw\n",
+ match->fwnode);
+
+ list_for_each_entry(ase, &sd->async_subdev_endpoint_list,
+ async_subdev_endpoint_entry) {
+ bool matched = ase->endpoint == match->fwnode;
+
+ dev_dbg(sd->dev,
+ "v4l2-async: endpoint-endpoint match %sfound with %pfw\n",
+ matched ? "" : "not ", ase->endpoint);
+
+ if (matched)
+ return true;
+ }
+
+ dev_dbg(sd->dev, "async: no endpoint matched\n");
+
+ return false;
+ }
+
+ if (match_fwnode_one(notifier, sd, sd->fwnode, match))
+ return true;
+
+ /* Also check the secondary fwnode. */
+ if (IS_ERR_OR_NULL(sd->fwnode->secondary))
+ return false;
+
+ dev_dbg(notifier_dev(notifier),
+ "v4l2-async: trying secondary fwnode match\n");
+
+ return match_fwnode_one(notifier, sd, sd->fwnode->secondary, match);
+}
+
+static LIST_HEAD(subdev_list);
+static LIST_HEAD(notifier_list);
+static DEFINE_MUTEX(list_lock);
+
+static struct v4l2_async_connection *
+v4l2_async_find_match(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd)
+{
+ bool (*match)(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_match_desc *match);
+ struct v4l2_async_connection *asc;
+
+ list_for_each_entry(asc, &notifier->waiting_list, asc_entry) {
+ /* bus_type has been verified valid before */
+ switch (asc->match.type) {
+ case V4L2_ASYNC_MATCH_TYPE_I2C:
+ match = match_i2c;
+ break;
+ case V4L2_ASYNC_MATCH_TYPE_FWNODE:
+ match = match_fwnode;
+ break;
+ default:
+ /* Cannot happen, unless someone breaks us */
+ WARN_ON(true);
+ return NULL;
+ }
+
+ /* match cannot be NULL here */
+ if (match(notifier, sd, &asc->match))
+ return asc;
+ }
+
+ return NULL;
+}
+
+/* Compare two async match descriptors for equivalence */
+static bool v4l2_async_match_equal(struct v4l2_async_match_desc *match1,
+ struct v4l2_async_match_desc *match2)
+{
+ if (match1->type != match2->type)
+ return false;
+
+ switch (match1->type) {
+ case V4L2_ASYNC_MATCH_TYPE_I2C:
+ return match1->i2c.adapter_id == match2->i2c.adapter_id &&
+ match1->i2c.address == match2->i2c.address;
+ case V4L2_ASYNC_MATCH_TYPE_FWNODE:
+ return match1->fwnode == match2->fwnode;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+/* Find the sub-device notifier registered by a sub-device driver. */
+static struct v4l2_async_notifier *
+v4l2_async_find_subdev_notifier(struct v4l2_subdev *sd)
+{
+ struct v4l2_async_notifier *n;
+
+ list_for_each_entry(n, &notifier_list, notifier_entry)
+ if (n->sd == sd)
+ return n;
+
+ return NULL;
+}
+
+/* Get v4l2_device related to the notifier if one can be found. */
+static struct v4l2_device *
+v4l2_async_nf_find_v4l2_dev(struct v4l2_async_notifier *notifier)
+{
+ while (notifier->parent)
+ notifier = notifier->parent;
+
+ return notifier->v4l2_dev;
+}
+
+/*
+ * Return true if all child sub-device notifiers are complete, false otherwise.
+ */
+static bool
+v4l2_async_nf_can_complete(struct v4l2_async_notifier *notifier)
+{
+ struct v4l2_async_connection *asc;
+
+ if (!list_empty(&notifier->waiting_list))
+ return false;
+
+ list_for_each_entry(asc, &notifier->done_list, asc_entry) {
+ struct v4l2_async_notifier *subdev_notifier =
+ v4l2_async_find_subdev_notifier(asc->sd);
+
+ if (subdev_notifier &&
+ !v4l2_async_nf_can_complete(subdev_notifier))
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Complete the master notifier if possible. This is done when all async
+ * sub-devices have been bound; v4l2_device is also available then.
+ */
+static int
+v4l2_async_nf_try_complete(struct v4l2_async_notifier *notifier)
+{
+ struct v4l2_async_notifier *__notifier = notifier;
+
+ /* Quick check whether there are still more sub-devices here. */
+ if (!list_empty(&notifier->waiting_list))
+ return 0;
+
+ if (notifier->sd)
+ dev_dbg(notifier_dev(notifier),
+ "v4l2-async: trying to complete\n");
+
+ /* Check the entire notifier tree; find the root notifier first. */
+ while (notifier->parent)
+ notifier = notifier->parent;
+
+ /* This is root if it has v4l2_dev. */
+ if (!notifier->v4l2_dev) {
+ dev_dbg(notifier_dev(__notifier),
+ "v4l2-async: V4L2 device not available\n");
+ return 0;
+ }
+
+ /* Is everything ready? */
+ if (!v4l2_async_nf_can_complete(notifier))
+ return 0;
+
+ dev_dbg(notifier_dev(__notifier), "v4l2-async: complete\n");
+
+ return v4l2_async_nf_call_complete(notifier);
+}
+
+static int
+v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier);
+
+static int v4l2_async_create_ancillary_links(struct v4l2_async_notifier *n,
+ struct v4l2_subdev *sd)
+{
+ struct media_link *link = NULL;
+
+#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
+
+ if (sd->entity.function != MEDIA_ENT_F_LENS &&
+ sd->entity.function != MEDIA_ENT_F_FLASH)
+ return 0;
+
+ link = media_create_ancillary_link(&n->sd->entity, &sd->entity);
+
+#endif
+
+ return IS_ERR(link) ? PTR_ERR(link) : 0;
+}
+
+static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
+ struct v4l2_device *v4l2_dev,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_connection *asc)
+{
+ struct v4l2_async_notifier *subdev_notifier;
+ bool registered = false;
+ int ret;
+
+ if (list_empty(&sd->asc_list)) {
+ ret = v4l2_device_register_subdev(v4l2_dev, sd);
+ if (ret < 0)
+ return ret;
+ registered = true;
+ }
+
+ ret = v4l2_async_nf_call_bound(notifier, sd, asc);
+ if (ret < 0) {
+ if (asc->match.type == V4L2_ASYNC_MATCH_TYPE_FWNODE)
+ dev_dbg(notifier_dev(notifier),
+ "failed binding %pfw (%d)\n",
+ asc->match.fwnode, ret);
+ goto err_unregister_subdev;
+ }
+
+ if (registered) {
+ /*
+ * Depending on the function of the entities involved, we may
+ * want to create links between them (for example between a
+ * sensor and its lens or between a sensor's source pad and the
+ * connected device's sink pad).
+ */
+ ret = v4l2_async_create_ancillary_links(notifier, sd);
+ if (ret) {
+ if (asc->match.type == V4L2_ASYNC_MATCH_TYPE_FWNODE)
+ dev_dbg(notifier_dev(notifier),
+ "failed creating links for %pfw (%d)\n",
+ asc->match.fwnode, ret);
+ goto err_call_unbind;
+ }
+ }
+
+ list_add(&asc->asc_subdev_entry, &sd->asc_list);
+ asc->sd = sd;
+
+ /* Move from the waiting list to the notifier's done list */
+ list_move(&asc->asc_entry, &notifier->done_list);
+
+ dev_dbg(notifier_dev(notifier), "v4l2-async: %s bound (ret %d)\n",
+ dev_name(sd->dev), ret);
+
+ /*
+ * See if the sub-device has a notifier. If not, return here.
+ */
+ subdev_notifier = v4l2_async_find_subdev_notifier(sd);
+ if (!subdev_notifier || subdev_notifier->parent)
+ return 0;
+
+ /*
+ * Proceed with checking for the sub-device notifier's async
+ * sub-devices, and return the result. The error will be handled by the
+ * caller.
+ */
+ subdev_notifier->parent = notifier;
+
+ return v4l2_async_nf_try_all_subdevs(subdev_notifier);
+
+err_call_unbind:
+ v4l2_async_nf_call_unbind(notifier, sd, asc);
+ list_del(&asc->asc_subdev_entry);
+
+err_unregister_subdev:
+ if (registered)
+ v4l2_device_unregister_subdev(sd);
+
+ return ret;
+}
+
+/* Test all async sub-devices in a notifier for a match. */
+static int
+v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier)
+{
+ struct v4l2_device *v4l2_dev =
+ v4l2_async_nf_find_v4l2_dev(notifier);
+ struct v4l2_subdev *sd;
+
+ if (!v4l2_dev)
+ return 0;
+
+ dev_dbg(notifier_dev(notifier), "v4l2-async: trying all sub-devices\n");
+
+again:
+ list_for_each_entry(sd, &subdev_list, async_list) {
+ struct v4l2_async_connection *asc;
+ int ret;
+
+ asc = v4l2_async_find_match(notifier, sd);
+ if (!asc)
+ continue;
+
+ dev_dbg(notifier_dev(notifier),
+ "v4l2-async: match found, subdev %s\n", sd->name);
+
+ ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asc);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * v4l2_async_match_notify() may lead to registering a
+ * new notifier and thus changing the async subdevs
+ * list. In order to proceed safely from here, restart
+ * parsing the list from the beginning.
+ */
+ goto again;
+ }
+
+ return 0;
+}
+
+static void v4l2_async_unbind_subdev_one(struct v4l2_async_notifier *notifier,
+ struct v4l2_async_connection *asc)
+{
+ list_move_tail(&asc->asc_entry, &notifier->waiting_list);
+ if (list_is_singular(&asc->asc_subdev_entry)) {
+ v4l2_async_nf_call_unbind(notifier, asc->sd, asc);
+ v4l2_device_unregister_subdev(asc->sd);
+ asc->sd = NULL;
+ }
+ list_del(&asc->asc_subdev_entry);
+}
+
+/* Unbind all sub-devices in the notifier tree. */
+static void
+v4l2_async_nf_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
+{
+ struct v4l2_async_connection *asc, *asc_tmp;
+
+ list_for_each_entry_safe(asc, asc_tmp, &notifier->done_list,
+ asc_entry) {
+ struct v4l2_async_notifier *subdev_notifier =
+ v4l2_async_find_subdev_notifier(asc->sd);
+
+ if (subdev_notifier)
+ v4l2_async_nf_unbind_all_subdevs(subdev_notifier);
+
+ v4l2_async_unbind_subdev_one(notifier, asc);
+ }
+
+ notifier->parent = NULL;
+}
+
+/* See if an async sub-device can be found in a notifier's lists. */
+static bool
+v4l2_async_nf_has_async_match_entry(struct v4l2_async_notifier *notifier,
+ struct v4l2_async_match_desc *match)
+{
+ struct v4l2_async_connection *asc;
+
+ list_for_each_entry(asc, &notifier->waiting_list, asc_entry)
+ if (v4l2_async_match_equal(&asc->match, match))
+ return true;
+
+ list_for_each_entry(asc, &notifier->done_list, asc_entry)
+ if (v4l2_async_match_equal(&asc->match, match))
+ return true;
+
+ return false;
+}
+
+/*
+ * Find out whether an async sub-device was set up already or whether it exists
+ * in a given notifier.
+ */
+static bool
+v4l2_async_nf_has_async_match(struct v4l2_async_notifier *notifier,
+ struct v4l2_async_match_desc *match)
+{
+ struct list_head *heads[] = {
+ &notifier->waiting_list,
+ &notifier->done_list,
+ };
+ unsigned int i;
+
+ lockdep_assert_held(&list_lock);
+
+	/* Check that a match descriptor is not being added more than once. */
+ for (i = 0; i < ARRAY_SIZE(heads); i++) {
+ struct v4l2_async_connection *asc;
+
+ list_for_each_entry(asc, heads[i], asc_entry) {
+ if (&asc->match == match)
+ continue;
+ if (v4l2_async_match_equal(&asc->match, match))
+ return true;
+ }
+ }
+
+ /* Check that an asc does not exist in other notifiers. */
+ list_for_each_entry(notifier, &notifier_list, notifier_entry)
+ if (v4l2_async_nf_has_async_match_entry(notifier, match))
+ return true;
+
+ return false;
+}
+
+static int v4l2_async_nf_match_valid(struct v4l2_async_notifier *notifier,
+ struct v4l2_async_match_desc *match)
+{
+ struct device *dev = notifier_dev(notifier);
+
+ switch (match->type) {
+ case V4L2_ASYNC_MATCH_TYPE_I2C:
+ case V4L2_ASYNC_MATCH_TYPE_FWNODE:
+ if (v4l2_async_nf_has_async_match(notifier, match)) {
+ dev_dbg(dev, "v4l2-async: match descriptor already listed in a notifier\n");
+ return -EEXIST;
+ }
+ break;
+ default:
+ dev_err(dev, "v4l2-async: Invalid match type %u on %p\n",
+ match->type, match);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
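+/*
+ * A typical bridge driver binds its remote sub-devices roughly as in the
+ * sketch below (error handling omitted; "my_notifier_ops" is a placeholder
+ * for the driver's struct v4l2_async_notifier_operations):
+ *
+ *	v4l2_async_nf_init(&notifier, v4l2_dev);
+ *	asc = v4l2_async_nf_add_fwnode_remote(&notifier, endpoint,
+ *					      struct v4l2_async_connection);
+ *	notifier.ops = &my_notifier_ops;
+ *	ret = v4l2_async_nf_register(&notifier);
+ */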
+void v4l2_async_nf_init(struct v4l2_async_notifier *notifier,
+ struct v4l2_device *v4l2_dev)
+{
+ INIT_LIST_HEAD(&notifier->waiting_list);
+ INIT_LIST_HEAD(&notifier->done_list);
+ notifier->v4l2_dev = v4l2_dev;
+}
+EXPORT_SYMBOL(v4l2_async_nf_init);
+
+void v4l2_async_subdev_nf_init(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd)
+{
+ INIT_LIST_HEAD(&notifier->waiting_list);
+ INIT_LIST_HEAD(&notifier->done_list);
+ notifier->sd = sd;
+}
+EXPORT_SYMBOL_GPL(v4l2_async_subdev_nf_init);
+
+static int __v4l2_async_nf_register(struct v4l2_async_notifier *notifier)
+{
+ struct v4l2_async_connection *asc;
+ int ret;
+
+ mutex_lock(&list_lock);
+
+ list_for_each_entry(asc, &notifier->waiting_list, asc_entry) {
+ ret = v4l2_async_nf_match_valid(notifier, &asc->match);
+ if (ret)
+ goto err_unlock;
+ }
+
+ ret = v4l2_async_nf_try_all_subdevs(notifier);
+ if (ret < 0)
+ goto err_unbind;
+
+ ret = v4l2_async_nf_try_complete(notifier);
+ if (ret < 0)
+ goto err_unbind;
+
+	/* Keep completed notifiers on the list as well */
+ list_add(&notifier->notifier_entry, &notifier_list);
+
+ mutex_unlock(&list_lock);
+
+ return 0;
+
+err_unbind:
+ /*
+ * On failure, unbind all sub-devices registered through this notifier.
+ */
+ v4l2_async_nf_unbind_all_subdevs(notifier);
+
+err_unlock:
+ mutex_unlock(&list_lock);
+
+ return ret;
+}
+
+int v4l2_async_nf_register(struct v4l2_async_notifier *notifier)
+{
+ int ret;
+
+ if (WARN_ON(!notifier->v4l2_dev == !notifier->sd))
+ return -EINVAL;
+
+ ret = __v4l2_async_nf_register(notifier);
+ if (ret)
+ notifier->v4l2_dev = NULL;
+
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_async_nf_register);
+
+static void
+__v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
+{
+ if (!notifier || (!notifier->v4l2_dev && !notifier->sd))
+ return;
+
+ v4l2_async_nf_unbind_all_subdevs(notifier);
+
+ list_del(&notifier->notifier_entry);
+}
+
+void v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
+{
+ mutex_lock(&list_lock);
+
+ __v4l2_async_nf_unregister(notifier);
+
+ mutex_unlock(&list_lock);
+}
+EXPORT_SYMBOL(v4l2_async_nf_unregister);
+
+static void __v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier)
+{
+ struct v4l2_async_connection *asc, *tmp;
+
+ if (!notifier || !notifier->waiting_list.next)
+ return;
+
+ WARN_ON(!list_empty(&notifier->done_list));
+
+ list_for_each_entry_safe(asc, tmp, &notifier->waiting_list, asc_entry) {
+ list_del(&asc->asc_entry);
+ v4l2_async_nf_call_destroy(notifier, asc);
+
+ if (asc->match.type == V4L2_ASYNC_MATCH_TYPE_FWNODE)
+ fwnode_handle_put(asc->match.fwnode);
+
+ kfree(asc);
+ }
+
+ notifier->sd = NULL;
+ notifier->v4l2_dev = NULL;
+}
+
+void v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier)
+{
+ mutex_lock(&list_lock);
+
+ __v4l2_async_nf_cleanup(notifier);
+
+ mutex_unlock(&list_lock);
+}
+EXPORT_SYMBOL_GPL(v4l2_async_nf_cleanup);
+
+static void __v4l2_async_nf_add_connection(struct v4l2_async_notifier *notifier,
+ struct v4l2_async_connection *asc)
+{
+ mutex_lock(&list_lock);
+
+ list_add_tail(&asc->asc_entry, &notifier->waiting_list);
+
+ mutex_unlock(&list_lock);
+}
+
+struct v4l2_async_connection *
+__v4l2_async_nf_add_fwnode(struct v4l2_async_notifier *notifier,
+ struct fwnode_handle *fwnode,
+ unsigned int asc_struct_size)
+{
+ struct v4l2_async_connection *asc;
+
+ asc = kzalloc(asc_struct_size, GFP_KERNEL);
+ if (!asc)
+ return ERR_PTR(-ENOMEM);
+
+ asc->notifier = notifier;
+ asc->match.type = V4L2_ASYNC_MATCH_TYPE_FWNODE;
+ asc->match.fwnode = fwnode_handle_get(fwnode);
+
+ __v4l2_async_nf_add_connection(notifier, asc);
+
+ return asc;
+}
+EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode);
+
+struct v4l2_async_connection *
+__v4l2_async_nf_add_fwnode_remote(struct v4l2_async_notifier *notif,
+ struct fwnode_handle *endpoint,
+ unsigned int asc_struct_size)
+{
+ struct v4l2_async_connection *asc;
+ struct fwnode_handle *remote;
+
+ remote = fwnode_graph_get_remote_endpoint(endpoint);
+ if (!remote)
+ return ERR_PTR(-ENOTCONN);
+
+ asc = __v4l2_async_nf_add_fwnode(notif, remote, asc_struct_size);
+ /*
+ * Calling __v4l2_async_nf_add_fwnode grabs a refcount,
+	 * so drop the one we got in fwnode_graph_get_remote_endpoint.
+ */
+ fwnode_handle_put(remote);
+ return asc;
+}
+EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode_remote);
+
+struct v4l2_async_connection *
+__v4l2_async_nf_add_i2c(struct v4l2_async_notifier *notifier, int adapter_id,
+ unsigned short address, unsigned int asc_struct_size)
+{
+ struct v4l2_async_connection *asc;
+
+ asc = kzalloc(asc_struct_size, GFP_KERNEL);
+ if (!asc)
+ return ERR_PTR(-ENOMEM);
+
+ asc->notifier = notifier;
+ asc->match.type = V4L2_ASYNC_MATCH_TYPE_I2C;
+ asc->match.i2c.adapter_id = adapter_id;
+ asc->match.i2c.address = address;
+
+ __v4l2_async_nf_add_connection(notifier, asc);
+
+ return asc;
+}
+EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_i2c);
+
+int v4l2_async_subdev_endpoint_add(struct v4l2_subdev *sd,
+ struct fwnode_handle *fwnode)
+{
+ struct v4l2_async_subdev_endpoint *ase;
+
+ ase = kmalloc(sizeof(*ase), GFP_KERNEL);
+ if (!ase)
+ return -ENOMEM;
+
+ ase->endpoint = fwnode;
+ list_add(&ase->async_subdev_endpoint_entry,
+ &sd->async_subdev_endpoint_list);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_async_subdev_endpoint_add);
+
+struct v4l2_async_connection *
+v4l2_async_connection_unique(struct v4l2_subdev *sd)
+{
+ if (!list_is_singular(&sd->asc_list))
+ return NULL;
+
+ return list_first_entry(&sd->asc_list,
+ struct v4l2_async_connection, asc_subdev_entry);
+}
+EXPORT_SYMBOL_GPL(v4l2_async_connection_unique);
+
+int v4l2_async_register_subdev(struct v4l2_subdev *sd)
+{
+ struct v4l2_async_notifier *subdev_notifier;
+ struct v4l2_async_notifier *notifier;
+ struct v4l2_async_connection *asc;
+ int ret;
+
+ INIT_LIST_HEAD(&sd->asc_list);
+
+ /*
+ * No reference taken. The reference is held by the device (struct
+	 * v4l2_subdev.dev), and the async sub-device does not exist independently
+ * of the device at any point of time.
+ *
+ * The async sub-device shall always be registered for its device node,
+ * not the endpoint node.
+ */
+ if (!sd->fwnode && sd->dev) {
+ sd->fwnode = dev_fwnode(sd->dev);
+ } else if (fwnode_graph_is_endpoint(sd->fwnode)) {
+ dev_warn(sd->dev, "sub-device fwnode is an endpoint!\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&list_lock);
+
+ list_for_each_entry(notifier, &notifier_list, notifier_entry) {
+ struct v4l2_device *v4l2_dev =
+ v4l2_async_nf_find_v4l2_dev(notifier);
+
+ if (!v4l2_dev)
+ continue;
+
+ while ((asc = v4l2_async_find_match(notifier, sd))) {
+ ret = v4l2_async_match_notify(notifier, v4l2_dev, sd,
+ asc);
+ if (ret)
+ goto err_unbind;
+
+ ret = v4l2_async_nf_try_complete(notifier);
+ if (ret)
+ goto err_unbind;
+ }
+ }
+
+ /* None matched, wait for hot-plugging */
+ list_add(&sd->async_list, &subdev_list);
+
+ mutex_unlock(&list_lock);
+
+ return 0;
+
+err_unbind:
+ /*
+ * Complete failed. Unbind the sub-devices bound through registering
+ * this async sub-device.
+ */
+ subdev_notifier = v4l2_async_find_subdev_notifier(sd);
+ if (subdev_notifier)
+ v4l2_async_nf_unbind_all_subdevs(subdev_notifier);
+
+ if (asc)
+ v4l2_async_unbind_subdev_one(notifier, asc);
+
+ mutex_unlock(&list_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_async_register_subdev);
+
+void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
+{
+ struct v4l2_async_connection *asc, *asc_tmp;
+
+ if (!sd->async_list.next)
+ return;
+
+ v4l2_subdev_put_privacy_led(sd);
+
+ mutex_lock(&list_lock);
+
+ __v4l2_async_nf_unregister(sd->subdev_notifier);
+ __v4l2_async_nf_cleanup(sd->subdev_notifier);
+ kfree(sd->subdev_notifier);
+ sd->subdev_notifier = NULL;
+
+ if (sd->asc_list.next) {
+ list_for_each_entry_safe(asc, asc_tmp, &sd->asc_list,
+ asc_subdev_entry) {
+ list_move(&asc->asc_entry,
+ &asc->notifier->waiting_list);
+
+ v4l2_async_unbind_subdev_one(asc->notifier, asc);
+ }
+ }
+
+ list_del(&sd->async_list);
+ sd->async_list.next = NULL;
+
+ mutex_unlock(&list_lock);
+}
+EXPORT_SYMBOL(v4l2_async_unregister_subdev);
+
+static void print_waiting_match(struct seq_file *s,
+ struct v4l2_async_match_desc *match)
+{
+ switch (match->type) {
+ case V4L2_ASYNC_MATCH_TYPE_I2C:
+ seq_printf(s, " [i2c] dev=%d-%04x\n", match->i2c.adapter_id,
+ match->i2c.address);
+ break;
+ case V4L2_ASYNC_MATCH_TYPE_FWNODE: {
+ struct fwnode_handle *devnode, *fwnode = match->fwnode;
+
+ devnode = fwnode_graph_is_endpoint(fwnode) ?
+ fwnode_graph_get_port_parent(fwnode) :
+ fwnode_handle_get(fwnode);
+
+ seq_printf(s, " [fwnode] dev=%s, node=%pfw\n",
+ devnode->dev ? dev_name(devnode->dev) : "nil",
+ fwnode);
+
+ fwnode_handle_put(devnode);
+ break;
+ }
+ }
+}
+
+static const char *
+v4l2_async_nf_name(struct v4l2_async_notifier *notifier)
+{
+ if (notifier->v4l2_dev)
+ return notifier->v4l2_dev->name;
+ else if (notifier->sd)
+ return notifier->sd->name;
+ else
+ return "nil";
+}
+
+static int pending_subdevs_show(struct seq_file *s, void *data)
+{
+ struct v4l2_async_notifier *notif;
+ struct v4l2_async_connection *asc;
+
+ mutex_lock(&list_lock);
+
+ list_for_each_entry(notif, &notifier_list, notifier_entry) {
+ seq_printf(s, "%s:\n", v4l2_async_nf_name(notif));
+ list_for_each_entry(asc, &notif->waiting_list, asc_entry)
+ print_waiting_match(s, &asc->match);
+ }
+
+ mutex_unlock(&list_lock);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pending_subdevs);
+
+static struct dentry *v4l2_async_debugfs_dir;
+
+static int __init v4l2_async_init(void)
+{
+ v4l2_async_debugfs_dir = debugfs_create_dir("v4l2-async", NULL);
+ debugfs_create_file("pending_async_subdevices", 0444,
+ v4l2_async_debugfs_dir, NULL,
+ &pending_subdevs_fops);
+
+ return 0;
+}
+
+static void __exit v4l2_async_exit(void)
+{
+ debugfs_remove_recursive(v4l2_async_debugfs_dir);
+}
+
+subsys_initcall(v4l2_async_init);
+module_exit(v4l2_async_exit);
+
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
+MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>");
+MODULE_AUTHOR("Ezequiel Garcia <ezequiel@collabora.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/v4l2-core/v4l2-cci.c b/drivers/media/v4l2-core/v4l2-cci.c
new file mode 100644
index 0000000000..10005c80f4
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-cci.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MIPI Camera Control Interface (CCI) register access helpers.
+ *
+ * Copyright (C) 2023 Hans de Goede <hansg@kernel.org>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#include <asm/unaligned.h>
+
+#include <media/v4l2-cci.h>
+
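+/*
+ * cci_read() and cci_write() take an optional err pointer so that several
+ * register accesses can be chained and checked once at the end: if *err is
+ * already set, the access becomes a no-op and *err is returned. A minimal
+ * sketch (the register addresses are illustrative only):
+ *
+ *	int ret = 0;
+ *	u64 id;
+ *
+ *	cci_read(map, CCI_REG16(0x0016), &id, &ret);
+ *	cci_write(map, CCI_REG8(0x0100), 0x01, &ret);
+ *	if (ret)
+ *		return ret;
+ */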
+int cci_read(struct regmap *map, u32 reg, u64 *val, int *err)
+{
+ bool little_endian;
+ unsigned int len;
+ u8 buf[8];
+ int ret;
+
+ if (err && *err)
+ return *err;
+
+ little_endian = reg & CCI_REG_LE;
+ len = CCI_REG_WIDTH_BYTES(reg);
+ reg = CCI_REG_ADDR(reg);
+
+ ret = regmap_bulk_read(map, reg, buf, len);
+ if (ret) {
+		dev_err(regmap_get_device(map), "Error reading reg 0x%04x: %d\n",
+ reg, ret);
+ goto out;
+ }
+
+ switch (len) {
+ case 1:
+ *val = buf[0];
+ break;
+ case 2:
+ if (little_endian)
+ *val = get_unaligned_le16(buf);
+ else
+ *val = get_unaligned_be16(buf);
+ break;
+ case 3:
+ if (little_endian)
+ *val = get_unaligned_le24(buf);
+ else
+ *val = get_unaligned_be24(buf);
+ break;
+ case 4:
+ if (little_endian)
+ *val = get_unaligned_le32(buf);
+ else
+ *val = get_unaligned_be32(buf);
+ break;
+ case 8:
+ if (little_endian)
+ *val = get_unaligned_le64(buf);
+ else
+ *val = get_unaligned_be64(buf);
+ break;
+ default:
+		dev_err(regmap_get_device(map), "Error: invalid reg-width %u for reg 0x%04x\n",
+ len, reg);
+ ret = -EINVAL;
+ break;
+ }
+
+out:
+ if (ret && err)
+ *err = ret;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cci_read);
+
+int cci_write(struct regmap *map, u32 reg, u64 val, int *err)
+{
+ bool little_endian;
+ unsigned int len;
+ u8 buf[8];
+ int ret;
+
+ if (err && *err)
+ return *err;
+
+ little_endian = reg & CCI_REG_LE;
+ len = CCI_REG_WIDTH_BYTES(reg);
+ reg = CCI_REG_ADDR(reg);
+
+ switch (len) {
+ case 1:
+ buf[0] = val;
+ break;
+ case 2:
+ if (little_endian)
+ put_unaligned_le16(val, buf);
+ else
+ put_unaligned_be16(val, buf);
+ break;
+ case 3:
+ if (little_endian)
+ put_unaligned_le24(val, buf);
+ else
+ put_unaligned_be24(val, buf);
+ break;
+ case 4:
+ if (little_endian)
+ put_unaligned_le32(val, buf);
+ else
+ put_unaligned_be32(val, buf);
+ break;
+ case 8:
+ if (little_endian)
+ put_unaligned_le64(val, buf);
+ else
+ put_unaligned_be64(val, buf);
+ break;
+ default:
+		dev_err(regmap_get_device(map), "Error: invalid reg-width %u for reg 0x%04x\n",
+ len, reg);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = regmap_bulk_write(map, reg, buf, len);
+ if (ret)
+		dev_err(regmap_get_device(map), "Error writing reg 0x%04x: %d\n",
+ reg, ret);
+
+out:
+ if (ret && err)
+ *err = ret;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cci_write);
+
+int cci_update_bits(struct regmap *map, u32 reg, u64 mask, u64 val, int *err)
+{
+ u64 readval;
+ int ret;
+
+ ret = cci_read(map, reg, &readval, err);
+ if (ret)
+ return ret;
+
+ val = (readval & ~mask) | (val & mask);
+
+ return cci_write(map, reg, val, err);
+}
+EXPORT_SYMBOL_GPL(cci_update_bits);
+
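+/*
+ * Example use of cci_multi_reg_write(); the register addresses and values
+ * are illustrative only:
+ *
+ *	static const struct cci_reg_sequence sensor_init[] = {
+ *		{ CCI_REG8(0x0103), 0x01 },
+ *		{ CCI_REG16(0x0342), 0x0d70 },
+ *	};
+ *
+ *	ret = cci_multi_reg_write(map, sensor_init,
+ *				  ARRAY_SIZE(sensor_init), NULL);
+ */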
+int cci_multi_reg_write(struct regmap *map, const struct cci_reg_sequence *regs,
+ unsigned int num_regs, int *err)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < num_regs; i++) {
+ ret = cci_write(map, regs[i].reg, regs[i].val, err);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cci_multi_reg_write);
+
+#if IS_ENABLED(CONFIG_V4L2_CCI_I2C)
+struct regmap *devm_cci_regmap_init_i2c(struct i2c_client *client,
+ int reg_addr_bits)
+{
+ struct regmap_config config = {
+ .reg_bits = reg_addr_bits,
+ .val_bits = 8,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .disable_locking = true,
+ };
+
+ return devm_regmap_init_i2c(client, &config);
+}
+EXPORT_SYMBOL_GPL(devm_cci_regmap_init_i2c);
+#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Hans de Goede <hansg@kernel.org>");
+MODULE_DESCRIPTION("MIPI Camera Control Interface (CCI) support");
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
new file mode 100644
index 0000000000..3a4b15a98e
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -0,0 +1,584 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Video for Linux Two
+ *
+ * A generic video device interface for the LINUX operating system
+ * using a set of device structures/vectors for low level operations.
+ *
+ * This file replaces the videodev.c file that comes with the
+ * regular kernel distribution.
+ *
+ * Author: Bill Dirks <bill@thedirks.org>
+ * based on code by Alan Cox, <alan@cymru.net>
+ */
+
+/*
+ * Video capture interface for Linux
+ *
+ * A generic video device interface for the LINUX operating system
+ * using a set of device structures/vectors for low level operations.
+ *
+ * Author: Alan Cox, <alan@lxorguk.ukuu.org.uk>
+ *
+ * Fixes:
+ */
+
+/*
+ * Video4linux 1/2 integration by Justin Schoeman
+ * <justin@suntiger.ee.up.ac.za>
+ * 2.4 PROCFS support ported from 2.4 kernels by
+ * Iñaki García Etxebarria <garetxe@euskalnet.net>
+ * Makefile fix by "W. Michael Petullo" <mike@flyn.org>
+ * 2.4 devfs support ported from 2.4 kernels by
+ * Dan Merillat <dan@merillat.org>
+ * Added Gerd Knorrs v4l1 enhancements (Justin Schoeman)
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/uaccess.h>
+#include <asm/io.h>
+#include <asm/div64.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+
+#include <linux/videodev2.h>
+
+/*
+ *
+ * V 4 L 2 D R I V E R H E L P E R A P I
+ *
+ */
+
+/*
+ * Video Standard Operations (contributed by Michael Schimek)
+ */
+
+/* Helper functions for control handling */
+
+/* Fill in a struct v4l2_queryctrl */
+int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 _min, s32 _max, s32 _step, s32 _def)
+{
+ const char *name;
+ s64 min = _min;
+ s64 max = _max;
+ u64 step = _step;
+ s64 def = _def;
+
+ v4l2_ctrl_fill(qctrl->id, &name, &qctrl->type,
+ &min, &max, &step, &def, &qctrl->flags);
+
+ if (name == NULL)
+ return -EINVAL;
+
+ qctrl->minimum = min;
+ qctrl->maximum = max;
+ qctrl->step = step;
+ qctrl->default_value = def;
+ qctrl->reserved[0] = qctrl->reserved[1] = 0;
+ strscpy(qctrl->name, name, sizeof(qctrl->name));
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_ctrl_query_fill);
+
+/*
+ * Clamp x to be between min and max, aligned to a multiple of 2^align. min
+ * and max don't have to be aligned, but there must be at least one valid
+ * value. E.g., min=17,max=31,align=4 is not allowed as there are no multiples
+ * of 16 between 17 and 31.
+ */
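+/*
+ * For example, clamp_align(60, 32, 128, 4) rounds 60 to the nearest
+ * multiple of 16 within [32, 128] and returns 64.
+ */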
+static unsigned int clamp_align(unsigned int x, unsigned int min,
+ unsigned int max, unsigned int align)
+{
+ /* Bits that must be zero to be aligned */
+ unsigned int mask = ~((1 << align) - 1);
+
+ /* Clamp to aligned min and max */
+ x = clamp(x, (min + ~mask) & mask, max & mask);
+
+ /* Round to nearest aligned value */
+ if (align)
+ x = (x + (1 << (align - 1))) & mask;
+
+ return x;
+}
+
+static unsigned int clamp_roundup(unsigned int x, unsigned int min,
+ unsigned int max, unsigned int alignment)
+{
+ x = clamp(x, min, max);
+ if (alignment)
+ x = round_up(x, alignment);
+
+ return x;
+}
+
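+/*
+ * Bound *w and *h to [wmin, wmax] and [hmin, hmax], aligned to multiples of
+ * 2^walign and 2^halign respectively. If salign is non-zero, additionally
+ * raise the weaker of the two alignments until the total image size
+ * (*w * *h) is a multiple of 2^salign.
+ */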
+void v4l_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax,
+ unsigned int walign,
+ u32 *h, unsigned int hmin, unsigned int hmax,
+ unsigned int halign, unsigned int salign)
+{
+ *w = clamp_align(*w, wmin, wmax, walign);
+ *h = clamp_align(*h, hmin, hmax, halign);
+
+ /* Usually we don't need to align the size and are done now. */
+ if (!salign)
+ return;
+
+ /* How much alignment do we have? */
+ walign = __ffs(*w);
+ halign = __ffs(*h);
+ /* Enough to satisfy the image alignment? */
+ if (walign + halign < salign) {
+ /* Max walign where there is still a valid width */
+ unsigned int wmaxa = __fls(wmax ^ (wmin - 1));
+ /* Max halign where there is still a valid height */
+ unsigned int hmaxa = __fls(hmax ^ (hmin - 1));
+
+ /* up the smaller alignment until we have enough */
+ do {
+ if (halign >= hmaxa ||
+ (walign <= halign && walign < wmaxa)) {
+ *w = clamp_align(*w, wmin, wmax, walign + 1);
+ walign = __ffs(*w);
+ } else {
+ *h = clamp_align(*h, hmin, hmax, halign + 1);
+ halign = __ffs(*h);
+ }
+ } while (halign + walign < salign);
+ }
+}
+EXPORT_SYMBOL_GPL(v4l_bound_align_image);
+
+const void *
+__v4l2_find_nearest_size(const void *array, size_t array_size,
+ size_t entry_size, size_t width_offset,
+ size_t height_offset, s32 width, s32 height)
+{
+ u32 error, min_error = U32_MAX;
+ const void *best = NULL;
+ unsigned int i;
+
+ if (!array)
+ return NULL;
+
+ for (i = 0; i < array_size; i++, array += entry_size) {
+ const u32 *entry_width = array + width_offset;
+ const u32 *entry_height = array + height_offset;
+
+ error = abs(*entry_width - width) + abs(*entry_height - height);
+ if (error > min_error)
+ continue;
+
+ min_error = error;
+ best = array;
+ if (!error)
+ break;
+ }
+
+ return best;
+}
+EXPORT_SYMBOL_GPL(__v4l2_find_nearest_size);
+
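+/*
+ * Drivers normally call this through the v4l2_find_nearest_size() macro,
+ * e.g. (a sketch, assuming an array "modes" whose entries have u32 "width"
+ * and "height" members):
+ *
+ *	mode = v4l2_find_nearest_size(modes, ARRAY_SIZE(modes),
+ *				      width, height, fmt->width, fmt->height);
+ */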
+int v4l2_g_parm_cap(struct video_device *vdev,
+ struct v4l2_subdev *sd, struct v4l2_streamparm *a)
+{
+ struct v4l2_subdev_frame_interval ival = { 0 };
+ int ret;
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return -EINVAL;
+
+ if (vdev->device_caps & V4L2_CAP_READWRITE)
+ a->parm.capture.readbuffers = 2;
+ if (v4l2_subdev_has_op(sd, video, g_frame_interval))
+ a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ ret = v4l2_subdev_call(sd, video, g_frame_interval, &ival);
+ if (!ret)
+ a->parm.capture.timeperframe = ival.interval;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_g_parm_cap);
+
+int v4l2_s_parm_cap(struct video_device *vdev,
+ struct v4l2_subdev *sd, struct v4l2_streamparm *a)
+{
+ struct v4l2_subdev_frame_interval ival = {
+ .interval = a->parm.capture.timeperframe
+ };
+ int ret;
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return -EINVAL;
+
+ memset(&a->parm, 0, sizeof(a->parm));
+ if (vdev->device_caps & V4L2_CAP_READWRITE)
+ a->parm.capture.readbuffers = 2;
+ else
+ a->parm.capture.readbuffers = 0;
+
+ if (v4l2_subdev_has_op(sd, video, g_frame_interval))
+ a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ ret = v4l2_subdev_call(sd, video, s_frame_interval, &ival);
+ if (!ret)
+ a->parm.capture.timeperframe = ival.interval;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_s_parm_cap);
+
+const struct v4l2_format_info *v4l2_format_info(u32 format)
+{
+ static const struct v4l2_format_info formats[] = {
+ /* RGB formats */
+ { .format = V4L2_PIX_FMT_BGR24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGB24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_HSV24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_BGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_XBGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_BGRX32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_XRGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGBX32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_HSV32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_ARGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGBA32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_ABGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_BGRA32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGB565, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGB555, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_BGR666, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_BGR48_12, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_ABGR64_12, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 8, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+
+ /* YUV packed formats */
+ { .format = V4L2_PIX_FMT_YUYV, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YVYU, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_UYVY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_VYUY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_Y212, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YUV48_12, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_MT2110T, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2,
+ .block_w = { 16, 8, 0, 0 }, .block_h = { 32, 16, 0, 0 }},
+ { .format = V4L2_PIX_FMT_MT2110R, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2,
+ .block_w = { 16, 8, 0, 0 }, .block_h = { 32, 16, 0, 0 }},
+
+ /* YUV planar formats */
+ { .format = V4L2_PIX_FMT_NV12, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV21, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV16, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV61, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV24, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV42, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_P010, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_P012, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+
+ { .format = V4L2_PIX_FMT_YUV410, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 4, .vdiv = 4 },
+ { .format = V4L2_PIX_FMT_YVU410, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 4, .vdiv = 4 },
+ { .format = V4L2_PIX_FMT_YUV411P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 4, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YUV420, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_YVU420, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_YUV422P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_GREY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+
+ /* Tiled YUV formats */
+ { .format = V4L2_PIX_FMT_NV12_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV15_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2,
+ .block_w = { 4, 2, 0, 0 }, .block_h = { 1, 1, 0, 0 }},
+ { .format = V4L2_PIX_FMT_P010_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+
+		/* YUV planar formats, non-contiguous variant */
+ { .format = V4L2_PIX_FMT_YUV420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_YVU420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_YUV422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YVU422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YUV444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YVU444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+
+ { .format = V4L2_PIX_FMT_NV12M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV21M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV16M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV61M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_P012M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
+
+ /* Bayer RGB formats */
+ { .format = V4L2_PIX_FMT_SBGGR8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(formats); ++i)
+ if (formats[i].format == format)
+ return &formats[i];
+ return NULL;
+}
+EXPORT_SYMBOL(v4l2_format_info);
+
+static inline unsigned int v4l2_format_block_width(const struct v4l2_format_info *info, int plane)
+{
+ if (!info->block_w[plane])
+ return 1;
+ return info->block_w[plane];
+}
+
+static inline unsigned int v4l2_format_block_height(const struct v4l2_format_info *info, int plane)
+{
+ if (!info->block_h[plane])
+ return 1;
+ return info->block_h[plane];
+}
+
+void v4l2_apply_frmsize_constraints(u32 *width, u32 *height,
+ const struct v4l2_frmsize_stepwise *frmsize)
+{
+ if (!frmsize)
+ return;
+
+ /*
+ * Clamp width/height to meet min/max constraints and round it up to
+ * macroblock alignment.
+ */
+ *width = clamp_roundup(*width, frmsize->min_width, frmsize->max_width,
+ frmsize->step_width);
+ *height = clamp_roundup(*height, frmsize->min_height, frmsize->max_height,
+ frmsize->step_height);
+}
+EXPORT_SYMBOL_GPL(v4l2_apply_frmsize_constraints);
+
+int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt,
+ u32 pixelformat, u32 width, u32 height)
+{
+ const struct v4l2_format_info *info;
+ struct v4l2_plane_pix_format *plane;
+ int i;
+
+ info = v4l2_format_info(pixelformat);
+ if (!info)
+ return -EINVAL;
+
+ pixfmt->width = width;
+ pixfmt->height = height;
+ pixfmt->pixelformat = pixelformat;
+ pixfmt->num_planes = info->mem_planes;
+
+ if (info->mem_planes == 1) {
+ plane = &pixfmt->plane_fmt[0];
+ plane->bytesperline = ALIGN(width, v4l2_format_block_width(info, 0)) * info->bpp[0] / info->bpp_div[0];
+ plane->sizeimage = 0;
+
+ for (i = 0; i < info->comp_planes; i++) {
+ unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
+ unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
+ unsigned int aligned_width;
+ unsigned int aligned_height;
+
+ aligned_width = ALIGN(width, v4l2_format_block_width(info, i));
+ aligned_height = ALIGN(height, v4l2_format_block_height(info, i));
+
+ plane->sizeimage += info->bpp[i] *
+ DIV_ROUND_UP(aligned_width, hdiv) *
+ DIV_ROUND_UP(aligned_height, vdiv) / info->bpp_div[i];
+ }
+ } else {
+ for (i = 0; i < info->comp_planes; i++) {
+ unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
+ unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
+ unsigned int aligned_width;
+ unsigned int aligned_height;
+
+ aligned_width = ALIGN(width, v4l2_format_block_width(info, i));
+ aligned_height = ALIGN(height, v4l2_format_block_height(info, i));
+
+ plane = &pixfmt->plane_fmt[i];
+ plane->bytesperline =
+ info->bpp[i] * DIV_ROUND_UP(aligned_width, hdiv) / info->bpp_div[i];
+ plane->sizeimage =
+ plane->bytesperline * DIV_ROUND_UP(aligned_height, vdiv);
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt_mp);
+
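+/*
+ * Worked example: for single-planar NV12 at 640x480 (bpp = { 1, 2 },
+ * hdiv = vdiv = 2), bytesperline = 640 and
+ * sizeimage = 640 * 480 + 2 * 320 * 240 = 460800 bytes.
+ */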
+int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat,
+ u32 width, u32 height)
+{
+ const struct v4l2_format_info *info;
+ int i;
+
+ info = v4l2_format_info(pixelformat);
+ if (!info)
+ return -EINVAL;
+
+	/* The single-planar API cannot be used for multi-plane formats. */
+ if (info->mem_planes > 1)
+ return -EINVAL;
+
+ pixfmt->width = width;
+ pixfmt->height = height;
+ pixfmt->pixelformat = pixelformat;
+ pixfmt->bytesperline = ALIGN(width, v4l2_format_block_width(info, 0)) * info->bpp[0] / info->bpp_div[0];
+ pixfmt->sizeimage = 0;
+
+ for (i = 0; i < info->comp_planes; i++) {
+ unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
+ unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
+ unsigned int aligned_width;
+ unsigned int aligned_height;
+
+ aligned_width = ALIGN(width, v4l2_format_block_width(info, i));
+ aligned_height = ALIGN(height, v4l2_format_block_height(info, i));
+
+ pixfmt->sizeimage += info->bpp[i] *
+ DIV_ROUND_UP(aligned_width, hdiv) *
+ DIV_ROUND_UP(aligned_height, vdiv) / info->bpp_div[i];
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt);
+
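+/*
+ * Return the link frequency from the V4L2_CID_LINK_FREQ control if the
+ * transmitter implements it, otherwise estimate it from V4L2_CID_PIXEL_RATE
+ * as pixel_rate * mul / div. On a CSI-2 D-PHY bus the caller typically
+ * passes mul = bits per sample and div = 2 * number of lanes; the exact
+ * factors depend on the bus type.
+ */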
+s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul,
+ unsigned int div)
+{
+ struct v4l2_ctrl *ctrl;
+ s64 freq;
+
+ ctrl = v4l2_ctrl_find(handler, V4L2_CID_LINK_FREQ);
+ if (ctrl) {
+ struct v4l2_querymenu qm = { .id = V4L2_CID_LINK_FREQ };
+ int ret;
+
+ qm.index = v4l2_ctrl_g_ctrl(ctrl);
+
+ ret = v4l2_querymenu(handler, &qm);
+ if (ret)
+ return -ENOENT;
+
+ freq = qm.value;
+ } else {
+ if (!mul || !div)
+ return -ENOENT;
+
+ ctrl = v4l2_ctrl_find(handler, V4L2_CID_PIXEL_RATE);
+ if (!ctrl)
+ return -ENOENT;
+
+ freq = div_u64(v4l2_ctrl_g_ctrl_int64(ctrl) * mul, div);
+
+ pr_warn("%s: Link frequency estimated using pixel rate: result might be inaccurate\n",
+ __func__);
+ pr_warn("%s: Consider implementing support for V4L2_CID_LINK_FREQ in the transmitter driver\n",
+ __func__);
+ }
+
+ return freq > 0 ? freq : -EINVAL;
+}
+EXPORT_SYMBOL_GPL(v4l2_get_link_freq);
+
+/*
+ * Simplify a fraction using a simple continued fraction decomposition. The
+ * idea here is to convert fractions such as 333333/10000000 to 1/30 using
+ * 32 bit arithmetic only. The algorithm is not perfect and relies upon two
+ * arbitrary parameters to remove non-significative terms from the simple
+ * continued fraction decomposition. Using 8 and 333 for n_terms and threshold
+ * respectively seems to give nice results.
+ */
+void v4l2_simplify_fraction(u32 *numerator, u32 *denominator,
+ unsigned int n_terms, unsigned int threshold)
+{
+ u32 *an;
+ u32 x, y, r;
+ unsigned int i, n;
+
+ an = kmalloc_array(n_terms, sizeof(*an), GFP_KERNEL);
+ if (an == NULL)
+ return;
+
+ /*
+ * Convert the fraction to a simple continued fraction. See
+ * https://en.wikipedia.org/wiki/Continued_fraction
+ * Stop if the current term is bigger than or equal to the given
+ * threshold.
+ */
+ x = *numerator;
+ y = *denominator;
+
+ for (n = 0; n < n_terms && y != 0; ++n) {
+ an[n] = x / y;
+ if (an[n] >= threshold) {
+ if (n < 2)
+ n++;
+ break;
+ }
+
+ r = x - an[n] * y;
+ x = y;
+ y = r;
+ }
+
+ /* Expand the simple continued fraction back to an integer fraction. */
+ x = 0;
+ y = 1;
+
+ for (i = n; i > 0; --i) {
+ r = y;
+ y = an[i-1] * y + x;
+ x = r;
+ }
+
+ *numerator = y;
+ *denominator = x;
+ kfree(an);
+}
+EXPORT_SYMBOL_GPL(v4l2_simplify_fraction);
+
+/*
+ * Convert a fraction to a frame interval in 100ns multiples. The idea here is
+ * to compute numerator / denominator * 10000000 using 32 bit fixed point
+ * arithmetic only.
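+ * For example, 1/30 (30 frames per second) yields 10000000 / 30 = 333333,
+ * i.e. a frame interval of 333333 * 100ns, about 33.3 ms.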
+ */
+u32 v4l2_fraction_to_interval(u32 numerator, u32 denominator)
+{
+ u32 multiplier;
+
+ /* Saturate the result if the operation would overflow. */
+ if (denominator == 0 ||
+ numerator/denominator >= ((u32)-1)/10000000)
+ return (u32)-1;
+
+ /*
+ * Divide both the denominator and the multiplier by two until
+ * numerator * multiplier doesn't overflow. If anyone knows a better
+ * algorithm please let me know.
+ */
+ multiplier = 10000000;
+ while (numerator > ((u32)-1)/multiplier) {
+ multiplier /= 2;
+ denominator /= 2;
+ }
+
+ return denominator ? numerator * multiplier / denominator : 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_fraction_to_interval);
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
new file mode 100644
index 0000000000..f3bed37859
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -0,0 +1,1202 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ioctl32.c: Conversion between 32bit and 64bit native ioctls.
+ * Separated from fs stuff by Arnd Bergmann <arnd@arndb.de>
+ *
+ * Copyright (C) 1997-2000 Jakub Jelinek (jakub@redhat.com)
+ * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
+ * Copyright (C) 2001,2002 Andi Kleen, SuSE Labs
+ * Copyright (C) 2003 Pavel Machek (pavel@ucw.cz)
+ * Copyright (C) 2005 Philippe De Muyter (phdm@macqel.be)
+ * Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ * These routines maintain argument size conversion between 32bit and 64bit
+ * ioctls.
+ */
+
+#include <linux/compat.h>
+#include <linux/module.h>
+#include <linux/videodev2.h>
+#include <linux/v4l2-subdev.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-ioctl.h>
+
+/*
+ * Per-ioctl data copy handlers.
+ *
+ * These come in pairs, with a get_v4l2_foo() and a put_v4l2_foo() routine,
+ * where "v4l2_foo" is the name of the V4L2 struct.
+ *
+ * They basically get two __user pointers, one to a 32-bit struct that
+ * came from the userspace call and one to a 64-bit struct, also
+ * allocated in userspace but filled internally by do_video_ioctl().
+ *
+ * For ioctls that contain pointers, the functions also receive an
+ * ancillary buffer with extra space, used to pass extra data to the
+ * routine.
+ */
+
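+/*
+ * struct v4l2_window contains real pointers (clips, bitmap) whose size
+ * differs between 32-bit and 64-bit userspace; the layout below uses
+ * compat_caddr_t so that it matches what 32-bit applications pass in.
+ */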
+struct v4l2_window32 {
+ struct v4l2_rect w;
+ __u32 field; /* enum v4l2_field */
+ __u32 chromakey;
+ compat_caddr_t clips; /* always NULL */
+ __u32 clipcount; /* always 0 */
+ compat_caddr_t bitmap; /* always NULL */
+ __u8 global_alpha;
+};
+
+static int get_v4l2_window32(struct v4l2_window *p64,
+ struct v4l2_window32 __user *p32)
+{
+ struct v4l2_window32 w32;
+
+ if (copy_from_user(&w32, p32, sizeof(w32)))
+ return -EFAULT;
+
+ *p64 = (struct v4l2_window) {
+ .w = w32.w,
+ .field = w32.field,
+ .chromakey = w32.chromakey,
+ .clips = NULL,
+ .clipcount = 0,
+ .bitmap = NULL,
+ .global_alpha = w32.global_alpha,
+ };
+
+ return 0;
+}
+
+static int put_v4l2_window32(struct v4l2_window *p64,
+ struct v4l2_window32 __user *p32)
+{
+ struct v4l2_window32 w32;
+
+ memset(&w32, 0, sizeof(w32));
+ w32 = (struct v4l2_window32) {
+ .w = p64->w,
+ .field = p64->field,
+ .chromakey = p64->chromakey,
+ .clips = 0,
+ .clipcount = 0,
+ .bitmap = 0,
+ .global_alpha = p64->global_alpha,
+ };
+
+ if (copy_to_user(p32, &w32, sizeof(w32)))
+ return -EFAULT;
+
+ return 0;
+}
+
+struct v4l2_format32 {
+ __u32 type; /* enum v4l2_buf_type */
+ union {
+ struct v4l2_pix_format pix;
+ struct v4l2_pix_format_mplane pix_mp;
+ struct v4l2_window32 win;
+ struct v4l2_vbi_format vbi;
+ struct v4l2_sliced_vbi_format sliced;
+ struct v4l2_sdr_format sdr;
+ struct v4l2_meta_format meta;
+ __u8 raw_data[200]; /* user-defined */
+ } fmt;
+};
+
+/**
+ * struct v4l2_create_buffers32 - VIDIOC_CREATE_BUFS32 argument
+ * @index: on return, index of the first created buffer
+ * @count: entry: number of requested buffers,
+ * return: number of created buffers
+ * @memory: buffer memory type
+ * @format: frame format, for which buffers are requested
+ * @capabilities: capabilities of this buffer type.
+ * @flags: additional buffer management attributes (ignored unless the
+ * queue has V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS capability and
+ * configured for MMAP streaming I/O).
+ * @reserved: future extensions
+ */
+struct v4l2_create_buffers32 {
+ __u32 index;
+ __u32 count;
+ __u32 memory; /* enum v4l2_memory */
+ struct v4l2_format32 format;
+ __u32 capabilities;
+ __u32 flags;
+ __u32 reserved[6];
+};
+
+static int get_v4l2_format32(struct v4l2_format *p64,
+ struct v4l2_format32 __user *p32)
+{
+ if (get_user(p64->type, &p32->type))
+ return -EFAULT;
+
+ switch (p64->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return copy_from_user(&p64->fmt.pix, &p32->fmt.pix,
+ sizeof(p64->fmt.pix)) ? -EFAULT : 0;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ return copy_from_user(&p64->fmt.pix_mp, &p32->fmt.pix_mp,
+ sizeof(p64->fmt.pix_mp)) ? -EFAULT : 0;
+ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
+ return get_v4l2_window32(&p64->fmt.win, &p32->fmt.win);
+ case V4L2_BUF_TYPE_VBI_CAPTURE:
+ case V4L2_BUF_TYPE_VBI_OUTPUT:
+ return copy_from_user(&p64->fmt.vbi, &p32->fmt.vbi,
+ sizeof(p64->fmt.vbi)) ? -EFAULT : 0;
+ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
+ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
+ return copy_from_user(&p64->fmt.sliced, &p32->fmt.sliced,
+ sizeof(p64->fmt.sliced)) ? -EFAULT : 0;
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ case V4L2_BUF_TYPE_SDR_OUTPUT:
+ return copy_from_user(&p64->fmt.sdr, &p32->fmt.sdr,
+ sizeof(p64->fmt.sdr)) ? -EFAULT : 0;
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ case V4L2_BUF_TYPE_META_OUTPUT:
+ return copy_from_user(&p64->fmt.meta, &p32->fmt.meta,
+ sizeof(p64->fmt.meta)) ? -EFAULT : 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int get_v4l2_create32(struct v4l2_create_buffers *p64,
+ struct v4l2_create_buffers32 __user *p32)
+{
+ if (copy_from_user(p64, p32,
+ offsetof(struct v4l2_create_buffers32, format)))
+ return -EFAULT;
+ if (copy_from_user(&p64->flags, &p32->flags, sizeof(p32->flags)))
+ return -EFAULT;
+ return get_v4l2_format32(&p64->format, &p32->format);
+}
+
+static int put_v4l2_format32(struct v4l2_format *p64,
+ struct v4l2_format32 __user *p32)
+{
+ switch (p64->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return copy_to_user(&p32->fmt.pix, &p64->fmt.pix,
+ sizeof(p64->fmt.pix)) ? -EFAULT : 0;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ return copy_to_user(&p32->fmt.pix_mp, &p64->fmt.pix_mp,
+ sizeof(p64->fmt.pix_mp)) ? -EFAULT : 0;
+ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
+ return put_v4l2_window32(&p64->fmt.win, &p32->fmt.win);
+ case V4L2_BUF_TYPE_VBI_CAPTURE:
+ case V4L2_BUF_TYPE_VBI_OUTPUT:
+ return copy_to_user(&p32->fmt.vbi, &p64->fmt.vbi,
+ sizeof(p64->fmt.vbi)) ? -EFAULT : 0;
+ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
+ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
+ return copy_to_user(&p32->fmt.sliced, &p64->fmt.sliced,
+ sizeof(p64->fmt.sliced)) ? -EFAULT : 0;
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ case V4L2_BUF_TYPE_SDR_OUTPUT:
+ return copy_to_user(&p32->fmt.sdr, &p64->fmt.sdr,
+ sizeof(p64->fmt.sdr)) ? -EFAULT : 0;
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ case V4L2_BUF_TYPE_META_OUTPUT:
+ return copy_to_user(&p32->fmt.meta, &p64->fmt.meta,
+ sizeof(p64->fmt.meta)) ? -EFAULT : 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int put_v4l2_create32(struct v4l2_create_buffers *p64,
+ struct v4l2_create_buffers32 __user *p32)
+{
+ if (copy_to_user(p32, p64,
+ offsetof(struct v4l2_create_buffers32, format)) ||
+ put_user(p64->capabilities, &p32->capabilities) ||
+ put_user(p64->flags, &p32->flags) ||
+ copy_to_user(p32->reserved, p64->reserved, sizeof(p64->reserved)))
+ return -EFAULT;
+ return put_v4l2_format32(&p64->format, &p32->format);
+}
+
+struct v4l2_standard32 {
+ __u32 index;
+ compat_u64 id;
+ __u8 name[24];
+ struct v4l2_fract frameperiod; /* Frames, not fields */
+ __u32 framelines;
+ __u32 reserved[4];
+};
+
+static int get_v4l2_standard32(struct v4l2_standard *p64,
+ struct v4l2_standard32 __user *p32)
+{
+ /* other fields are not set by the user, nor used by the driver */
+ return get_user(p64->index, &p32->index);
+}
+
+static int put_v4l2_standard32(struct v4l2_standard *p64,
+ struct v4l2_standard32 __user *p32)
+{
+ if (put_user(p64->index, &p32->index) ||
+ put_user(p64->id, &p32->id) ||
+ copy_to_user(p32->name, p64->name, sizeof(p32->name)) ||
+ copy_to_user(&p32->frameperiod, &p64->frameperiod,
+ sizeof(p32->frameperiod)) ||
+ put_user(p64->framelines, &p32->framelines) ||
+ copy_to_user(p32->reserved, p64->reserved, sizeof(p32->reserved)))
+ return -EFAULT;
+ return 0;
+}
+
+struct v4l2_plane32 {
+ __u32 bytesused;
+ __u32 length;
+ union {
+ __u32 mem_offset;
+ compat_long_t userptr;
+ __s32 fd;
+ } m;
+ __u32 data_offset;
+ __u32 reserved[11];
+};
+
+/*
+ * This is correct for all architectures including i386, but not x32,
+ * which has different alignment requirements for timestamp
+ */
+struct v4l2_buffer32 {
+ __u32 index;
+ __u32 type; /* enum v4l2_buf_type */
+ __u32 bytesused;
+ __u32 flags;
+ __u32 field; /* enum v4l2_field */
+ struct {
+ compat_s64 tv_sec;
+ compat_s64 tv_usec;
+ } timestamp;
+ struct v4l2_timecode timecode;
+ __u32 sequence;
+
+ /* memory location */
+ __u32 memory; /* enum v4l2_memory */
+ union {
+ __u32 offset;
+ compat_long_t userptr;
+ compat_caddr_t planes;
+ __s32 fd;
+ } m;
+ __u32 length;
+ __u32 reserved2;
+ __s32 request_fd;
+};
+
+#ifdef CONFIG_COMPAT_32BIT_TIME
+struct v4l2_buffer32_time32 {
+ __u32 index;
+ __u32 type; /* enum v4l2_buf_type */
+ __u32 bytesused;
+ __u32 flags;
+ __u32 field; /* enum v4l2_field */
+ struct old_timeval32 timestamp;
+ struct v4l2_timecode timecode;
+ __u32 sequence;
+
+ /* memory location */
+ __u32 memory; /* enum v4l2_memory */
+ union {
+ __u32 offset;
+ compat_long_t userptr;
+ compat_caddr_t planes;
+ __s32 fd;
+ } m;
+ __u32 length;
+ __u32 reserved2;
+ __s32 request_fd;
+};
+#endif
+
+static int get_v4l2_plane32(struct v4l2_plane *p64,
+ struct v4l2_plane32 __user *p32,
+ enum v4l2_memory memory)
+{
+ struct v4l2_plane32 plane32;
+ typeof(p64->m) m = {};
+
+ if (copy_from_user(&plane32, p32, sizeof(plane32)))
+ return -EFAULT;
+
+ switch (memory) {
+ case V4L2_MEMORY_MMAP:
+ case V4L2_MEMORY_OVERLAY:
+ m.mem_offset = plane32.m.mem_offset;
+ break;
+ case V4L2_MEMORY_USERPTR:
+ m.userptr = (unsigned long)compat_ptr(plane32.m.userptr);
+ break;
+ case V4L2_MEMORY_DMABUF:
+ m.fd = plane32.m.fd;
+ break;
+ }
+
+ memset(p64, 0, sizeof(*p64));
+ *p64 = (struct v4l2_plane) {
+ .bytesused = plane32.bytesused,
+ .length = plane32.length,
+ .m = m,
+ .data_offset = plane32.data_offset,
+ };
+
+ return 0;
+}
+
+static int put_v4l2_plane32(struct v4l2_plane *p64,
+ struct v4l2_plane32 __user *p32,
+ enum v4l2_memory memory)
+{
+ struct v4l2_plane32 plane32;
+
+ memset(&plane32, 0, sizeof(plane32));
+ plane32 = (struct v4l2_plane32) {
+ .bytesused = p64->bytesused,
+ .length = p64->length,
+ .data_offset = p64->data_offset,
+ };
+
+ switch (memory) {
+ case V4L2_MEMORY_MMAP:
+ case V4L2_MEMORY_OVERLAY:
+ plane32.m.mem_offset = p64->m.mem_offset;
+ break;
+ case V4L2_MEMORY_USERPTR:
+ plane32.m.userptr = (uintptr_t)(p64->m.userptr);
+ break;
+ case V4L2_MEMORY_DMABUF:
+ plane32.m.fd = p64->m.fd;
+ break;
+ }
+
+ if (copy_to_user(p32, &plane32, sizeof(plane32)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int get_v4l2_buffer32(struct v4l2_buffer *vb,
+ struct v4l2_buffer32 __user *arg)
+{
+ struct v4l2_buffer32 vb32;
+
+ if (copy_from_user(&vb32, arg, sizeof(vb32)))
+ return -EFAULT;
+
+ memset(vb, 0, sizeof(*vb));
+ *vb = (struct v4l2_buffer) {
+ .index = vb32.index,
+ .type = vb32.type,
+ .bytesused = vb32.bytesused,
+ .flags = vb32.flags,
+ .field = vb32.field,
+ .timestamp.tv_sec = vb32.timestamp.tv_sec,
+ .timestamp.tv_usec = vb32.timestamp.tv_usec,
+ .timecode = vb32.timecode,
+ .sequence = vb32.sequence,
+ .memory = vb32.memory,
+ .m.offset = vb32.m.offset,
+ .length = vb32.length,
+ .request_fd = vb32.request_fd,
+ };
+
+ switch (vb->memory) {
+ case V4L2_MEMORY_MMAP:
+ case V4L2_MEMORY_OVERLAY:
+ vb->m.offset = vb32.m.offset;
+ break;
+ case V4L2_MEMORY_USERPTR:
+ vb->m.userptr = (unsigned long)compat_ptr(vb32.m.userptr);
+ break;
+ case V4L2_MEMORY_DMABUF:
+ vb->m.fd = vb32.m.fd;
+ break;
+ }
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(vb->type))
+ vb->m.planes = (void __force *)
+ compat_ptr(vb32.m.planes);
+
+ return 0;
+}
+
+#ifdef CONFIG_COMPAT_32BIT_TIME
+static int get_v4l2_buffer32_time32(struct v4l2_buffer *vb,
+ struct v4l2_buffer32_time32 __user *arg)
+{
+ struct v4l2_buffer32_time32 vb32;
+
+ if (copy_from_user(&vb32, arg, sizeof(vb32)))
+ return -EFAULT;
+
+ *vb = (struct v4l2_buffer) {
+ .index = vb32.index,
+ .type = vb32.type,
+ .bytesused = vb32.bytesused,
+ .flags = vb32.flags,
+ .field = vb32.field,
+ .timestamp.tv_sec = vb32.timestamp.tv_sec,
+ .timestamp.tv_usec = vb32.timestamp.tv_usec,
+ .timecode = vb32.timecode,
+ .sequence = vb32.sequence,
+ .memory = vb32.memory,
+ .m.offset = vb32.m.offset,
+ .length = vb32.length,
+ .request_fd = vb32.request_fd,
+ };
+ switch (vb->memory) {
+ case V4L2_MEMORY_MMAP:
+ case V4L2_MEMORY_OVERLAY:
+ vb->m.offset = vb32.m.offset;
+ break;
+ case V4L2_MEMORY_USERPTR:
+ vb->m.userptr = (unsigned long)compat_ptr(vb32.m.userptr);
+ break;
+ case V4L2_MEMORY_DMABUF:
+ vb->m.fd = vb32.m.fd;
+ break;
+ }
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(vb->type))
+ vb->m.planes = (void __force *)
+ compat_ptr(vb32.m.planes);
+
+ return 0;
+}
+#endif
+
+static int put_v4l2_buffer32(struct v4l2_buffer *vb,
+ struct v4l2_buffer32 __user *arg)
+{
+ struct v4l2_buffer32 vb32;
+
+ memset(&vb32, 0, sizeof(vb32));
+ vb32 = (struct v4l2_buffer32) {
+ .index = vb->index,
+ .type = vb->type,
+ .bytesused = vb->bytesused,
+ .flags = vb->flags,
+ .field = vb->field,
+ .timestamp.tv_sec = vb->timestamp.tv_sec,
+ .timestamp.tv_usec = vb->timestamp.tv_usec,
+ .timecode = vb->timecode,
+ .sequence = vb->sequence,
+ .memory = vb->memory,
+ .m.offset = vb->m.offset,
+ .length = vb->length,
+ .request_fd = vb->request_fd,
+ };
+
+ switch (vb->memory) {
+ case V4L2_MEMORY_MMAP:
+ case V4L2_MEMORY_OVERLAY:
+ vb32.m.offset = vb->m.offset;
+ break;
+ case V4L2_MEMORY_USERPTR:
+ vb32.m.userptr = (uintptr_t)(vb->m.userptr);
+ break;
+ case V4L2_MEMORY_DMABUF:
+ vb32.m.fd = vb->m.fd;
+ break;
+ }
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(vb->type))
+ vb32.m.planes = (uintptr_t)vb->m.planes;
+
+ if (copy_to_user(arg, &vb32, sizeof(vb32)))
+ return -EFAULT;
+
+ return 0;
+}
+
+#ifdef CONFIG_COMPAT_32BIT_TIME
+static int put_v4l2_buffer32_time32(struct v4l2_buffer *vb,
+ struct v4l2_buffer32_time32 __user *arg)
+{
+ struct v4l2_buffer32_time32 vb32;
+
+ memset(&vb32, 0, sizeof(vb32));
+ vb32 = (struct v4l2_buffer32_time32) {
+ .index = vb->index,
+ .type = vb->type,
+ .bytesused = vb->bytesused,
+ .flags = vb->flags,
+ .field = vb->field,
+ .timestamp.tv_sec = vb->timestamp.tv_sec,
+ .timestamp.tv_usec = vb->timestamp.tv_usec,
+ .timecode = vb->timecode,
+ .sequence = vb->sequence,
+ .memory = vb->memory,
+ .m.offset = vb->m.offset,
+ .length = vb->length,
+ .request_fd = vb->request_fd,
+ };
+ switch (vb->memory) {
+ case V4L2_MEMORY_MMAP:
+ case V4L2_MEMORY_OVERLAY:
+ vb32.m.offset = vb->m.offset;
+ break;
+ case V4L2_MEMORY_USERPTR:
+ vb32.m.userptr = (uintptr_t)(vb->m.userptr);
+ break;
+ case V4L2_MEMORY_DMABUF:
+ vb32.m.fd = vb->m.fd;
+ break;
+ }
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(vb->type))
+ vb32.m.planes = (uintptr_t)vb->m.planes;
+
+ if (copy_to_user(arg, &vb32, sizeof(vb32)))
+ return -EFAULT;
+
+ return 0;
+}
+#endif
+
+struct v4l2_framebuffer32 {
+ __u32 capability;
+ __u32 flags;
+ compat_caddr_t base;
+ struct {
+ __u32 width;
+ __u32 height;
+ __u32 pixelformat;
+ __u32 field;
+ __u32 bytesperline;
+ __u32 sizeimage;
+ __u32 colorspace;
+ __u32 priv;
+ } fmt;
+};
+
+static int get_v4l2_framebuffer32(struct v4l2_framebuffer *p64,
+ struct v4l2_framebuffer32 __user *p32)
+{
+ if (get_user(p64->capability, &p32->capability) ||
+ get_user(p64->flags, &p32->flags) ||
+ copy_from_user(&p64->fmt, &p32->fmt, sizeof(p64->fmt)))
+ return -EFAULT;
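+	/* The 32-bit base pointer is deliberately not imported. */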
+ p64->base = NULL;
+
+ return 0;
+}
+
+static int put_v4l2_framebuffer32(struct v4l2_framebuffer *p64,
+ struct v4l2_framebuffer32 __user *p32)
+{
+ if (put_user((uintptr_t)p64->base, &p32->base) ||
+ put_user(p64->capability, &p32->capability) ||
+ put_user(p64->flags, &p32->flags) ||
+ copy_to_user(&p32->fmt, &p64->fmt, sizeof(p64->fmt)))
+ return -EFAULT;
+
+ return 0;
+}
+
+struct v4l2_input32 {
+ __u32 index; /* Which input */
+ __u8 name[32]; /* Label */
+ __u32 type; /* Type of input */
+ __u32 audioset; /* Associated audios (bitfield) */
+ __u32 tuner; /* Associated tuner */
+ compat_u64 std;
+ __u32 status;
+ __u32 capabilities;
+ __u32 reserved[3];
+};
+
+/*
+ * The 64-bit v4l2_input struct has extra padding at the end; otherwise it
+ * is identical to the 32-bit version.
+ */
+static inline int get_v4l2_input32(struct v4l2_input *p64,
+ struct v4l2_input32 __user *p32)
+{
+ if (copy_from_user(p64, p32, sizeof(*p32)))
+ return -EFAULT;
+ return 0;
+}
+
+static inline int put_v4l2_input32(struct v4l2_input *p64,
+ struct v4l2_input32 __user *p32)
+{
+ if (copy_to_user(p32, p64, sizeof(*p32)))
+ return -EFAULT;
+ return 0;
+}
+
+struct v4l2_ext_controls32 {
+ __u32 which;
+ __u32 count;
+ __u32 error_idx;
+ __s32 request_fd;
+ __u32 reserved[1];
+ compat_caddr_t controls; /* actually struct v4l2_ext_control32 * */
+};
+
+struct v4l2_ext_control32 {
+ __u32 id;
+ __u32 size;
+ __u32 reserved2[1];
+ union {
+ __s32 value;
+ __s64 value64;
+ compat_caddr_t string; /* actually char * */
+ };
+} __attribute__ ((packed));
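+
+/*
+ * Note that, like the native struct v4l2_ext_control in the uAPI header,
+ * this layout is packed: the union then starts at offset 12 in both the
+ * 32-bit and 64-bit variants, and only the pointer width inside the union
+ * differs between the two.
+ */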
+
+/* Return true if this control is a pointer type. */
+static inline bool ctrl_is_pointer(struct file *file, u32 id)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_fh *fh = NULL;
+ struct v4l2_ctrl_handler *hdl = NULL;
+ struct v4l2_query_ext_ctrl qec = { id };
+ const struct v4l2_ioctl_ops *ops = vdev->ioctl_ops;
+
+ if (test_bit(V4L2_FL_USES_V4L2_FH, &vdev->flags))
+ fh = file->private_data;
+
+ if (fh && fh->ctrl_handler)
+ hdl = fh->ctrl_handler;
+ else if (vdev->ctrl_handler)
+ hdl = vdev->ctrl_handler;
+
+ if (hdl) {
+ struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, id);
+
+ return ctrl && ctrl->is_ptr;
+ }
+
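+	/*
+	 * No control handler: fall back to querying the control.
+	 * V4L2_CTRL_FLAG_HAS_PAYLOAD is set exactly for pointer-type
+	 * controls.
+	 */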
+ if (!ops || !ops->vidioc_query_ext_ctrl)
+ return false;
+
+ return !ops->vidioc_query_ext_ctrl(file, fh, &qec) &&
+ (qec.flags & V4L2_CTRL_FLAG_HAS_PAYLOAD);
+}
+
+static int get_v4l2_ext_controls32(struct v4l2_ext_controls *p64,
+ struct v4l2_ext_controls32 __user *p32)
+{
+ struct v4l2_ext_controls32 ec32;
+
+ if (copy_from_user(&ec32, p32, sizeof(ec32)))
+ return -EFAULT;
+
+ *p64 = (struct v4l2_ext_controls) {
+ .which = ec32.which,
+ .count = ec32.count,
+ .error_idx = ec32.error_idx,
+ .request_fd = ec32.request_fd,
+ .reserved[0] = ec32.reserved[0],
+ .controls = (void __force *)compat_ptr(ec32.controls),
+ };
+
+ return 0;
+}
+
+static int put_v4l2_ext_controls32(struct v4l2_ext_controls *p64,
+ struct v4l2_ext_controls32 __user *p32)
+{
+ struct v4l2_ext_controls32 ec32;
+
+ memset(&ec32, 0, sizeof(ec32));
+ ec32 = (struct v4l2_ext_controls32) {
+ .which = p64->which,
+ .count = p64->count,
+ .error_idx = p64->error_idx,
+ .request_fd = p64->request_fd,
+ .reserved[0] = p64->reserved[0],
+ .controls = (uintptr_t)p64->controls,
+ };
+
+ if (copy_to_user(p32, &ec32, sizeof(ec32)))
+ return -EFAULT;
+
+ return 0;
+}
+
+#ifdef CONFIG_X86_64
+/*
+ * x86 is the only compat architecture with different struct alignment
+ * between 32-bit and 64-bit tasks.
+ */
+struct v4l2_event32 {
+ __u32 type;
+ union {
+ compat_s64 value64;
+ __u8 data[64];
+ } u;
+ __u32 pending;
+ __u32 sequence;
+ struct {
+ compat_s64 tv_sec;
+ compat_s64 tv_nsec;
+ } timestamp;
+ __u32 id;
+ __u32 reserved[8];
+};
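+
+/*
+ * compat_s64 is only 32-bit aligned, matching i386 where 64-bit types
+ * have 4-byte alignment. The natural 8-byte alignment of s64 on x86-64
+ * is what changes the placement of the union and of everything after it.
+ */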
+
+static int put_v4l2_event32(struct v4l2_event *p64,
+ struct v4l2_event32 __user *p32)
+{
+ if (put_user(p64->type, &p32->type) ||
+ copy_to_user(&p32->u, &p64->u, sizeof(p64->u)) ||
+ put_user(p64->pending, &p32->pending) ||
+ put_user(p64->sequence, &p32->sequence) ||
+ put_user(p64->timestamp.tv_sec, &p32->timestamp.tv_sec) ||
+ put_user(p64->timestamp.tv_nsec, &p32->timestamp.tv_nsec) ||
+ put_user(p64->id, &p32->id) ||
+ copy_to_user(p32->reserved, p64->reserved, sizeof(p32->reserved)))
+ return -EFAULT;
+ return 0;
+}
+
+#endif
+
+#ifdef CONFIG_COMPAT_32BIT_TIME
+struct v4l2_event32_time32 {
+ __u32 type;
+ union {
+ compat_s64 value64;
+ __u8 data[64];
+ } u;
+ __u32 pending;
+ __u32 sequence;
+ struct old_timespec32 timestamp;
+ __u32 id;
+ __u32 reserved[8];
+};
+
+static int put_v4l2_event32_time32(struct v4l2_event *p64,
+ struct v4l2_event32_time32 __user *p32)
+{
+ if (put_user(p64->type, &p32->type) ||
+ copy_to_user(&p32->u, &p64->u, sizeof(p64->u)) ||
+ put_user(p64->pending, &p32->pending) ||
+ put_user(p64->sequence, &p32->sequence) ||
+ put_user(p64->timestamp.tv_sec, &p32->timestamp.tv_sec) ||
+ put_user(p64->timestamp.tv_nsec, &p32->timestamp.tv_nsec) ||
+ put_user(p64->id, &p32->id) ||
+ copy_to_user(p32->reserved, p64->reserved, sizeof(p32->reserved)))
+ return -EFAULT;
+ return 0;
+}
+#endif
+
+struct v4l2_edid32 {
+ __u32 pad;
+ __u32 start_block;
+ __u32 blocks;
+ __u32 reserved[5];
+ compat_caddr_t edid;
+};
+
+static int get_v4l2_edid32(struct v4l2_edid *p64,
+ struct v4l2_edid32 __user *p32)
+{
+ compat_uptr_t edid;
+
+ if (copy_from_user(p64, p32, offsetof(struct v4l2_edid32, edid)) ||
+ get_user(edid, &p32->edid))
+ return -EFAULT;
+
+ p64->edid = (void __force *)compat_ptr(edid);
+ return 0;
+}
+
+static int put_v4l2_edid32(struct v4l2_edid *p64,
+ struct v4l2_edid32 __user *p32)
+{
+ if (copy_to_user(p32, p64, offsetof(struct v4l2_edid32, edid)))
+ return -EFAULT;
+ return 0;
+}
+
+/*
+ * List of ioctls that require 32-bit/64-bit conversion
+ *
+ * The V4L2 ioctls that aren't listed here don't have pointer arguments
+ * and their struct size is identical in the 32-bit and 64-bit versions,
+ * so they don't need translation.
+ */
+
+#define VIDIOC_G_FMT32 _IOWR('V', 4, struct v4l2_format32)
+#define VIDIOC_S_FMT32 _IOWR('V', 5, struct v4l2_format32)
+#define VIDIOC_QUERYBUF32 _IOWR('V', 9, struct v4l2_buffer32)
+#define VIDIOC_G_FBUF32 _IOR ('V', 10, struct v4l2_framebuffer32)
+#define VIDIOC_S_FBUF32 _IOW ('V', 11, struct v4l2_framebuffer32)
+#define VIDIOC_QBUF32 _IOWR('V', 15, struct v4l2_buffer32)
+#define VIDIOC_DQBUF32 _IOWR('V', 17, struct v4l2_buffer32)
+#define VIDIOC_ENUMSTD32 _IOWR('V', 25, struct v4l2_standard32)
+#define VIDIOC_ENUMINPUT32 _IOWR('V', 26, struct v4l2_input32)
+#define VIDIOC_G_EDID32 _IOWR('V', 40, struct v4l2_edid32)
+#define VIDIOC_S_EDID32 _IOWR('V', 41, struct v4l2_edid32)
+#define VIDIOC_TRY_FMT32 _IOWR('V', 64, struct v4l2_format32)
+#define VIDIOC_G_EXT_CTRLS32 _IOWR('V', 71, struct v4l2_ext_controls32)
+#define VIDIOC_S_EXT_CTRLS32 _IOWR('V', 72, struct v4l2_ext_controls32)
+#define VIDIOC_TRY_EXT_CTRLS32 _IOWR('V', 73, struct v4l2_ext_controls32)
+#define VIDIOC_DQEVENT32 _IOR ('V', 89, struct v4l2_event32)
+#define VIDIOC_CREATE_BUFS32 _IOWR('V', 92, struct v4l2_create_buffers32)
+#define VIDIOC_PREPARE_BUF32 _IOWR('V', 93, struct v4l2_buffer32)
+
+#ifdef CONFIG_COMPAT_32BIT_TIME
+#define VIDIOC_QUERYBUF32_TIME32 _IOWR('V', 9, struct v4l2_buffer32_time32)
+#define VIDIOC_QBUF32_TIME32 _IOWR('V', 15, struct v4l2_buffer32_time32)
+#define VIDIOC_DQBUF32_TIME32 _IOWR('V', 17, struct v4l2_buffer32_time32)
+#define VIDIOC_DQEVENT32_TIME32 _IOR ('V', 89, struct v4l2_event32_time32)
+#define VIDIOC_PREPARE_BUF32_TIME32 _IOWR('V', 93, struct v4l2_buffer32_time32)
+#endif
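+
+/*
+ * The *_TIME32 variants cover 32-bit userspace built with a 32-bit
+ * time_t: the ioctl numbers differ only in the struct size encoded in
+ * them, since the embedded timestamp is smaller.
+ */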
+
+unsigned int v4l2_compat_translate_cmd(unsigned int cmd)
+{
+ switch (cmd) {
+ case VIDIOC_G_FMT32:
+ return VIDIOC_G_FMT;
+ case VIDIOC_S_FMT32:
+ return VIDIOC_S_FMT;
+ case VIDIOC_TRY_FMT32:
+ return VIDIOC_TRY_FMT;
+ case VIDIOC_G_FBUF32:
+ return VIDIOC_G_FBUF;
+ case VIDIOC_S_FBUF32:
+ return VIDIOC_S_FBUF;
+#ifdef CONFIG_COMPAT_32BIT_TIME
+ case VIDIOC_QUERYBUF32_TIME32:
+ return VIDIOC_QUERYBUF;
+ case VIDIOC_QBUF32_TIME32:
+ return VIDIOC_QBUF;
+ case VIDIOC_DQBUF32_TIME32:
+ return VIDIOC_DQBUF;
+ case VIDIOC_PREPARE_BUF32_TIME32:
+ return VIDIOC_PREPARE_BUF;
+#endif
+ case VIDIOC_QUERYBUF32:
+ return VIDIOC_QUERYBUF;
+ case VIDIOC_QBUF32:
+ return VIDIOC_QBUF;
+ case VIDIOC_DQBUF32:
+ return VIDIOC_DQBUF;
+ case VIDIOC_CREATE_BUFS32:
+ return VIDIOC_CREATE_BUFS;
+ case VIDIOC_G_EXT_CTRLS32:
+ return VIDIOC_G_EXT_CTRLS;
+ case VIDIOC_S_EXT_CTRLS32:
+ return VIDIOC_S_EXT_CTRLS;
+ case VIDIOC_TRY_EXT_CTRLS32:
+ return VIDIOC_TRY_EXT_CTRLS;
+ case VIDIOC_PREPARE_BUF32:
+ return VIDIOC_PREPARE_BUF;
+ case VIDIOC_ENUMSTD32:
+ return VIDIOC_ENUMSTD;
+ case VIDIOC_ENUMINPUT32:
+ return VIDIOC_ENUMINPUT;
+ case VIDIOC_G_EDID32:
+ return VIDIOC_G_EDID;
+ case VIDIOC_S_EDID32:
+ return VIDIOC_S_EDID;
+#ifdef CONFIG_X86_64
+ case VIDIOC_DQEVENT32:
+ return VIDIOC_DQEVENT;
+#endif
+#ifdef CONFIG_COMPAT_32BIT_TIME
+ case VIDIOC_DQEVENT32_TIME32:
+ return VIDIOC_DQEVENT;
+#endif
+ }
+ return cmd;
+}
+
+int v4l2_compat_get_user(void __user *arg, void *parg, unsigned int cmd)
+{
+ switch (cmd) {
+ case VIDIOC_G_FMT32:
+ case VIDIOC_S_FMT32:
+ case VIDIOC_TRY_FMT32:
+ return get_v4l2_format32(parg, arg);
+
+ case VIDIOC_S_FBUF32:
+ return get_v4l2_framebuffer32(parg, arg);
+#ifdef CONFIG_COMPAT_32BIT_TIME
+ case VIDIOC_QUERYBUF32_TIME32:
+ case VIDIOC_QBUF32_TIME32:
+ case VIDIOC_DQBUF32_TIME32:
+ case VIDIOC_PREPARE_BUF32_TIME32:
+ return get_v4l2_buffer32_time32(parg, arg);
+#endif
+ case VIDIOC_QUERYBUF32:
+ case VIDIOC_QBUF32:
+ case VIDIOC_DQBUF32:
+ case VIDIOC_PREPARE_BUF32:
+ return get_v4l2_buffer32(parg, arg);
+
+ case VIDIOC_G_EXT_CTRLS32:
+ case VIDIOC_S_EXT_CTRLS32:
+ case VIDIOC_TRY_EXT_CTRLS32:
+ return get_v4l2_ext_controls32(parg, arg);
+
+ case VIDIOC_CREATE_BUFS32:
+ return get_v4l2_create32(parg, arg);
+
+ case VIDIOC_ENUMSTD32:
+ return get_v4l2_standard32(parg, arg);
+
+ case VIDIOC_ENUMINPUT32:
+ return get_v4l2_input32(parg, arg);
+
+ case VIDIOC_G_EDID32:
+ case VIDIOC_S_EDID32:
+ return get_v4l2_edid32(parg, arg);
+ }
+ return 0;
+}
+
+int v4l2_compat_put_user(void __user *arg, void *parg, unsigned int cmd)
+{
+ switch (cmd) {
+ case VIDIOC_G_FMT32:
+ case VIDIOC_S_FMT32:
+ case VIDIOC_TRY_FMT32:
+ return put_v4l2_format32(parg, arg);
+
+ case VIDIOC_G_FBUF32:
+ return put_v4l2_framebuffer32(parg, arg);
+#ifdef CONFIG_COMPAT_32BIT_TIME
+ case VIDIOC_QUERYBUF32_TIME32:
+ case VIDIOC_QBUF32_TIME32:
+ case VIDIOC_DQBUF32_TIME32:
+ case VIDIOC_PREPARE_BUF32_TIME32:
+ return put_v4l2_buffer32_time32(parg, arg);
+#endif
+ case VIDIOC_QUERYBUF32:
+ case VIDIOC_QBUF32:
+ case VIDIOC_DQBUF32:
+ case VIDIOC_PREPARE_BUF32:
+ return put_v4l2_buffer32(parg, arg);
+
+ case VIDIOC_G_EXT_CTRLS32:
+ case VIDIOC_S_EXT_CTRLS32:
+ case VIDIOC_TRY_EXT_CTRLS32:
+ return put_v4l2_ext_controls32(parg, arg);
+
+ case VIDIOC_CREATE_BUFS32:
+ return put_v4l2_create32(parg, arg);
+
+ case VIDIOC_ENUMSTD32:
+ return put_v4l2_standard32(parg, arg);
+
+ case VIDIOC_ENUMINPUT32:
+ return put_v4l2_input32(parg, arg);
+
+ case VIDIOC_G_EDID32:
+ case VIDIOC_S_EDID32:
+ return put_v4l2_edid32(parg, arg);
+#ifdef CONFIG_X86_64
+ case VIDIOC_DQEVENT32:
+ return put_v4l2_event32(parg, arg);
+#endif
+#ifdef CONFIG_COMPAT_32BIT_TIME
+ case VIDIOC_DQEVENT32_TIME32:
+ return put_v4l2_event32_time32(parg, arg);
+#endif
+ }
+ return 0;
+}
+
+int v4l2_compat_get_array_args(struct file *file, void *mbuf,
+ void __user *user_ptr, size_t array_size,
+ unsigned int cmd, void *arg)
+{
+ int err = 0;
+
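+	/*
+	 * Zero the scratch buffer up front: the 32-bit per-element structs
+	 * can be smaller than their 64-bit counterparts, so the conversions
+	 * below may not write every byte of mbuf.
+	 */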
+ memset(mbuf, 0, array_size);
+
+ switch (cmd) {
+#ifdef CONFIG_COMPAT_32BIT_TIME
+ case VIDIOC_QUERYBUF32_TIME32:
+ case VIDIOC_QBUF32_TIME32:
+ case VIDIOC_DQBUF32_TIME32:
+ case VIDIOC_PREPARE_BUF32_TIME32:
+#endif
+ case VIDIOC_QUERYBUF32:
+ case VIDIOC_QBUF32:
+ case VIDIOC_DQBUF32:
+ case VIDIOC_PREPARE_BUF32: {
+ struct v4l2_buffer *b64 = arg;
+ struct v4l2_plane *p64 = mbuf;
+ struct v4l2_plane32 __user *p32 = user_ptr;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(b64->type)) {
+ u32 num_planes = b64->length;
+
+ if (num_planes == 0)
+ return 0;
+
+ while (num_planes--) {
+ err = get_v4l2_plane32(p64, p32, b64->memory);
+ if (err)
+ return err;
+ ++p64;
+ ++p32;
+ }
+ }
+ break;
+ }
+ case VIDIOC_G_EXT_CTRLS32:
+ case VIDIOC_S_EXT_CTRLS32:
+ case VIDIOC_TRY_EXT_CTRLS32: {
+ struct v4l2_ext_controls *ecs64 = arg;
+ struct v4l2_ext_control *ec64 = mbuf;
+ struct v4l2_ext_control32 __user *ec32 = user_ptr;
+ int n;
+
+ for (n = 0; n < ecs64->count; n++) {
+ if (copy_from_user(ec64, ec32, sizeof(*ec32)))
+ return -EFAULT;
+
+ if (ctrl_is_pointer(file, ec64->id)) {
+ compat_uptr_t p;
+
+ if (get_user(p, &ec32->string))
+ return -EFAULT;
+ ec64->string = compat_ptr(p);
+ }
+ ec32++;
+ ec64++;
+ }
+ break;
+ }
+ default:
+ if (copy_from_user(mbuf, user_ptr, array_size))
+ err = -EFAULT;
+ break;
+ }
+
+ return err;
+}
+
+int v4l2_compat_put_array_args(struct file *file, void __user *user_ptr,
+ void *mbuf, size_t array_size,
+ unsigned int cmd, void *arg)
+{
+ int err = 0;
+
+ switch (cmd) {
+#ifdef CONFIG_COMPAT_32BIT_TIME
+ case VIDIOC_QUERYBUF32_TIME32:
+ case VIDIOC_QBUF32_TIME32:
+ case VIDIOC_DQBUF32_TIME32:
+ case VIDIOC_PREPARE_BUF32_TIME32:
+#endif
+ case VIDIOC_QUERYBUF32:
+ case VIDIOC_QBUF32:
+ case VIDIOC_DQBUF32:
+ case VIDIOC_PREPARE_BUF32: {
+ struct v4l2_buffer *b64 = arg;
+ struct v4l2_plane *p64 = mbuf;
+ struct v4l2_plane32 __user *p32 = user_ptr;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(b64->type)) {
+ u32 num_planes = b64->length;
+
+ if (num_planes == 0)
+ return 0;
+
+ while (num_planes--) {
+ err = put_v4l2_plane32(p64, p32, b64->memory);
+ if (err)
+ return err;
+ ++p64;
+ ++p32;
+ }
+ }
+ break;
+ }
+ case VIDIOC_G_EXT_CTRLS32:
+ case VIDIOC_S_EXT_CTRLS32:
+ case VIDIOC_TRY_EXT_CTRLS32: {
+ struct v4l2_ext_controls *ecs64 = arg;
+ struct v4l2_ext_control *ec64 = mbuf;
+ struct v4l2_ext_control32 __user *ec32 = user_ptr;
+ int n;
+
+ for (n = 0; n < ecs64->count; n++) {
+ unsigned int size = sizeof(*ec32);
+ /*
+ * Do not modify the pointer when copying a pointer
+ * control. The contents of the pointer was changed,
+ * not the pointer itself.
+ * The structures are otherwise compatible.
+ */
+ if (ctrl_is_pointer(file, ec64->id))
+ size -= sizeof(ec32->value64);
+
+ if (copy_to_user(ec32, ec64, size))
+ return -EFAULT;
+
+ ec32++;
+ ec64++;
+ }
+ break;
+ }
+ default:
+ if (copy_to_user(user_ptr, mbuf, array_size))
+ err = -EFAULT;
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * v4l2_compat_ioctl32() - Handles a compat32 ioctl call
+ *
+ * @file: pointer to &struct file with the file handler
+ * @cmd: ioctl to be called
+ * @arg: arguments passed from/to the ioctl handler
+ *
+ * This function is meant to be used as the .compat_ioctl file operation
+ * in v4l2-dev.c in order to deal with 32-bit calls on a 64-bit kernel.
+ *
+ * Non-private V4L2 ioctls are forwarded to the file's unlocked_ioctl
+ * handler; the argument translation itself is done through the helpers
+ * above. Private ioctls are handed to vdev->fops->compat_ioctl32 instead.
+ */
+long v4l2_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ long ret = -ENOIOCTLCMD;
+
+ if (!file->f_op->unlocked_ioctl)
+ return ret;
+
+ if (!video_is_registered(vdev))
+ return -ENODEV;
+
+ if (_IOC_TYPE(cmd) == 'V' && _IOC_NR(cmd) < BASE_VIDIOC_PRIVATE)
+ ret = file->f_op->unlocked_ioctl(file, cmd,
+ (unsigned long)compat_ptr(arg));
+ else if (vdev->fops->compat_ioctl32)
+ ret = vdev->fops->compat_ioctl32(file, cmd, arg);
+
+ if (ret == -ENOIOCTLCMD)
+ pr_debug("compat_ioctl32: unknown ioctl '%c', dir=%d, #%d (0x%08x)\n",
+ _IOC_TYPE(cmd), _IOC_DIR(cmd), _IOC_NR(cmd), cmd);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_compat_ioctl32);
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-api.c b/drivers/media/v4l2-core/v4l2-ctrls-api.c
new file mode 100644
index 0000000000..002ea6588e
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-ctrls-api.c
@@ -0,0 +1,1305 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * V4L2 controls framework uAPI implementation:
+ *
+ * Copyright (C) 2010-2021 Hans Verkuil <hverkuil-cisco@xs4all.nl>
+ */
+
+#define pr_fmt(fmt) "v4l2-ctrls: " fmt
+
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+
+#include "v4l2-ctrls-priv.h"
+
+/* Internal temporary helper struct, one for each v4l2_ext_control */
+struct v4l2_ctrl_helper {
+ /* Pointer to the control reference of the master control */
+ struct v4l2_ctrl_ref *mref;
+ /* The control ref corresponding to the v4l2_ext_control ID field. */
+ struct v4l2_ctrl_ref *ref;
+ /*
+ * v4l2_ext_control index of the next control belonging to the
+ * same cluster, or 0 if there isn't any.
+ */
+ u32 next;
+};
+
+/*
+ * Helper functions to copy control payload data from kernel space to
+ * user space and vice versa.
+ */
+
+/* Helper function: copy the given control value back to the caller */
+static int ptr_to_user(struct v4l2_ext_control *c,
+ struct v4l2_ctrl *ctrl,
+ union v4l2_ctrl_ptr ptr)
+{
+ u32 len;
+
+ if (ctrl->is_ptr && !ctrl->is_string)
+ return copy_to_user(c->ptr, ptr.p_const, c->size) ?
+ -EFAULT : 0;
+
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_STRING:
+ len = strlen(ptr.p_char);
+ if (c->size < len + 1) {
+ c->size = ctrl->elem_size;
+ return -ENOSPC;
+ }
+ return copy_to_user(c->string, ptr.p_char, len + 1) ?
+ -EFAULT : 0;
+ case V4L2_CTRL_TYPE_INTEGER64:
+ c->value64 = *ptr.p_s64;
+ break;
+ default:
+ c->value = *ptr.p_s32;
+ break;
+ }
+ return 0;
+}
+
+/* Helper function: copy the current control value back to the caller */
+static int cur_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
+{
+ return ptr_to_user(c, ctrl, ctrl->p_cur);
+}
+
+/* Helper function: copy the new control value back to the caller */
+static int new_to_user(struct v4l2_ext_control *c,
+ struct v4l2_ctrl *ctrl)
+{
+ return ptr_to_user(c, ctrl, ctrl->p_new);
+}
+
+/* Helper function: copy the request value back to the caller */
+static int req_to_user(struct v4l2_ext_control *c,
+ struct v4l2_ctrl_ref *ref)
+{
+ return ptr_to_user(c, ref->ctrl, ref->p_req);
+}
+
+/* Helper function: copy the initial control value back to the caller */
+static int def_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
+{
+ ctrl->type_ops->init(ctrl, 0, ctrl->p_new);
+
+ return ptr_to_user(c, ctrl, ctrl->p_new);
+}
+
+/* Helper function: copy the caller-provided value as the new control value */
+static int user_to_new(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
+{
+ int ret;
+ u32 size;
+
+ ctrl->is_new = 0;
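+	/*
+	 * Grow the backing store of a dynamic array if needed. A single
+	 * allocation holds both values: the 'new' value in the first half
+	 * and the 'current' value in the second half.
+	 */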
+ if (ctrl->is_dyn_array &&
+ c->size > ctrl->p_array_alloc_elems * ctrl->elem_size) {
+ void *old = ctrl->p_array;
+ void *tmp = kvzalloc(2 * c->size, GFP_KERNEL);
+
+ if (!tmp)
+ return -ENOMEM;
+ memcpy(tmp, ctrl->p_new.p, ctrl->elems * ctrl->elem_size);
+ memcpy(tmp + c->size, ctrl->p_cur.p, ctrl->elems * ctrl->elem_size);
+ ctrl->p_new.p = tmp;
+ ctrl->p_cur.p = tmp + c->size;
+ ctrl->p_array = tmp;
+ ctrl->p_array_alloc_elems = c->size / ctrl->elem_size;
+ kvfree(old);
+ }
+
+ if (ctrl->is_ptr && !ctrl->is_string) {
+ unsigned int elems = c->size / ctrl->elem_size;
+
+ if (copy_from_user(ctrl->p_new.p, c->ptr, c->size))
+ return -EFAULT;
+ ctrl->is_new = 1;
+ if (ctrl->is_dyn_array)
+ ctrl->new_elems = elems;
+ else if (ctrl->is_array)
+ ctrl->type_ops->init(ctrl, elems, ctrl->p_new);
+ return 0;
+ }
+
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_INTEGER64:
+ *ctrl->p_new.p_s64 = c->value64;
+ break;
+ case V4L2_CTRL_TYPE_STRING:
+ size = c->size;
+ if (size == 0)
+ return -ERANGE;
+ if (size > ctrl->maximum + 1)
+ size = ctrl->maximum + 1;
+ ret = copy_from_user(ctrl->p_new.p_char, c->string, size) ? -EFAULT : 0;
+ if (!ret) {
+ char last = ctrl->p_new.p_char[size - 1];
+
+ ctrl->p_new.p_char[size - 1] = 0;
+ /*
+ * If the string was longer than ctrl->maximum,
+ * then return an error.
+ */
+ if (strlen(ctrl->p_new.p_char) == ctrl->maximum && last)
+ return -ERANGE;
+ ctrl->is_new = 1;
+ }
+ return ret;
+ default:
+ *ctrl->p_new.p_s32 = c->value;
+ break;
+ }
+ ctrl->is_new = 1;
+ return 0;
+}
+
+/*
+ * VIDIOC_G/TRY/S_EXT_CTRLS implementation
+ */
+
+/*
+ * Some general notes on the atomic requirements of VIDIOC_G/TRY/S_EXT_CTRLS:
+ *
+ * It is not a fully atomic operation, just best-effort only. After all, if
+ * multiple controls have to be set through multiple i2c writes (for example)
+ * then some initial writes may succeed while others fail. Thus leaving the
+ * system in an inconsistent state. The question is how much effort you are
+ * willing to spend on trying to make something atomic that really isn't.
+ *
+ * From the point of view of an application the main requirement is that
+ * when you call VIDIOC_S_EXT_CTRLS and some values are invalid then an
+ * error should be returned without actually affecting any controls.
+ *
+ * If all the values are correct, then it is acceptable to just give up
+ * in case of low-level errors.
+ *
+ * It is important though that the application can tell when only a partial
+ * configuration was done. The way we do that is through the error_idx field
+ * of struct v4l2_ext_controls: if that is equal to the count field then no
+ * controls were affected. Otherwise all controls before that index were
+ * successful in performing their 'get' or 'set' operation, the control at
+ * the given index failed, and you don't know what happened with the controls
+ * after the failed one: if they were part of a control cluster they
+ * could have been successfully processed (if a cluster member was encountered
+ * at index < error_idx), they could have failed (if a cluster member was at
+ * error_idx), or they may not have been processed yet (if the first cluster
+ * member appeared after error_idx).
+ *
+ * It is all fairly theoretical, though. In practice all you can do is to
+ * bail out. If error_idx == count, then it is an application bug. If
+ * error_idx < count then it is only an application bug if the error code was
+ * EBUSY. That usually means that something started streaming just when you
+ * tried to set the controls. In all other cases it is a driver/hardware
+ * problem and all you can do is to retry or bail out.
+ *
+ * Note that these rules do not apply to VIDIOC_TRY_EXT_CTRLS: since that
+ * never modifies controls the error_idx is just set to whatever control
+ * has an invalid value.
+ */
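+
+/*
+ * As a rough illustration of the error_idx convention described above,
+ * an application-side check might look like this (fix_request() and
+ * bail_out() are hypothetical placeholders for whatever recovery the
+ * application implements):
+ *
+ *	if (ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls) < 0) {
+ *		if (ctrls.error_idx == ctrls.count)
+ *			fix_request();	// nothing was applied, bad request
+ *		else
+ *			bail_out();	// controls before error_idx took effect
+ *	}
+ */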
+
+/*
+ * Prepare for the extended g/s/try functions.
+ * Find the controls in the control array and do some basic checks.
+ */
+static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ext_controls *cs,
+ struct v4l2_ctrl_helper *helpers,
+ struct video_device *vdev,
+ bool get)
+{
+ struct v4l2_ctrl_helper *h;
+ bool have_clusters = false;
+ u32 i;
+
+ for (i = 0, h = helpers; i < cs->count; i++, h++) {
+ struct v4l2_ext_control *c = &cs->controls[i];
+ struct v4l2_ctrl_ref *ref;
+ struct v4l2_ctrl *ctrl;
+ u32 id = c->id & V4L2_CTRL_ID_MASK;
+
+ cs->error_idx = i;
+
+ if (cs->which &&
+ cs->which != V4L2_CTRL_WHICH_DEF_VAL &&
+ cs->which != V4L2_CTRL_WHICH_REQUEST_VAL &&
+ V4L2_CTRL_ID2WHICH(id) != cs->which) {
+ dprintk(vdev,
+ "invalid which 0x%x or control id 0x%x\n",
+ cs->which, id);
+ return -EINVAL;
+ }
+
+ /*
+ * Old-style private controls are not allowed for
+ * extended controls.
+ */
+ if (id >= V4L2_CID_PRIVATE_BASE) {
+ dprintk(vdev,
+ "old-style private controls not allowed\n");
+ return -EINVAL;
+ }
+ ref = find_ref_lock(hdl, id);
+ if (!ref) {
+ dprintk(vdev, "cannot find control id 0x%x\n", id);
+ return -EINVAL;
+ }
+ h->ref = ref;
+ ctrl = ref->ctrl;
+ if (ctrl->flags & V4L2_CTRL_FLAG_DISABLED) {
+ dprintk(vdev, "control id 0x%x is disabled\n", id);
+ return -EINVAL;
+ }
+
+ if (ctrl->cluster[0]->ncontrols > 1)
+ have_clusters = true;
+ if (ctrl->cluster[0] != ctrl)
+ ref = find_ref_lock(hdl, ctrl->cluster[0]->id);
+ if (ctrl->is_dyn_array) {
+ unsigned int max_size = ctrl->dims[0] * ctrl->elem_size;
+ unsigned int tot_size = ctrl->elem_size;
+
+ if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL)
+ tot_size *= ref->p_req_elems;
+ else
+ tot_size *= ctrl->elems;
+
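+			/* Round c->size down to a whole number of elements. */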
+ c->size = ctrl->elem_size * (c->size / ctrl->elem_size);
+ if (get) {
+ if (c->size < tot_size) {
+ c->size = tot_size;
+ return -ENOSPC;
+ }
+ c->size = tot_size;
+ } else {
+ if (c->size > max_size) {
+ c->size = max_size;
+ return -ENOSPC;
+ }
+ if (!c->size)
+ return -EFAULT;
+ }
+ } else if (ctrl->is_ptr && !ctrl->is_string) {
+ unsigned int tot_size = ctrl->elems * ctrl->elem_size;
+
+ if (c->size < tot_size) {
+ /*
+ * In the get case the application first
+ * queries to obtain the size of the control.
+ */
+ if (get) {
+ c->size = tot_size;
+ return -ENOSPC;
+ }
+ dprintk(vdev,
+ "pointer control id 0x%x size too small, %d bytes but %d bytes needed\n",
+ id, c->size, tot_size);
+ return -EFAULT;
+ }
+ c->size = tot_size;
+ }
+ /* Store the ref to the master control of the cluster */
+ h->mref = ref;
+ /*
+ * Initially set next to 0, meaning that there is no other
+ * control in this helper array belonging to the same
+ * cluster.
+ */
+ h->next = 0;
+ }
+
+ /*
+ * We are done if there were no controls that belong to a multi-
+ * control cluster.
+ */
+ if (!have_clusters)
+ return 0;
+
+ /*
+ * The code below figures out in O(n) time which controls in the list
+ * belong to the same cluster.
+ */
+
+ /* This has to be done with the handler lock taken. */
+ mutex_lock(hdl->lock);
+
+ /* First zero the helper field in the master control references */
+ for (i = 0; i < cs->count; i++)
+ helpers[i].mref->helper = NULL;
+ for (i = 0, h = helpers; i < cs->count; i++, h++) {
+ struct v4l2_ctrl_ref *mref = h->mref;
+
+ /*
+ * If the mref->helper is set, then it points to an earlier
+ * helper that belongs to the same cluster.
+ */
+ if (mref->helper) {
+ /*
+ * Set the next field of mref->helper to the current
+ * index: this means that the earlier helper now
+ * points to the next helper in the same cluster.
+ */
+ mref->helper->next = i;
+ /*
+ * mref should be set only for the first helper in the
+ * cluster, clear the others.
+ */
+ h->mref = NULL;
+ }
+ /* Point the mref helper to the current helper struct. */
+ mref->helper = h;
+ }
+ mutex_unlock(hdl->lock);
+ return 0;
+}
+
+/*
+ * Handles the corner case where cs->count == 0. It checks whether the
+ * specified control class exists. For a 'which' value of 0, or one that
+ * selects the default or request values, the check trivially succeeds.
+ */
+static int class_check(struct v4l2_ctrl_handler *hdl, u32 which)
+{
+ if (which == 0 || which == V4L2_CTRL_WHICH_DEF_VAL ||
+ which == V4L2_CTRL_WHICH_REQUEST_VAL)
+ return 0;
+ return find_ref_lock(hdl, which | 1) ? 0 : -EINVAL;
+}
+
+/*
+ * Get extended controls. Allocates the helpers array if needed.
+ *
+ * Note that v4l2_g_ext_ctrls_common() with 'which' set to
+ * V4L2_CTRL_WHICH_REQUEST_VAL is only called if the request was
+ * completed, and in that case p_req_valid is true for all controls.
+ */
+int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ext_controls *cs,
+ struct video_device *vdev)
+{
+ struct v4l2_ctrl_helper helper[4];
+ struct v4l2_ctrl_helper *helpers = helper;
+ int ret;
+ int i, j;
+ bool is_default, is_request;
+
+ is_default = (cs->which == V4L2_CTRL_WHICH_DEF_VAL);
+ is_request = (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL);
+
+ cs->error_idx = cs->count;
+ cs->which = V4L2_CTRL_ID2WHICH(cs->which);
+
+ if (!hdl)
+ return -EINVAL;
+
+ if (cs->count == 0)
+ return class_check(hdl, cs->which);
+
+ if (cs->count > ARRAY_SIZE(helper)) {
+ helpers = kvmalloc_array(cs->count, sizeof(helper[0]),
+ GFP_KERNEL);
+ if (!helpers)
+ return -ENOMEM;
+ }
+
+ ret = prepare_ext_ctrls(hdl, cs, helpers, vdev, true);
+ cs->error_idx = cs->count;
+
+ for (i = 0; !ret && i < cs->count; i++)
+ if (helpers[i].ref->ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY)
+ ret = -EACCES;
+
+ for (i = 0; !ret && i < cs->count; i++) {
+ struct v4l2_ctrl *master;
+ bool is_volatile = false;
+ u32 idx = i;
+
+ if (!helpers[i].mref)
+ continue;
+
+ master = helpers[i].mref->ctrl;
+ cs->error_idx = i;
+
+ v4l2_ctrl_lock(master);
+
+ /*
+ * g_volatile_ctrl will update the new control values.
+ * This makes no sense for V4L2_CTRL_WHICH_DEF_VAL and
+ * V4L2_CTRL_WHICH_REQUEST_VAL. In the case of requests
+ * it is v4l2_ctrl_request_complete() that copies the
+ * volatile controls at the time of request completion
+		 * volatile controls to the request at the time of request
+		 * completion, so you don't want to do that again.
+ if (!is_default && !is_request &&
+ ((master->flags & V4L2_CTRL_FLAG_VOLATILE) ||
+ (master->has_volatiles && !is_cur_manual(master)))) {
+ for (j = 0; j < master->ncontrols; j++)
+ cur_to_new(master->cluster[j]);
+ ret = call_op(master, g_volatile_ctrl);
+ is_volatile = true;
+ }
+
+ if (ret) {
+ v4l2_ctrl_unlock(master);
+ break;
+ }
+
+ /*
+ * Copy the default value (if is_default is true), the
+ * request value (if is_request is true and p_req is valid),
+ * the new volatile value (if is_volatile is true) or the
+ * current value.
+ */
+ do {
+ struct v4l2_ctrl_ref *ref = helpers[idx].ref;
+
+ if (is_default)
+ ret = def_to_user(cs->controls + idx, ref->ctrl);
+ else if (is_request && ref->p_req_array_enomem)
+ ret = -ENOMEM;
+ else if (is_request && ref->p_req_valid)
+ ret = req_to_user(cs->controls + idx, ref);
+ else if (is_volatile)
+ ret = new_to_user(cs->controls + idx, ref->ctrl);
+ else
+ ret = cur_to_user(cs->controls + idx, ref->ctrl);
+ idx = helpers[idx].next;
+ } while (!ret && idx);
+
+ v4l2_ctrl_unlock(master);
+ }
+
+ if (cs->count > ARRAY_SIZE(helper))
+ kvfree(helpers);
+ return ret;
+}
+
+int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct video_device *vdev,
+ struct media_device *mdev, struct v4l2_ext_controls *cs)
+{
+ if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL)
+ return v4l2_g_ext_ctrls_request(hdl, vdev, mdev, cs);
+
+ return v4l2_g_ext_ctrls_common(hdl, cs, vdev);
+}
+EXPORT_SYMBOL(v4l2_g_ext_ctrls);
+
+/* Validate a new control */
+static int validate_new(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr p_new)
+{
+ return ctrl->type_ops->validate(ctrl, p_new);
+}
+
+/* Validate controls. */
+static int validate_ctrls(struct v4l2_ext_controls *cs,
+ struct v4l2_ctrl_helper *helpers,
+ struct video_device *vdev,
+ bool set)
+{
+ unsigned int i;
+ int ret = 0;
+
+ cs->error_idx = cs->count;
+ for (i = 0; i < cs->count; i++) {
+ struct v4l2_ctrl *ctrl = helpers[i].ref->ctrl;
+ union v4l2_ctrl_ptr p_new;
+
+ cs->error_idx = i;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY) {
+ dprintk(vdev,
+ "control id 0x%x is read-only\n",
+ ctrl->id);
+ return -EACCES;
+ }
+ /*
+		 * This test is also done in try_or_set_cluster() which
+ * is called in atomic context, so that has the final say,
+ * but it makes sense to do an up-front check as well. Once
+ * an error occurs in try_set_control_cluster() some other
+ * controls may have been set already and we want to do a
+ * best-effort to avoid that.
+ */
+ if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED)) {
+ dprintk(vdev,
+ "control id 0x%x is grabbed, cannot set\n",
+ ctrl->id);
+ return -EBUSY;
+ }
+ /*
+ * Skip validation for now if the payload needs to be copied
+ * from userspace into kernelspace. We'll validate those later.
+ */
+ if (ctrl->is_ptr)
+ continue;
+ if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64)
+ p_new.p_s64 = &cs->controls[i].value64;
+ else
+ p_new.p_s32 = &cs->controls[i].value;
+ ret = validate_new(ctrl, p_new);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+/* Try or try-and-set controls */
+int try_set_ext_ctrls_common(struct v4l2_fh *fh,
+ struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ext_controls *cs,
+ struct video_device *vdev, bool set)
+{
+ struct v4l2_ctrl_helper helper[4];
+ struct v4l2_ctrl_helper *helpers = helper;
+ unsigned int i, j;
+ int ret;
+
+ cs->error_idx = cs->count;
+
+ /* Default value cannot be changed */
+ if (cs->which == V4L2_CTRL_WHICH_DEF_VAL) {
+ dprintk(vdev, "%s: cannot change default value\n",
+ video_device_node_name(vdev));
+ return -EINVAL;
+ }
+
+ cs->which = V4L2_CTRL_ID2WHICH(cs->which);
+
+ if (!hdl) {
+ dprintk(vdev, "%s: invalid null control handler\n",
+ video_device_node_name(vdev));
+ return -EINVAL;
+ }
+
+ if (cs->count == 0)
+ return class_check(hdl, cs->which);
+
+ if (cs->count > ARRAY_SIZE(helper)) {
+ helpers = kvmalloc_array(cs->count, sizeof(helper[0]),
+ GFP_KERNEL);
+ if (!helpers)
+ return -ENOMEM;
+ }
+ ret = prepare_ext_ctrls(hdl, cs, helpers, vdev, false);
+ if (!ret)
+ ret = validate_ctrls(cs, helpers, vdev, set);
+ if (ret && set)
+ cs->error_idx = cs->count;
+ for (i = 0; !ret && i < cs->count; i++) {
+ struct v4l2_ctrl *master;
+ u32 idx = i;
+
+ if (!helpers[i].mref)
+ continue;
+
+ cs->error_idx = i;
+ master = helpers[i].mref->ctrl;
+ v4l2_ctrl_lock(master);
+
+ /* Reset the 'is_new' flags of the cluster */
+ for (j = 0; j < master->ncontrols; j++)
+ if (master->cluster[j])
+ master->cluster[j]->is_new = 0;
+
+ /*
+ * For volatile autoclusters that are currently in auto mode
+ * we need to discover if it will be set to manual mode.
+ * If so, then we have to copy the current volatile values
+ * first since those will become the new manual values (which
+ * may be overwritten by explicit new values from this set
+ * of controls).
+ */
+ if (master->is_auto && master->has_volatiles &&
+ !is_cur_manual(master)) {
+ /* Pick an initial non-manual value */
+ s32 new_auto_val = master->manual_mode_value + 1;
+ u32 tmp_idx = idx;
+
+ do {
+ /*
+ * Check if the auto control is part of the
+ * list, and remember the new value.
+ */
+ if (helpers[tmp_idx].ref->ctrl == master)
+ new_auto_val = cs->controls[tmp_idx].value;
+ tmp_idx = helpers[tmp_idx].next;
+ } while (tmp_idx);
+ /*
+ * If the new value == the manual value, then copy
+ * the current volatile values.
+ */
+ if (new_auto_val == master->manual_mode_value)
+ update_from_auto_cluster(master);
+ }
+
+ /*
+ * Copy the new caller-supplied control values.
+ * user_to_new() sets 'is_new' to 1.
+ */
+ do {
+ struct v4l2_ctrl *ctrl = helpers[idx].ref->ctrl;
+
+ ret = user_to_new(cs->controls + idx, ctrl);
+ if (!ret && ctrl->is_ptr) {
+ ret = validate_new(ctrl, ctrl->p_new);
+ if (ret)
+ dprintk(vdev,
+ "failed to validate control %s (%d)\n",
+ v4l2_ctrl_get_name(ctrl->id), ret);
+ }
+ idx = helpers[idx].next;
+ } while (!ret && idx);
+
+ if (!ret)
+ ret = try_or_set_cluster(fh, master,
+ !hdl->req_obj.req && set, 0);
+ if (!ret && hdl->req_obj.req && set) {
+ for (j = 0; j < master->ncontrols; j++) {
+ struct v4l2_ctrl_ref *ref =
+ find_ref(hdl, master->cluster[j]->id);
+
+ new_to_req(ref);
+ }
+ }
+
+ /* Copy the new values back to userspace. */
+ if (!ret) {
+ idx = i;
+ do {
+ ret = new_to_user(cs->controls + idx,
+ helpers[idx].ref->ctrl);
+ idx = helpers[idx].next;
+ } while (!ret && idx);
+ }
+ v4l2_ctrl_unlock(master);
+ }
+
+ if (cs->count > ARRAY_SIZE(helper))
+ kvfree(helpers);
+ return ret;
+}
+
+static int try_set_ext_ctrls(struct v4l2_fh *fh,
+ struct v4l2_ctrl_handler *hdl,
+ struct video_device *vdev,
+ struct media_device *mdev,
+ struct v4l2_ext_controls *cs, bool set)
+{
+ int ret;
+
+ if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL)
+ return try_set_ext_ctrls_request(fh, hdl, vdev, mdev, cs, set);
+
+ ret = try_set_ext_ctrls_common(fh, hdl, cs, vdev, set);
+ if (ret)
+ dprintk(vdev,
+ "%s: try_set_ext_ctrls_common failed (%d)\n",
+ video_device_node_name(vdev), ret);
+
+ return ret;
+}
+
+int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl,
+ struct video_device *vdev,
+ struct media_device *mdev,
+ struct v4l2_ext_controls *cs)
+{
+ return try_set_ext_ctrls(NULL, hdl, vdev, mdev, cs, false);
+}
+EXPORT_SYMBOL(v4l2_try_ext_ctrls);
+
+int v4l2_s_ext_ctrls(struct v4l2_fh *fh,
+ struct v4l2_ctrl_handler *hdl,
+ struct video_device *vdev,
+ struct media_device *mdev,
+ struct v4l2_ext_controls *cs)
+{
+ return try_set_ext_ctrls(fh, hdl, vdev, mdev, cs, true);
+}
+EXPORT_SYMBOL(v4l2_s_ext_ctrls);
+
+/*
+ * VIDIOC_G/S_CTRL implementation
+ */
+
+/* Helper function to get a single control */
+static int get_ctrl(struct v4l2_ctrl *ctrl, struct v4l2_ext_control *c)
+{
+ struct v4l2_ctrl *master = ctrl->cluster[0];
+ int ret = 0;
+ int i;
+
+	/*
+	 * Compound controls are not supported. The new_to_user() and
+	 * cur_to_user() calls below would need to be modified not to access
+	 * userspace memory when called from get_ctrl().
+	 */
+ if (!ctrl->is_int && ctrl->type != V4L2_CTRL_TYPE_INTEGER64)
+ return -EINVAL;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY)
+ return -EACCES;
+
+ v4l2_ctrl_lock(master);
+ /* g_volatile_ctrl will update the current control values */
+ if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) {
+ for (i = 0; i < master->ncontrols; i++)
+ cur_to_new(master->cluster[i]);
+ ret = call_op(master, g_volatile_ctrl);
+ new_to_user(c, ctrl);
+ } else {
+ cur_to_user(c, ctrl);
+ }
+ v4l2_ctrl_unlock(master);
+ return ret;
+}
+
+int v4l2_g_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_control *control)
+{
+ struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id);
+ struct v4l2_ext_control c;
+ int ret;
+
+ if (!ctrl || !ctrl->is_int)
+ return -EINVAL;
+ ret = get_ctrl(ctrl, &c);
+ control->value = c.value;
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_g_ctrl);
+
+/* Helper function for VIDIOC_S_CTRL compatibility */
+static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags)
+{
+ struct v4l2_ctrl *master = ctrl->cluster[0];
+ int ret;
+ int i;
+
+ /* Reset the 'is_new' flags of the cluster */
+ for (i = 0; i < master->ncontrols; i++)
+ if (master->cluster[i])
+ master->cluster[i]->is_new = 0;
+
+ ret = validate_new(ctrl, ctrl->p_new);
+ if (ret)
+ return ret;
+
+ /*
+ * For autoclusters with volatiles that are switched from auto to
+ * manual mode we have to update the current volatile values since
+ * those will become the initial manual values after such a switch.
+ */
+ if (master->is_auto && master->has_volatiles && ctrl == master &&
+ !is_cur_manual(master) && ctrl->val == master->manual_mode_value)
+ update_from_auto_cluster(master);
+
+ ctrl->is_new = 1;
+ return try_or_set_cluster(fh, master, true, ch_flags);
+}
+
+/* Helper function for VIDIOC_S_CTRL compatibility */
+static int set_ctrl_lock(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
+ struct v4l2_ext_control *c)
+{
+ int ret;
+
+ v4l2_ctrl_lock(ctrl);
+ user_to_new(c, ctrl);
+ ret = set_ctrl(fh, ctrl, 0);
+ if (!ret)
+ cur_to_user(c, ctrl);
+ v4l2_ctrl_unlock(ctrl);
+ return ret;
+}
+
+int v4l2_s_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
+ struct v4l2_control *control)
+{
+ struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id);
+ struct v4l2_ext_control c = { control->id };
+ int ret;
+
+ if (!ctrl || !ctrl->is_int)
+ return -EINVAL;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)
+ return -EACCES;
+
+ c.value = control->value;
+ ret = set_ctrl_lock(fh, ctrl, &c);
+ control->value = c.value;
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_s_ctrl);
+
+/*
+ * Helper functions for drivers to get/set controls.
+ */
+
+s32 v4l2_ctrl_g_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_ext_control c;
+
+ /* It's a driver bug if this happens. */
+ if (WARN_ON(!ctrl->is_int))
+ return 0;
+ c.value = 0;
+ get_ctrl(ctrl, &c);
+ return c.value;
+}
+EXPORT_SYMBOL(v4l2_ctrl_g_ctrl);
+
+s64 v4l2_ctrl_g_ctrl_int64(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_ext_control c;
+
+ /* It's a driver bug if this happens. */
+ if (WARN_ON(ctrl->is_ptr || ctrl->type != V4L2_CTRL_TYPE_INTEGER64))
+ return 0;
+ c.value64 = 0;
+ get_ctrl(ctrl, &c);
+ return c.value64;
+}
+EXPORT_SYMBOL(v4l2_ctrl_g_ctrl_int64);
+
+int __v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val)
+{
+ lockdep_assert_held(ctrl->handler->lock);
+
+ /* It's a driver bug if this happens. */
+ if (WARN_ON(!ctrl->is_int))
+ return -EINVAL;
+ ctrl->val = val;
+ return set_ctrl(NULL, ctrl, 0);
+}
+EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl);
+
+int __v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val)
+{
+ lockdep_assert_held(ctrl->handler->lock);
+
+ /* It's a driver bug if this happens. */
+ if (WARN_ON(ctrl->is_ptr || ctrl->type != V4L2_CTRL_TYPE_INTEGER64))
+ return -EINVAL;
+ *ctrl->p_new.p_s64 = val;
+ return set_ctrl(NULL, ctrl, 0);
+}
+EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_int64);
+
+int __v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s)
+{
+ lockdep_assert_held(ctrl->handler->lock);
+
+ /* It's a driver bug if this happens. */
+ if (WARN_ON(ctrl->type != V4L2_CTRL_TYPE_STRING))
+ return -EINVAL;
+ strscpy(ctrl->p_new.p_char, s, ctrl->maximum + 1);
+ return set_ctrl(NULL, ctrl, 0);
+}
+EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_string);
+
+int __v4l2_ctrl_s_ctrl_compound(struct v4l2_ctrl *ctrl,
+ enum v4l2_ctrl_type type, const void *p)
+{
+ lockdep_assert_held(ctrl->handler->lock);
+
+ /* It's a driver bug if this happens. */
+ if (WARN_ON(ctrl->type != type))
+ return -EINVAL;
+ /* Setting dynamic arrays is not (yet?) supported. */
+ if (WARN_ON(ctrl->is_dyn_array))
+ return -EINVAL;
+ memcpy(ctrl->p_new.p, p, ctrl->elems * ctrl->elem_size);
+ return set_ctrl(NULL, ctrl, 0);
+}
+EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_compound);
+
+/*
+ * Modify the range of a control.
+ */
+int __v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl,
+ s64 min, s64 max, u64 step, s64 def)
+{
+ bool value_changed;
+ bool range_changed = false;
+ int ret;
+
+ lockdep_assert_held(ctrl->handler->lock);
+
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_INTEGER:
+ case V4L2_CTRL_TYPE_INTEGER64:
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ case V4L2_CTRL_TYPE_BITMASK:
+ case V4L2_CTRL_TYPE_U8:
+ case V4L2_CTRL_TYPE_U16:
+ case V4L2_CTRL_TYPE_U32:
+ if (ctrl->is_array)
+ return -EINVAL;
+ ret = check_range(ctrl->type, min, max, step, def);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (ctrl->minimum != min || ctrl->maximum != max ||
+ ctrl->step != step || ctrl->default_value != def) {
+ range_changed = true;
+ ctrl->minimum = min;
+ ctrl->maximum = max;
+ ctrl->step = step;
+ ctrl->default_value = def;
+ }
+ cur_to_new(ctrl);
+ if (validate_new(ctrl, ctrl->p_new)) {
+ if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64)
+ *ctrl->p_new.p_s64 = def;
+ else
+ *ctrl->p_new.p_s32 = def;
+ }
+
+ if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64)
+ value_changed = *ctrl->p_new.p_s64 != *ctrl->p_cur.p_s64;
+ else
+ value_changed = *ctrl->p_new.p_s32 != *ctrl->p_cur.p_s32;
+ if (value_changed)
+ ret = set_ctrl(NULL, ctrl, V4L2_EVENT_CTRL_CH_RANGE);
+ else if (range_changed)
+ send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_RANGE);
+ return ret;
+}
+EXPORT_SYMBOL(__v4l2_ctrl_modify_range);
+
+int __v4l2_ctrl_modify_dimensions(struct v4l2_ctrl *ctrl,
+ u32 dims[V4L2_CTRL_MAX_DIMS])
+{
+ unsigned int elems = 1;
+ unsigned int i;
+ void *p_array;
+
+ lockdep_assert_held(ctrl->handler->lock);
+
+ if (!ctrl->is_array || ctrl->is_dyn_array)
+ return -EINVAL;
+
+ for (i = 0; i < ctrl->nr_of_dims; i++)
+ elems *= dims[i];
+ if (elems == 0)
+ return -EINVAL;
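+	/* A single allocation backs both halves: 'new' first, then 'current'. */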
+ p_array = kvzalloc(2 * elems * ctrl->elem_size, GFP_KERNEL);
+ if (!p_array)
+ return -ENOMEM;
+ kvfree(ctrl->p_array);
+ ctrl->p_array_alloc_elems = elems;
+ ctrl->elems = elems;
+ ctrl->new_elems = elems;
+ ctrl->p_array = p_array;
+ ctrl->p_new.p = p_array;
+ ctrl->p_cur.p = p_array + elems * ctrl->elem_size;
+ for (i = 0; i < ctrl->nr_of_dims; i++)
+ ctrl->dims[i] = dims[i];
+ ctrl->type_ops->init(ctrl, 0, ctrl->p_cur);
+ cur_to_new(ctrl);
+ send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_VALUE |
+ V4L2_EVENT_CTRL_CH_DIMENSIONS);
+ return 0;
+}
+EXPORT_SYMBOL(__v4l2_ctrl_modify_dimensions);
+
+/* Implement VIDIOC_QUERY_EXT_CTRL */
+int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctrl *qc)
+{
+ const unsigned int next_flags = V4L2_CTRL_FLAG_NEXT_CTRL | V4L2_CTRL_FLAG_NEXT_COMPOUND;
+ u32 id = qc->id & V4L2_CTRL_ID_MASK;
+ struct v4l2_ctrl_ref *ref;
+ struct v4l2_ctrl *ctrl;
+
+ if (!hdl)
+ return -EINVAL;
+
+ mutex_lock(hdl->lock);
+
+ /* Try to find it */
+ ref = find_ref(hdl, id);
+
+ if ((qc->id & next_flags) && !list_empty(&hdl->ctrl_refs)) {
+ bool is_compound;
+		/* Match any control that is not hidden, i.e. not compound */
+ unsigned int mask = 1;
+ bool match = false;
+
+ if ((qc->id & next_flags) == V4L2_CTRL_FLAG_NEXT_COMPOUND) {
+ /* Match any hidden control */
+ match = true;
+ } else if ((qc->id & next_flags) == next_flags) {
+ /* Match any control, compound or not */
+ mask = 0;
+ }
+
+ /* Find the next control with ID > qc->id */
+
+ /* Did we reach the end of the control list? */
+ if (id >= node2id(hdl->ctrl_refs.prev)) {
+ ref = NULL; /* Yes, so there is no next control */
+ } else if (ref) {
+ /*
+ * We found a control with the given ID, so just get
+ * the next valid one in the list.
+ */
+ list_for_each_entry_continue(ref, &hdl->ctrl_refs, node) {
+ is_compound = ref->ctrl->is_array ||
+ ref->ctrl->type >= V4L2_CTRL_COMPOUND_TYPES;
+ if (id < ref->ctrl->id &&
+ (is_compound & mask) == match)
+ break;
+ }
+ if (&ref->node == &hdl->ctrl_refs)
+ ref = NULL;
+ } else {
+ /*
+ * No control with the given ID exists, so start
+ * searching for the next largest ID. We know there
+ * is one, otherwise the first 'if' above would have
+ * been true.
+ */
+ list_for_each_entry(ref, &hdl->ctrl_refs, node) {
+ is_compound = ref->ctrl->is_array ||
+ ref->ctrl->type >= V4L2_CTRL_COMPOUND_TYPES;
+ if (id < ref->ctrl->id &&
+ (is_compound & mask) == match)
+ break;
+ }
+ if (&ref->node == &hdl->ctrl_refs)
+ ref = NULL;
+ }
+ }
+ mutex_unlock(hdl->lock);
+
+ if (!ref)
+ return -EINVAL;
+
+ ctrl = ref->ctrl;
+ memset(qc, 0, sizeof(*qc));
+ if (id >= V4L2_CID_PRIVATE_BASE)
+ qc->id = id;
+ else
+ qc->id = ctrl->id;
+ strscpy(qc->name, ctrl->name, sizeof(qc->name));
+ qc->flags = user_flags(ctrl);
+ qc->type = ctrl->type;
+ qc->elem_size = ctrl->elem_size;
+ qc->elems = ctrl->elems;
+ qc->nr_of_dims = ctrl->nr_of_dims;
+ memcpy(qc->dims, ctrl->dims, qc->nr_of_dims * sizeof(qc->dims[0]));
+ qc->minimum = ctrl->minimum;
+ qc->maximum = ctrl->maximum;
+ qc->default_value = ctrl->default_value;
+ if (ctrl->type == V4L2_CTRL_TYPE_MENU ||
+ ctrl->type == V4L2_CTRL_TYPE_INTEGER_MENU)
+ qc->step = 1;
+ else
+ qc->step = ctrl->step;
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_query_ext_ctrl);
+
+/* Implement VIDIOC_QUERYCTRL */
+int v4l2_queryctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_queryctrl *qc)
+{
+ struct v4l2_query_ext_ctrl qec = { qc->id };
+ int rc;
+
+ rc = v4l2_query_ext_ctrl(hdl, &qec);
+ if (rc)
+ return rc;
+
+ qc->id = qec.id;
+ qc->type = qec.type;
+ qc->flags = qec.flags;
+ strscpy(qc->name, qec.name, sizeof(qc->name));
+ switch (qc->type) {
+ case V4L2_CTRL_TYPE_INTEGER:
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ case V4L2_CTRL_TYPE_STRING:
+ case V4L2_CTRL_TYPE_BITMASK:
+ qc->minimum = qec.minimum;
+ qc->maximum = qec.maximum;
+ qc->step = qec.step;
+ qc->default_value = qec.default_value;
+ break;
+ default:
+ qc->minimum = 0;
+ qc->maximum = 0;
+ qc->step = 0;
+ qc->default_value = 0;
+ break;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_queryctrl);
+
+/* Implement VIDIOC_QUERYMENU */
+int v4l2_querymenu(struct v4l2_ctrl_handler *hdl, struct v4l2_querymenu *qm)
+{
+ struct v4l2_ctrl *ctrl;
+ u32 i = qm->index;
+
+ ctrl = v4l2_ctrl_find(hdl, qm->id);
+ if (!ctrl)
+ return -EINVAL;
+
+ qm->reserved = 0;
+ /* Sanity checks */
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_MENU:
+ if (!ctrl->qmenu)
+ return -EINVAL;
+ break;
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ if (!ctrl->qmenu_int)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (i < ctrl->minimum || i > ctrl->maximum)
+ return -EINVAL;
+
+ /* Use mask to see if this menu item should be skipped */
+ if (ctrl->menu_skip_mask & (1ULL << i))
+ return -EINVAL;
+ /* Empty menu items should also be skipped */
+ if (ctrl->type == V4L2_CTRL_TYPE_MENU) {
+ if (!ctrl->qmenu[i] || ctrl->qmenu[i][0] == '\0')
+ return -EINVAL;
+ strscpy(qm->name, ctrl->qmenu[i], sizeof(qm->name));
+ } else {
+ qm->value = ctrl->qmenu_int[i];
+ }
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_querymenu);
+
+/*
+ * VIDIOC_LOG_STATUS helpers
+ */
+
+int v4l2_ctrl_log_status(struct file *file, void *fh)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_fh *vfh = file->private_data;
+
+ if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) && vfd->v4l2_dev)
+ v4l2_ctrl_handler_log_status(vfh->ctrl_handler,
+ vfd->v4l2_dev->name);
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_ctrl_log_status);
+
+int v4l2_ctrl_subdev_log_status(struct v4l2_subdev *sd)
+{
+ v4l2_ctrl_handler_log_status(sd->ctrl_handler, sd->name);
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_ctrl_subdev_log_status);
+
+/*
+ * VIDIOC_(UN)SUBSCRIBE_EVENT implementation
+ */
+
+static int v4l2_ctrl_add_event(struct v4l2_subscribed_event *sev,
+ unsigned int elems)
+{
+ struct v4l2_ctrl *ctrl = v4l2_ctrl_find(sev->fh->ctrl_handler, sev->id);
+
+ if (!ctrl)
+ return -EINVAL;
+
+ v4l2_ctrl_lock(ctrl);
+ list_add_tail(&sev->node, &ctrl->ev_subs);
+ if (ctrl->type != V4L2_CTRL_TYPE_CTRL_CLASS &&
+ (sev->flags & V4L2_EVENT_SUB_FL_SEND_INITIAL))
+ send_initial_event(sev->fh, ctrl);
+ v4l2_ctrl_unlock(ctrl);
+ return 0;
+}
+
+static void v4l2_ctrl_del_event(struct v4l2_subscribed_event *sev)
+{
+ struct v4l2_ctrl *ctrl = v4l2_ctrl_find(sev->fh->ctrl_handler, sev->id);
+
+ if (!ctrl)
+ return;
+
+ v4l2_ctrl_lock(ctrl);
+ list_del(&sev->node);
+ v4l2_ctrl_unlock(ctrl);
+}
+
+void v4l2_ctrl_replace(struct v4l2_event *old, const struct v4l2_event *new)
+{
+ u32 old_changes = old->u.ctrl.changes;
+
+ old->u.ctrl = new->u.ctrl;
+ old->u.ctrl.changes |= old_changes;
+}
+EXPORT_SYMBOL(v4l2_ctrl_replace);
+
+void v4l2_ctrl_merge(const struct v4l2_event *old, struct v4l2_event *new)
+{
+ new->u.ctrl.changes |= old->u.ctrl.changes;
+}
+EXPORT_SYMBOL(v4l2_ctrl_merge);
+
+const struct v4l2_subscribed_event_ops v4l2_ctrl_sub_ev_ops = {
+ .add = v4l2_ctrl_add_event,
+ .del = v4l2_ctrl_del_event,
+ .replace = v4l2_ctrl_replace,
+ .merge = v4l2_ctrl_merge,
+};
+EXPORT_SYMBOL(v4l2_ctrl_sub_ev_ops);
+
+int v4l2_ctrl_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ if (sub->type == V4L2_EVENT_CTRL)
+ return v4l2_event_subscribe(fh, sub, 0, &v4l2_ctrl_sub_ev_ops);
+ return -EINVAL;
+}
+EXPORT_SYMBOL(v4l2_ctrl_subscribe_event);
+
+int v4l2_ctrl_subdev_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ if (!sd->ctrl_handler)
+ return -EINVAL;
+ return v4l2_ctrl_subscribe_event(fh, sub);
+}
+EXPORT_SYMBOL(v4l2_ctrl_subdev_subscribe_event);
+
+/*
+ * poll helper
+ */
+__poll_t v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ poll_wait(file, &fh->wait, wait);
+ if (v4l2_event_pending(fh))
+ return EPOLLPRI;
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_ctrl_poll);
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-core.c b/drivers/media/v4l2-core/v4l2-ctrls-core.c
new file mode 100644
index 0000000000..a662fb60f7
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-ctrls-core.c
@@ -0,0 +1,2591 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * V4L2 controls framework core implementation.
+ *
+ * Copyright (C) 2010-2021 Hans Verkuil <hverkuil-cisco@xs4all.nl>
+ */
+
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+
+#include "v4l2-ctrls-priv.h"
+
+static const union v4l2_ctrl_ptr ptr_null;
+
+static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl,
+ u32 changes)
+{
+ memset(ev, 0, sizeof(*ev));
+ ev->type = V4L2_EVENT_CTRL;
+ ev->id = ctrl->id;
+ ev->u.ctrl.changes = changes;
+ ev->u.ctrl.type = ctrl->type;
+ ev->u.ctrl.flags = user_flags(ctrl);
+ if (ctrl->is_ptr)
+ ev->u.ctrl.value64 = 0;
+ else
+ ev->u.ctrl.value64 = *ctrl->p_cur.p_s64;
+ ev->u.ctrl.minimum = ctrl->minimum;
+ ev->u.ctrl.maximum = ctrl->maximum;
+	if (ctrl->type == V4L2_CTRL_TYPE_MENU ||
+	    ctrl->type == V4L2_CTRL_TYPE_INTEGER_MENU)
+ ev->u.ctrl.step = 1;
+ else
+ ev->u.ctrl.step = ctrl->step;
+ ev->u.ctrl.default_value = ctrl->default_value;
+}
+
+void send_initial_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_event ev;
+ u32 changes = V4L2_EVENT_CTRL_CH_FLAGS;
+
+ if (!(ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY))
+ changes |= V4L2_EVENT_CTRL_CH_VALUE;
+ fill_event(&ev, ctrl, changes);
+ v4l2_event_queue_fh(fh, &ev);
+}
+
+void send_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 changes)
+{
+ struct v4l2_event ev;
+ struct v4l2_subscribed_event *sev;
+
+ if (list_empty(&ctrl->ev_subs))
+ return;
+ fill_event(&ev, ctrl, changes);
+
+ list_for_each_entry(sev, &ctrl->ev_subs, node)
+ if (sev->fh != fh ||
+ (sev->flags & V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK))
+ v4l2_event_queue_fh(sev->fh, &ev);
+}
+
+bool v4l2_ctrl_type_op_equal(const struct v4l2_ctrl *ctrl,
+ union v4l2_ctrl_ptr ptr1, union v4l2_ctrl_ptr ptr2)
+{
+ unsigned int i;
+
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_BUTTON:
+ return false;
+ case V4L2_CTRL_TYPE_STRING:
+ for (i = 0; i < ctrl->elems; i++) {
+ unsigned int idx = i * ctrl->elem_size;
+
+ /* strings are always 0-terminated */
+ if (strcmp(ptr1.p_char + idx, ptr2.p_char + idx))
+ return false;
+ }
+ return true;
+ default:
+ return !memcmp(ptr1.p_const, ptr2.p_const,
+ ctrl->elems * ctrl->elem_size);
+ }
+}
+EXPORT_SYMBOL(v4l2_ctrl_type_op_equal);
+
+/* Default intra MPEG-2 quantisation coefficients, from the specification. */
+static const u8 mpeg2_intra_quant_matrix[64] = {
+ 8, 16, 16, 19, 16, 19, 22, 22,
+ 22, 22, 22, 22, 26, 24, 26, 27,
+ 27, 27, 26, 26, 26, 26, 27, 27,
+ 27, 29, 29, 29, 34, 34, 34, 29,
+ 29, 29, 27, 27, 29, 29, 32, 32,
+ 34, 34, 37, 38, 37, 35, 35, 34,
+ 35, 38, 38, 40, 40, 40, 48, 48,
+ 46, 46, 56, 56, 58, 69, 69, 83
+};
+
+static void std_init_compound(const struct v4l2_ctrl *ctrl, u32 idx,
+ union v4l2_ctrl_ptr ptr)
+{
+ struct v4l2_ctrl_mpeg2_sequence *p_mpeg2_sequence;
+ struct v4l2_ctrl_mpeg2_picture *p_mpeg2_picture;
+ struct v4l2_ctrl_mpeg2_quantisation *p_mpeg2_quant;
+ struct v4l2_ctrl_vp8_frame *p_vp8_frame;
+ struct v4l2_ctrl_vp9_frame *p_vp9_frame;
+ struct v4l2_ctrl_fwht_params *p_fwht_params;
+ struct v4l2_ctrl_h264_scaling_matrix *p_h264_scaling_matrix;
+ struct v4l2_ctrl_av1_sequence *p_av1_sequence;
+ void *p = ptr.p + idx * ctrl->elem_size;
+
+ if (ctrl->p_def.p_const)
+ memcpy(p, ctrl->p_def.p_const, ctrl->elem_size);
+ else
+ memset(p, 0, ctrl->elem_size);
+
+ switch ((u32)ctrl->type) {
+ case V4L2_CTRL_TYPE_MPEG2_SEQUENCE:
+ p_mpeg2_sequence = p;
+
+ /* 4:2:0 */
+ p_mpeg2_sequence->chroma_format = 1;
+ break;
+ case V4L2_CTRL_TYPE_MPEG2_PICTURE:
+ p_mpeg2_picture = p;
+
+ /* interlaced top field */
+ p_mpeg2_picture->picture_structure = V4L2_MPEG2_PIC_TOP_FIELD;
+ p_mpeg2_picture->picture_coding_type =
+ V4L2_MPEG2_PIC_CODING_TYPE_I;
+ break;
+ case V4L2_CTRL_TYPE_MPEG2_QUANTISATION:
+ p_mpeg2_quant = p;
+
+ memcpy(p_mpeg2_quant->intra_quantiser_matrix,
+ mpeg2_intra_quant_matrix,
+ ARRAY_SIZE(mpeg2_intra_quant_matrix));
+ /*
+ * The default non-intra MPEG-2 quantisation
+ * coefficients are all 16, as per the specification.
+ */
+ memset(p_mpeg2_quant->non_intra_quantiser_matrix, 16,
+ sizeof(p_mpeg2_quant->non_intra_quantiser_matrix));
+ break;
+ case V4L2_CTRL_TYPE_VP8_FRAME:
+ p_vp8_frame = p;
+ p_vp8_frame->num_dct_parts = 1;
+ break;
+ case V4L2_CTRL_TYPE_VP9_FRAME:
+ p_vp9_frame = p;
+ p_vp9_frame->profile = 0;
+ p_vp9_frame->bit_depth = 8;
+ p_vp9_frame->flags |= V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING |
+ V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING;
+ break;
+ case V4L2_CTRL_TYPE_AV1_SEQUENCE:
+ p_av1_sequence = p;
+ p_av1_sequence->bit_depth = 8;
+ break;
+ case V4L2_CTRL_TYPE_FWHT_PARAMS:
+ p_fwht_params = p;
+ p_fwht_params->version = V4L2_FWHT_VERSION;
+ p_fwht_params->width = 1280;
+ p_fwht_params->height = 720;
+ p_fwht_params->flags = V4L2_FWHT_FL_PIXENC_YUV |
+ (2 << V4L2_FWHT_FL_COMPONENTS_NUM_OFFSET);
+ break;
+ case V4L2_CTRL_TYPE_H264_SCALING_MATRIX:
+ p_h264_scaling_matrix = p;
+ /*
+ * Use the default (flat) H.264 scaling matrix when none is
+ * specified in the bitstream, according to formulas (7-8) and
+ * (7-9) of the specification.
+ */
+ memset(p_h264_scaling_matrix, 16, sizeof(*p_h264_scaling_matrix));
+ break;
+ }
+}
+
+void v4l2_ctrl_type_op_init(const struct v4l2_ctrl *ctrl, u32 from_idx,
+ union v4l2_ctrl_ptr ptr)
+{
+ unsigned int i;
+ u32 tot_elems = ctrl->elems;
+ u32 elems = tot_elems - from_idx;
+
+ if (from_idx >= tot_elems)
+ return;
+
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_STRING:
+ for (i = from_idx; i < tot_elems; i++) {
+ unsigned int offset = i * ctrl->elem_size;
+
+ memset(ptr.p_char + offset, ' ', ctrl->minimum);
+ ptr.p_char[offset + ctrl->minimum] = '\0';
+ }
+ break;
+ case V4L2_CTRL_TYPE_INTEGER64:
+ if (ctrl->default_value) {
+ for (i = from_idx; i < tot_elems; i++)
+ ptr.p_s64[i] = ctrl->default_value;
+ } else {
+ memset(ptr.p_s64 + from_idx, 0, elems * sizeof(s64));
+ }
+ break;
+ case V4L2_CTRL_TYPE_INTEGER:
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_BITMASK:
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ if (ctrl->default_value) {
+ for (i = from_idx; i < tot_elems; i++)
+ ptr.p_s32[i] = ctrl->default_value;
+ } else {
+ memset(ptr.p_s32 + from_idx, 0, elems * sizeof(s32));
+ }
+ break;
+ case V4L2_CTRL_TYPE_BUTTON:
+ case V4L2_CTRL_TYPE_CTRL_CLASS:
+ memset(ptr.p_s32 + from_idx, 0, elems * sizeof(s32));
+ break;
+ case V4L2_CTRL_TYPE_U8:
+ memset(ptr.p_u8 + from_idx, ctrl->default_value, elems);
+ break;
+ case V4L2_CTRL_TYPE_U16:
+ if (ctrl->default_value) {
+ for (i = from_idx; i < tot_elems; i++)
+ ptr.p_u16[i] = ctrl->default_value;
+ } else {
+ memset(ptr.p_u16 + from_idx, 0, elems * sizeof(u16));
+ }
+ break;
+ case V4L2_CTRL_TYPE_U32:
+ if (ctrl->default_value) {
+ for (i = from_idx; i < tot_elems; i++)
+ ptr.p_u32[i] = ctrl->default_value;
+ } else {
+ memset(ptr.p_u32 + from_idx, 0, elems * sizeof(u32));
+ }
+ break;
+ default:
+ for (i = from_idx; i < tot_elems; i++)
+ std_init_compound(ctrl, i, ptr);
+ break;
+ }
+}
+EXPORT_SYMBOL(v4l2_ctrl_type_op_init);
+
+void v4l2_ctrl_type_op_log(const struct v4l2_ctrl *ctrl)
+{
+ union v4l2_ctrl_ptr ptr = ctrl->p_cur;
+
+ if (ctrl->is_array) {
+ unsigned i;
+
+ for (i = 0; i < ctrl->nr_of_dims; i++)
+ pr_cont("[%u]", ctrl->dims[i]);
+ pr_cont(" ");
+ }
+
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_INTEGER:
+ pr_cont("%d", *ptr.p_s32);
+ break;
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ pr_cont("%s", *ptr.p_s32 ? "true" : "false");
+ break;
+ case V4L2_CTRL_TYPE_MENU:
+ pr_cont("%s", ctrl->qmenu[*ptr.p_s32]);
+ break;
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ pr_cont("%lld", ctrl->qmenu_int[*ptr.p_s32]);
+ break;
+ case V4L2_CTRL_TYPE_BITMASK:
+ pr_cont("0x%08x", *ptr.p_s32);
+ break;
+ case V4L2_CTRL_TYPE_INTEGER64:
+ pr_cont("%lld", *ptr.p_s64);
+ break;
+ case V4L2_CTRL_TYPE_STRING:
+ pr_cont("%s", ptr.p_char);
+ break;
+ case V4L2_CTRL_TYPE_U8:
+ pr_cont("%u", (unsigned)*ptr.p_u8);
+ break;
+ case V4L2_CTRL_TYPE_U16:
+ pr_cont("%u", (unsigned)*ptr.p_u16);
+ break;
+ case V4L2_CTRL_TYPE_U32:
+ pr_cont("%u", (unsigned)*ptr.p_u32);
+ break;
+ case V4L2_CTRL_TYPE_H264_SPS:
+ pr_cont("H264_SPS");
+ break;
+ case V4L2_CTRL_TYPE_H264_PPS:
+ pr_cont("H264_PPS");
+ break;
+ case V4L2_CTRL_TYPE_H264_SCALING_MATRIX:
+ pr_cont("H264_SCALING_MATRIX");
+ break;
+ case V4L2_CTRL_TYPE_H264_SLICE_PARAMS:
+ pr_cont("H264_SLICE_PARAMS");
+ break;
+ case V4L2_CTRL_TYPE_H264_DECODE_PARAMS:
+ pr_cont("H264_DECODE_PARAMS");
+ break;
+ case V4L2_CTRL_TYPE_H264_PRED_WEIGHTS:
+ pr_cont("H264_PRED_WEIGHTS");
+ break;
+ case V4L2_CTRL_TYPE_FWHT_PARAMS:
+ pr_cont("FWHT_PARAMS");
+ break;
+ case V4L2_CTRL_TYPE_VP8_FRAME:
+ pr_cont("VP8_FRAME");
+ break;
+ case V4L2_CTRL_TYPE_HDR10_CLL_INFO:
+ pr_cont("HDR10_CLL_INFO");
+ break;
+ case V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY:
+ pr_cont("HDR10_MASTERING_DISPLAY");
+ break;
+ case V4L2_CTRL_TYPE_MPEG2_QUANTISATION:
+ pr_cont("MPEG2_QUANTISATION");
+ break;
+ case V4L2_CTRL_TYPE_MPEG2_SEQUENCE:
+ pr_cont("MPEG2_SEQUENCE");
+ break;
+ case V4L2_CTRL_TYPE_MPEG2_PICTURE:
+ pr_cont("MPEG2_PICTURE");
+ break;
+ case V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR:
+ pr_cont("VP9_COMPRESSED_HDR");
+ break;
+ case V4L2_CTRL_TYPE_VP9_FRAME:
+ pr_cont("VP9_FRAME");
+ break;
+ case V4L2_CTRL_TYPE_HEVC_SPS:
+ pr_cont("HEVC_SPS");
+ break;
+ case V4L2_CTRL_TYPE_HEVC_PPS:
+ pr_cont("HEVC_PPS");
+ break;
+ case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS:
+ pr_cont("HEVC_SLICE_PARAMS");
+ break;
+ case V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX:
+ pr_cont("HEVC_SCALING_MATRIX");
+ break;
+ case V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS:
+ pr_cont("HEVC_DECODE_PARAMS");
+ break;
+ case V4L2_CTRL_TYPE_AV1_SEQUENCE:
+ pr_cont("AV1_SEQUENCE");
+ break;
+ case V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY:
+ pr_cont("AV1_TILE_GROUP_ENTRY");
+ break;
+ case V4L2_CTRL_TYPE_AV1_FRAME:
+ pr_cont("AV1_FRAME");
+ break;
+ case V4L2_CTRL_TYPE_AV1_FILM_GRAIN:
+ pr_cont("AV1_FILM_GRAIN");
+ break;
+ default:
+ pr_cont("unknown type %d", ctrl->type);
+ break;
+ }
+}
+EXPORT_SYMBOL(v4l2_ctrl_type_op_log);
+
+/*
+ * Round towards the closest legal value. Be careful when we are
+ * close to the maximum range of the control type to prevent
+ * wrap-arounds.
+ */
+#define ROUND_TO_RANGE(val, offset_type, ctrl) \
+({ \
+ offset_type offset; \
+ if ((ctrl)->maximum >= 0 && \
+ val >= (ctrl)->maximum - (s32)((ctrl)->step / 2)) \
+ val = (ctrl)->maximum; \
+ else \
+ val += (s32)((ctrl)->step / 2); \
+ val = clamp_t(typeof(val), val, \
+ (ctrl)->minimum, (ctrl)->maximum); \
+ offset = (val) - (ctrl)->minimum; \
+ offset = (ctrl)->step * (offset / (u32)(ctrl)->step); \
+ val = (ctrl)->minimum + offset; \
+ 0; \
+})
+
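+/*
+ * A worked example of the macro above (illustration only, not from the
+ * original source): for a control with minimum 0, maximum 255 and step 4,
+ * ROUND_TO_RANGE(10, u32, ctrl) adds step/2 (2) giving 12, clamps it, and
+ * snaps it to the step grid: 0 + 4 * (12 / 4) = 12. A value of 254 trips
+ * the wrap-around guard (254 >= 255 - 2) and becomes the maximum, 255,
+ * even though 255 itself is not on the step grid.
+ */
+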
+/* Validate a new control */
+
+#define zero_padding(s) \
+ memset(&(s).padding, 0, sizeof((s).padding))
+#define zero_reserved(s) \
+ memset(&(s).reserved, 0, sizeof((s).reserved))
+
+static int
+validate_vp9_lf_params(struct v4l2_vp9_loop_filter *lf)
+{
+ unsigned int i;
+
+ if (lf->flags & ~(V4L2_VP9_LOOP_FILTER_FLAG_DELTA_ENABLED |
+ V4L2_VP9_LOOP_FILTER_FLAG_DELTA_UPDATE))
+ return -EINVAL;
+
+ /* Check that all values are in the accepted range. */
+ if (lf->level > GENMASK(5, 0))
+ return -EINVAL;
+
+ if (lf->sharpness > GENMASK(2, 0))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(lf->ref_deltas); i++)
+ if (lf->ref_deltas[i] < -63 || lf->ref_deltas[i] > 63)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(lf->mode_deltas); i++)
+ if (lf->mode_deltas[i] < -63 || lf->mode_deltas[i] > 63)
+ return -EINVAL;
+
+ zero_reserved(*lf);
+ return 0;
+}
+
+static int
+validate_vp9_quant_params(struct v4l2_vp9_quantization *quant)
+{
+ if (quant->delta_q_y_dc < -15 || quant->delta_q_y_dc > 15 ||
+ quant->delta_q_uv_dc < -15 || quant->delta_q_uv_dc > 15 ||
+ quant->delta_q_uv_ac < -15 || quant->delta_q_uv_ac > 15)
+ return -EINVAL;
+
+ zero_reserved(*quant);
+ return 0;
+}
+
+static int
+validate_vp9_seg_params(struct v4l2_vp9_segmentation *seg)
+{
+ unsigned int i, j;
+
+ if (seg->flags & ~(V4L2_VP9_SEGMENTATION_FLAG_ENABLED |
+ V4L2_VP9_SEGMENTATION_FLAG_UPDATE_MAP |
+ V4L2_VP9_SEGMENTATION_FLAG_TEMPORAL_UPDATE |
+ V4L2_VP9_SEGMENTATION_FLAG_UPDATE_DATA |
+ V4L2_VP9_SEGMENTATION_FLAG_ABS_OR_DELTA_UPDATE))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(seg->feature_enabled); i++) {
+ if (seg->feature_enabled[i] &
+ ~V4L2_VP9_SEGMENT_FEATURE_ENABLED_MASK)
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(seg->feature_data); i++) {
+ static const int range[] = { 255, 63, 3, 0 };
+
+ for (j = 0; j < ARRAY_SIZE(seg->feature_data[i]); j++) {
+ if (seg->feature_data[i][j] < -range[j] ||
+ seg->feature_data[i][j] > range[j])
+ return -EINVAL;
+ }
+ }
+
+ zero_reserved(*seg);
+ return 0;
+}
+
+static int
+validate_vp9_compressed_hdr(struct v4l2_ctrl_vp9_compressed_hdr *hdr)
+{
+ if (hdr->tx_mode > V4L2_VP9_TX_MODE_SELECT)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+validate_vp9_frame(struct v4l2_ctrl_vp9_frame *frame)
+{
+ int ret;
+
+ /* Make sure we're not passed invalid flags. */
+ if (frame->flags & ~(V4L2_VP9_FRAME_FLAG_KEY_FRAME |
+ V4L2_VP9_FRAME_FLAG_SHOW_FRAME |
+ V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT |
+ V4L2_VP9_FRAME_FLAG_INTRA_ONLY |
+ V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV |
+ V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX |
+ V4L2_VP9_FRAME_FLAG_PARALLEL_DEC_MODE |
+ V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING |
+ V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING |
+ V4L2_VP9_FRAME_FLAG_COLOR_RANGE_FULL_SWING))
+ return -EINVAL;
+
+ if (frame->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT &&
+ frame->flags & V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX)
+ return -EINVAL;
+
+ if (frame->profile > V4L2_VP9_PROFILE_MAX)
+ return -EINVAL;
+
+ if (frame->reset_frame_context > V4L2_VP9_RESET_FRAME_CTX_ALL)
+ return -EINVAL;
+
+ if (frame->frame_context_idx >= V4L2_VP9_NUM_FRAME_CTX)
+ return -EINVAL;
+
+ /*
+ * Profiles 0 and 1 only support 8-bit depth; profiles 2 and 3
+ * only support 10- and 12-bit depths.
+ */
+ if ((frame->profile < 2 && frame->bit_depth != 8) ||
+ (frame->profile >= 2 &&
+ (frame->bit_depth != 10 && frame->bit_depth != 12)))
+ return -EINVAL;
+
+ /* Profiles 0 and 2 only accept YUV 4:2:0. */
+ if ((frame->profile == 0 || frame->profile == 2) &&
+ (!(frame->flags & V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING) ||
+ !(frame->flags & V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING)))
+ return -EINVAL;
+
+ /* Profiles 1 and 3 only accept YUV 4:2:2, 4:4:0 and 4:4:4. */
+ if ((frame->profile == 1 || frame->profile == 3) &&
+ ((frame->flags & V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING) &&
+ (frame->flags & V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING)))
+ return -EINVAL;
+
+ if (frame->interpolation_filter > V4L2_VP9_INTERP_FILTER_SWITCHABLE)
+ return -EINVAL;
+
+ /*
+ * According to the spec, tile_cols_log2 shall be less than or equal
+ * to 6.
+ */
+ if (frame->tile_cols_log2 > 6)
+ return -EINVAL;
+
+ if (frame->reference_mode > V4L2_VP9_REFERENCE_MODE_SELECT)
+ return -EINVAL;
+
+ ret = validate_vp9_lf_params(&frame->lf);
+ if (ret)
+ return ret;
+
+ ret = validate_vp9_quant_params(&frame->quant);
+ if (ret)
+ return ret;
+
+ ret = validate_vp9_seg_params(&frame->seg);
+ if (ret)
+ return ret;
+
+ zero_reserved(*frame);
+ return 0;
+}
+
+static int validate_av1_quantization(struct v4l2_av1_quantization *q)
+{
+ if (q->flags > GENMASK(2, 0))
+ return -EINVAL;
+
+ if (q->delta_q_y_dc < -64 || q->delta_q_y_dc > 63 ||
+ q->delta_q_u_dc < -64 || q->delta_q_u_dc > 63 ||
+ q->delta_q_v_dc < -64 || q->delta_q_v_dc > 63 ||
+ q->delta_q_u_ac < -64 || q->delta_q_u_ac > 63 ||
+ q->delta_q_v_ac < -64 || q->delta_q_v_ac > 63 ||
+ q->delta_q_res > GENMASK(1, 0))
+ return -EINVAL;
+
+ if (q->qm_y > GENMASK(3, 0) ||
+ q->qm_u > GENMASK(3, 0) ||
+ q->qm_v > GENMASK(3, 0))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int validate_av1_segmentation(struct v4l2_av1_segmentation *s)
+{
+ u32 i;
+ u32 j;
+
+ if (s->flags > GENMASK(4, 0))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(s->feature_data); i++) {
+ static const int segmentation_feature_signed[] = { 1, 1, 1, 1, 1, 0, 0, 0 };
+ static const int segmentation_feature_max[] = { 255, 63, 63, 63, 63, 7, 0, 0};
+
+ for (j = 0; j < ARRAY_SIZE(s->feature_data[i]); j++) {
+ s32 limit = segmentation_feature_max[j];
+
+ if (segmentation_feature_signed[j]) {
+ if (s->feature_data[i][j] < -limit ||
+ s->feature_data[i][j] > limit)
+ return -EINVAL;
+ } else {
+ if (s->feature_data[i][j] < 0 || s->feature_data[i][j] > limit)
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int validate_av1_loop_filter(struct v4l2_av1_loop_filter *lf)
+{
+ u32 i;
+
+ if (lf->flags > GENMASK(3, 0))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(lf->level); i++) {
+ if (lf->level[i] > GENMASK(5, 0))
+ return -EINVAL;
+ }
+
+ if (lf->sharpness > GENMASK(2, 0))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(lf->ref_deltas); i++) {
+ if (lf->ref_deltas[i] < -64 || lf->ref_deltas[i] > 63)
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lf->mode_deltas); i++) {
+ if (lf->mode_deltas[i] < -64 || lf->mode_deltas[i] > 63)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int validate_av1_cdef(struct v4l2_av1_cdef *cdef)
+{
+ u32 i;
+
+ if (cdef->damping_minus_3 > GENMASK(1, 0) ||
+ cdef->bits > GENMASK(1, 0))
+ return -EINVAL;
+
+ for (i = 0; i < 1 << cdef->bits; i++) {
+ if (cdef->y_pri_strength[i] > GENMASK(3, 0) ||
+ cdef->y_sec_strength[i] > 4 ||
+ cdef->uv_pri_strength[i] > GENMASK(3, 0) ||
+ cdef->uv_sec_strength[i] > 4)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int validate_av1_loop_restoration(struct v4l2_av1_loop_restoration *lr)
+{
+ if (lr->lr_unit_shift > 3 || lr->lr_uv_shift > 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int validate_av1_film_grain(struct v4l2_ctrl_av1_film_grain *fg)
+{
+ u32 i;
+
+ if (fg->flags > GENMASK(4, 0))
+ return -EINVAL;
+
+ if (fg->film_grain_params_ref_idx > GENMASK(2, 0) ||
+ fg->num_y_points > 14 ||
+ fg->num_cb_points > 10 ||
+ fg->num_cr_points > GENMASK(3, 0) ||
+ fg->grain_scaling_minus_8 > GENMASK(1, 0) ||
+ fg->ar_coeff_lag > GENMASK(1, 0) ||
+ fg->ar_coeff_shift_minus_6 > GENMASK(1, 0) ||
+ fg->grain_scale_shift > GENMASK(1, 0))
+ return -EINVAL;
+
+ if (!(fg->flags & V4L2_AV1_FILM_GRAIN_FLAG_APPLY_GRAIN))
+ return 0;
+
+ for (i = 1; i < fg->num_y_points; i++)
+ if (fg->point_y_value[i] <= fg->point_y_value[i - 1])
+ return -EINVAL;
+
+ for (i = 1; i < fg->num_cb_points; i++)
+ if (fg->point_cb_value[i] <= fg->point_cb_value[i - 1])
+ return -EINVAL;
+
+ for (i = 1; i < fg->num_cr_points; i++)
+ if (fg->point_cr_value[i] <= fg->point_cr_value[i - 1])
+ return -EINVAL;
+
+ return 0;
+}
+
+static int validate_av1_frame(struct v4l2_ctrl_av1_frame *f)
+{
+ int ret = 0;
+
+ ret = validate_av1_quantization(&f->quantization);
+ if (ret)
+ return ret;
+ ret = validate_av1_segmentation(&f->segmentation);
+ if (ret)
+ return ret;
+ ret = validate_av1_loop_filter(&f->loop_filter);
+ if (ret)
+ return ret;
+ ret = validate_av1_cdef(&f->cdef);
+ if (ret)
+ return ret;
+ ret = validate_av1_loop_restoration(&f->loop_restoration);
+ if (ret)
+ return ret;
+
+ if (f->flags &
+ ~(V4L2_AV1_FRAME_FLAG_SHOW_FRAME |
+ V4L2_AV1_FRAME_FLAG_SHOWABLE_FRAME |
+ V4L2_AV1_FRAME_FLAG_ERROR_RESILIENT_MODE |
+ V4L2_AV1_FRAME_FLAG_DISABLE_CDF_UPDATE |
+ V4L2_AV1_FRAME_FLAG_ALLOW_SCREEN_CONTENT_TOOLS |
+ V4L2_AV1_FRAME_FLAG_FORCE_INTEGER_MV |
+ V4L2_AV1_FRAME_FLAG_ALLOW_INTRABC |
+ V4L2_AV1_FRAME_FLAG_USE_SUPERRES |
+ V4L2_AV1_FRAME_FLAG_ALLOW_HIGH_PRECISION_MV |
+ V4L2_AV1_FRAME_FLAG_IS_MOTION_MODE_SWITCHABLE |
+ V4L2_AV1_FRAME_FLAG_USE_REF_FRAME_MVS |
+ V4L2_AV1_FRAME_FLAG_DISABLE_FRAME_END_UPDATE_CDF |
+ V4L2_AV1_FRAME_FLAG_ALLOW_WARPED_MOTION |
+ V4L2_AV1_FRAME_FLAG_REFERENCE_SELECT |
+ V4L2_AV1_FRAME_FLAG_REDUCED_TX_SET |
+ V4L2_AV1_FRAME_FLAG_SKIP_MODE_ALLOWED |
+ V4L2_AV1_FRAME_FLAG_SKIP_MODE_PRESENT |
+ V4L2_AV1_FRAME_FLAG_FRAME_SIZE_OVERRIDE |
+ V4L2_AV1_FRAME_FLAG_BUFFER_REMOVAL_TIME_PRESENT |
+ V4L2_AV1_FRAME_FLAG_FRAME_REFS_SHORT_SIGNALING))
+ return -EINVAL;
+
+ if (f->superres_denom > GENMASK(2, 0) + 9)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int validate_av1_sequence(struct v4l2_ctrl_av1_sequence *s)
+{
+ if (s->flags &
+ ~(V4L2_AV1_SEQUENCE_FLAG_STILL_PICTURE |
+ V4L2_AV1_SEQUENCE_FLAG_USE_128X128_SUPERBLOCK |
+ V4L2_AV1_SEQUENCE_FLAG_ENABLE_FILTER_INTRA |
+ V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTRA_EDGE_FILTER |
+ V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTERINTRA_COMPOUND |
+ V4L2_AV1_SEQUENCE_FLAG_ENABLE_MASKED_COMPOUND |
+ V4L2_AV1_SEQUENCE_FLAG_ENABLE_WARPED_MOTION |
+ V4L2_AV1_SEQUENCE_FLAG_ENABLE_DUAL_FILTER |
+ V4L2_AV1_SEQUENCE_FLAG_ENABLE_ORDER_HINT |
+ V4L2_AV1_SEQUENCE_FLAG_ENABLE_JNT_COMP |
+ V4L2_AV1_SEQUENCE_FLAG_ENABLE_REF_FRAME_MVS |
+ V4L2_AV1_SEQUENCE_FLAG_ENABLE_SUPERRES |
+ V4L2_AV1_SEQUENCE_FLAG_ENABLE_CDEF |
+ V4L2_AV1_SEQUENCE_FLAG_ENABLE_RESTORATION |
+ V4L2_AV1_SEQUENCE_FLAG_MONO_CHROME |
+ V4L2_AV1_SEQUENCE_FLAG_COLOR_RANGE |
+ V4L2_AV1_SEQUENCE_FLAG_SUBSAMPLING_X |
+ V4L2_AV1_SEQUENCE_FLAG_SUBSAMPLING_Y |
+ V4L2_AV1_SEQUENCE_FLAG_FILM_GRAIN_PARAMS_PRESENT |
+ V4L2_AV1_SEQUENCE_FLAG_SEPARATE_UV_DELTA_Q))
+ return -EINVAL;
+
+ if (s->seq_profile == 1 && s->flags & V4L2_AV1_SEQUENCE_FLAG_MONO_CHROME)
+ return -EINVAL;
+
+ /* reserved */
+ if (s->seq_profile > 2)
+ return -EINVAL;
+
+ /* TODO: PROFILES */
+ return 0;
+}
+
+/*
+ * Validating compound controls requires setting unused fields/flags to
+ * zero so that the memcmp in v4l2_ctrl_type_op_equal() can reliably
+ * detect unchanged controls.
+ */
+static int std_validate_compound(const struct v4l2_ctrl *ctrl, u32 idx,
+ union v4l2_ctrl_ptr ptr)
+{
+ struct v4l2_ctrl_mpeg2_sequence *p_mpeg2_sequence;
+ struct v4l2_ctrl_mpeg2_picture *p_mpeg2_picture;
+ struct v4l2_ctrl_vp8_frame *p_vp8_frame;
+ struct v4l2_ctrl_fwht_params *p_fwht_params;
+ struct v4l2_ctrl_h264_sps *p_h264_sps;
+ struct v4l2_ctrl_h264_pps *p_h264_pps;
+ struct v4l2_ctrl_h264_pred_weights *p_h264_pred_weights;
+ struct v4l2_ctrl_h264_slice_params *p_h264_slice_params;
+ struct v4l2_ctrl_h264_decode_params *p_h264_dec_params;
+ struct v4l2_ctrl_hevc_sps *p_hevc_sps;
+ struct v4l2_ctrl_hevc_pps *p_hevc_pps;
+ struct v4l2_ctrl_hdr10_mastering_display *p_hdr10_mastering;
+ struct v4l2_ctrl_hevc_decode_params *p_hevc_decode_params;
+ struct v4l2_area *area;
+ void *p = ptr.p + idx * ctrl->elem_size;
+ unsigned int i;
+
+ switch ((u32)ctrl->type) {
+ case V4L2_CTRL_TYPE_MPEG2_SEQUENCE:
+ p_mpeg2_sequence = p;
+
+ switch (p_mpeg2_sequence->chroma_format) {
+ case 1: /* 4:2:0 */
+ case 2: /* 4:2:2 */
+ case 3: /* 4:4:4 */
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+
+ case V4L2_CTRL_TYPE_MPEG2_PICTURE:
+ p_mpeg2_picture = p;
+
+ switch (p_mpeg2_picture->intra_dc_precision) {
+ case 0: /* 8 bits */
+ case 1: /* 9 bits */
+ case 2: /* 10 bits */
+ case 3: /* 11 bits */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (p_mpeg2_picture->picture_structure) {
+ case V4L2_MPEG2_PIC_TOP_FIELD:
+ case V4L2_MPEG2_PIC_BOTTOM_FIELD:
+ case V4L2_MPEG2_PIC_FRAME:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (p_mpeg2_picture->picture_coding_type) {
+ case V4L2_MPEG2_PIC_CODING_TYPE_I:
+ case V4L2_MPEG2_PIC_CODING_TYPE_P:
+ case V4L2_MPEG2_PIC_CODING_TYPE_B:
+ break;
+ default:
+ return -EINVAL;
+ }
+ zero_reserved(*p_mpeg2_picture);
+ break;
+
+ case V4L2_CTRL_TYPE_MPEG2_QUANTISATION:
+ break;
+
+ case V4L2_CTRL_TYPE_FWHT_PARAMS:
+ p_fwht_params = p;
+ if (p_fwht_params->version < V4L2_FWHT_VERSION)
+ return -EINVAL;
+ if (!p_fwht_params->width || !p_fwht_params->height)
+ return -EINVAL;
+ break;
+
+ case V4L2_CTRL_TYPE_H264_SPS:
+ p_h264_sps = p;
+
+ /* Some syntax elements are only conditionally valid */
+ if (p_h264_sps->pic_order_cnt_type != 0) {
+ p_h264_sps->log2_max_pic_order_cnt_lsb_minus4 = 0;
+ } else if (p_h264_sps->pic_order_cnt_type != 1) {
+ p_h264_sps->num_ref_frames_in_pic_order_cnt_cycle = 0;
+ p_h264_sps->offset_for_non_ref_pic = 0;
+ p_h264_sps->offset_for_top_to_bottom_field = 0;
+ memset(&p_h264_sps->offset_for_ref_frame, 0,
+ sizeof(p_h264_sps->offset_for_ref_frame));
+ }
+
+ if (!V4L2_H264_SPS_HAS_CHROMA_FORMAT(p_h264_sps)) {
+ p_h264_sps->chroma_format_idc = 1;
+ p_h264_sps->bit_depth_luma_minus8 = 0;
+ p_h264_sps->bit_depth_chroma_minus8 = 0;
+
+ p_h264_sps->flags &=
+ ~V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS;
+
+ if (p_h264_sps->chroma_format_idc < 3)
+ p_h264_sps->flags &=
+ ~V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE;
+ }
+
+ if (p_h264_sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY)
+ p_h264_sps->flags &=
+ ~V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD;
+
+ /*
+ * Chroma 4:2:2 format requires at least the High 4:2:2 profile.
+ *
+ * The H264 specification and well-known parser implementations
+ * use profile-idc values directly, as that is clearer and
+ * less ambiguous. We do the same here.
+ */
+ if (p_h264_sps->profile_idc < 122 &&
+ p_h264_sps->chroma_format_idc > 1)
+ return -EINVAL;
+ /* Chroma 4:4:4 format requires at least the High 4:4:4 profile */
+ if (p_h264_sps->profile_idc < 244 &&
+ p_h264_sps->chroma_format_idc > 2)
+ return -EINVAL;
+ if (p_h264_sps->chroma_format_idc > 3)
+ return -EINVAL;
+
+ if (p_h264_sps->bit_depth_luma_minus8 > 6)
+ return -EINVAL;
+ if (p_h264_sps->bit_depth_chroma_minus8 > 6)
+ return -EINVAL;
+ if (p_h264_sps->log2_max_frame_num_minus4 > 12)
+ return -EINVAL;
+ if (p_h264_sps->pic_order_cnt_type > 2)
+ return -EINVAL;
+ if (p_h264_sps->log2_max_pic_order_cnt_lsb_minus4 > 12)
+ return -EINVAL;
+ if (p_h264_sps->max_num_ref_frames > V4L2_H264_REF_LIST_LEN)
+ return -EINVAL;
+ break;
+
+ case V4L2_CTRL_TYPE_H264_PPS:
+ p_h264_pps = p;
+
+ if (p_h264_pps->num_slice_groups_minus1 > 7)
+ return -EINVAL;
+ if (p_h264_pps->num_ref_idx_l0_default_active_minus1 >
+ (V4L2_H264_REF_LIST_LEN - 1))
+ return -EINVAL;
+ if (p_h264_pps->num_ref_idx_l1_default_active_minus1 >
+ (V4L2_H264_REF_LIST_LEN - 1))
+ return -EINVAL;
+ if (p_h264_pps->weighted_bipred_idc > 2)
+ return -EINVAL;
+ /*
+ * pic_init_qp_minus26 shall be in the range of
+ * -(26 + QpBdOffset_y) to +25, inclusive,
+ * where QpBdOffset_y is 6 * bit_depth_luma_minus8
+ */
+ if (p_h264_pps->pic_init_qp_minus26 < -62 ||
+ p_h264_pps->pic_init_qp_minus26 > 25)
+ return -EINVAL;
+ if (p_h264_pps->pic_init_qs_minus26 < -26 ||
+ p_h264_pps->pic_init_qs_minus26 > 25)
+ return -EINVAL;
+ if (p_h264_pps->chroma_qp_index_offset < -12 ||
+ p_h264_pps->chroma_qp_index_offset > 12)
+ return -EINVAL;
+ if (p_h264_pps->second_chroma_qp_index_offset < -12 ||
+ p_h264_pps->second_chroma_qp_index_offset > 12)
+ return -EINVAL;
+ break;
+
+ case V4L2_CTRL_TYPE_H264_SCALING_MATRIX:
+ break;
+
+ case V4L2_CTRL_TYPE_H264_PRED_WEIGHTS:
+ p_h264_pred_weights = p;
+
+ if (p_h264_pred_weights->luma_log2_weight_denom > 7)
+ return -EINVAL;
+ if (p_h264_pred_weights->chroma_log2_weight_denom > 7)
+ return -EINVAL;
+ break;
+
+ case V4L2_CTRL_TYPE_H264_SLICE_PARAMS:
+ p_h264_slice_params = p;
+
+ if (p_h264_slice_params->slice_type != V4L2_H264_SLICE_TYPE_B)
+ p_h264_slice_params->flags &=
+ ~V4L2_H264_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED;
+
+ if (p_h264_slice_params->colour_plane_id > 2)
+ return -EINVAL;
+ if (p_h264_slice_params->cabac_init_idc > 2)
+ return -EINVAL;
+ if (p_h264_slice_params->disable_deblocking_filter_idc > 2)
+ return -EINVAL;
+ if (p_h264_slice_params->slice_alpha_c0_offset_div2 < -6 ||
+ p_h264_slice_params->slice_alpha_c0_offset_div2 > 6)
+ return -EINVAL;
+ if (p_h264_slice_params->slice_beta_offset_div2 < -6 ||
+ p_h264_slice_params->slice_beta_offset_div2 > 6)
+ return -EINVAL;
+
+ if (p_h264_slice_params->slice_type == V4L2_H264_SLICE_TYPE_I ||
+ p_h264_slice_params->slice_type == V4L2_H264_SLICE_TYPE_SI)
+ p_h264_slice_params->num_ref_idx_l0_active_minus1 = 0;
+ if (p_h264_slice_params->slice_type != V4L2_H264_SLICE_TYPE_B)
+ p_h264_slice_params->num_ref_idx_l1_active_minus1 = 0;
+
+ if (p_h264_slice_params->num_ref_idx_l0_active_minus1 >
+ (V4L2_H264_REF_LIST_LEN - 1))
+ return -EINVAL;
+ if (p_h264_slice_params->num_ref_idx_l1_active_minus1 >
+ (V4L2_H264_REF_LIST_LEN - 1))
+ return -EINVAL;
+ zero_reserved(*p_h264_slice_params);
+ break;
+
+ case V4L2_CTRL_TYPE_H264_DECODE_PARAMS:
+ p_h264_dec_params = p;
+
+ if (p_h264_dec_params->nal_ref_idc > 3)
+ return -EINVAL;
+ for (i = 0; i < V4L2_H264_NUM_DPB_ENTRIES; i++) {
+ struct v4l2_h264_dpb_entry *dpb_entry =
+ &p_h264_dec_params->dpb[i];
+
+ zero_reserved(*dpb_entry);
+ }
+ zero_reserved(*p_h264_dec_params);
+ break;
+
+ case V4L2_CTRL_TYPE_VP8_FRAME:
+ p_vp8_frame = p;
+
+ switch (p_vp8_frame->num_dct_parts) {
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ break;
+ default:
+ return -EINVAL;
+ }
+ zero_padding(p_vp8_frame->segment);
+ zero_padding(p_vp8_frame->lf);
+ zero_padding(p_vp8_frame->quant);
+ zero_padding(p_vp8_frame->entropy);
+ zero_padding(p_vp8_frame->coder_state);
+ break;
+
+ case V4L2_CTRL_TYPE_HEVC_SPS:
+ p_hevc_sps = p;
+
+ if (!(p_hevc_sps->flags & V4L2_HEVC_SPS_FLAG_PCM_ENABLED)) {
+ p_hevc_sps->pcm_sample_bit_depth_luma_minus1 = 0;
+ p_hevc_sps->pcm_sample_bit_depth_chroma_minus1 = 0;
+ p_hevc_sps->log2_min_pcm_luma_coding_block_size_minus3 = 0;
+ p_hevc_sps->log2_diff_max_min_pcm_luma_coding_block_size = 0;
+ }
+
+ if (!(p_hevc_sps->flags &
+ V4L2_HEVC_SPS_FLAG_LONG_TERM_REF_PICS_PRESENT))
+ p_hevc_sps->num_long_term_ref_pics_sps = 0;
+ break;
+
+ case V4L2_CTRL_TYPE_HEVC_PPS:
+ p_hevc_pps = p;
+
+ if (!(p_hevc_pps->flags &
+ V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED))
+ p_hevc_pps->diff_cu_qp_delta_depth = 0;
+
+ if (!(p_hevc_pps->flags & V4L2_HEVC_PPS_FLAG_TILES_ENABLED)) {
+ p_hevc_pps->num_tile_columns_minus1 = 0;
+ p_hevc_pps->num_tile_rows_minus1 = 0;
+ memset(&p_hevc_pps->column_width_minus1, 0,
+ sizeof(p_hevc_pps->column_width_minus1));
+ memset(&p_hevc_pps->row_height_minus1, 0,
+ sizeof(p_hevc_pps->row_height_minus1));
+
+ p_hevc_pps->flags &=
+ ~V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED;
+ }
+
+ if (p_hevc_pps->flags &
+ V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER) {
+ p_hevc_pps->pps_beta_offset_div2 = 0;
+ p_hevc_pps->pps_tc_offset_div2 = 0;
+ }
+ break;
+
+ case V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS:
+ p_hevc_decode_params = p;
+
+ if (p_hevc_decode_params->num_active_dpb_entries >
+ V4L2_HEVC_DPB_ENTRIES_NUM_MAX)
+ return -EINVAL;
+ break;
+
+ case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS:
+ break;
+
+ case V4L2_CTRL_TYPE_HDR10_CLL_INFO:
+ break;
+
+ case V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY:
+ p_hdr10_mastering = p;
+
+ for (i = 0; i < 3; ++i) {
+ if (p_hdr10_mastering->display_primaries_x[i] <
+ V4L2_HDR10_MASTERING_PRIMARIES_X_LOW ||
+ p_hdr10_mastering->display_primaries_x[i] >
+ V4L2_HDR10_MASTERING_PRIMARIES_X_HIGH ||
+ p_hdr10_mastering->display_primaries_y[i] <
+ V4L2_HDR10_MASTERING_PRIMARIES_Y_LOW ||
+ p_hdr10_mastering->display_primaries_y[i] >
+ V4L2_HDR10_MASTERING_PRIMARIES_Y_HIGH)
+ return -EINVAL;
+ }
+
+ if (p_hdr10_mastering->white_point_x <
+ V4L2_HDR10_MASTERING_WHITE_POINT_X_LOW ||
+ p_hdr10_mastering->white_point_x >
+ V4L2_HDR10_MASTERING_WHITE_POINT_X_HIGH ||
+ p_hdr10_mastering->white_point_y <
+ V4L2_HDR10_MASTERING_WHITE_POINT_Y_LOW ||
+ p_hdr10_mastering->white_point_y >
+ V4L2_HDR10_MASTERING_WHITE_POINT_Y_HIGH)
+ return -EINVAL;
+
+ if (p_hdr10_mastering->max_display_mastering_luminance <
+ V4L2_HDR10_MASTERING_MAX_LUMA_LOW ||
+ p_hdr10_mastering->max_display_mastering_luminance >
+ V4L2_HDR10_MASTERING_MAX_LUMA_HIGH ||
+ p_hdr10_mastering->min_display_mastering_luminance <
+ V4L2_HDR10_MASTERING_MIN_LUMA_LOW ||
+ p_hdr10_mastering->min_display_mastering_luminance >
+ V4L2_HDR10_MASTERING_MIN_LUMA_HIGH)
+ return -EINVAL;
+
+ /* The following restriction comes from ITU-T Rec. H.265 spec */
+ if (p_hdr10_mastering->max_display_mastering_luminance ==
+ V4L2_HDR10_MASTERING_MAX_LUMA_LOW &&
+ p_hdr10_mastering->min_display_mastering_luminance ==
+ V4L2_HDR10_MASTERING_MIN_LUMA_HIGH)
+ return -EINVAL;
+
+ break;
+
+ case V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX:
+ break;
+
+ case V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR:
+ return validate_vp9_compressed_hdr(p);
+
+ case V4L2_CTRL_TYPE_VP9_FRAME:
+ return validate_vp9_frame(p);
+ case V4L2_CTRL_TYPE_AV1_FRAME:
+ return validate_av1_frame(p);
+ case V4L2_CTRL_TYPE_AV1_SEQUENCE:
+ return validate_av1_sequence(p);
+ case V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY:
+ break;
+ case V4L2_CTRL_TYPE_AV1_FILM_GRAIN:
+ return validate_av1_film_grain(p);
+
+ case V4L2_CTRL_TYPE_AREA:
+ area = p;
+ if (!area->width || !area->height)
+ return -EINVAL;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int std_validate_elem(const struct v4l2_ctrl *ctrl, u32 idx,
+ union v4l2_ctrl_ptr ptr)
+{
+ size_t len;
+ u64 offset;
+ s64 val;
+
+ switch ((u32)ctrl->type) {
+ case V4L2_CTRL_TYPE_INTEGER:
+ return ROUND_TO_RANGE(ptr.p_s32[idx], u32, ctrl);
+ case V4L2_CTRL_TYPE_INTEGER64:
+ /*
+ * We can't use the ROUND_TO_RANGE macro here because the
+ * u64 division needs the special care of do_div().
+ */
+ val = ptr.p_s64[idx];
+ if (ctrl->maximum >= 0 && val >= ctrl->maximum - (s64)(ctrl->step / 2))
+ val = ctrl->maximum;
+ else
+ val += (s64)(ctrl->step / 2);
+ val = clamp_t(s64, val, ctrl->minimum, ctrl->maximum);
+ offset = val - ctrl->minimum;
+ do_div(offset, ctrl->step);
+ ptr.p_s64[idx] = ctrl->minimum + offset * ctrl->step;
+ return 0;
+ case V4L2_CTRL_TYPE_U8:
+ return ROUND_TO_RANGE(ptr.p_u8[idx], u8, ctrl);
+ case V4L2_CTRL_TYPE_U16:
+ return ROUND_TO_RANGE(ptr.p_u16[idx], u16, ctrl);
+ case V4L2_CTRL_TYPE_U32:
+ return ROUND_TO_RANGE(ptr.p_u32[idx], u32, ctrl);
+
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ ptr.p_s32[idx] = !!ptr.p_s32[idx];
+ return 0;
+
+ case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ if (ptr.p_s32[idx] < ctrl->minimum || ptr.p_s32[idx] > ctrl->maximum)
+ return -ERANGE;
+ if (ptr.p_s32[idx] < BITS_PER_LONG_LONG &&
+ (ctrl->menu_skip_mask & BIT_ULL(ptr.p_s32[idx])))
+ return -EINVAL;
+ if (ctrl->type == V4L2_CTRL_TYPE_MENU &&
+ ctrl->qmenu[ptr.p_s32[idx]][0] == '\0')
+ return -EINVAL;
+ return 0;
+
+ case V4L2_CTRL_TYPE_BITMASK:
+ ptr.p_s32[idx] &= ctrl->maximum;
+ return 0;
+
+ case V4L2_CTRL_TYPE_BUTTON:
+ case V4L2_CTRL_TYPE_CTRL_CLASS:
+ ptr.p_s32[idx] = 0;
+ return 0;
+
+ case V4L2_CTRL_TYPE_STRING:
+ idx *= ctrl->elem_size;
+ len = strlen(ptr.p_char + idx);
+ if (len < ctrl->minimum)
+ return -ERANGE;
+ if ((len - (u32)ctrl->minimum) % (u32)ctrl->step)
+ return -ERANGE;
+ return 0;
+
+ default:
+ return std_validate_compound(ctrl, idx, ptr);
+ }
+}
+
+int v4l2_ctrl_type_op_validate(const struct v4l2_ctrl *ctrl,
+ union v4l2_ctrl_ptr ptr)
+{
+ unsigned int i;
+ int ret = 0;
+
+ switch ((u32)ctrl->type) {
+ case V4L2_CTRL_TYPE_U8:
+ if (ctrl->maximum == 0xff && ctrl->minimum == 0 && ctrl->step == 1)
+ return 0;
+ break;
+ case V4L2_CTRL_TYPE_U16:
+ if (ctrl->maximum == 0xffff && ctrl->minimum == 0 && ctrl->step == 1)
+ return 0;
+ break;
+ case V4L2_CTRL_TYPE_U32:
+ if (ctrl->maximum == 0xffffffff && ctrl->minimum == 0 && ctrl->step == 1)
+ return 0;
+ break;
+
+ case V4L2_CTRL_TYPE_BUTTON:
+ case V4L2_CTRL_TYPE_CTRL_CLASS:
+ memset(ptr.p_s32, 0, ctrl->new_elems * sizeof(s32));
+ return 0;
+ }
+
+ for (i = 0; !ret && i < ctrl->new_elems; i++)
+ ret = std_validate_elem(ctrl, i, ptr);
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_ctrl_type_op_validate);
+
+static const struct v4l2_ctrl_type_ops std_type_ops = {
+ .equal = v4l2_ctrl_type_op_equal,
+ .init = v4l2_ctrl_type_op_init,
+ .log = v4l2_ctrl_type_op_log,
+ .validate = v4l2_ctrl_type_op_validate,
+};
+
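+/*
+ * Sketch of how a driver could override these ops (hypothetical names;
+ * std_type_ops is only the fallback used when v4l2_ctrl_new() below is
+ * passed a NULL type_ops):
+ *
+ *	static const struct v4l2_ctrl_type_ops foo_type_ops = {
+ *		.equal = v4l2_ctrl_type_op_equal,
+ *		.init = foo_init,
+ *		.log = foo_log,
+ *		.validate = foo_validate,
+ *	};
+ *
+ * with .type_ops = &foo_type_ops set in a struct v4l2_ctrl_config that is
+ * passed to v4l2_ctrl_new_custom().
+ */
+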
+void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, void *priv)
+{
+ if (!ctrl)
+ return;
+ if (!notify) {
+ ctrl->call_notify = 0;
+ return;
+ }
+ if (WARN_ON(ctrl->handler->notify && ctrl->handler->notify != notify))
+ return;
+ ctrl->handler->notify = notify;
+ ctrl->handler->notify_priv = priv;
+ ctrl->call_notify = 1;
+}
+EXPORT_SYMBOL(v4l2_ctrl_notify);
+
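+/*
+ * Usage sketch for v4l2_ctrl_notify() (hypothetical driver code):
+ *
+ *	static void foo_notify(struct v4l2_ctrl *ctrl, void *priv)
+ *	{
+ *		struct foo_state *state = priv;
+ *		...
+ *	}
+ *
+ *	v4l2_ctrl_notify(state->gain_ctrl, foo_notify, state);
+ *
+ * Passing a NULL notify clears call_notify for that control. The callback
+ * itself is stored per handler, hence the WARN_ON above when a different
+ * callback is installed on the same handler.
+ */
+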
+/* Copy one value to another. */
+static void ptr_to_ptr(struct v4l2_ctrl *ctrl,
+ union v4l2_ctrl_ptr from, union v4l2_ctrl_ptr to,
+ unsigned int elems)
+{
+ if (ctrl == NULL)
+ return;
+ memcpy(to.p, from.p_const, elems * ctrl->elem_size);
+}
+
+/* Copy the new value to the current value. */
+void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags)
+{
+ bool changed;
+
+ if (ctrl == NULL)
+ return;
+
+ /* has_changed is set by cluster_changed */
+ changed = ctrl->has_changed;
+ if (changed) {
+ if (ctrl->is_dyn_array)
+ ctrl->elems = ctrl->new_elems;
+ ptr_to_ptr(ctrl, ctrl->p_new, ctrl->p_cur, ctrl->elems);
+ }
+
+ if (ch_flags & V4L2_EVENT_CTRL_CH_FLAGS) {
+ /* Note: CH_FLAGS is only set for auto clusters. */
+ ctrl->flags &=
+ ~(V4L2_CTRL_FLAG_INACTIVE | V4L2_CTRL_FLAG_VOLATILE);
+ if (!is_cur_manual(ctrl->cluster[0])) {
+ ctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
+ if (ctrl->cluster[0]->has_volatiles)
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+ }
+ fh = NULL;
+ }
+ if (changed || ch_flags) {
+ /*
+ * If a control was changed that was not one of the controls
+ * modified by the application, then send the event to all.
+ */
+ if (!ctrl->is_new)
+ fh = NULL;
+ send_event(fh, ctrl,
+ (changed ? V4L2_EVENT_CTRL_CH_VALUE : 0) | ch_flags);
+ if (ctrl->call_notify && changed && ctrl->handler->notify)
+ ctrl->handler->notify(ctrl, ctrl->handler->notify_priv);
+ }
+}
+
+/* Copy the current value to the new value */
+void cur_to_new(struct v4l2_ctrl *ctrl)
+{
+ if (ctrl == NULL)
+ return;
+ if (ctrl->is_dyn_array)
+ ctrl->new_elems = ctrl->elems;
+ ptr_to_ptr(ctrl, ctrl->p_cur, ctrl->p_new, ctrl->new_elems);
+}
+
+static bool req_alloc_array(struct v4l2_ctrl_ref *ref, u32 elems)
+{
+ void *tmp;
+
+ if (elems == ref->p_req_array_alloc_elems)
+ return true;
+ if (ref->ctrl->is_dyn_array &&
+ elems < ref->p_req_array_alloc_elems)
+ return true;
+
+ tmp = kvmalloc(elems * ref->ctrl->elem_size, GFP_KERNEL);
+
+ if (!tmp) {
+ ref->p_req_array_enomem = true;
+ return false;
+ }
+ ref->p_req_array_enomem = false;
+ kvfree(ref->p_req.p);
+ ref->p_req.p = tmp;
+ ref->p_req_array_alloc_elems = elems;
+ return true;
+}
+
+/* Copy the new value to the request value */
+void new_to_req(struct v4l2_ctrl_ref *ref)
+{
+ struct v4l2_ctrl *ctrl;
+
+ if (!ref)
+ return;
+
+ ctrl = ref->ctrl;
+ if (ctrl->is_array && !req_alloc_array(ref, ctrl->new_elems))
+ return;
+
+ ref->p_req_elems = ctrl->new_elems;
+ ptr_to_ptr(ctrl, ctrl->p_new, ref->p_req, ref->p_req_elems);
+ ref->p_req_valid = true;
+}
+
+/* Copy the current value to the request value */
+void cur_to_req(struct v4l2_ctrl_ref *ref)
+{
+ struct v4l2_ctrl *ctrl;
+
+ if (!ref)
+ return;
+
+ ctrl = ref->ctrl;
+ if (ctrl->is_array && !req_alloc_array(ref, ctrl->elems))
+ return;
+
+ ref->p_req_elems = ctrl->elems;
+ ptr_to_ptr(ctrl, ctrl->p_cur, ref->p_req, ctrl->elems);
+ ref->p_req_valid = true;
+}
+
+/* Copy the request value to the new value */
+int req_to_new(struct v4l2_ctrl_ref *ref)
+{
+ struct v4l2_ctrl *ctrl;
+
+ if (!ref)
+ return 0;
+
+ ctrl = ref->ctrl;
+
+ /*
+ * This control was never set in the request, so just use the current
+ * value.
+ */
+ if (!ref->p_req_valid) {
+ if (ctrl->is_dyn_array)
+ ctrl->new_elems = ctrl->elems;
+ ptr_to_ptr(ctrl, ctrl->p_cur, ctrl->p_new, ctrl->new_elems);
+ return 0;
+ }
+
+ /* Not an array, so just copy the request value */
+ if (!ctrl->is_array) {
+ ptr_to_ptr(ctrl, ref->p_req, ctrl->p_new, ctrl->new_elems);
+ return 0;
+ }
+
+ /* Sanity check, should never happen */
+ if (WARN_ON(!ref->p_req_array_alloc_elems))
+ return -ENOMEM;
+
+ if (!ctrl->is_dyn_array &&
+ ref->p_req_elems != ctrl->p_array_alloc_elems)
+ return -ENOMEM;
+
+ /*
+ * Check if the number of elements in the request is more than the
+ * elements in ctrl->p_array. If so, attempt to realloc ctrl->p_array.
+ * Note that p_array is allocated with twice the number of elements
+ * in the dynamic array since it has to store both the current and
+ * new value of such a control.
+ */
+ if (ref->p_req_elems > ctrl->p_array_alloc_elems) {
+ unsigned int sz = ref->p_req_elems * ctrl->elem_size;
+ void *old = ctrl->p_array;
+ void *tmp = kvzalloc(2 * sz, GFP_KERNEL);
+
+ if (!tmp)
+ return -ENOMEM;
+ memcpy(tmp, ctrl->p_new.p, ctrl->elems * ctrl->elem_size);
+ memcpy(tmp + sz, ctrl->p_cur.p, ctrl->elems * ctrl->elem_size);
+ ctrl->p_new.p = tmp;
+ ctrl->p_cur.p = tmp + sz;
+ ctrl->p_array = tmp;
+ ctrl->p_array_alloc_elems = ref->p_req_elems;
+ kvfree(old);
+ }
+
+ ctrl->new_elems = ref->p_req_elems;
+ ptr_to_ptr(ctrl, ref->p_req, ctrl->p_new, ctrl->new_elems);
+ return 0;
+}
+
+/* Control range checking */
+int check_range(enum v4l2_ctrl_type type,
+ s64 min, s64 max, u64 step, s64 def)
+{
+ switch (type) {
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ if (step != 1 || max > 1 || min < 0)
+ return -ERANGE;
+ fallthrough;
+ case V4L2_CTRL_TYPE_U8:
+ case V4L2_CTRL_TYPE_U16:
+ case V4L2_CTRL_TYPE_U32:
+ case V4L2_CTRL_TYPE_INTEGER:
+ case V4L2_CTRL_TYPE_INTEGER64:
+ if (step == 0 || min > max || def < min || def > max)
+ return -ERANGE;
+ return 0;
+ case V4L2_CTRL_TYPE_BITMASK:
+ if (step || min || !max || (def & ~max))
+ return -ERANGE;
+ return 0;
+ case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_INTEGER_MENU:
+ if (min > max || def < min || def > max)
+ return -ERANGE;
+ /*
+ * Note: step == menu_skip_mask for menu controls.
+ * So here we check if the default value is masked out.
+ */
+ if (step && ((1 << def) & step))
+ return -EINVAL;
+ return 0;
+ case V4L2_CTRL_TYPE_STRING:
+ if (min > max || min < 0 || step < 1 || def)
+ return -ERANGE;
+ return 0;
+ default:
+ return 0;
+ }
+}
+
+/* Set the handler's error code if it wasn't set earlier already */
+static inline int handler_set_err(struct v4l2_ctrl_handler *hdl, int err)
+{
+ if (hdl->error == 0)
+ hdl->error = err;
+ return err;
+}
+
+/* Initialize the handler */
+int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl,
+ unsigned nr_of_controls_hint,
+ struct lock_class_key *key, const char *name)
+{
+ mutex_init(&hdl->_lock);
+ hdl->lock = &hdl->_lock;
+ lockdep_set_class_and_name(hdl->lock, key, name);
+ INIT_LIST_HEAD(&hdl->ctrls);
+ INIT_LIST_HEAD(&hdl->ctrl_refs);
+ hdl->nr_of_buckets = 1 + nr_of_controls_hint / 8;
+ hdl->buckets = kvcalloc(hdl->nr_of_buckets, sizeof(hdl->buckets[0]),
+ GFP_KERNEL);
+ hdl->error = hdl->buckets ? 0 : -ENOMEM;
+ v4l2_ctrl_handler_init_request(hdl);
+ return hdl->error;
+}
+EXPORT_SYMBOL(v4l2_ctrl_handler_init_class);
+
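+/*
+ * Drivers normally reach this through the v4l2_ctrl_handler_init() macro
+ * from media/v4l2-ctrls.h, which supplies the lockdep class key and name.
+ * A typical probe() sequence (hypothetical driver code):
+ *
+ *	v4l2_ctrl_handler_init(&state->hdl, 4);
+ *	... add controls ...
+ *	if (state->hdl.error) {
+ *		ret = state->hdl.error;
+ *		v4l2_ctrl_handler_free(&state->hdl);
+ *		return ret;
+ *	}
+ */
+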
+/* Free all controls and control refs */
+void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
+{
+ struct v4l2_ctrl_ref *ref, *next_ref;
+ struct v4l2_ctrl *ctrl, *next_ctrl;
+ struct v4l2_subscribed_event *sev, *next_sev;
+
+ if (hdl == NULL || hdl->buckets == NULL)
+ return;
+
+ v4l2_ctrl_handler_free_request(hdl);
+
+ mutex_lock(hdl->lock);
+ /* Free all nodes */
+ list_for_each_entry_safe(ref, next_ref, &hdl->ctrl_refs, node) {
+ list_del(&ref->node);
+ if (ref->p_req_array_alloc_elems)
+ kvfree(ref->p_req.p);
+ kfree(ref);
+ }
+ /* Free all controls owned by the handler */
+ list_for_each_entry_safe(ctrl, next_ctrl, &hdl->ctrls, node) {
+ list_del(&ctrl->node);
+ list_for_each_entry_safe(sev, next_sev, &ctrl->ev_subs, node)
+ list_del(&sev->node);
+ kvfree(ctrl->p_array);
+ kvfree(ctrl);
+ }
+ kvfree(hdl->buckets);
+ hdl->buckets = NULL;
+ hdl->cached = NULL;
+ hdl->error = 0;
+ mutex_unlock(hdl->lock);
+ mutex_destroy(&hdl->_lock);
+}
+EXPORT_SYMBOL(v4l2_ctrl_handler_free);
+
+/*
+ * For backwards compatibility: V4L2_CID_PRIVATE_BASE should no longer
+ * be used except in G_CTRL, S_CTRL, QUERYCTRL and QUERYMENU when dealing
+ * with applications that do not use the NEXT_CTRL flag.
+ *
+ * We just find the n-th private user control. It's O(N), but that should
+ * not be an issue in this particular case.
+ */
+static struct v4l2_ctrl_ref *find_private_ref(
+ struct v4l2_ctrl_handler *hdl, u32 id)
+{
+ struct v4l2_ctrl_ref *ref;
+
+ id -= V4L2_CID_PRIVATE_BASE;
+ list_for_each_entry(ref, &hdl->ctrl_refs, node) {
+ /*
+ * Search for private user controls that are compatible with
+ * VIDIOC_G/S_CTRL.
+ */
+ if (V4L2_CTRL_ID2WHICH(ref->ctrl->id) == V4L2_CTRL_CLASS_USER &&
+ V4L2_CTRL_DRIVER_PRIV(ref->ctrl->id)) {
+ if (!ref->ctrl->is_int)
+ continue;
+ if (id == 0)
+ return ref;
+ id--;
+ }
+ }
+ return NULL;
+}
+
+/* Find a control with the given ID. */
+struct v4l2_ctrl_ref *find_ref(struct v4l2_ctrl_handler *hdl, u32 id)
+{
+ struct v4l2_ctrl_ref *ref;
+ int bucket;
+
+ id &= V4L2_CTRL_ID_MASK;
+
+ /* Old-style private controls need special handling */
+ if (id >= V4L2_CID_PRIVATE_BASE)
+ return find_private_ref(hdl, id);
+ bucket = id % hdl->nr_of_buckets;
+
+ /* Simple optimization: cache the last control found */
+ if (hdl->cached && hdl->cached->ctrl->id == id)
+ return hdl->cached;
+
+ /* Not in cache, search the hash */
+ ref = hdl->buckets ? hdl->buckets[bucket] : NULL;
+ while (ref && ref->ctrl->id != id)
+ ref = ref->next;
+
+ if (ref)
+ hdl->cached = ref; /* cache it! */
+ return ref;
+}
+
+/* Find a control with the given ID. Take the handler's lock first. */
+struct v4l2_ctrl_ref *find_ref_lock(struct v4l2_ctrl_handler *hdl, u32 id)
+{
+ struct v4l2_ctrl_ref *ref = NULL;
+
+ if (hdl) {
+ mutex_lock(hdl->lock);
+ ref = find_ref(hdl, id);
+ mutex_unlock(hdl->lock);
+ }
+ return ref;
+}
+
+/* Find a control with the given ID. */
+struct v4l2_ctrl *v4l2_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id)
+{
+ struct v4l2_ctrl_ref *ref = find_ref_lock(hdl, id);
+
+ return ref ? ref->ctrl : NULL;
+}
+EXPORT_SYMBOL(v4l2_ctrl_find);
+
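+/*
+ * Usage sketch (hypothetical driver code): look up a control by ID in a
+ * handler; returns NULL when the control does not exist:
+ *
+ *	struct v4l2_ctrl *gain = v4l2_ctrl_find(&state->hdl, V4L2_CID_GAIN);
+ */
+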
+/* Allocate a new v4l2_ctrl_ref and hook it into the handler. */
+int handler_new_ref(struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ctrl *ctrl,
+ struct v4l2_ctrl_ref **ctrl_ref,
+ bool from_other_dev, bool allocate_req)
+{
+ struct v4l2_ctrl_ref *ref;
+ struct v4l2_ctrl_ref *new_ref;
+ u32 id = ctrl->id;
+ u32 class_ctrl = V4L2_CTRL_ID2WHICH(id) | 1;
+ int bucket = id % hdl->nr_of_buckets; /* which bucket to use */
+ unsigned int size_extra_req = 0;
+
+ if (ctrl_ref)
+ *ctrl_ref = NULL;
+
+ /*
+ * Automatically add the control class if it is not yet present and
+ * the new control is not a compound control.
+ */
+ if (ctrl->type < V4L2_CTRL_COMPOUND_TYPES &&
+ id != class_ctrl && find_ref_lock(hdl, class_ctrl) == NULL)
+ if (!v4l2_ctrl_new_std(hdl, NULL, class_ctrl, 0, 0, 0, 0))
+ return hdl->error;
+
+ if (hdl->error)
+ return hdl->error;
+
+ if (allocate_req && !ctrl->is_array)
+ size_extra_req = ctrl->elems * ctrl->elem_size;
+ new_ref = kzalloc(sizeof(*new_ref) + size_extra_req, GFP_KERNEL);
+ if (!new_ref)
+ return handler_set_err(hdl, -ENOMEM);
+ new_ref->ctrl = ctrl;
+ new_ref->from_other_dev = from_other_dev;
+ if (size_extra_req)
+ new_ref->p_req.p = &new_ref[1];
+
+ INIT_LIST_HEAD(&new_ref->node);
+
+ mutex_lock(hdl->lock);
+
+ /*
+ * Add immediately at the end of the list if the list is empty, or if
+ * the last element in the list has a lower ID. This ensures that when
+ * elements are added in ascending order the insertion is an O(1)
+ * operation.
+ */
+ if (list_empty(&hdl->ctrl_refs) || id > node2id(hdl->ctrl_refs.prev)) {
+ list_add_tail(&new_ref->node, &hdl->ctrl_refs);
+ goto insert_in_hash;
+ }
+
+ /* Find insert position in sorted list */
+ list_for_each_entry(ref, &hdl->ctrl_refs, node) {
+ if (ref->ctrl->id < id)
+ continue;
+ /* Don't add duplicates */
+ if (ref->ctrl->id == id) {
+ kfree(new_ref);
+ goto unlock;
+ }
+ list_add(&new_ref->node, ref->node.prev);
+ break;
+ }
+
+insert_in_hash:
+ /* Insert the control node in the hash */
+ new_ref->next = hdl->buckets[bucket];
+ hdl->buckets[bucket] = new_ref;
+ if (ctrl_ref)
+ *ctrl_ref = new_ref;
+ if (ctrl->handler == hdl) {
+ /*
+ * By default each control starts in a cluster of its own.
+ * new_ref->ctrl is basically a cluster array with one
+ * element, so that's perfect to use as the cluster pointer.
+ * But only do this for the handler that owns the control.
+ */
+ ctrl->cluster = &new_ref->ctrl;
+ ctrl->ncontrols = 1;
+ }
+
+unlock:
+ mutex_unlock(hdl->lock);
+ return 0;
+}
+
+/* Add a new control */
+static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops,
+ const struct v4l2_ctrl_type_ops *type_ops,
+ u32 id, const char *name, enum v4l2_ctrl_type type,
+ s64 min, s64 max, u64 step, s64 def,
+ const u32 dims[V4L2_CTRL_MAX_DIMS], u32 elem_size,
+ u32 flags, const char * const *qmenu,
+ const s64 *qmenu_int, const union v4l2_ctrl_ptr p_def,
+ void *priv)
+{
+ struct v4l2_ctrl *ctrl;
+ unsigned sz_extra;
+ unsigned nr_of_dims = 0;
+ unsigned elems = 1;
+ bool is_array;
+ unsigned tot_ctrl_size;
+ void *data;
+ int err;
+
+ if (hdl->error)
+ return NULL;
+
+ while (dims && dims[nr_of_dims]) {
+ elems *= dims[nr_of_dims];
+ nr_of_dims++;
+ if (nr_of_dims == V4L2_CTRL_MAX_DIMS)
+ break;
+ }
+ is_array = nr_of_dims > 0;
+
+ /* Prefill elem_size for all types handled by std_type_ops */
+ switch ((u32)type) {
+ case V4L2_CTRL_TYPE_INTEGER64:
+ elem_size = sizeof(s64);
+ break;
+ case V4L2_CTRL_TYPE_STRING:
+ elem_size = max + 1;
+ break;
+ case V4L2_CTRL_TYPE_U8:
+ elem_size = sizeof(u8);
+ break;
+ case V4L2_CTRL_TYPE_U16:
+ elem_size = sizeof(u16);
+ break;
+ case V4L2_CTRL_TYPE_U32:
+ elem_size = sizeof(u32);
+ break;
+ case V4L2_CTRL_TYPE_MPEG2_SEQUENCE:
+ elem_size = sizeof(struct v4l2_ctrl_mpeg2_sequence);
+ break;
+ case V4L2_CTRL_TYPE_MPEG2_PICTURE:
+ elem_size = sizeof(struct v4l2_ctrl_mpeg2_picture);
+ break;
+ case V4L2_CTRL_TYPE_MPEG2_QUANTISATION:
+ elem_size = sizeof(struct v4l2_ctrl_mpeg2_quantisation);
+ break;
+ case V4L2_CTRL_TYPE_FWHT_PARAMS:
+ elem_size = sizeof(struct v4l2_ctrl_fwht_params);
+ break;
+ case V4L2_CTRL_TYPE_H264_SPS:
+ elem_size = sizeof(struct v4l2_ctrl_h264_sps);
+ break;
+ case V4L2_CTRL_TYPE_H264_PPS:
+ elem_size = sizeof(struct v4l2_ctrl_h264_pps);
+ break;
+ case V4L2_CTRL_TYPE_H264_SCALING_MATRIX:
+ elem_size = sizeof(struct v4l2_ctrl_h264_scaling_matrix);
+ break;
+ case V4L2_CTRL_TYPE_H264_SLICE_PARAMS:
+ elem_size = sizeof(struct v4l2_ctrl_h264_slice_params);
+ break;
+ case V4L2_CTRL_TYPE_H264_DECODE_PARAMS:
+ elem_size = sizeof(struct v4l2_ctrl_h264_decode_params);
+ break;
+ case V4L2_CTRL_TYPE_H264_PRED_WEIGHTS:
+ elem_size = sizeof(struct v4l2_ctrl_h264_pred_weights);
+ break;
+ case V4L2_CTRL_TYPE_VP8_FRAME:
+ elem_size = sizeof(struct v4l2_ctrl_vp8_frame);
+ break;
+ case V4L2_CTRL_TYPE_HEVC_SPS:
+ elem_size = sizeof(struct v4l2_ctrl_hevc_sps);
+ break;
+ case V4L2_CTRL_TYPE_HEVC_PPS:
+ elem_size = sizeof(struct v4l2_ctrl_hevc_pps);
+ break;
+ case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS:
+ elem_size = sizeof(struct v4l2_ctrl_hevc_slice_params);
+ break;
+ case V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX:
+ elem_size = sizeof(struct v4l2_ctrl_hevc_scaling_matrix);
+ break;
+ case V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS:
+ elem_size = sizeof(struct v4l2_ctrl_hevc_decode_params);
+ break;
+ case V4L2_CTRL_TYPE_HDR10_CLL_INFO:
+ elem_size = sizeof(struct v4l2_ctrl_hdr10_cll_info);
+ break;
+ case V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY:
+ elem_size = sizeof(struct v4l2_ctrl_hdr10_mastering_display);
+ break;
+ case V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR:
+ elem_size = sizeof(struct v4l2_ctrl_vp9_compressed_hdr);
+ break;
+ case V4L2_CTRL_TYPE_VP9_FRAME:
+ elem_size = sizeof(struct v4l2_ctrl_vp9_frame);
+ break;
+ case V4L2_CTRL_TYPE_AV1_SEQUENCE:
+ elem_size = sizeof(struct v4l2_ctrl_av1_sequence);
+ break;
+ case V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY:
+ elem_size = sizeof(struct v4l2_ctrl_av1_tile_group_entry);
+ break;
+ case V4L2_CTRL_TYPE_AV1_FRAME:
+ elem_size = sizeof(struct v4l2_ctrl_av1_frame);
+ break;
+ case V4L2_CTRL_TYPE_AV1_FILM_GRAIN:
+ elem_size = sizeof(struct v4l2_ctrl_av1_film_grain);
+ break;
+ case V4L2_CTRL_TYPE_AREA:
+ elem_size = sizeof(struct v4l2_area);
+ break;
+ default:
+ if (type < V4L2_CTRL_COMPOUND_TYPES)
+ elem_size = sizeof(s32);
+ break;
+ }
+
+ /* Sanity checks */
+ if (id == 0 || name == NULL || !elem_size ||
+ id >= V4L2_CID_PRIVATE_BASE ||
+ (type == V4L2_CTRL_TYPE_MENU && qmenu == NULL) ||
+ (type == V4L2_CTRL_TYPE_INTEGER_MENU && qmenu_int == NULL)) {
+ handler_set_err(hdl, -ERANGE);
+ return NULL;
+ }
+ err = check_range(type, min, max, step, def);
+ if (err) {
+ handler_set_err(hdl, err);
+ return NULL;
+ }
+ if (is_array &&
+ (type == V4L2_CTRL_TYPE_BUTTON ||
+ type == V4L2_CTRL_TYPE_CTRL_CLASS)) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+ if (flags & V4L2_CTRL_FLAG_DYNAMIC_ARRAY) {
+ /*
+ * For now, support this only for one-dimensional arrays.
+ *
+ * This can be relaxed in the future, but that will
+ * require more effort.
+ */
+ if (nr_of_dims != 1) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+ /* Start with just 1 element */
+ elems = 1;
+ }
+
+ tot_ctrl_size = elem_size * elems;
+ sz_extra = 0;
+ if (type == V4L2_CTRL_TYPE_BUTTON)
+ flags |= V4L2_CTRL_FLAG_WRITE_ONLY |
+ V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
+ else if (type == V4L2_CTRL_TYPE_CTRL_CLASS)
+ flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ else if (!is_array &&
+ (type == V4L2_CTRL_TYPE_INTEGER64 ||
+ type == V4L2_CTRL_TYPE_STRING ||
+ type >= V4L2_CTRL_COMPOUND_TYPES))
+ sz_extra += 2 * tot_ctrl_size;
+
+ if (type >= V4L2_CTRL_COMPOUND_TYPES && p_def.p_const)
+ sz_extra += elem_size;
+
+ ctrl = kvzalloc(sizeof(*ctrl) + sz_extra, GFP_KERNEL);
+ if (ctrl == NULL) {
+ handler_set_err(hdl, -ENOMEM);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&ctrl->node);
+ INIT_LIST_HEAD(&ctrl->ev_subs);
+ ctrl->handler = hdl;
+ ctrl->ops = ops;
+ ctrl->type_ops = type_ops ? type_ops : &std_type_ops;
+ ctrl->id = id;
+ ctrl->name = name;
+ ctrl->type = type;
+ ctrl->flags = flags;
+ ctrl->minimum = min;
+ ctrl->maximum = max;
+ ctrl->step = step;
+ ctrl->default_value = def;
+ ctrl->is_string = !is_array && type == V4L2_CTRL_TYPE_STRING;
+ ctrl->is_ptr = is_array || type >= V4L2_CTRL_COMPOUND_TYPES || ctrl->is_string;
+ ctrl->is_int = !ctrl->is_ptr && type != V4L2_CTRL_TYPE_INTEGER64;
+ ctrl->is_array = is_array;
+ ctrl->is_dyn_array = !!(flags & V4L2_CTRL_FLAG_DYNAMIC_ARRAY);
+ ctrl->elems = elems;
+ ctrl->new_elems = elems;
+ ctrl->nr_of_dims = nr_of_dims;
+ if (nr_of_dims)
+ memcpy(ctrl->dims, dims, nr_of_dims * sizeof(dims[0]));
+ ctrl->elem_size = elem_size;
+ if (type == V4L2_CTRL_TYPE_MENU)
+ ctrl->qmenu = qmenu;
+ else if (type == V4L2_CTRL_TYPE_INTEGER_MENU)
+ ctrl->qmenu_int = qmenu_int;
+ ctrl->priv = priv;
+ ctrl->cur.val = ctrl->val = def;
+ data = &ctrl[1];
+
+ if (ctrl->is_array) {
+ ctrl->p_array_alloc_elems = elems;
+ ctrl->p_array = kvzalloc(2 * elems * elem_size, GFP_KERNEL);
+ if (!ctrl->p_array) {
+ kvfree(ctrl);
+ return NULL;
+ }
+ data = ctrl->p_array;
+ }
+
+ if (!ctrl->is_int) {
+ ctrl->p_new.p = data;
+ ctrl->p_cur.p = data + tot_ctrl_size;
+ } else {
+ ctrl->p_new.p = &ctrl->val;
+ ctrl->p_cur.p = &ctrl->cur.val;
+ }
+
+ if (type >= V4L2_CTRL_COMPOUND_TYPES && p_def.p_const) {
+ if (ctrl->is_array)
+ ctrl->p_def.p = &ctrl[1];
+ else
+ ctrl->p_def.p = ctrl->p_cur.p + tot_ctrl_size;
+ memcpy(ctrl->p_def.p, p_def.p_const, elem_size);
+ }
+
+ ctrl->type_ops->init(ctrl, 0, ctrl->p_cur);
+ cur_to_new(ctrl);
+
+ if (handler_new_ref(hdl, ctrl, NULL, false, false)) {
+ kvfree(ctrl->p_array);
+ kvfree(ctrl);
+ return NULL;
+ }
+ mutex_lock(hdl->lock);
+ list_add_tail(&ctrl->node, &hdl->ctrls);
+ mutex_unlock(hdl->lock);
+ return ctrl;
+}
+
+struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_config *cfg, void *priv)
+{
+ bool is_menu;
+ struct v4l2_ctrl *ctrl;
+ const char *name = cfg->name;
+ const char * const *qmenu = cfg->qmenu;
+ const s64 *qmenu_int = cfg->qmenu_int;
+ enum v4l2_ctrl_type type = cfg->type;
+ u32 flags = cfg->flags;
+ s64 min = cfg->min;
+ s64 max = cfg->max;
+ u64 step = cfg->step;
+ s64 def = cfg->def;
+
+ if (name == NULL)
+ v4l2_ctrl_fill(cfg->id, &name, &type, &min, &max, &step,
+ &def, &flags);
+
+ is_menu = (type == V4L2_CTRL_TYPE_MENU ||
+ type == V4L2_CTRL_TYPE_INTEGER_MENU);
+ if (is_menu)
+ WARN_ON(step);
+ else
+ WARN_ON(cfg->menu_skip_mask);
+ if (type == V4L2_CTRL_TYPE_MENU && !qmenu) {
+ qmenu = v4l2_ctrl_get_menu(cfg->id);
+ } else if (type == V4L2_CTRL_TYPE_INTEGER_MENU && !qmenu_int) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+
+ ctrl = v4l2_ctrl_new(hdl, cfg->ops, cfg->type_ops, cfg->id, name,
+ type, min, max,
+ is_menu ? cfg->menu_skip_mask : step, def,
+ cfg->dims, cfg->elem_size,
+ flags, qmenu, qmenu_int, cfg->p_def, priv);
+ if (ctrl)
+ ctrl->is_private = cfg->is_private;
+ return ctrl;
+}
+EXPORT_SYMBOL(v4l2_ctrl_new_custom);
+
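+/*
+ * Usage sketch (hypothetical names; V4L2_CID_FOO stands in for a
+ * driver-private control ID based on V4L2_CID_USER_BASE):
+ *
+ *	static const struct v4l2_ctrl_config foo_cfg = {
+ *		.ops = &foo_ctrl_ops,
+ *		.id = V4L2_CID_FOO,
+ *		.name = "Foo Level",
+ *		.type = V4L2_CTRL_TYPE_INTEGER,
+ *		.min = 0,
+ *		.max = 15,
+ *		.step = 1,
+ *		.def = 8,
+ *	};
+ *
+ *	v4l2_ctrl_new_custom(&state->hdl, &foo_cfg, NULL);
+ */
+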
+/* Helper function for standard non-menu controls */
+struct v4l2_ctrl *v4l2_ctrl_new_std(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops,
+ u32 id, s64 min, s64 max, u64 step, s64 def)
+{
+ const char *name;
+ enum v4l2_ctrl_type type;
+ u32 flags;
+
+ v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
+ if (type == V4L2_CTRL_TYPE_MENU ||
+ type == V4L2_CTRL_TYPE_INTEGER_MENU ||
+ type >= V4L2_CTRL_COMPOUND_TYPES) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+ return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
+ min, max, step, def, NULL, 0,
+ flags, NULL, NULL, ptr_null, NULL);
+}
+EXPORT_SYMBOL(v4l2_ctrl_new_std);
+
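+/*
+ * Usage sketch (hypothetical driver code): a brightness control with
+ * range 0..255, step 1 and default 128:
+ *
+ *	v4l2_ctrl_new_std(&state->hdl, &foo_ctrl_ops,
+ *			  V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
+ */
+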
+/* Helper function for standard menu controls */
+struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops,
+ u32 id, u8 _max, u64 mask, u8 _def)
+{
+ const char * const *qmenu = NULL;
+ const s64 *qmenu_int = NULL;
+ unsigned int qmenu_int_len = 0;
+ const char *name;
+ enum v4l2_ctrl_type type;
+ s64 min;
+ s64 max = _max;
+ s64 def = _def;
+ u64 step;
+ u32 flags;
+
+ v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
+
+ if (type == V4L2_CTRL_TYPE_MENU)
+ qmenu = v4l2_ctrl_get_menu(id);
+ else if (type == V4L2_CTRL_TYPE_INTEGER_MENU)
+ qmenu_int = v4l2_ctrl_get_int_menu(id, &qmenu_int_len);
+
+ if ((!qmenu && !qmenu_int) || (qmenu_int && max >= qmenu_int_len)) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+ return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
+ 0, max, mask, def, NULL, 0,
+ flags, qmenu, qmenu_int, ptr_null, NULL);
+}
+EXPORT_SYMBOL(v4l2_ctrl_new_std_menu);
+
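+/*
+ * Usage sketch (hypothetical driver code): the power line frequency menu,
+ * with no menu items skipped (mask 0) and 50 Hz as the default:
+ *
+ *	v4l2_ctrl_new_std_menu(&state->hdl, &foo_ctrl_ops,
+ *			V4L2_CID_POWER_LINE_FREQUENCY,
+ *			V4L2_CID_POWER_LINE_FREQUENCY_AUTO, 0,
+ *			V4L2_CID_POWER_LINE_FREQUENCY_50HZ);
+ */
+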
+/* Helper function for standard menu controls with driver defined menu */
+struct v4l2_ctrl *v4l2_ctrl_new_std_menu_items(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops, u32 id, u8 _max,
+ u64 mask, u8 _def, const char * const *qmenu)
+{
+ enum v4l2_ctrl_type type;
+ const char *name;
+ u32 flags;
+ u64 step;
+ s64 min;
+ s64 max = _max;
+ s64 def = _def;
+
+ /*
+ * v4l2_ctrl_new_std_menu_items() should only be called for
+ * standard controls without a standard menu.
+ */
+ if (v4l2_ctrl_get_menu(id)) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+
+ v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
+ if (type != V4L2_CTRL_TYPE_MENU || qmenu == NULL) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+ return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
+ 0, max, mask, def, NULL, 0,
+ flags, qmenu, NULL, ptr_null, NULL);
+}
+EXPORT_SYMBOL(v4l2_ctrl_new_std_menu_items);
+
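+/*
+ * Usage sketch (hypothetical driver code): V4L2_CID_TEST_PATTERN is a
+ * standard menu control whose items are driver defined:
+ *
+ *	static const char * const foo_test_pattern_menu[] = {
+ *		"Disabled",
+ *		"Vertical Color Bars",
+ *	};
+ *
+ *	v4l2_ctrl_new_std_menu_items(&state->hdl, &foo_ctrl_ops,
+ *			V4L2_CID_TEST_PATTERN,
+ *			ARRAY_SIZE(foo_test_pattern_menu) - 1, 0, 0,
+ *			foo_test_pattern_menu);
+ */
+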
+/* Helper function for standard compound controls */
+struct v4l2_ctrl *v4l2_ctrl_new_std_compound(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops, u32 id,
+ const union v4l2_ctrl_ptr p_def)
+{
+ const char *name;
+ enum v4l2_ctrl_type type;
+ u32 flags;
+ s64 min, max, step, def;
+
+ v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
+ if (type < V4L2_CTRL_COMPOUND_TYPES) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+ return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
+ min, max, step, def, NULL, 0,
+ flags, NULL, NULL, p_def, NULL);
+}
+EXPORT_SYMBOL(v4l2_ctrl_new_std_compound);
+
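+/*
+ * Usage sketch (hypothetical driver code): a read-only unit cell size
+ * control whose default value is supplied through p_def:
+ *
+ *	static const struct v4l2_area foo_unit_size = {
+ *		.width = 1120,
+ *		.height = 1120,
+ *	};
+ *
+ *	v4l2_ctrl_new_std_compound(&state->hdl, NULL,
+ *			V4L2_CID_UNIT_CELL_SIZE,
+ *			v4l2_ctrl_ptr_create((void *)&foo_unit_size));
+ */
+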
+/* Helper function for standard integer menu controls */
+struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops,
+ u32 id, u8 _max, u8 _def, const s64 *qmenu_int)
+{
+ const char *name;
+ enum v4l2_ctrl_type type;
+ s64 min;
+ u64 step;
+ s64 max = _max;
+ s64 def = _def;
+ u32 flags;
+
+ v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
+ if (type != V4L2_CTRL_TYPE_INTEGER_MENU) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+ return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
+ 0, max, 0, def, NULL, 0,
+ flags, NULL, qmenu_int, ptr_null, NULL);
+}
+EXPORT_SYMBOL(v4l2_ctrl_new_int_menu);
+
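+/*
+ * Usage sketch (hypothetical driver code): a link frequency menu with two
+ * driver-defined entries, defaulting to the first one:
+ *
+ *	static const s64 foo_link_freqs[] = { 240000000, 480000000 };
+ *
+ *	v4l2_ctrl_new_int_menu(&state->hdl, &foo_ctrl_ops,
+ *			V4L2_CID_LINK_FREQ,
+ *			ARRAY_SIZE(foo_link_freqs) - 1, 0, foo_link_freqs);
+ */
+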
+/* Add the controls from another handler to our own. */
+int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ctrl_handler *add,
+ bool (*filter)(const struct v4l2_ctrl *ctrl),
+ bool from_other_dev)
+{
+ struct v4l2_ctrl_ref *ref;
+ int ret = 0;
+
+ /* Do nothing if either handler is NULL or if they are the same */
+ if (!hdl || !add || hdl == add)
+ return 0;
+ if (hdl->error)
+ return hdl->error;
+ mutex_lock(add->lock);
+ list_for_each_entry(ref, &add->ctrl_refs, node) {
+ struct v4l2_ctrl *ctrl = ref->ctrl;
+
+ /* Skip handler-private controls. */
+ if (ctrl->is_private)
+ continue;
+ /* And control classes */
+ if (ctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS)
+ continue;
+ /* Filter any unwanted controls */
+ if (filter && !filter(ctrl))
+ continue;
+ ret = handler_new_ref(hdl, ctrl, NULL, from_other_dev, false);
+ if (ret)
+ break;
+ }
+ mutex_unlock(add->lock);
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_ctrl_add_handler);
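+
+/*
+ * A minimal usage sketch: a bridge driver inheriting every control of
+ * a sub-device (a hypothetical 'sd' sitting on another physical
+ * device, hence from_other_dev is true):
+ *
+ *    ret = v4l2_ctrl_add_handler(&dev->ctrl_handler,
+ *                                sd->ctrl_handler, NULL, true);
+ */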
+
+bool v4l2_ctrl_radio_filter(const struct v4l2_ctrl *ctrl)
+{
+ if (V4L2_CTRL_ID2WHICH(ctrl->id) == V4L2_CTRL_CLASS_FM_TX)
+ return true;
+ if (V4L2_CTRL_ID2WHICH(ctrl->id) == V4L2_CTRL_CLASS_FM_RX)
+ return true;
+ switch (ctrl->id) {
+ case V4L2_CID_AUDIO_MUTE:
+ case V4L2_CID_AUDIO_VOLUME:
+ case V4L2_CID_AUDIO_BALANCE:
+ case V4L2_CID_AUDIO_BASS:
+ case V4L2_CID_AUDIO_TREBLE:
+ case V4L2_CID_AUDIO_LOUDNESS:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+EXPORT_SYMBOL(v4l2_ctrl_radio_filter);
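+
+/*
+ * A minimal usage sketch combining the two functions above: inherit
+ * only the radio-relevant controls of a hypothetical tuner sub-device
+ * into the handler used by the radio device node:
+ *
+ *    ret = v4l2_ctrl_add_handler(&dev->radio_ctrl_handler,
+ *                                sd->ctrl_handler,
+ *                                v4l2_ctrl_radio_filter, true);
+ */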
+
+/* Cluster controls */
+void v4l2_ctrl_cluster(unsigned ncontrols, struct v4l2_ctrl **controls)
+{
+ bool has_volatiles = false;
+ int i;
+
+ /* The first control is the master control and it must not be NULL */
+ if (WARN_ON(ncontrols == 0 || controls[0] == NULL))
+ return;
+
+ for (i = 0; i < ncontrols; i++) {
+ if (controls[i]) {
+ controls[i]->cluster = controls;
+ controls[i]->ncontrols = ncontrols;
+ if (controls[i]->flags & V4L2_CTRL_FLAG_VOLATILE)
+ has_volatiles = true;
+ }
+ }
+ controls[0]->has_volatiles = has_volatiles;
+}
+EXPORT_SYMBOL(v4l2_ctrl_cluster);
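+
+/*
+ * A minimal usage sketch: cluster the flip controls of a hypothetical
+ * driver 'foo' so that s_ctrl always sees both values at once. Since
+ * the function takes an array of pointers, foo->hflip and foo->vflip
+ * must be adjacent members of the driver struct:
+ *
+ *    foo->hflip = v4l2_ctrl_new_std(&foo->hdl, &foo_ctrl_ops,
+ *                                   V4L2_CID_HFLIP, 0, 1, 1, 0);
+ *    foo->vflip = v4l2_ctrl_new_std(&foo->hdl, &foo_ctrl_ops,
+ *                                   V4L2_CID_VFLIP, 0, 1, 1, 0);
+ *    v4l2_ctrl_cluster(2, &foo->hflip);
+ */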
+
+void v4l2_ctrl_auto_cluster(unsigned ncontrols, struct v4l2_ctrl **controls,
+ u8 manual_val, bool set_volatile)
+{
+ struct v4l2_ctrl *master = controls[0];
+ u32 flag = 0;
+ int i;
+
+ v4l2_ctrl_cluster(ncontrols, controls);
+ WARN_ON(ncontrols <= 1);
+ WARN_ON(manual_val < master->minimum || manual_val > master->maximum);
+ WARN_ON(set_volatile && !has_op(master, g_volatile_ctrl));
+ master->is_auto = true;
+ master->has_volatiles = set_volatile;
+ master->manual_mode_value = manual_val;
+ master->flags |= V4L2_CTRL_FLAG_UPDATE;
+
+ if (!is_cur_manual(master))
+ flag = V4L2_CTRL_FLAG_INACTIVE |
+ (set_volatile ? V4L2_CTRL_FLAG_VOLATILE : 0);
+
+ for (i = 1; i < ncontrols; i++)
+ if (controls[i])
+ controls[i]->flags |= flag;
+}
+EXPORT_SYMBOL(v4l2_ctrl_auto_cluster);
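+
+/*
+ * A minimal usage sketch: an autogain/gain autocluster for a
+ * hypothetical driver 'foo'. manual_val is 0, the manual setting of
+ * V4L2_CID_AUTOGAIN, and set_volatile is true because the hardware
+ * can report the gain it picked in auto mode (so foo_ctrl_ops must
+ * implement g_volatile_ctrl). As with v4l2_ctrl_cluster(), the two
+ * control pointers must be adjacent struct members:
+ *
+ *    foo->autogain = v4l2_ctrl_new_std(&foo->hdl, &foo_ctrl_ops,
+ *                                      V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+ *    foo->gain = v4l2_ctrl_new_std(&foo->hdl, &foo_ctrl_ops,
+ *                                  V4L2_CID_GAIN, 0, 255, 1, 64);
+ *    v4l2_ctrl_auto_cluster(2, &foo->autogain, 0, true);
+ */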
+
+/*
+ * Obtain the current volatile values of an autocluster and mark them
+ * as new.
+ */
+void update_from_auto_cluster(struct v4l2_ctrl *master)
+{
+ int i;
+
+ for (i = 1; i < master->ncontrols; i++)
+ cur_to_new(master->cluster[i]);
+ if (!call_op(master, g_volatile_ctrl))
+ for (i = 1; i < master->ncontrols; i++)
+ if (master->cluster[i])
+ master->cluster[i]->is_new = 1;
+}
+
+/*
+ * Return non-zero if one or more of the controls in the cluster has a new
+ * value that differs from the current value.
+ */
+static int cluster_changed(struct v4l2_ctrl *master)
+{
+ bool changed = false;
+ int i;
+
+ for (i = 0; i < master->ncontrols; i++) {
+ struct v4l2_ctrl *ctrl = master->cluster[i];
+ bool ctrl_changed = false;
+
+ if (!ctrl)
+ continue;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_EXECUTE_ON_WRITE) {
+ changed = true;
+ ctrl_changed = true;
+ }
+
+ /*
+ * Set has_changed to false to avoid generating
+ * the event V4L2_EVENT_CTRL_CH_VALUE
+ */
+ if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) {
+ ctrl->has_changed = false;
+ continue;
+ }
+
+ if (ctrl->elems != ctrl->new_elems)
+ ctrl_changed = true;
+ if (!ctrl_changed)
+ ctrl_changed = !ctrl->type_ops->equal(ctrl,
+ ctrl->p_cur, ctrl->p_new);
+ ctrl->has_changed = ctrl_changed;
+ changed |= ctrl->has_changed;
+ }
+ return changed;
+}
+
+/*
+ * Core function that calls try/s_ctrl and ensures that the new value is
+ * copied to the current value on a set.
+ * Must be called with ctrl->handler->lock held.
+ */
+int try_or_set_cluster(struct v4l2_fh *fh, struct v4l2_ctrl *master,
+ bool set, u32 ch_flags)
+{
+ bool update_flag;
+ int ret;
+ int i;
+
+ /*
+ * Go through the cluster and either validate the new value or
+ * (if no new value was set), copy the current value to the new
+ * value, ensuring a consistent view for the control ops when
+ * called.
+ */
+ for (i = 0; i < master->ncontrols; i++) {
+ struct v4l2_ctrl *ctrl = master->cluster[i];
+
+ if (!ctrl)
+ continue;
+
+ if (!ctrl->is_new) {
+ cur_to_new(ctrl);
+ continue;
+ }
+ /*
+ * Check again: it may have changed since the
+ * previous check in try_or_set_ext_ctrls().
+ */
+ if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED))
+ return -EBUSY;
+ }
+
+ ret = call_op(master, try_ctrl);
+
+ /* Don't set if there is no change */
+ if (ret || !set || !cluster_changed(master))
+ return ret;
+ ret = call_op(master, s_ctrl);
+ if (ret)
+ return ret;
+
+ /* If OK, then make the new values permanent. */
+ update_flag = is_cur_manual(master) != is_new_manual(master);
+
+ for (i = 0; i < master->ncontrols; i++) {
+ /*
+ * If we switch from auto to manual mode, and this cluster
+ * contains volatile controls, then all non-master controls
+ * have to be marked as changed. The 'new' value contains
+ * the volatile value (obtained by update_from_auto_cluster),
+ * which now has to become the current value.
+ */
+ if (i && update_flag && is_new_manual(master) &&
+ master->has_volatiles && master->cluster[i])
+ master->cluster[i]->has_changed = true;
+
+ new_to_cur(fh, master->cluster[i], ch_flags |
+ ((update_flag && i > 0) ? V4L2_EVENT_CTRL_CH_FLAGS : 0));
+ }
+ return 0;
+}
+
+/* Activate/deactivate a control. */
+void v4l2_ctrl_activate(struct v4l2_ctrl *ctrl, bool active)
+{
+ /* invert since the actual flag is called 'inactive' */
+ bool inactive = !active;
+ bool old;
+
+ if (ctrl == NULL)
+ return;
+
+ if (inactive)
+ /* set V4L2_CTRL_FLAG_INACTIVE */
+ old = test_and_set_bit(4, &ctrl->flags);
+ else
+ /* clear V4L2_CTRL_FLAG_INACTIVE */
+ old = test_and_clear_bit(4, &ctrl->flags);
+ if (old != inactive)
+ send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_FLAGS);
+}
+EXPORT_SYMBOL(v4l2_ctrl_activate);
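+
+/*
+ * A minimal usage sketch: from inside a driver's s_ctrl op, gray out
+ * the manual gain control of a hypothetical driver 'foo' whenever
+ * autogain is switched on:
+ *
+ *    v4l2_ctrl_activate(foo->gain, !foo->autogain->val);
+ */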
+
+void __v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed)
+{
+ bool old;
+
+ if (ctrl == NULL)
+ return;
+
+ lockdep_assert_held(ctrl->handler->lock);
+
+ if (grabbed)
+ /* set V4L2_CTRL_FLAG_GRABBED */
+ old = test_and_set_bit(1, &ctrl->flags);
+ else
+ /* clear V4L2_CTRL_FLAG_GRABBED */
+ old = test_and_clear_bit(1, &ctrl->flags);
+ if (old != grabbed)
+ send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_FLAGS);
+}
+EXPORT_SYMBOL(__v4l2_ctrl_grab);
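+
+/*
+ * A minimal usage sketch: drivers normally call the locking wrapper
+ * v4l2_ctrl_grab() from their start/stop streaming paths so that
+ * controls which cannot change mid-stream are refused with -EBUSY;
+ * 'foo' and its profile control are hypothetical:
+ *
+ *    v4l2_ctrl_grab(foo->profile, on);
+ */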
+
+/* Call s_ctrl for all controls owned by the handler */
+int __v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl)
+{
+ struct v4l2_ctrl *ctrl;
+ int ret = 0;
+
+ if (hdl == NULL)
+ return 0;
+
+ lockdep_assert_held(hdl->lock);
+
+ list_for_each_entry(ctrl, &hdl->ctrls, node)
+ ctrl->done = false;
+
+ list_for_each_entry(ctrl, &hdl->ctrls, node) {
+ struct v4l2_ctrl *master = ctrl->cluster[0];
+ int i;
+
+ /* Skip if this control was already handled by a cluster. */
+ /* Skip button controls and read-only controls. */
+ if (ctrl->done || ctrl->type == V4L2_CTRL_TYPE_BUTTON ||
+ (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY))
+ continue;
+
+ for (i = 0; i < master->ncontrols; i++) {
+ if (master->cluster[i]) {
+ cur_to_new(master->cluster[i]);
+ master->cluster[i]->is_new = 1;
+ master->cluster[i]->done = true;
+ }
+ }
+ ret = call_op(master, s_ctrl);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__v4l2_ctrl_handler_setup);
+
+int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl)
+{
+ int ret;
+
+ if (hdl == NULL)
+ return 0;
+
+ mutex_lock(hdl->lock);
+ ret = __v4l2_ctrl_handler_setup(hdl);
+ mutex_unlock(hdl->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_ctrl_handler_setup);
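+
+/*
+ * A minimal usage sketch: typically called at the end of probe (or at
+ * stream-on) to write the initial control values to the hardware; the
+ * cleanup label is illustrative:
+ *
+ *    ret = v4l2_ctrl_handler_setup(&foo->hdl);
+ *    if (ret)
+ *        goto err_free_handler;
+ */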
+
+/* Log the control name and value */
+static void log_ctrl(const struct v4l2_ctrl *ctrl,
+ const char *prefix, const char *colon)
+{
+ if (ctrl->flags & (V4L2_CTRL_FLAG_DISABLED | V4L2_CTRL_FLAG_WRITE_ONLY))
+ return;
+ if (ctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS)
+ return;
+
+ pr_info("%s%s%s: ", prefix, colon, ctrl->name);
+
+ ctrl->type_ops->log(ctrl);
+
+ if (ctrl->flags & (V4L2_CTRL_FLAG_INACTIVE |
+ V4L2_CTRL_FLAG_GRABBED |
+ V4L2_CTRL_FLAG_VOLATILE)) {
+ if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
+ pr_cont(" inactive");
+ if (ctrl->flags & V4L2_CTRL_FLAG_GRABBED)
+ pr_cont(" grabbed");
+ if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE)
+ pr_cont(" volatile");
+ }
+ pr_cont("\n");
+}
+
+/* Log all controls owned by the handler */
+void v4l2_ctrl_handler_log_status(struct v4l2_ctrl_handler *hdl,
+ const char *prefix)
+{
+ struct v4l2_ctrl *ctrl;
+ const char *colon = "";
+ int len;
+
+ if (!hdl)
+ return;
+ if (!prefix)
+ prefix = "";
+ len = strlen(prefix);
+ if (len && prefix[len - 1] != ' ')
+ colon = ": ";
+ mutex_lock(hdl->lock);
+ list_for_each_entry(ctrl, &hdl->ctrls, node)
+ if (!(ctrl->flags & V4L2_CTRL_FLAG_DISABLED))
+ log_ctrl(ctrl, prefix, colon);
+ mutex_unlock(hdl->lock);
+}
+EXPORT_SYMBOL(v4l2_ctrl_handler_log_status);
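+
+/*
+ * A minimal usage sketch: wired up as the VIDIOC_LOG_STATUS ioctl
+ * handler of a hypothetical driver 'foo':
+ *
+ *    static int foo_log_status(struct file *file, void *fh)
+ *    {
+ *        struct foo *foo = video_drvdata(file);
+ *
+ *        v4l2_ctrl_handler_log_status(&foo->hdl, foo->v4l2_dev.name);
+ *        return 0;
+ *    }
+ */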
+
+int v4l2_ctrl_new_fwnode_properties(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ctrl_ops,
+ const struct v4l2_fwnode_device_properties *p)
+{
+ if (p->orientation != V4L2_FWNODE_PROPERTY_UNSET) {
+ u32 orientation_ctrl;
+
+ switch (p->orientation) {
+ case V4L2_FWNODE_ORIENTATION_FRONT:
+ orientation_ctrl = V4L2_CAMERA_ORIENTATION_FRONT;
+ break;
+ case V4L2_FWNODE_ORIENTATION_BACK:
+ orientation_ctrl = V4L2_CAMERA_ORIENTATION_BACK;
+ break;
+ case V4L2_FWNODE_ORIENTATION_EXTERNAL:
+ orientation_ctrl = V4L2_CAMERA_ORIENTATION_EXTERNAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (!v4l2_ctrl_new_std_menu(hdl, ctrl_ops,
+ V4L2_CID_CAMERA_ORIENTATION,
+ V4L2_CAMERA_ORIENTATION_EXTERNAL, 0,
+ orientation_ctrl))
+ return hdl->error;
+ }
+
+ if (p->rotation != V4L2_FWNODE_PROPERTY_UNSET) {
+ if (!v4l2_ctrl_new_std(hdl, ctrl_ops,
+ V4L2_CID_CAMERA_SENSOR_ROTATION,
+ p->rotation, p->rotation, 1,
+ p->rotation))
+ return hdl->error;
+ }
+
+ return hdl->error;
+}
+EXPORT_SYMBOL(v4l2_ctrl_new_fwnode_properties);
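+
+/*
+ * A minimal usage sketch: parse the rotation and orientation firmware
+ * properties of a sensor and register the corresponding controls, as
+ * the probe code of a hypothetical driver 'foo' might do:
+ *
+ *    struct v4l2_fwnode_device_properties props;
+ *    int ret;
+ *
+ *    ret = v4l2_fwnode_device_parse(dev, &props);
+ *    if (ret)
+ *        return ret;
+ *    ret = v4l2_ctrl_new_fwnode_properties(&foo->hdl, &foo_ctrl_ops,
+ *                                          &props);
+ */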
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-defs.c b/drivers/media/v4l2-core/v4l2-ctrls-defs.c
new file mode 100644
index 0000000000..8696eb1cdd
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-ctrls-defs.c
@@ -0,0 +1,1680 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * V4L2 controls framework control definitions.
+ *
+ * Copyright (C) 2010-2021 Hans Verkuil <hverkuil-cisco@xs4all.nl>
+ */
+
+#include <linux/export.h>
+#include <media/v4l2-ctrls.h>
+
+/*
+ * Returns NULL or a character pointer array containing the menu for
+ * the given control ID. The pointer array ends with a NULL pointer.
+ * An empty string signifies a menu entry that is invalid. This allows
+ * drivers to disable certain options if they are not supported.
+ */
+const char * const *v4l2_ctrl_get_menu(u32 id)
+{
+ static const char * const mpeg_audio_sampling_freq[] = {
+ "44.1 kHz",
+ "48 kHz",
+ "32 kHz",
+ NULL
+ };
+ static const char * const mpeg_audio_encoding[] = {
+ "MPEG-1/2 Layer I",
+ "MPEG-1/2 Layer II",
+ "MPEG-1/2 Layer III",
+ "MPEG-2/4 AAC",
+ "AC-3",
+ NULL
+ };
+ static const char * const mpeg_audio_l1_bitrate[] = {
+ "32 kbps",
+ "64 kbps",
+ "96 kbps",
+ "128 kbps",
+ "160 kbps",
+ "192 kbps",
+ "224 kbps",
+ "256 kbps",
+ "288 kbps",
+ "320 kbps",
+ "352 kbps",
+ "384 kbps",
+ "416 kbps",
+ "448 kbps",
+ NULL
+ };
+ static const char * const mpeg_audio_l2_bitrate[] = {
+ "32 kbps",
+ "48 kbps",
+ "56 kbps",
+ "64 kbps",
+ "80 kbps",
+ "96 kbps",
+ "112 kbps",
+ "128 kbps",
+ "160 kbps",
+ "192 kbps",
+ "224 kbps",
+ "256 kbps",
+ "320 kbps",
+ "384 kbps",
+ NULL
+ };
+ static const char * const mpeg_audio_l3_bitrate[] = {
+ "32 kbps",
+ "40 kbps",
+ "48 kbps",
+ "56 kbps",
+ "64 kbps",
+ "80 kbps",
+ "96 kbps",
+ "112 kbps",
+ "128 kbps",
+ "160 kbps",
+ "192 kbps",
+ "224 kbps",
+ "256 kbps",
+ "320 kbps",
+ NULL
+ };
+ static const char * const mpeg_audio_ac3_bitrate[] = {
+ "32 kbps",
+ "40 kbps",
+ "48 kbps",
+ "56 kbps",
+ "64 kbps",
+ "80 kbps",
+ "96 kbps",
+ "112 kbps",
+ "128 kbps",
+ "160 kbps",
+ "192 kbps",
+ "224 kbps",
+ "256 kbps",
+ "320 kbps",
+ "384 kbps",
+ "448 kbps",
+ "512 kbps",
+ "576 kbps",
+ "640 kbps",
+ NULL
+ };
+ static const char * const mpeg_audio_mode[] = {
+ "Stereo",
+ "Joint Stereo",
+ "Dual",
+ "Mono",
+ NULL
+ };
+ static const char * const mpeg_audio_mode_extension[] = {
+ "Bound 4",
+ "Bound 8",
+ "Bound 12",
+ "Bound 16",
+ NULL
+ };
+ static const char * const mpeg_audio_emphasis[] = {
+ "No Emphasis",
+ "50/15 us",
+ "CCITT J17",
+ NULL
+ };
+ static const char * const mpeg_audio_crc[] = {
+ "No CRC",
+ "16-bit CRC",
+ NULL
+ };
+ static const char * const mpeg_audio_dec_playback[] = {
+ "Auto",
+ "Stereo",
+ "Left",
+ "Right",
+ "Mono",
+ "Swapped Stereo",
+ NULL
+ };
+ static const char * const mpeg_video_encoding[] = {
+ "MPEG-1",
+ "MPEG-2",
+ "MPEG-4 AVC",
+ NULL
+ };
+ static const char * const mpeg_video_aspect[] = {
+ "1x1",
+ "4x3",
+ "16x9",
+ "2.21x1",
+ NULL
+ };
+ static const char * const mpeg_video_bitrate_mode[] = {
+ "Variable Bitrate",
+ "Constant Bitrate",
+ "Constant Quality",
+ NULL
+ };
+ static const char * const mpeg_stream_type[] = {
+ "MPEG-2 Program Stream",
+ "MPEG-2 Transport Stream",
+ "MPEG-1 System Stream",
+ "MPEG-2 DVD-compatible Stream",
+ "MPEG-1 VCD-compatible Stream",
+ "MPEG-2 SVCD-compatible Stream",
+ NULL
+ };
+ static const char * const mpeg_stream_vbi_fmt[] = {
+ "No VBI",
+ "Private Packet, IVTV Format",
+ NULL
+ };
+ static const char * const camera_power_line_frequency[] = {
+ "Disabled",
+ "50 Hz",
+ "60 Hz",
+ "Auto",
+ NULL
+ };
+ static const char * const camera_exposure_auto[] = {
+ "Auto Mode",
+ "Manual Mode",
+ "Shutter Priority Mode",
+ "Aperture Priority Mode",
+ NULL
+ };
+ static const char * const camera_exposure_metering[] = {
+ "Average",
+ "Center Weighted",
+ "Spot",
+ "Matrix",
+ NULL
+ };
+ static const char * const camera_auto_focus_range[] = {
+ "Auto",
+ "Normal",
+ "Macro",
+ "Infinity",
+ NULL
+ };
+ static const char * const colorfx[] = {
+ "None",
+ "Black & White",
+ "Sepia",
+ "Negative",
+ "Emboss",
+ "Sketch",
+ "Sky Blue",
+ "Grass Green",
+ "Skin Whiten",
+ "Vivid",
+ "Aqua",
+ "Art Freeze",
+ "Silhouette",
+ "Solarization",
+ "Antique",
+ "Set Cb/Cr",
+ NULL
+ };
+ static const char * const auto_n_preset_white_balance[] = {
+ "Manual",
+ "Auto",
+ "Incandescent",
+ "Fluorescent",
+ "Fluorescent H",
+ "Horizon",
+ "Daylight",
+ "Flash",
+ "Cloudy",
+ "Shade",
+ NULL,
+ };
+ static const char * const camera_iso_sensitivity_auto[] = {
+ "Manual",
+ "Auto",
+ NULL
+ };
+ static const char * const scene_mode[] = {
+ "None",
+ "Backlight",
+ "Beach/Snow",
+ "Candle Light",
+ "Dusk/Dawn",
+ "Fall Colors",
+ "Fireworks",
+ "Landscape",
+ "Night",
+ "Party/Indoor",
+ "Portrait",
+ "Sports",
+ "Sunset",
+ "Text",
+ NULL
+ };
+ static const char * const tune_emphasis[] = {
+ "None",
+ "50 Microseconds",
+ "75 Microseconds",
+ NULL,
+ };
+ static const char * const header_mode[] = {
+ "Separate Buffer",
+ "Joined With 1st Frame",
+ NULL,
+ };
+ static const char * const multi_slice[] = {
+ "Single",
+ "Max Macroblocks",
+ "Max Bytes",
+ NULL,
+ };
+ static const char * const entropy_mode[] = {
+ "CAVLC",
+ "CABAC",
+ NULL,
+ };
+ static const char * const mpeg_h264_level[] = {
+ "1",
+ "1b",
+ "1.1",
+ "1.2",
+ "1.3",
+ "2",
+ "2.1",
+ "2.2",
+ "3",
+ "3.1",
+ "3.2",
+ "4",
+ "4.1",
+ "4.2",
+ "5",
+ "5.1",
+ "5.2",
+ "6.0",
+ "6.1",
+ "6.2",
+ NULL,
+ };
+ static const char * const h264_loop_filter[] = {
+ "Enabled",
+ "Disabled",
+ "Disabled at Slice Boundary",
+ NULL,
+ };
+ static const char * const h264_profile[] = {
+ "Baseline",
+ "Constrained Baseline",
+ "Main",
+ "Extended",
+ "High",
+ "High 10",
+ "High 422",
+ "High 444 Predictive",
+ "High 10 Intra",
+ "High 422 Intra",
+ "High 444 Intra",
+ "CAVLC 444 Intra",
+ "Scalable Baseline",
+ "Scalable High",
+ "Scalable High Intra",
+ "Stereo High",
+ "Multiview High",
+ "Constrained High",
+ NULL,
+ };
+ static const char * const vui_sar_idc[] = {
+ "Unspecified",
+ "1:1",
+ "12:11",
+ "10:11",
+ "16:11",
+ "40:33",
+ "24:11",
+ "20:11",
+ "32:11",
+ "80:33",
+ "18:11",
+ "15:11",
+ "64:33",
+ "160:99",
+ "4:3",
+ "3:2",
+ "2:1",
+ "Extended SAR",
+ NULL,
+ };
+ static const char * const h264_fp_arrangement_type[] = {
+ "Checkerboard",
+ "Column",
+ "Row",
+ "Side by Side",
+ "Top Bottom",
+ "Temporal",
+ NULL,
+ };
+ static const char * const h264_fmo_map_type[] = {
+ "Interleaved Slices",
+ "Scattered Slices",
+ "Foreground with Leftover",
+ "Box Out",
+ "Raster Scan",
+ "Wipe Scan",
+ "Explicit",
+ NULL,
+ };
+ static const char * const h264_decode_mode[] = {
+ "Slice-Based",
+ "Frame-Based",
+ NULL,
+ };
+ static const char * const h264_start_code[] = {
+ "No Start Code",
+ "Annex B Start Code",
+ NULL,
+ };
+ static const char * const h264_hierarchical_coding_type[] = {
+ "Hier Coding B",
+ "Hier Coding P",
+ NULL,
+ };
+ static const char * const mpeg_mpeg2_level[] = {
+ "Low",
+ "Main",
+ "High 1440",
+ "High",
+ NULL,
+ };
+ static const char * const mpeg2_profile[] = {
+ "Simple",
+ "Main",
+ "SNR Scalable",
+ "Spatially Scalable",
+ "High",
+ NULL,
+ };
+ static const char * const mpeg_mpeg4_level[] = {
+ "0",
+ "0b",
+ "1",
+ "2",
+ "3",
+ "3b",
+ "4",
+ "5",
+ NULL,
+ };
+ static const char * const mpeg4_profile[] = {
+ "Simple",
+ "Advanced Simple",
+ "Core",
+ "Simple Scalable",
+ "Advanced Coding Efficiency",
+ NULL,
+ };
+
+ static const char * const vpx_golden_frame_sel[] = {
+ "Use Previous Frame",
+ "Use Previous Specific Frame",
+ NULL,
+ };
+ static const char * const vp8_profile[] = {
+ "0",
+ "1",
+ "2",
+ "3",
+ NULL,
+ };
+ static const char * const vp9_profile[] = {
+ "0",
+ "1",
+ "2",
+ "3",
+ NULL,
+ };
+ static const char * const vp9_level[] = {
+ "1",
+ "1.1",
+ "2",
+ "2.1",
+ "3",
+ "3.1",
+ "4",
+ "4.1",
+ "5",
+ "5.1",
+ "5.2",
+ "6",
+ "6.1",
+ "6.2",
+ NULL,
+ };
+
+ static const char * const flash_led_mode[] = {
+ "Off",
+ "Flash",
+ "Torch",
+ NULL,
+ };
+ static const char * const flash_strobe_source[] = {
+ "Software",
+ "External",
+ NULL,
+ };
+
+ static const char * const jpeg_chroma_subsampling[] = {
+ "4:4:4",
+ "4:2:2",
+ "4:2:0",
+ "4:1:1",
+ "4:1:0",
+ "Gray",
+ NULL,
+ };
+ static const char * const dv_tx_mode[] = {
+ "DVI-D",
+ "HDMI",
+ NULL,
+ };
+ static const char * const dv_rgb_range[] = {
+ "Automatic",
+ "RGB Limited Range (16-235)",
+ "RGB Full Range (0-255)",
+ NULL,
+ };
+ static const char * const dv_it_content_type[] = {
+ "Graphics",
+ "Photo",
+ "Cinema",
+ "Game",
+ "No IT Content",
+ NULL,
+ };
+ static const char * const detect_md_mode[] = {
+ "Disabled",
+ "Global",
+ "Threshold Grid",
+ "Region Grid",
+ NULL,
+ };
+
+ static const char * const av1_profile[] = {
+ "Main",
+ "High",
+ "Professional",
+ NULL,
+ };
+ static const char * const av1_level[] = {
+ "2.0",
+ "2.1",
+ "2.2",
+ "2.3",
+ "3.0",
+ "3.1",
+ "3.2",
+ "3.3",
+ "4.0",
+ "4.1",
+ "4.2",
+ "4.3",
+ "5.0",
+ "5.1",
+ "5.2",
+ "5.3",
+ "6.0",
+ "6.1",
+ "6.2",
+ "6.3",
+ "7.0",
+ "7.1",
+ "7.2",
+ "7.3",
+ NULL,
+ };
+
+ static const char * const hevc_profile[] = {
+ "Main",
+ "Main Still Picture",
+ "Main 10",
+ NULL,
+ };
+ static const char * const hevc_level[] = {
+ "1",
+ "2",
+ "2.1",
+ "3",
+ "3.1",
+ "4",
+ "4.1",
+ "5",
+ "5.1",
+ "5.2",
+ "6",
+ "6.1",
+ "6.2",
+ NULL,
+ };
+	static const char * const hevc_hierarchical_coding_type[] = {
+ "B",
+ "P",
+ NULL,
+ };
+ static const char * const hevc_refresh_type[] = {
+ "None",
+ "CRA",
+ "IDR",
+ NULL,
+ };
+ static const char * const hevc_size_of_length_field[] = {
+ "0",
+ "1",
+ "2",
+ "4",
+ NULL,
+ };
+ static const char * const hevc_tier[] = {
+ "Main",
+ "High",
+ NULL,
+ };
+ static const char * const hevc_loop_filter_mode[] = {
+ "Disabled",
+ "Enabled",
+		"Disabled at Slice Boundary",
+		NULL,
+ };
+ static const char * const hevc_decode_mode[] = {
+ "Slice-Based",
+ "Frame-Based",
+ NULL,
+ };
+ static const char * const hevc_start_code[] = {
+ "No Start Code",
+ "Annex B Start Code",
+ NULL,
+ };
+ static const char * const camera_orientation[] = {
+ "Front",
+ "Back",
+ "External",
+ NULL,
+ };
+ static const char * const mpeg_video_frame_skip[] = {
+ "Disabled",
+ "Level Limit",
+ "VBV/CPB Limit",
+ NULL,
+ };
+ static const char * const intra_refresh_period_type[] = {
+ "Random",
+ "Cyclic",
+ NULL,
+ };
+
+ switch (id) {
+ case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
+ return mpeg_audio_sampling_freq;
+ case V4L2_CID_MPEG_AUDIO_ENCODING:
+ return mpeg_audio_encoding;
+ case V4L2_CID_MPEG_AUDIO_L1_BITRATE:
+ return mpeg_audio_l1_bitrate;
+ case V4L2_CID_MPEG_AUDIO_L2_BITRATE:
+ return mpeg_audio_l2_bitrate;
+ case V4L2_CID_MPEG_AUDIO_L3_BITRATE:
+ return mpeg_audio_l3_bitrate;
+ case V4L2_CID_MPEG_AUDIO_AC3_BITRATE:
+ return mpeg_audio_ac3_bitrate;
+ case V4L2_CID_MPEG_AUDIO_MODE:
+ return mpeg_audio_mode;
+ case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION:
+ return mpeg_audio_mode_extension;
+ case V4L2_CID_MPEG_AUDIO_EMPHASIS:
+ return mpeg_audio_emphasis;
+ case V4L2_CID_MPEG_AUDIO_CRC:
+ return mpeg_audio_crc;
+ case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK:
+ case V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK:
+ return mpeg_audio_dec_playback;
+ case V4L2_CID_MPEG_VIDEO_ENCODING:
+ return mpeg_video_encoding;
+ case V4L2_CID_MPEG_VIDEO_ASPECT:
+ return mpeg_video_aspect;
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+ return mpeg_video_bitrate_mode;
+ case V4L2_CID_MPEG_STREAM_TYPE:
+ return mpeg_stream_type;
+ case V4L2_CID_MPEG_STREAM_VBI_FMT:
+ return mpeg_stream_vbi_fmt;
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ return camera_power_line_frequency;
+ case V4L2_CID_EXPOSURE_AUTO:
+ return camera_exposure_auto;
+ case V4L2_CID_EXPOSURE_METERING:
+ return camera_exposure_metering;
+ case V4L2_CID_AUTO_FOCUS_RANGE:
+ return camera_auto_focus_range;
+ case V4L2_CID_COLORFX:
+ return colorfx;
+ case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
+ return auto_n_preset_white_balance;
+ case V4L2_CID_ISO_SENSITIVITY_AUTO:
+ return camera_iso_sensitivity_auto;
+ case V4L2_CID_SCENE_MODE:
+ return scene_mode;
+ case V4L2_CID_TUNE_PREEMPHASIS:
+ return tune_emphasis;
+ case V4L2_CID_TUNE_DEEMPHASIS:
+ return tune_emphasis;
+ case V4L2_CID_FLASH_LED_MODE:
+ return flash_led_mode;
+ case V4L2_CID_FLASH_STROBE_SOURCE:
+ return flash_strobe_source;
+ case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
+ return header_mode;
+ case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE:
+ return mpeg_video_frame_skip;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
+ return multi_slice;
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
+ return entropy_mode;
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ return mpeg_h264_level;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
+ return h264_loop_filter;
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ return h264_profile;
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:
+ return vui_sar_idc;
+ case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE:
+ return h264_fp_arrangement_type;
+ case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE:
+ return h264_fmo_map_type;
+ case V4L2_CID_STATELESS_H264_DECODE_MODE:
+ return h264_decode_mode;
+ case V4L2_CID_STATELESS_H264_START_CODE:
+ return h264_start_code;
+ case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE:
+ return h264_hierarchical_coding_type;
+ case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL:
+ return mpeg_mpeg2_level;
+ case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE:
+ return mpeg2_profile;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
+ return mpeg_mpeg4_level;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
+ return mpeg4_profile;
+ case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL:
+ return vpx_golden_frame_sel;
+ case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
+ return vp8_profile;
+ case V4L2_CID_MPEG_VIDEO_VP9_PROFILE:
+ return vp9_profile;
+ case V4L2_CID_MPEG_VIDEO_VP9_LEVEL:
+ return vp9_level;
+ case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
+ return jpeg_chroma_subsampling;
+ case V4L2_CID_DV_TX_MODE:
+ return dv_tx_mode;
+ case V4L2_CID_DV_TX_RGB_RANGE:
+ case V4L2_CID_DV_RX_RGB_RANGE:
+ return dv_rgb_range;
+ case V4L2_CID_DV_TX_IT_CONTENT_TYPE:
+ case V4L2_CID_DV_RX_IT_CONTENT_TYPE:
+ return dv_it_content_type;
+ case V4L2_CID_DETECT_MD_MODE:
+ return detect_md_mode;
+ case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:
+ return hevc_profile;
+ case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL:
+ return hevc_level;
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE:
+		return hevc_hierarchical_coding_type;
+ case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE:
+ return hevc_refresh_type;
+ case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD:
+ return hevc_size_of_length_field;
+ case V4L2_CID_MPEG_VIDEO_HEVC_TIER:
+ return hevc_tier;
+ case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE:
+ return hevc_loop_filter_mode;
+ case V4L2_CID_MPEG_VIDEO_AV1_PROFILE:
+ return av1_profile;
+ case V4L2_CID_MPEG_VIDEO_AV1_LEVEL:
+ return av1_level;
+ case V4L2_CID_STATELESS_HEVC_DECODE_MODE:
+ return hevc_decode_mode;
+ case V4L2_CID_STATELESS_HEVC_START_CODE:
+ return hevc_start_code;
+ case V4L2_CID_CAMERA_ORIENTATION:
+ return camera_orientation;
+ case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE:
+ return intra_refresh_period_type;
+ default:
+ return NULL;
+ }
+}
+EXPORT_SYMBOL(v4l2_ctrl_get_menu);
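+
+/*
+ * A minimal usage sketch: drivers rarely call this directly (the
+ * framework does so in v4l2_ctrl_new_std_menu()), but it can be used
+ * to look up a standard menu string:
+ *
+ *    const char * const *menu =
+ *        v4l2_ctrl_get_menu(V4L2_CID_POWER_LINE_FREQUENCY);
+ *
+ * menu[V4L2_CID_POWER_LINE_FREQUENCY_50HZ] is then "50 Hz".
+ */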
+
+#define __v4l2_qmenu_int_len(arr, len) ({ *(len) = ARRAY_SIZE(arr); (arr); })
+/*
+ * Returns NULL or an s64 array containing the menu for the given
+ * control ID. The total number of menu items is returned in @len.
+ */
+const s64 *v4l2_ctrl_get_int_menu(u32 id, u32 *len)
+{
+ static const s64 qmenu_int_vpx_num_partitions[] = {
+ 1, 2, 4, 8,
+ };
+
+ static const s64 qmenu_int_vpx_num_ref_frames[] = {
+ 1, 2, 3,
+ };
+
+ switch (id) {
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS:
+ return __v4l2_qmenu_int_len(qmenu_int_vpx_num_partitions, len);
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES:
+ return __v4l2_qmenu_int_len(qmenu_int_vpx_num_ref_frames, len);
+ default:
+ *len = 0;
+ return NULL;
+ }
+}
+EXPORT_SYMBOL(v4l2_ctrl_get_int_menu);
+
+/* Return the control name. */
+const char *v4l2_ctrl_get_name(u32 id)
+{
+ switch (id) {
+ /* USER controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_USER_CLASS: return "User Controls";
+ case V4L2_CID_BRIGHTNESS: return "Brightness";
+ case V4L2_CID_CONTRAST: return "Contrast";
+ case V4L2_CID_SATURATION: return "Saturation";
+ case V4L2_CID_HUE: return "Hue";
+ case V4L2_CID_AUDIO_VOLUME: return "Volume";
+ case V4L2_CID_AUDIO_BALANCE: return "Balance";
+ case V4L2_CID_AUDIO_BASS: return "Bass";
+ case V4L2_CID_AUDIO_TREBLE: return "Treble";
+ case V4L2_CID_AUDIO_MUTE: return "Mute";
+ case V4L2_CID_AUDIO_LOUDNESS: return "Loudness";
+ case V4L2_CID_BLACK_LEVEL: return "Black Level";
+ case V4L2_CID_AUTO_WHITE_BALANCE: return "White Balance, Automatic";
+ case V4L2_CID_DO_WHITE_BALANCE: return "Do White Balance";
+ case V4L2_CID_RED_BALANCE: return "Red Balance";
+ case V4L2_CID_BLUE_BALANCE: return "Blue Balance";
+ case V4L2_CID_GAMMA: return "Gamma";
+ case V4L2_CID_EXPOSURE: return "Exposure";
+ case V4L2_CID_AUTOGAIN: return "Gain, Automatic";
+ case V4L2_CID_GAIN: return "Gain";
+ case V4L2_CID_HFLIP: return "Horizontal Flip";
+ case V4L2_CID_VFLIP: return "Vertical Flip";
+ case V4L2_CID_POWER_LINE_FREQUENCY: return "Power Line Frequency";
+ case V4L2_CID_HUE_AUTO: return "Hue, Automatic";
+ case V4L2_CID_WHITE_BALANCE_TEMPERATURE: return "White Balance Temperature";
+ case V4L2_CID_SHARPNESS: return "Sharpness";
+ case V4L2_CID_BACKLIGHT_COMPENSATION: return "Backlight Compensation";
+ case V4L2_CID_CHROMA_AGC: return "Chroma AGC";
+ case V4L2_CID_COLOR_KILLER: return "Color Killer";
+ case V4L2_CID_COLORFX: return "Color Effects";
+ case V4L2_CID_AUTOBRIGHTNESS: return "Brightness, Automatic";
+ case V4L2_CID_BAND_STOP_FILTER: return "Band-Stop Filter";
+ case V4L2_CID_ROTATE: return "Rotate";
+ case V4L2_CID_BG_COLOR: return "Background Color";
+ case V4L2_CID_CHROMA_GAIN: return "Chroma Gain";
+ case V4L2_CID_ILLUMINATORS_1: return "Illuminator 1";
+ case V4L2_CID_ILLUMINATORS_2: return "Illuminator 2";
+ case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: return "Min Number of Capture Buffers";
+ case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: return "Min Number of Output Buffers";
+ case V4L2_CID_ALPHA_COMPONENT: return "Alpha Component";
+ case V4L2_CID_COLORFX_CBCR: return "Color Effects, CbCr";
+ case V4L2_CID_COLORFX_RGB: return "Color Effects, RGB";
+
+ /*
+ * Codec controls
+ *
+	 * The MPEG controls apply to all codecs; the 'MPEG' part of
+	 * the define is historical.
+ *
+ * Keep the order of the 'case's the same as in videodev2.h!
+ */
+ case V4L2_CID_CODEC_CLASS: return "Codec Controls";
+ case V4L2_CID_MPEG_STREAM_TYPE: return "Stream Type";
+ case V4L2_CID_MPEG_STREAM_PID_PMT: return "Stream PMT Program ID";
+ case V4L2_CID_MPEG_STREAM_PID_AUDIO: return "Stream Audio Program ID";
+ case V4L2_CID_MPEG_STREAM_PID_VIDEO: return "Stream Video Program ID";
+ case V4L2_CID_MPEG_STREAM_PID_PCR: return "Stream PCR Program ID";
+ case V4L2_CID_MPEG_STREAM_PES_ID_AUDIO: return "Stream PES Audio ID";
+ case V4L2_CID_MPEG_STREAM_PES_ID_VIDEO: return "Stream PES Video ID";
+ case V4L2_CID_MPEG_STREAM_VBI_FMT: return "Stream VBI Format";
+ case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: return "Audio Sampling Frequency";
+ case V4L2_CID_MPEG_AUDIO_ENCODING: return "Audio Encoding";
+ case V4L2_CID_MPEG_AUDIO_L1_BITRATE: return "Audio Layer I Bitrate";
+ case V4L2_CID_MPEG_AUDIO_L2_BITRATE: return "Audio Layer II Bitrate";
+ case V4L2_CID_MPEG_AUDIO_L3_BITRATE: return "Audio Layer III Bitrate";
+ case V4L2_CID_MPEG_AUDIO_MODE: return "Audio Stereo Mode";
+ case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION: return "Audio Stereo Mode Extension";
+ case V4L2_CID_MPEG_AUDIO_EMPHASIS: return "Audio Emphasis";
+ case V4L2_CID_MPEG_AUDIO_CRC: return "Audio CRC";
+ case V4L2_CID_MPEG_AUDIO_MUTE: return "Audio Mute";
+ case V4L2_CID_MPEG_AUDIO_AAC_BITRATE: return "Audio AAC Bitrate";
+ case V4L2_CID_MPEG_AUDIO_AC3_BITRATE: return "Audio AC-3 Bitrate";
+ case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK: return "Audio Playback";
+ case V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK: return "Audio Multilingual Playback";
+ case V4L2_CID_MPEG_VIDEO_ENCODING: return "Video Encoding";
+ case V4L2_CID_MPEG_VIDEO_ASPECT: return "Video Aspect";
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES: return "Video B Frames";
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE: return "Video GOP Size";
+ case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: return "Video GOP Closure";
+ case V4L2_CID_MPEG_VIDEO_PULLDOWN: return "Video Pulldown";
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: return "Video Bitrate Mode";
+ case V4L2_CID_MPEG_VIDEO_CONSTANT_QUALITY: return "Constant Quality";
+ case V4L2_CID_MPEG_VIDEO_BITRATE: return "Video Bitrate";
+ case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK: return "Video Peak Bitrate";
+ case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: return "Video Temporal Decimation";
+ case V4L2_CID_MPEG_VIDEO_MUTE: return "Video Mute";
+ case V4L2_CID_MPEG_VIDEO_MUTE_YUV: return "Video Mute YUV";
+ case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE: return "Decoder Slice Interface";
+ case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER: return "MPEG4 Loop Filter Enable";
+ case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB: return "Number of Intra Refresh MBs";
+ case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE: return "Intra Refresh Period Type";
+ case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD: return "Intra Refresh Period";
+ case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE: return "Frame Level Rate Control Enable";
+ case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE: return "H264 MB Level Rate Control";
+ case V4L2_CID_MPEG_VIDEO_HEADER_MODE: return "Sequence Header Mode";
+ case V4L2_CID_MPEG_VIDEO_MAX_REF_PIC: return "Max Number of Reference Pics";
+ case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE: return "Frame Skip Mode";
+ case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY: return "Display Delay";
+ case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE: return "Display Delay Enable";
+ case V4L2_CID_MPEG_VIDEO_AU_DELIMITER: return "Generate Access Unit Delimiters";
+ case V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP: return "H263 I-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP: return "H263 P-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP: return "H263 B-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H263_MIN_QP: return "H263 Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H263_MAX_QP: return "H263 Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP: return "H264 I-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP: return "H264 P-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP: return "H264 B-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_MAX_QP: return "H264 Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_MIN_QP: return "H264 Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM: return "H264 8x8 Transform Enable";
+ case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE: return "H264 CPB Buffer Size";
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: return "H264 Entropy Mode";
+ case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD: return "H264 I-Frame Period";
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL: return "H264 Level";
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA: return "H264 Loop Filter Alpha Offset";
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA: return "H264 Loop Filter Beta Offset";
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE: return "H264 Loop Filter Mode";
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE: return "H264 Profile";
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT: return "Vertical Size of SAR";
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH: return "Horizontal Size of SAR";
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE: return "Aspect Ratio VUI Enable";
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC: return "VUI Aspect Ratio IDC";
+ case V4L2_CID_MPEG_VIDEO_H264_SEI_FRAME_PACKING: return "H264 Enable Frame Packing SEI";
+ case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_CURRENT_FRAME_0: return "H264 Set Curr. Frame as Frame0";
+ case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE: return "H264 FP Arrangement Type";
+ case V4L2_CID_MPEG_VIDEO_H264_FMO: return "H264 Flexible MB Ordering";
+ case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE: return "H264 Map Type for FMO";
+ case V4L2_CID_MPEG_VIDEO_H264_FMO_SLICE_GROUP: return "H264 FMO Number of Slice Groups";
+ case V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_DIRECTION: return "H264 FMO Direction of Change";
+ case V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_RATE: return "H264 FMO Size of 1st Slice Grp";
+ case V4L2_CID_MPEG_VIDEO_H264_FMO_RUN_LENGTH: return "H264 FMO No. of Consecutive MBs";
+ case V4L2_CID_MPEG_VIDEO_H264_ASO: return "H264 Arbitrary Slice Ordering";
+ case V4L2_CID_MPEG_VIDEO_H264_ASO_SLICE_ORDER: return "H264 ASO Slice Order";
+ case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING: return "Enable H264 Hierarchical Coding";
+ case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE: return "H264 Hierarchical Coding Type";
+	case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER: return "H264 Number of HC Layers";
+ case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER_QP:
+ return "H264 Set QP Value for HC Layers";
+ case V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION:
+ return "H264 Constrained Intra Pred";
+ case V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET: return "H264 Chroma QP Index Offset";
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MIN_QP: return "H264 I-Frame Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MAX_QP: return "H264 I-Frame Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MIN_QP: return "H264 P-Frame Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MAX_QP: return "H264 P-Frame Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MIN_QP: return "H264 B-Frame Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MAX_QP: return "H264 B-Frame Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L0_BR: return "H264 Hierarchical Lay 0 Bitrate";
+ case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L1_BR: return "H264 Hierarchical Lay 1 Bitrate";
+ case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L2_BR: return "H264 Hierarchical Lay 2 Bitrate";
+ case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L3_BR: return "H264 Hierarchical Lay 3 Bitrate";
+ case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L4_BR: return "H264 Hierarchical Lay 4 Bitrate";
+ case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L5_BR: return "H264 Hierarchical Lay 5 Bitrate";
+ case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L6_BR: return "H264 Hierarchical Lay 6 Bitrate";
+ case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL: return "MPEG2 Level";
+ case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE: return "MPEG2 Profile";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP: return "MPEG4 I-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP: return "MPEG4 P-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP: return "MPEG4 B-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP: return "MPEG4 Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP: return "MPEG4 Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: return "MPEG4 Level";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: return "MPEG4 Profile";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL: return "Quarter Pixel Search Enable";
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES: return "Maximum Bytes in a Slice";
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB: return "Number of MBs in a Slice";
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: return "Slice Partitioning Method";
+ case V4L2_CID_MPEG_VIDEO_VBV_SIZE: return "VBV Buffer Size";
+ case V4L2_CID_MPEG_VIDEO_DEC_PTS: return "Video Decoder PTS";
+ case V4L2_CID_MPEG_VIDEO_DEC_FRAME: return "Video Decoder Frame Count";
+ case V4L2_CID_MPEG_VIDEO_DEC_CONCEAL_COLOR: return "Video Decoder Conceal Color";
+ case V4L2_CID_MPEG_VIDEO_VBV_DELAY: return "Initial Delay for VBV Control";
+ case V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE: return "Horizontal MV Search Range";
+ case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE: return "Vertical MV Search Range";
+ case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER: return "Repeat Sequence Header";
+ case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME: return "Force Key Frame";
+ case V4L2_CID_MPEG_VIDEO_BASELAYER_PRIORITY_ID: return "Base Layer Priority ID";
+ case V4L2_CID_MPEG_VIDEO_LTR_COUNT: return "LTR Count";
+ case V4L2_CID_MPEG_VIDEO_FRAME_LTR_INDEX: return "Frame LTR Index";
+ case V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES: return "Use LTR Frames";
+ case V4L2_CID_FWHT_I_FRAME_QP: return "FWHT I-Frame QP Value";
+ case V4L2_CID_FWHT_P_FRAME_QP: return "FWHT P-Frame QP Value";
+
+ /* VPX controls */
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS: return "VPX Number of Partitions";
+ case V4L2_CID_MPEG_VIDEO_VPX_IMD_DISABLE_4X4: return "VPX Intra Mode Decision Disable";
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES: return "VPX No. of Refs for P Frame";
+ case V4L2_CID_MPEG_VIDEO_VPX_FILTER_LEVEL: return "VPX Loop Filter Level Range";
+ case V4L2_CID_MPEG_VIDEO_VPX_FILTER_SHARPNESS: return "VPX Deblocking Effect Control";
+ case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD: return "VPX Golden Frame Refresh Period";
+ case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL: return "VPX Golden Frame Indicator";
+ case V4L2_CID_MPEG_VIDEO_VPX_MIN_QP: return "VPX Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_VPX_MAX_QP: return "VPX Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP: return "VPX I-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP: return "VPX P-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_VP8_PROFILE: return "VP8 Profile";
+ case V4L2_CID_MPEG_VIDEO_VP9_PROFILE: return "VP9 Profile";
+ case V4L2_CID_MPEG_VIDEO_VP9_LEVEL: return "VP9 Level";
+
+ /* HEVC controls */
+ case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP: return "HEVC I-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP: return "HEVC P-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP: return "HEVC B-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP: return "HEVC Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP: return "HEVC Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MIN_QP: return "HEVC I-Frame Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MAX_QP: return "HEVC I-Frame Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MIN_QP: return "HEVC P-Frame Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MAX_QP: return "HEVC P-Frame Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MIN_QP: return "HEVC B-Frame Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MAX_QP: return "HEVC B-Frame Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE: return "HEVC Profile";
+ case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL: return "HEVC Level";
+ case V4L2_CID_MPEG_VIDEO_HEVC_TIER: return "HEVC Tier";
+ case V4L2_CID_MPEG_VIDEO_HEVC_FRAME_RATE_RESOLUTION: return "HEVC Frame Rate Resolution";
+ case V4L2_CID_MPEG_VIDEO_HEVC_MAX_PARTITION_DEPTH: return "HEVC Maximum Coding Unit Depth";
+ case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE: return "HEVC Refresh Type";
+ case V4L2_CID_MPEG_VIDEO_HEVC_CONST_INTRA_PRED: return "HEVC Constant Intra Prediction";
+ case V4L2_CID_MPEG_VIDEO_HEVC_LOSSLESS_CU: return "HEVC Lossless Encoding";
+ case V4L2_CID_MPEG_VIDEO_HEVC_WAVEFRONT: return "HEVC Wavefront";
+ case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE: return "HEVC Loop Filter";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_QP: return "HEVC QP Values";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE: return "HEVC Hierarchical Coding Type";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER: return "HEVC Hierarchical Coding Layer";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_QP: return "HEVC Hierarchical Layer 0 QP";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_QP: return "HEVC Hierarchical Layer 1 QP";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_QP: return "HEVC Hierarchical Layer 2 QP";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_QP: return "HEVC Hierarchical Layer 3 QP";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_QP: return "HEVC Hierarchical Layer 4 QP";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_QP: return "HEVC Hierarchical Layer 5 QP";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_QP: return "HEVC Hierarchical Layer 6 QP";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR: return "HEVC Hierarchical Lay 0 BitRate";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR: return "HEVC Hierarchical Lay 1 BitRate";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR: return "HEVC Hierarchical Lay 2 BitRate";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR: return "HEVC Hierarchical Lay 3 BitRate";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR: return "HEVC Hierarchical Lay 4 BitRate";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR: return "HEVC Hierarchical Lay 5 BitRate";
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_BR: return "HEVC Hierarchical Lay 6 BitRate";
+ case V4L2_CID_MPEG_VIDEO_HEVC_GENERAL_PB: return "HEVC General PB";
+ case V4L2_CID_MPEG_VIDEO_HEVC_TEMPORAL_ID: return "HEVC Temporal ID";
+ case V4L2_CID_MPEG_VIDEO_HEVC_STRONG_SMOOTHING: return "HEVC Strong Intra Smoothing";
+ case V4L2_CID_MPEG_VIDEO_HEVC_INTRA_PU_SPLIT: return "HEVC Intra PU Split";
+ case V4L2_CID_MPEG_VIDEO_HEVC_TMV_PREDICTION: return "HEVC TMV Prediction";
+ case V4L2_CID_MPEG_VIDEO_HEVC_MAX_NUM_MERGE_MV_MINUS1: return "HEVC Max Num of Candidate MVs";
+ case V4L2_CID_MPEG_VIDEO_HEVC_WITHOUT_STARTCODE: return "HEVC ENC Without Startcode";
+ case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_PERIOD: return "HEVC Num of I-Frame b/w 2 IDR";
+ case V4L2_CID_MPEG_VIDEO_HEVC_LF_BETA_OFFSET_DIV2: return "HEVC Loop Filter Beta Offset";
+ case V4L2_CID_MPEG_VIDEO_HEVC_LF_TC_OFFSET_DIV2: return "HEVC Loop Filter TC Offset";
+ case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD: return "HEVC Size of Length Field";
+ case V4L2_CID_MPEG_VIDEO_REF_NUMBER_FOR_PFRAMES: return "Reference Frames for a P-Frame";
+ case V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR: return "Prepend SPS and PPS to IDR";
+
+ /* AV1 controls */
+ case V4L2_CID_MPEG_VIDEO_AV1_PROFILE: return "AV1 Profile";
+ case V4L2_CID_MPEG_VIDEO_AV1_LEVEL: return "AV1 Level";
+
+ /* CAMERA controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_CAMERA_CLASS: return "Camera Controls";
+ case V4L2_CID_EXPOSURE_AUTO: return "Auto Exposure";
+ case V4L2_CID_EXPOSURE_ABSOLUTE: return "Exposure Time, Absolute";
+ case V4L2_CID_EXPOSURE_AUTO_PRIORITY: return "Exposure, Dynamic Framerate";
+ case V4L2_CID_PAN_RELATIVE: return "Pan, Relative";
+ case V4L2_CID_TILT_RELATIVE: return "Tilt, Relative";
+ case V4L2_CID_PAN_RESET: return "Pan, Reset";
+ case V4L2_CID_TILT_RESET: return "Tilt, Reset";
+ case V4L2_CID_PAN_ABSOLUTE: return "Pan, Absolute";
+ case V4L2_CID_TILT_ABSOLUTE: return "Tilt, Absolute";
+ case V4L2_CID_FOCUS_ABSOLUTE: return "Focus, Absolute";
+ case V4L2_CID_FOCUS_RELATIVE: return "Focus, Relative";
+ case V4L2_CID_FOCUS_AUTO: return "Focus, Automatic Continuous";
+ case V4L2_CID_ZOOM_ABSOLUTE: return "Zoom, Absolute";
+ case V4L2_CID_ZOOM_RELATIVE: return "Zoom, Relative";
+ case V4L2_CID_ZOOM_CONTINUOUS: return "Zoom, Continuous";
+ case V4L2_CID_PRIVACY: return "Privacy";
+ case V4L2_CID_IRIS_ABSOLUTE: return "Iris, Absolute";
+ case V4L2_CID_IRIS_RELATIVE: return "Iris, Relative";
+ case V4L2_CID_AUTO_EXPOSURE_BIAS: return "Auto Exposure, Bias";
+ case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE: return "White Balance, Auto & Preset";
+ case V4L2_CID_WIDE_DYNAMIC_RANGE: return "Wide Dynamic Range";
+ case V4L2_CID_IMAGE_STABILIZATION: return "Image Stabilization";
+ case V4L2_CID_ISO_SENSITIVITY: return "ISO Sensitivity";
+ case V4L2_CID_ISO_SENSITIVITY_AUTO: return "ISO Sensitivity, Auto";
+ case V4L2_CID_EXPOSURE_METERING: return "Exposure, Metering Mode";
+ case V4L2_CID_SCENE_MODE: return "Scene Mode";
+ case V4L2_CID_3A_LOCK: return "3A Lock";
+ case V4L2_CID_AUTO_FOCUS_START: return "Auto Focus, Start";
+ case V4L2_CID_AUTO_FOCUS_STOP: return "Auto Focus, Stop";
+ case V4L2_CID_AUTO_FOCUS_STATUS: return "Auto Focus, Status";
+ case V4L2_CID_AUTO_FOCUS_RANGE: return "Auto Focus, Range";
+ case V4L2_CID_PAN_SPEED: return "Pan, Speed";
+ case V4L2_CID_TILT_SPEED: return "Tilt, Speed";
+ case V4L2_CID_UNIT_CELL_SIZE: return "Unit Cell Size";
+ case V4L2_CID_CAMERA_ORIENTATION: return "Camera Orientation";
+ case V4L2_CID_CAMERA_SENSOR_ROTATION: return "Camera Sensor Rotation";
+ case V4L2_CID_HDR_SENSOR_MODE: return "HDR Sensor Mode";
+
+ /* FM Radio Modulator controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_FM_TX_CLASS: return "FM Radio Modulator Controls";
+ case V4L2_CID_RDS_TX_DEVIATION: return "RDS Signal Deviation";
+ case V4L2_CID_RDS_TX_PI: return "RDS Program ID";
+ case V4L2_CID_RDS_TX_PTY: return "RDS Program Type";
+ case V4L2_CID_RDS_TX_PS_NAME: return "RDS PS Name";
+ case V4L2_CID_RDS_TX_RADIO_TEXT: return "RDS Radio Text";
+ case V4L2_CID_RDS_TX_MONO_STEREO: return "RDS Stereo";
+ case V4L2_CID_RDS_TX_ARTIFICIAL_HEAD: return "RDS Artificial Head";
+ case V4L2_CID_RDS_TX_COMPRESSED: return "RDS Compressed";
+ case V4L2_CID_RDS_TX_DYNAMIC_PTY: return "RDS Dynamic PTY";
+ case V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT: return "RDS Traffic Announcement";
+ case V4L2_CID_RDS_TX_TRAFFIC_PROGRAM: return "RDS Traffic Program";
+ case V4L2_CID_RDS_TX_MUSIC_SPEECH: return "RDS Music";
+ case V4L2_CID_RDS_TX_ALT_FREQS_ENABLE: return "RDS Enable Alt Frequencies";
+ case V4L2_CID_RDS_TX_ALT_FREQS: return "RDS Alternate Frequencies";
+ case V4L2_CID_AUDIO_LIMITER_ENABLED: return "Audio Limiter Feature Enabled";
+ case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME: return "Audio Limiter Release Time";
+ case V4L2_CID_AUDIO_LIMITER_DEVIATION: return "Audio Limiter Deviation";
+ case V4L2_CID_AUDIO_COMPRESSION_ENABLED: return "Audio Compression Enabled";
+ case V4L2_CID_AUDIO_COMPRESSION_GAIN: return "Audio Compression Gain";
+ case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD: return "Audio Compression Threshold";
+ case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME: return "Audio Compression Attack Time";
+ case V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME: return "Audio Compression Release Time";
+ case V4L2_CID_PILOT_TONE_ENABLED: return "Pilot Tone Feature Enabled";
+ case V4L2_CID_PILOT_TONE_DEVIATION: return "Pilot Tone Deviation";
+ case V4L2_CID_PILOT_TONE_FREQUENCY: return "Pilot Tone Frequency";
+ case V4L2_CID_TUNE_PREEMPHASIS: return "Pre-Emphasis";
+ case V4L2_CID_TUNE_POWER_LEVEL: return "Tune Power Level";
+ case V4L2_CID_TUNE_ANTENNA_CAPACITOR: return "Tune Antenna Capacitor";
+
+ /* Flash controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_FLASH_CLASS: return "Flash Controls";
+ case V4L2_CID_FLASH_LED_MODE: return "LED Mode";
+ case V4L2_CID_FLASH_STROBE_SOURCE: return "Strobe Source";
+ case V4L2_CID_FLASH_STROBE: return "Strobe";
+ case V4L2_CID_FLASH_STROBE_STOP: return "Stop Strobe";
+ case V4L2_CID_FLASH_STROBE_STATUS: return "Strobe Status";
+ case V4L2_CID_FLASH_TIMEOUT: return "Strobe Timeout";
+ case V4L2_CID_FLASH_INTENSITY: return "Intensity, Flash Mode";
+ case V4L2_CID_FLASH_TORCH_INTENSITY: return "Intensity, Torch Mode";
+ case V4L2_CID_FLASH_INDICATOR_INTENSITY: return "Intensity, Indicator";
+ case V4L2_CID_FLASH_FAULT: return "Faults";
+ case V4L2_CID_FLASH_CHARGE: return "Charge";
+ case V4L2_CID_FLASH_READY: return "Ready to Strobe";
+
+ /* JPEG encoder controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_JPEG_CLASS: return "JPEG Compression Controls";
+ case V4L2_CID_JPEG_CHROMA_SUBSAMPLING: return "Chroma Subsampling";
+ case V4L2_CID_JPEG_RESTART_INTERVAL: return "Restart Interval";
+ case V4L2_CID_JPEG_COMPRESSION_QUALITY: return "Compression Quality";
+ case V4L2_CID_JPEG_ACTIVE_MARKER: return "Active Markers";
+
+ /* Image source controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_IMAGE_SOURCE_CLASS: return "Image Source Controls";
+ case V4L2_CID_VBLANK: return "Vertical Blanking";
+ case V4L2_CID_HBLANK: return "Horizontal Blanking";
+ case V4L2_CID_ANALOGUE_GAIN: return "Analogue Gain";
+ case V4L2_CID_TEST_PATTERN_RED: return "Red Pixel Value";
+ case V4L2_CID_TEST_PATTERN_GREENR: return "Green (Red) Pixel Value";
+ case V4L2_CID_TEST_PATTERN_BLUE: return "Blue Pixel Value";
+ case V4L2_CID_TEST_PATTERN_GREENB: return "Green (Blue) Pixel Value";
+ case V4L2_CID_NOTIFY_GAINS: return "Notify Gains";
+
+ /* Image processing controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_IMAGE_PROC_CLASS: return "Image Processing Controls";
+ case V4L2_CID_LINK_FREQ: return "Link Frequency";
+ case V4L2_CID_PIXEL_RATE: return "Pixel Rate";
+ case V4L2_CID_TEST_PATTERN: return "Test Pattern";
+ case V4L2_CID_DEINTERLACING_MODE: return "Deinterlacing Mode";
+ case V4L2_CID_DIGITAL_GAIN: return "Digital Gain";
+
+ /* DV controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_DV_CLASS: return "Digital Video Controls";
+ case V4L2_CID_DV_TX_HOTPLUG: return "Hotplug Present";
+ case V4L2_CID_DV_TX_RXSENSE: return "RxSense Present";
+ case V4L2_CID_DV_TX_EDID_PRESENT: return "EDID Present";
+ case V4L2_CID_DV_TX_MODE: return "Transmit Mode";
+ case V4L2_CID_DV_TX_RGB_RANGE: return "Tx RGB Quantization Range";
+ case V4L2_CID_DV_TX_IT_CONTENT_TYPE: return "Tx IT Content Type";
+ case V4L2_CID_DV_RX_POWER_PRESENT: return "Power Present";
+ case V4L2_CID_DV_RX_RGB_RANGE: return "Rx RGB Quantization Range";
+ case V4L2_CID_DV_RX_IT_CONTENT_TYPE: return "Rx IT Content Type";
+
+ case V4L2_CID_FM_RX_CLASS: return "FM Radio Receiver Controls";
+ case V4L2_CID_TUNE_DEEMPHASIS: return "De-Emphasis";
+ case V4L2_CID_RDS_RECEPTION: return "RDS Reception";
+ case V4L2_CID_RF_TUNER_CLASS: return "RF Tuner Controls";
+ case V4L2_CID_RF_TUNER_RF_GAIN: return "RF Gain";
+ case V4L2_CID_RF_TUNER_LNA_GAIN_AUTO: return "LNA Gain, Auto";
+ case V4L2_CID_RF_TUNER_LNA_GAIN: return "LNA Gain";
+ case V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO: return "Mixer Gain, Auto";
+ case V4L2_CID_RF_TUNER_MIXER_GAIN: return "Mixer Gain";
+ case V4L2_CID_RF_TUNER_IF_GAIN_AUTO: return "IF Gain, Auto";
+ case V4L2_CID_RF_TUNER_IF_GAIN: return "IF Gain";
+ case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO: return "Bandwidth, Auto";
+ case V4L2_CID_RF_TUNER_BANDWIDTH: return "Bandwidth";
+ case V4L2_CID_RF_TUNER_PLL_LOCK: return "PLL Lock";
+ case V4L2_CID_RDS_RX_PTY: return "RDS Program Type";
+ case V4L2_CID_RDS_RX_PS_NAME: return "RDS PS Name";
+ case V4L2_CID_RDS_RX_RADIO_TEXT: return "RDS Radio Text";
+ case V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT: return "RDS Traffic Announcement";
+ case V4L2_CID_RDS_RX_TRAFFIC_PROGRAM: return "RDS Traffic Program";
+ case V4L2_CID_RDS_RX_MUSIC_SPEECH: return "RDS Music";
+
+ /* Detection controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_DETECT_CLASS: return "Detection Controls";
+ case V4L2_CID_DETECT_MD_MODE: return "Motion Detection Mode";
+ case V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD: return "MD Global Threshold";
+ case V4L2_CID_DETECT_MD_THRESHOLD_GRID: return "MD Threshold Grid";
+ case V4L2_CID_DETECT_MD_REGION_GRID: return "MD Region Grid";
+
+ /* Stateless Codec controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_CODEC_STATELESS_CLASS: return "Stateless Codec Controls";
+ case V4L2_CID_STATELESS_H264_DECODE_MODE: return "H264 Decode Mode";
+ case V4L2_CID_STATELESS_H264_START_CODE: return "H264 Start Code";
+ case V4L2_CID_STATELESS_H264_SPS: return "H264 Sequence Parameter Set";
+ case V4L2_CID_STATELESS_H264_PPS: return "H264 Picture Parameter Set";
+ case V4L2_CID_STATELESS_H264_SCALING_MATRIX: return "H264 Scaling Matrix";
+ case V4L2_CID_STATELESS_H264_PRED_WEIGHTS: return "H264 Prediction Weight Table";
+ case V4L2_CID_STATELESS_H264_SLICE_PARAMS: return "H264 Slice Parameters";
+ case V4L2_CID_STATELESS_H264_DECODE_PARAMS: return "H264 Decode Parameters";
+ case V4L2_CID_STATELESS_FWHT_PARAMS: return "FWHT Stateless Parameters";
+ case V4L2_CID_STATELESS_VP8_FRAME: return "VP8 Frame Parameters";
+ case V4L2_CID_STATELESS_MPEG2_SEQUENCE: return "MPEG-2 Sequence Header";
+ case V4L2_CID_STATELESS_MPEG2_PICTURE: return "MPEG-2 Picture Header";
+ case V4L2_CID_STATELESS_MPEG2_QUANTISATION: return "MPEG-2 Quantisation Matrices";
+ case V4L2_CID_STATELESS_VP9_COMPRESSED_HDR: return "VP9 Probabilities Updates";
+ case V4L2_CID_STATELESS_VP9_FRAME: return "VP9 Frame Decode Parameters";
+ case V4L2_CID_STATELESS_HEVC_SPS: return "HEVC Sequence Parameter Set";
+ case V4L2_CID_STATELESS_HEVC_PPS: return "HEVC Picture Parameter Set";
+ case V4L2_CID_STATELESS_HEVC_SLICE_PARAMS: return "HEVC Slice Parameters";
+ case V4L2_CID_STATELESS_HEVC_SCALING_MATRIX: return "HEVC Scaling Matrix";
+ case V4L2_CID_STATELESS_HEVC_DECODE_PARAMS: return "HEVC Decode Parameters";
+ case V4L2_CID_STATELESS_HEVC_DECODE_MODE: return "HEVC Decode Mode";
+ case V4L2_CID_STATELESS_HEVC_START_CODE: return "HEVC Start Code";
+ case V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS: return "HEVC Entry Point Offsets";
+ case V4L2_CID_STATELESS_AV1_SEQUENCE: return "AV1 Sequence Parameters";
+ case V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY: return "AV1 Tile Group Entry";
+ case V4L2_CID_STATELESS_AV1_FRAME: return "AV1 Frame Parameters";
+ case V4L2_CID_STATELESS_AV1_FILM_GRAIN: return "AV1 Film Grain";
+
+ /* Colorimetry controls */
+ /* Keep the order of the 'case's the same as in v4l2-controls.h! */
+ case V4L2_CID_COLORIMETRY_CLASS: return "Colorimetry Controls";
+ case V4L2_CID_COLORIMETRY_HDR10_CLL_INFO: return "HDR10 Content Light Info";
+ case V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY: return "HDR10 Mastering Display";
+ default:
+ return NULL;
+ }
+}
+EXPORT_SYMBOL(v4l2_ctrl_get_name);
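+
+/*
+ * A minimal usage sketch: map a control ID to its name, e.g. for
+ * driver debug output:
+ *
+ *    pr_debug("changed: %s\n", v4l2_ctrl_get_name(V4L2_CID_BRIGHTNESS));
+ */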
+
+void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
+ s64 *min, s64 *max, u64 *step, s64 *def, u32 *flags)
+{
+ *name = v4l2_ctrl_get_name(id);
+ *flags = 0;
+
+ switch (id) {
+ case V4L2_CID_AUDIO_MUTE:
+ case V4L2_CID_AUDIO_LOUDNESS:
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ case V4L2_CID_AUTOGAIN:
+ case V4L2_CID_HFLIP:
+ case V4L2_CID_VFLIP:
+ case V4L2_CID_HUE_AUTO:
+ case V4L2_CID_CHROMA_AGC:
+ case V4L2_CID_COLOR_KILLER:
+ case V4L2_CID_AUTOBRIGHTNESS:
+ case V4L2_CID_MPEG_AUDIO_MUTE:
+ case V4L2_CID_MPEG_VIDEO_MUTE:
+ case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE:
+ case V4L2_CID_MPEG_VIDEO_PULLDOWN:
+ case V4L2_CID_EXPOSURE_AUTO_PRIORITY:
+ case V4L2_CID_FOCUS_AUTO:
+ case V4L2_CID_PRIVACY:
+ case V4L2_CID_AUDIO_LIMITER_ENABLED:
+ case V4L2_CID_AUDIO_COMPRESSION_ENABLED:
+ case V4L2_CID_PILOT_TONE_ENABLED:
+ case V4L2_CID_ILLUMINATORS_1:
+ case V4L2_CID_ILLUMINATORS_2:
+ case V4L2_CID_FLASH_STROBE_STATUS:
+ case V4L2_CID_FLASH_CHARGE:
+ case V4L2_CID_FLASH_READY:
+ case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:
+ case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE:
+ case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE:
+ case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:
+ case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE:
+ case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM:
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE:
+ case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL:
+ case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER:
+ case V4L2_CID_MPEG_VIDEO_AU_DELIMITER:
+ case V4L2_CID_WIDE_DYNAMIC_RANGE:
+ case V4L2_CID_IMAGE_STABILIZATION:
+ case V4L2_CID_RDS_RECEPTION:
+ case V4L2_CID_RF_TUNER_LNA_GAIN_AUTO:
+ case V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO:
+ case V4L2_CID_RF_TUNER_IF_GAIN_AUTO:
+ case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO:
+ case V4L2_CID_RF_TUNER_PLL_LOCK:
+ case V4L2_CID_RDS_TX_MONO_STEREO:
+ case V4L2_CID_RDS_TX_ARTIFICIAL_HEAD:
+ case V4L2_CID_RDS_TX_COMPRESSED:
+ case V4L2_CID_RDS_TX_DYNAMIC_PTY:
+ case V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT:
+ case V4L2_CID_RDS_TX_TRAFFIC_PROGRAM:
+ case V4L2_CID_RDS_TX_MUSIC_SPEECH:
+ case V4L2_CID_RDS_TX_ALT_FREQS_ENABLE:
+ case V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT:
+ case V4L2_CID_RDS_RX_TRAFFIC_PROGRAM:
+ case V4L2_CID_RDS_RX_MUSIC_SPEECH:
+ *type = V4L2_CTRL_TYPE_BOOLEAN;
+ *min = 0;
+ *max = *step = 1;
+ break;
+ case V4L2_CID_ROTATE:
+ *type = V4L2_CTRL_TYPE_INTEGER;
+ *flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+ break;
+	case V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE:
+	case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE:
+	case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY:
+	case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD:
+	case V4L2_CID_MPEG_VIDEO_LTR_COUNT:
+		*type = V4L2_CTRL_TYPE_INTEGER;
+		break;
+ case V4L2_CID_MPEG_VIDEO_FRAME_LTR_INDEX:
+ *type = V4L2_CTRL_TYPE_INTEGER;
+ *flags |= V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
+ break;
+ case V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES:
+ *type = V4L2_CTRL_TYPE_BITMASK;
+ *flags |= V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
+ break;
+ case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME:
+ case V4L2_CID_PAN_RESET:
+ case V4L2_CID_TILT_RESET:
+ case V4L2_CID_FLASH_STROBE:
+ case V4L2_CID_FLASH_STROBE_STOP:
+ case V4L2_CID_AUTO_FOCUS_START:
+ case V4L2_CID_AUTO_FOCUS_STOP:
+ case V4L2_CID_DO_WHITE_BALANCE:
+ *type = V4L2_CTRL_TYPE_BUTTON;
+ *flags |= V4L2_CTRL_FLAG_WRITE_ONLY |
+ V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
+ *min = *max = *step = *def = 0;
+ break;
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
+ case V4L2_CID_MPEG_AUDIO_ENCODING:
+ case V4L2_CID_MPEG_AUDIO_L1_BITRATE:
+ case V4L2_CID_MPEG_AUDIO_L2_BITRATE:
+ case V4L2_CID_MPEG_AUDIO_L3_BITRATE:
+ case V4L2_CID_MPEG_AUDIO_AC3_BITRATE:
+ case V4L2_CID_MPEG_AUDIO_MODE:
+ case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION:
+ case V4L2_CID_MPEG_AUDIO_EMPHASIS:
+ case V4L2_CID_MPEG_AUDIO_CRC:
+ case V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK:
+ case V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK:
+ case V4L2_CID_MPEG_VIDEO_ENCODING:
+ case V4L2_CID_MPEG_VIDEO_ASPECT:
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+ case V4L2_CID_MPEG_STREAM_TYPE:
+ case V4L2_CID_MPEG_STREAM_VBI_FMT:
+ case V4L2_CID_EXPOSURE_AUTO:
+ case V4L2_CID_AUTO_FOCUS_RANGE:
+ case V4L2_CID_COLORFX:
+ case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
+ case V4L2_CID_TUNE_PREEMPHASIS:
+ case V4L2_CID_FLASH_LED_MODE:
+ case V4L2_CID_FLASH_STROBE_SOURCE:
+ case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
+ case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE:
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:
+ case V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE:
+ case V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE:
+ case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE:
+ case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL:
+ case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
+ case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
+ case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
+ case V4L2_CID_ISO_SENSITIVITY_AUTO:
+ case V4L2_CID_EXPOSURE_METERING:
+ case V4L2_CID_SCENE_MODE:
+ case V4L2_CID_DV_TX_MODE:
+ case V4L2_CID_DV_TX_RGB_RANGE:
+ case V4L2_CID_DV_TX_IT_CONTENT_TYPE:
+ case V4L2_CID_DV_RX_RGB_RANGE:
+ case V4L2_CID_DV_RX_IT_CONTENT_TYPE:
+ case V4L2_CID_TEST_PATTERN:
+ case V4L2_CID_DEINTERLACING_MODE:
+ case V4L2_CID_TUNE_DEEMPHASIS:
+ case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL:
+ case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_VP9_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_VP9_LEVEL:
+ case V4L2_CID_DETECT_MD_MODE:
+ case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL:
+ case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE:
+ case V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE:
+ case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD:
+ case V4L2_CID_MPEG_VIDEO_HEVC_TIER:
+ case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE:
+ case V4L2_CID_MPEG_VIDEO_AV1_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_AV1_LEVEL:
+ case V4L2_CID_STATELESS_HEVC_DECODE_MODE:
+ case V4L2_CID_STATELESS_HEVC_START_CODE:
+ case V4L2_CID_STATELESS_H264_DECODE_MODE:
+ case V4L2_CID_STATELESS_H264_START_CODE:
+ case V4L2_CID_CAMERA_ORIENTATION:
+ case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE:
+ case V4L2_CID_HDR_SENSOR_MODE:
+ *type = V4L2_CTRL_TYPE_MENU;
+ break;
+ case V4L2_CID_LINK_FREQ:
+ *type = V4L2_CTRL_TYPE_INTEGER_MENU;
+ break;
+ case V4L2_CID_RDS_TX_PS_NAME:
+ case V4L2_CID_RDS_TX_RADIO_TEXT:
+ case V4L2_CID_RDS_RX_PS_NAME:
+ case V4L2_CID_RDS_RX_RADIO_TEXT:
+ *type = V4L2_CTRL_TYPE_STRING;
+ break;
+ case V4L2_CID_ISO_SENSITIVITY:
+ case V4L2_CID_AUTO_EXPOSURE_BIAS:
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS:
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES:
+ *type = V4L2_CTRL_TYPE_INTEGER_MENU;
+ break;
+ case V4L2_CID_USER_CLASS:
+ case V4L2_CID_CAMERA_CLASS:
+ case V4L2_CID_CODEC_CLASS:
+ case V4L2_CID_FM_TX_CLASS:
+ case V4L2_CID_FLASH_CLASS:
+ case V4L2_CID_JPEG_CLASS:
+ case V4L2_CID_IMAGE_SOURCE_CLASS:
+ case V4L2_CID_IMAGE_PROC_CLASS:
+ case V4L2_CID_DV_CLASS:
+ case V4L2_CID_FM_RX_CLASS:
+ case V4L2_CID_RF_TUNER_CLASS:
+ case V4L2_CID_DETECT_CLASS:
+ case V4L2_CID_CODEC_STATELESS_CLASS:
+ case V4L2_CID_COLORIMETRY_CLASS:
+ *type = V4L2_CTRL_TYPE_CTRL_CLASS;
+ /* You can neither read nor write these */
+ *flags |= V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY;
+ *min = *max = *step = *def = 0;
+ break;
+ case V4L2_CID_BG_COLOR:
+ case V4L2_CID_COLORFX_RGB:
+ *type = V4L2_CTRL_TYPE_INTEGER;
+ *step = 1;
+ *min = 0;
+		/* Max is the largest RGB888 value, i.e. 2^24 - 1 */
+ *max = 0xffffff;
+ break;
+ case V4L2_CID_COLORFX_CBCR:
+ *type = V4L2_CTRL_TYPE_INTEGER;
+ *step = 1;
+ *min = 0;
+ *max = 0xffff;
+ break;
+ case V4L2_CID_FLASH_FAULT:
+ case V4L2_CID_JPEG_ACTIVE_MARKER:
+ case V4L2_CID_3A_LOCK:
+ case V4L2_CID_AUTO_FOCUS_STATUS:
+ case V4L2_CID_DV_TX_HOTPLUG:
+ case V4L2_CID_DV_TX_RXSENSE:
+ case V4L2_CID_DV_TX_EDID_PRESENT:
+ case V4L2_CID_DV_RX_POWER_PRESENT:
+ *type = V4L2_CTRL_TYPE_BITMASK;
+ break;
+ case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
+ case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
+ *type = V4L2_CTRL_TYPE_INTEGER;
+ *flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ break;
+ case V4L2_CID_MPEG_VIDEO_DEC_PTS:
+ *type = V4L2_CTRL_TYPE_INTEGER64;
+ *flags |= V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY;
+ *min = *def = 0;
+ *max = 0x1ffffffffLL;
+ *step = 1;
+ break;
+ case V4L2_CID_MPEG_VIDEO_DEC_FRAME:
+ *type = V4L2_CTRL_TYPE_INTEGER64;
+ *flags |= V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY;
+ *min = *def = 0;
+ *max = 0x7fffffffffffffffLL;
+ *step = 1;
+ break;
+ case V4L2_CID_MPEG_VIDEO_DEC_CONCEAL_COLOR:
+ *type = V4L2_CTRL_TYPE_INTEGER64;
+ *min = 0;
+		/* Default is 8-bit black: luma is 16, chroma is 128 */
+ *def = 0x8000800010LL;
+ *max = 0xffffffffffffLL;
+ *step = 1;
+ break;
+ case V4L2_CID_PIXEL_RATE:
+ *type = V4L2_CTRL_TYPE_INTEGER64;
+ *flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ break;
+ case V4L2_CID_DETECT_MD_REGION_GRID:
+ *type = V4L2_CTRL_TYPE_U8;
+ break;
+ case V4L2_CID_DETECT_MD_THRESHOLD_GRID:
+ *type = V4L2_CTRL_TYPE_U16;
+ break;
+ case V4L2_CID_RDS_TX_ALT_FREQS:
+ *type = V4L2_CTRL_TYPE_U32;
+ break;
+ case V4L2_CID_STATELESS_MPEG2_SEQUENCE:
+ *type = V4L2_CTRL_TYPE_MPEG2_SEQUENCE;
+ break;
+ case V4L2_CID_STATELESS_MPEG2_PICTURE:
+ *type = V4L2_CTRL_TYPE_MPEG2_PICTURE;
+ break;
+ case V4L2_CID_STATELESS_MPEG2_QUANTISATION:
+ *type = V4L2_CTRL_TYPE_MPEG2_QUANTISATION;
+ break;
+ case V4L2_CID_STATELESS_FWHT_PARAMS:
+ *type = V4L2_CTRL_TYPE_FWHT_PARAMS;
+ break;
+ case V4L2_CID_STATELESS_H264_SPS:
+ *type = V4L2_CTRL_TYPE_H264_SPS;
+ break;
+ case V4L2_CID_STATELESS_H264_PPS:
+ *type = V4L2_CTRL_TYPE_H264_PPS;
+ break;
+ case V4L2_CID_STATELESS_H264_SCALING_MATRIX:
+ *type = V4L2_CTRL_TYPE_H264_SCALING_MATRIX;
+ break;
+ case V4L2_CID_STATELESS_H264_SLICE_PARAMS:
+ *type = V4L2_CTRL_TYPE_H264_SLICE_PARAMS;
+ break;
+ case V4L2_CID_STATELESS_H264_DECODE_PARAMS:
+ *type = V4L2_CTRL_TYPE_H264_DECODE_PARAMS;
+ break;
+ case V4L2_CID_STATELESS_H264_PRED_WEIGHTS:
+ *type = V4L2_CTRL_TYPE_H264_PRED_WEIGHTS;
+ break;
+ case V4L2_CID_STATELESS_VP8_FRAME:
+ *type = V4L2_CTRL_TYPE_VP8_FRAME;
+ break;
+ case V4L2_CID_STATELESS_HEVC_SPS:
+ *type = V4L2_CTRL_TYPE_HEVC_SPS;
+ break;
+ case V4L2_CID_STATELESS_HEVC_PPS:
+ *type = V4L2_CTRL_TYPE_HEVC_PPS;
+ break;
+ case V4L2_CID_STATELESS_HEVC_SLICE_PARAMS:
+ *type = V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS;
+ *flags |= V4L2_CTRL_FLAG_DYNAMIC_ARRAY;
+ break;
+ case V4L2_CID_STATELESS_HEVC_SCALING_MATRIX:
+ *type = V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX;
+ break;
+ case V4L2_CID_STATELESS_HEVC_DECODE_PARAMS:
+ *type = V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS;
+ break;
+ case V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS:
+ *type = V4L2_CTRL_TYPE_U32;
+ *flags |= V4L2_CTRL_FLAG_DYNAMIC_ARRAY;
+ break;
+ case V4L2_CID_STATELESS_VP9_COMPRESSED_HDR:
+ *type = V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR;
+ break;
+ case V4L2_CID_STATELESS_VP9_FRAME:
+ *type = V4L2_CTRL_TYPE_VP9_FRAME;
+ break;
+ case V4L2_CID_STATELESS_AV1_SEQUENCE:
+ *type = V4L2_CTRL_TYPE_AV1_SEQUENCE;
+ break;
+ case V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY:
+ *type = V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY;
+ *flags |= V4L2_CTRL_FLAG_DYNAMIC_ARRAY;
+ break;
+ case V4L2_CID_STATELESS_AV1_FRAME:
+ *type = V4L2_CTRL_TYPE_AV1_FRAME;
+ break;
+ case V4L2_CID_STATELESS_AV1_FILM_GRAIN:
+ *type = V4L2_CTRL_TYPE_AV1_FILM_GRAIN;
+ break;
+ case V4L2_CID_UNIT_CELL_SIZE:
+ *type = V4L2_CTRL_TYPE_AREA;
+ *flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ break;
+ case V4L2_CID_COLORIMETRY_HDR10_CLL_INFO:
+ *type = V4L2_CTRL_TYPE_HDR10_CLL_INFO;
+ break;
+ case V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY:
+ *type = V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY;
+ break;
+ default:
+ *type = V4L2_CTRL_TYPE_INTEGER;
+ break;
+ }
+ switch (id) {
+ case V4L2_CID_MPEG_AUDIO_ENCODING:
+ case V4L2_CID_MPEG_AUDIO_MODE:
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES:
+ case V4L2_CID_MPEG_STREAM_TYPE:
+ *flags |= V4L2_CTRL_FLAG_UPDATE;
+ break;
+ case V4L2_CID_AUDIO_VOLUME:
+ case V4L2_CID_AUDIO_BALANCE:
+ case V4L2_CID_AUDIO_BASS:
+ case V4L2_CID_AUDIO_TREBLE:
+ case V4L2_CID_BRIGHTNESS:
+ case V4L2_CID_CONTRAST:
+ case V4L2_CID_SATURATION:
+ case V4L2_CID_HUE:
+ case V4L2_CID_RED_BALANCE:
+ case V4L2_CID_BLUE_BALANCE:
+ case V4L2_CID_GAMMA:
+ case V4L2_CID_SHARPNESS:
+ case V4L2_CID_CHROMA_GAIN:
+ case V4L2_CID_RDS_TX_DEVIATION:
+ case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME:
+ case V4L2_CID_AUDIO_LIMITER_DEVIATION:
+ case V4L2_CID_AUDIO_COMPRESSION_GAIN:
+ case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD:
+ case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME:
+ case V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME:
+ case V4L2_CID_PILOT_TONE_DEVIATION:
+ case V4L2_CID_PILOT_TONE_FREQUENCY:
+ case V4L2_CID_TUNE_POWER_LEVEL:
+ case V4L2_CID_TUNE_ANTENNA_CAPACITOR:
+ case V4L2_CID_RF_TUNER_RF_GAIN:
+ case V4L2_CID_RF_TUNER_LNA_GAIN:
+ case V4L2_CID_RF_TUNER_MIXER_GAIN:
+ case V4L2_CID_RF_TUNER_IF_GAIN:
+ case V4L2_CID_RF_TUNER_BANDWIDTH:
+ case V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD:
+ *flags |= V4L2_CTRL_FLAG_SLIDER;
+ break;
+ case V4L2_CID_PAN_RELATIVE:
+ case V4L2_CID_TILT_RELATIVE:
+ case V4L2_CID_FOCUS_RELATIVE:
+ case V4L2_CID_IRIS_RELATIVE:
+ case V4L2_CID_ZOOM_RELATIVE:
+ *flags |= V4L2_CTRL_FLAG_WRITE_ONLY |
+ V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
+ break;
+ case V4L2_CID_FLASH_STROBE_STATUS:
+ case V4L2_CID_AUTO_FOCUS_STATUS:
+ case V4L2_CID_FLASH_READY:
+ case V4L2_CID_DV_TX_HOTPLUG:
+ case V4L2_CID_DV_TX_RXSENSE:
+ case V4L2_CID_DV_TX_EDID_PRESENT:
+ case V4L2_CID_DV_RX_POWER_PRESENT:
+ case V4L2_CID_DV_RX_IT_CONTENT_TYPE:
+ case V4L2_CID_RDS_RX_PTY:
+ case V4L2_CID_RDS_RX_PS_NAME:
+ case V4L2_CID_RDS_RX_RADIO_TEXT:
+ case V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT:
+ case V4L2_CID_RDS_RX_TRAFFIC_PROGRAM:
+ case V4L2_CID_RDS_RX_MUSIC_SPEECH:
+ case V4L2_CID_CAMERA_ORIENTATION:
+ case V4L2_CID_CAMERA_SENSOR_ROTATION:
+ *flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ break;
+ case V4L2_CID_RF_TUNER_PLL_LOCK:
+ *flags |= V4L2_CTRL_FLAG_VOLATILE;
+ break;
+ }
+}
+EXPORT_SYMBOL(v4l2_ctrl_fill);
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-priv.h b/drivers/media/v4l2-core/v4l2-ctrls-priv.h
new file mode 100644
index 0000000000..aba6176fab
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-ctrls-priv.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * V4L2 controls framework private header.
+ *
+ * Copyright (C) 2010-2021 Hans Verkuil <hverkuil-cisco@xs4all.nl>
+ */
+
+#ifndef _V4L2_CTRLS_PRIV_H_
+#define _V4L2_CTRLS_PRIV_H_
+
+#define dprintk(vdev, fmt, arg...) do { \
+ if (!WARN_ON(!(vdev)) && ((vdev)->dev_debug & V4L2_DEV_DEBUG_CTRL)) \
+ printk(KERN_DEBUG pr_fmt("%s: %s: " fmt), \
+ __func__, video_device_node_name(vdev), ##arg); \
+} while (0)
+
+#define has_op(master, op) \
+ ((master)->ops && (master)->ops->op)
+#define call_op(master, op) \
+ (has_op(master, op) ? (master)->ops->op(master) : 0)
+
+static inline u32 node2id(struct list_head *node)
+{
+ return list_entry(node, struct v4l2_ctrl_ref, node)->ctrl->id;
+}
+
+/*
+ * Small helper function to determine if the autocluster is set to manual
+ * mode.
+ */
+static inline bool is_cur_manual(const struct v4l2_ctrl *master)
+{
+ return master->is_auto && master->cur.val == master->manual_mode_value;
+}
+
+/*
+ * Small helper function to determine if the autocluster will be set to manual
+ * mode.
+ */
+static inline bool is_new_manual(const struct v4l2_ctrl *master)
+{
+ return master->is_auto && master->val == master->manual_mode_value;
+}
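+
+/*
+ * Usage sketch, for illustration only (the driver fields are assumptions):
+ * a typical autocluster pairs an auto control with its manual controls,
+ * e.g. for exposure:
+ *
+ *	v4l2_ctrl_auto_cluster(2, &sensor->auto_exp,
+ *			       V4L2_EXPOSURE_MANUAL, true);
+ *
+ * Here manual_mode_value is V4L2_EXPOSURE_MANUAL, so is_cur_manual()
+ * returns true once the current value of the auto control selects
+ * manual exposure.
+ */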
+
+static inline u32 user_flags(const struct v4l2_ctrl *ctrl)
+{
+ u32 flags = ctrl->flags;
+
+ if (ctrl->is_ptr)
+ flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD;
+
+ return flags;
+}
+
+/* v4l2-ctrls-core.c */
+void cur_to_new(struct v4l2_ctrl *ctrl);
+void cur_to_req(struct v4l2_ctrl_ref *ref);
+void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags);
+void new_to_req(struct v4l2_ctrl_ref *ref);
+int req_to_new(struct v4l2_ctrl_ref *ref);
+void send_initial_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl);
+void send_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 changes);
+int handler_new_ref(struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ctrl *ctrl,
+ struct v4l2_ctrl_ref **ctrl_ref,
+ bool from_other_dev, bool allocate_req);
+struct v4l2_ctrl_ref *find_ref(struct v4l2_ctrl_handler *hdl, u32 id);
+struct v4l2_ctrl_ref *find_ref_lock(struct v4l2_ctrl_handler *hdl, u32 id);
+int check_range(enum v4l2_ctrl_type type,
+ s64 min, s64 max, u64 step, s64 def);
+void update_from_auto_cluster(struct v4l2_ctrl *master);
+int try_or_set_cluster(struct v4l2_fh *fh, struct v4l2_ctrl *master,
+ bool set, u32 ch_flags);
+
+/* v4l2-ctrls-api.c */
+int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ext_controls *cs,
+ struct video_device *vdev);
+int try_set_ext_ctrls_common(struct v4l2_fh *fh,
+ struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ext_controls *cs,
+ struct video_device *vdev, bool set);
+
+/* v4l2-ctrls-request.c */
+void v4l2_ctrl_handler_init_request(struct v4l2_ctrl_handler *hdl);
+void v4l2_ctrl_handler_free_request(struct v4l2_ctrl_handler *hdl);
+int v4l2_g_ext_ctrls_request(struct v4l2_ctrl_handler *hdl, struct video_device *vdev,
+ struct media_device *mdev, struct v4l2_ext_controls *cs);
+int try_set_ext_ctrls_request(struct v4l2_fh *fh,
+ struct v4l2_ctrl_handler *hdl,
+ struct video_device *vdev,
+ struct media_device *mdev,
+ struct v4l2_ext_controls *cs, bool set);
+
+#endif
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-request.c b/drivers/media/v4l2-core/v4l2-ctrls-request.c
new file mode 100644
index 0000000000..c637049d7a
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-ctrls-request.c
@@ -0,0 +1,501 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * V4L2 controls framework Request API implementation.
+ *
+ * Copyright (C) 2018-2021 Hans Verkuil <hverkuil-cisco@xs4all.nl>
+ */
+
+#define pr_fmt(fmt) "v4l2-ctrls: " fmt
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-ioctl.h>
+
+#include "v4l2-ctrls-priv.h"
+
+/* Initialize the request-related fields in a control handler */
+void v4l2_ctrl_handler_init_request(struct v4l2_ctrl_handler *hdl)
+{
+ INIT_LIST_HEAD(&hdl->requests);
+ INIT_LIST_HEAD(&hdl->requests_queued);
+ hdl->request_is_queued = false;
+ media_request_object_init(&hdl->req_obj);
+}
+
+/* Free the request-related fields in a control handler */
+void v4l2_ctrl_handler_free_request(struct v4l2_ctrl_handler *hdl)
+{
+ struct v4l2_ctrl_handler *req, *next_req;
+
+ /*
+ * Do nothing if this isn't the main handler or the main
+ * handler is not used in any request.
+ *
+ * The main handler can be identified by having a NULL ops pointer in
+ * the request object.
+ */
+ if (hdl->req_obj.ops || list_empty(&hdl->requests))
+ return;
+
+ /*
+ * If the main handler is freed and it is used by handler objects in
+ * outstanding requests, then unbind and put those objects before
+ * freeing the main handler.
+ */
+ list_for_each_entry_safe(req, next_req, &hdl->requests, requests) {
+ media_request_object_unbind(&req->req_obj);
+ media_request_object_put(&req->req_obj);
+ }
+}
+
+static int v4l2_ctrl_request_clone(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_handler *from)
+{
+ struct v4l2_ctrl_ref *ref;
+ int err = 0;
+
+ if (WARN_ON(!hdl || hdl == from))
+ return -EINVAL;
+
+ if (hdl->error)
+ return hdl->error;
+
+ WARN_ON(hdl->lock != &hdl->_lock);
+
+ mutex_lock(from->lock);
+ list_for_each_entry(ref, &from->ctrl_refs, node) {
+ struct v4l2_ctrl *ctrl = ref->ctrl;
+ struct v4l2_ctrl_ref *new_ref;
+
+ /* Skip refs inherited from other devices */
+ if (ref->from_other_dev)
+ continue;
+ err = handler_new_ref(hdl, ctrl, &new_ref, false, true);
+ if (err)
+ break;
+ }
+ mutex_unlock(from->lock);
+ return err;
+}
+
+static void v4l2_ctrl_request_queue(struct media_request_object *obj)
+{
+ struct v4l2_ctrl_handler *hdl =
+ container_of(obj, struct v4l2_ctrl_handler, req_obj);
+ struct v4l2_ctrl_handler *main_hdl = obj->priv;
+
+ mutex_lock(main_hdl->lock);
+ list_add_tail(&hdl->requests_queued, &main_hdl->requests_queued);
+ hdl->request_is_queued = true;
+ mutex_unlock(main_hdl->lock);
+}
+
+static void v4l2_ctrl_request_unbind(struct media_request_object *obj)
+{
+ struct v4l2_ctrl_handler *hdl =
+ container_of(obj, struct v4l2_ctrl_handler, req_obj);
+ struct v4l2_ctrl_handler *main_hdl = obj->priv;
+
+ mutex_lock(main_hdl->lock);
+ list_del_init(&hdl->requests);
+ if (hdl->request_is_queued) {
+ list_del_init(&hdl->requests_queued);
+ hdl->request_is_queued = false;
+ }
+ mutex_unlock(main_hdl->lock);
+}
+
+static void v4l2_ctrl_request_release(struct media_request_object *obj)
+{
+ struct v4l2_ctrl_handler *hdl =
+ container_of(obj, struct v4l2_ctrl_handler, req_obj);
+
+ v4l2_ctrl_handler_free(hdl);
+ kfree(hdl);
+}
+
+static const struct media_request_object_ops req_ops = {
+ .queue = v4l2_ctrl_request_queue,
+ .unbind = v4l2_ctrl_request_unbind,
+ .release = v4l2_ctrl_request_release,
+};
+
+struct v4l2_ctrl_handler *v4l2_ctrl_request_hdl_find(struct media_request *req,
+ struct v4l2_ctrl_handler *parent)
+{
+ struct media_request_object *obj;
+
+ if (WARN_ON(req->state != MEDIA_REQUEST_STATE_VALIDATING &&
+ req->state != MEDIA_REQUEST_STATE_QUEUED))
+ return NULL;
+
+ obj = media_request_object_find(req, &req_ops, parent);
+ if (obj)
+ return container_of(obj, struct v4l2_ctrl_handler, req_obj);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(v4l2_ctrl_request_hdl_find);
+
+struct v4l2_ctrl *
+v4l2_ctrl_request_hdl_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id)
+{
+ struct v4l2_ctrl_ref *ref = find_ref_lock(hdl, id);
+
+ return (ref && ref->p_req_valid) ? ref->ctrl : NULL;
+}
+EXPORT_SYMBOL_GPL(v4l2_ctrl_request_hdl_ctrl_find);
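+
+/*
+ * Usage sketch, for illustration only (the ctx layout and the chosen CID
+ * are assumptions): a stateless decoder's req_validate callback typically
+ * checks that a mandatory control is present in the request:
+ *
+ *	struct v4l2_ctrl_handler *hdl;
+ *	int ret = 0;
+ *
+ *	hdl = v4l2_ctrl_request_hdl_find(req, &ctx->ctrl_handler);
+ *	if (!hdl)
+ *		return -ENOENT;
+ *	if (!v4l2_ctrl_request_hdl_ctrl_find(hdl,
+ *					     V4L2_CID_STATELESS_H264_SPS))
+ *		ret = -ENOENT;
+ *	v4l2_ctrl_request_hdl_put(hdl);
+ *	return ret;
+ */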
+
+static int v4l2_ctrl_request_bind(struct media_request *req,
+ struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ctrl_handler *from)
+{
+ int ret;
+
+ ret = v4l2_ctrl_request_clone(hdl, from);
+
+ if (!ret) {
+ ret = media_request_object_bind(req, &req_ops,
+ from, false, &hdl->req_obj);
+ if (!ret) {
+ mutex_lock(from->lock);
+ list_add_tail(&hdl->requests, &from->requests);
+ mutex_unlock(from->lock);
+ }
+ }
+ return ret;
+}
+
+static struct media_request_object *
+v4l2_ctrls_find_req_obj(struct v4l2_ctrl_handler *hdl,
+ struct media_request *req, bool set)
+{
+ struct media_request_object *obj;
+ struct v4l2_ctrl_handler *new_hdl;
+ int ret;
+
+ if (IS_ERR(req))
+ return ERR_CAST(req);
+
+ if (set && WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING))
+ return ERR_PTR(-EBUSY);
+
+ obj = media_request_object_find(req, &req_ops, hdl);
+ if (obj)
+ return obj;
+ /*
+ * If there are no controls in this completed request,
+ * then that can only happen if:
+ *
+ * 1) no controls were present in the queued request, and
+ * 2) v4l2_ctrl_request_complete() could not allocate a
+ * control handler object to store the completed state in.
+ *
+ * So return ENOMEM to indicate that there was an out-of-memory
+ * error.
+ */
+ if (!set)
+ return ERR_PTR(-ENOMEM);
+
+ new_hdl = kzalloc(sizeof(*new_hdl), GFP_KERNEL);
+ if (!new_hdl)
+ return ERR_PTR(-ENOMEM);
+
+ obj = &new_hdl->req_obj;
+ ret = v4l2_ctrl_handler_init(new_hdl, (hdl->nr_of_buckets - 1) * 8);
+ if (!ret)
+ ret = v4l2_ctrl_request_bind(req, new_hdl, hdl);
+ if (ret) {
+ v4l2_ctrl_handler_free(new_hdl);
+ kfree(new_hdl);
+ return ERR_PTR(ret);
+ }
+
+ media_request_object_get(obj);
+ return obj;
+}
+
+int v4l2_g_ext_ctrls_request(struct v4l2_ctrl_handler *hdl, struct video_device *vdev,
+ struct media_device *mdev, struct v4l2_ext_controls *cs)
+{
+ struct media_request_object *obj = NULL;
+ struct media_request *req = NULL;
+ int ret;
+
+ if (!mdev || cs->request_fd < 0)
+ return -EINVAL;
+
+ req = media_request_get_by_fd(mdev, cs->request_fd);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ if (req->state != MEDIA_REQUEST_STATE_COMPLETE) {
+ media_request_put(req);
+ return -EACCES;
+ }
+
+ ret = media_request_lock_for_access(req);
+ if (ret) {
+ media_request_put(req);
+ return ret;
+ }
+
+ obj = v4l2_ctrls_find_req_obj(hdl, req, false);
+ if (IS_ERR(obj)) {
+ media_request_unlock_for_access(req);
+ media_request_put(req);
+ return PTR_ERR(obj);
+ }
+
+ hdl = container_of(obj, struct v4l2_ctrl_handler,
+ req_obj);
+ ret = v4l2_g_ext_ctrls_common(hdl, cs, vdev);
+
+ media_request_unlock_for_access(req);
+ media_request_object_put(obj);
+ media_request_put(req);
+ return ret;
+}
+
+int try_set_ext_ctrls_request(struct v4l2_fh *fh,
+ struct v4l2_ctrl_handler *hdl,
+ struct video_device *vdev,
+ struct media_device *mdev,
+ struct v4l2_ext_controls *cs, bool set)
+{
+ struct media_request_object *obj = NULL;
+ struct media_request *req = NULL;
+ int ret;
+
+ if (!mdev) {
+ dprintk(vdev, "%s: missing media device\n",
+ video_device_node_name(vdev));
+ return -EINVAL;
+ }
+
+ if (cs->request_fd < 0) {
+ dprintk(vdev, "%s: invalid request fd %d\n",
+ video_device_node_name(vdev), cs->request_fd);
+ return -EINVAL;
+ }
+
+ req = media_request_get_by_fd(mdev, cs->request_fd);
+ if (IS_ERR(req)) {
+ dprintk(vdev, "%s: cannot find request fd %d\n",
+ video_device_node_name(vdev), cs->request_fd);
+ return PTR_ERR(req);
+ }
+
+ ret = media_request_lock_for_update(req);
+ if (ret) {
+ dprintk(vdev, "%s: cannot lock request fd %d\n",
+ video_device_node_name(vdev), cs->request_fd);
+ media_request_put(req);
+ return ret;
+ }
+
+ obj = v4l2_ctrls_find_req_obj(hdl, req, set);
+ if (IS_ERR(obj)) {
+ dprintk(vdev,
+ "%s: cannot find request object for request fd %d\n",
+ video_device_node_name(vdev),
+ cs->request_fd);
+ media_request_unlock_for_update(req);
+ media_request_put(req);
+ return PTR_ERR(obj);
+ }
+
+ hdl = container_of(obj, struct v4l2_ctrl_handler,
+ req_obj);
+ ret = try_set_ext_ctrls_common(fh, hdl, cs, vdev, set);
+ if (ret)
+ dprintk(vdev,
+ "%s: try_set_ext_ctrls_common failed (%d)\n",
+ video_device_node_name(vdev), ret);
+
+ media_request_unlock_for_update(req);
+ media_request_object_put(obj);
+ media_request_put(req);
+
+ return ret;
+}
+
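+/*
+ * Usage sketch, for illustration only (the buffer and ctx names are
+ * assumptions): a mem2mem driver completes the request when a job
+ * finishes, so that updated control values can be read back from the
+ * request:
+ *
+ *	struct media_request *src_req = src_buf->vb2_buf.req_obj.req;
+ *
+ *	if (src_req)
+ *		v4l2_ctrl_request_complete(src_req, &ctx->ctrl_handler);
+ */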
+void v4l2_ctrl_request_complete(struct media_request *req,
+ struct v4l2_ctrl_handler *main_hdl)
+{
+ struct media_request_object *obj;
+ struct v4l2_ctrl_handler *hdl;
+ struct v4l2_ctrl_ref *ref;
+
+ if (!req || !main_hdl)
+ return;
+
+ /*
+ * Note that it is valid if nothing was found. It means
+ * that this request doesn't have any controls and so just
+ * wants to leave the controls unchanged.
+ */
+ obj = media_request_object_find(req, &req_ops, main_hdl);
+ if (!obj) {
+ int ret;
+
+		/* Create a new handler object so the driver can return controls */
+ hdl = kzalloc(sizeof(*hdl), GFP_KERNEL);
+ if (!hdl)
+ return;
+
+ ret = v4l2_ctrl_handler_init(hdl, (main_hdl->nr_of_buckets - 1) * 8);
+ if (!ret)
+ ret = v4l2_ctrl_request_bind(req, hdl, main_hdl);
+ if (ret) {
+ v4l2_ctrl_handler_free(hdl);
+ kfree(hdl);
+ return;
+ }
+ hdl->request_is_queued = true;
+ obj = media_request_object_find(req, &req_ops, main_hdl);
+ }
+ hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj);
+
+ list_for_each_entry(ref, &hdl->ctrl_refs, node) {
+ struct v4l2_ctrl *ctrl = ref->ctrl;
+ struct v4l2_ctrl *master = ctrl->cluster[0];
+ unsigned int i;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) {
+ v4l2_ctrl_lock(master);
+ /* g_volatile_ctrl will update the current control values */
+ for (i = 0; i < master->ncontrols; i++)
+ cur_to_new(master->cluster[i]);
+ call_op(master, g_volatile_ctrl);
+ new_to_req(ref);
+ v4l2_ctrl_unlock(master);
+ continue;
+ }
+ if (ref->p_req_valid)
+ continue;
+
+ /* Copy the current control value into the request */
+ v4l2_ctrl_lock(ctrl);
+ cur_to_req(ref);
+ v4l2_ctrl_unlock(ctrl);
+ }
+
+ mutex_lock(main_hdl->lock);
+ WARN_ON(!hdl->request_is_queued);
+ list_del_init(&hdl->requests_queued);
+ hdl->request_is_queued = false;
+ mutex_unlock(main_hdl->lock);
+ media_request_object_complete(obj);
+ media_request_object_put(obj);
+}
+EXPORT_SYMBOL(v4l2_ctrl_request_complete);
+
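+/*
+ * Usage sketch, for illustration only (names as in the sketch above):
+ * the counterpart to completion; a driver applies the request's control
+ * values at the start of its device_run() before programming the
+ * hardware:
+ *
+ *	v4l2_ctrl_request_setup(src_buf->vb2_buf.req_obj.req,
+ *				&ctx->ctrl_handler);
+ */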
+int v4l2_ctrl_request_setup(struct media_request *req,
+ struct v4l2_ctrl_handler *main_hdl)
+{
+ struct media_request_object *obj;
+ struct v4l2_ctrl_handler *hdl;
+ struct v4l2_ctrl_ref *ref;
+ int ret = 0;
+
+ if (!req || !main_hdl)
+ return 0;
+
+ if (WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED))
+ return -EBUSY;
+
+ /*
+ * Note that it is valid if nothing was found. It means
+ * that this request doesn't have any controls and so just
+ * wants to leave the controls unchanged.
+ */
+ obj = media_request_object_find(req, &req_ops, main_hdl);
+ if (!obj)
+ return 0;
+ if (obj->completed) {
+ media_request_object_put(obj);
+ return -EBUSY;
+ }
+ hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj);
+
+ list_for_each_entry(ref, &hdl->ctrl_refs, node)
+ ref->req_done = false;
+
+ list_for_each_entry(ref, &hdl->ctrl_refs, node) {
+ struct v4l2_ctrl *ctrl = ref->ctrl;
+ struct v4l2_ctrl *master = ctrl->cluster[0];
+ bool have_new_data = false;
+ int i;
+
+ /*
+ * Skip if this control was already handled by a cluster.
+		 * Skip read-only controls.
+ */
+ if (ref->req_done || (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY))
+ continue;
+
+ v4l2_ctrl_lock(master);
+ for (i = 0; i < master->ncontrols; i++) {
+ if (master->cluster[i]) {
+ struct v4l2_ctrl_ref *r =
+ find_ref(hdl, master->cluster[i]->id);
+
+ if (r->p_req_valid) {
+ have_new_data = true;
+ break;
+ }
+ }
+ }
+ if (!have_new_data) {
+ v4l2_ctrl_unlock(master);
+ continue;
+ }
+
+ for (i = 0; i < master->ncontrols; i++) {
+ if (master->cluster[i]) {
+ struct v4l2_ctrl_ref *r =
+ find_ref(hdl, master->cluster[i]->id);
+
+ ret = req_to_new(r);
+ if (ret) {
+ v4l2_ctrl_unlock(master);
+ goto error;
+ }
+ master->cluster[i]->is_new = 1;
+ r->req_done = true;
+ }
+ }
+ /*
+ * For volatile autoclusters that are currently in auto mode
+ * we need to discover if it will be set to manual mode.
+ * If so, then we have to copy the current volatile values
+ * first since those will become the new manual values (which
+ * may be overwritten by explicit new values from this set
+ * of controls).
+ */
+ if (master->is_auto && master->has_volatiles &&
+ !is_cur_manual(master)) {
+ s32 new_auto_val = *master->p_new.p_s32;
+
+ /*
+ * If the new value == the manual value, then copy
+ * the current volatile values.
+ */
+ if (new_auto_val == master->manual_mode_value)
+ update_from_auto_cluster(master);
+ }
+
+ ret = try_or_set_cluster(NULL, master, true, 0);
+ v4l2_ctrl_unlock(master);
+
+ if (ret)
+ break;
+ }
+
+error:
+ media_request_object_put(obj);
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_ctrl_request_setup);
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
new file mode 100644
index 0000000000..f812794926
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -0,0 +1,1211 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Video capture interface for Linux version 2
+ *
+ * A generic video device interface for the LINUX operating system
+ * using a set of device structures/vectors for low level operations.
+ *
+ * Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk> (version 1)
+ * Mauro Carvalho Chehab <mchehab@kernel.org> (version 2)
+ *
+ * Fixes: 20000516 Claudio Matsuoka <claudio@conectiva.com>
+ * - Added procfs support
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kmod.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+
+#define VIDEO_NUM_DEVICES 256
+#define VIDEO_NAME "video4linux"
+
+#define dprintk(fmt, arg...) do { \
+ printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+ __func__, ##arg); \
+} while (0)
+
+/*
+ * sysfs stuff
+ */
+
+static ssize_t index_show(struct device *cd,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(cd);
+
+ return sprintf(buf, "%i\n", vdev->index);
+}
+static DEVICE_ATTR_RO(index);
+
+static ssize_t dev_debug_show(struct device *cd,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(cd);
+
+ return sprintf(buf, "%i\n", vdev->dev_debug);
+}
+
+static ssize_t dev_debug_store(struct device *cd, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct video_device *vdev = to_video_device(cd);
+ int res = 0;
+ u16 value;
+
+ res = kstrtou16(buf, 0, &value);
+ if (res)
+ return res;
+
+ vdev->dev_debug = value;
+ return len;
+}
+static DEVICE_ATTR_RW(dev_debug);
+
+static ssize_t name_show(struct device *cd,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(cd);
+
+ return sprintf(buf, "%.*s\n", (int)sizeof(vdev->name), vdev->name);
+}
+static DEVICE_ATTR_RO(name);
+
+static struct attribute *video_device_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_dev_debug.attr,
+ &dev_attr_index.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(video_device);
+
+/*
+ * Active devices
+ */
+static struct video_device *video_devices[VIDEO_NUM_DEVICES];
+static DEFINE_MUTEX(videodev_lock);
+static DECLARE_BITMAP(devnode_nums[VFL_TYPE_MAX], VIDEO_NUM_DEVICES);
+
+/* Device node utility functions */
+
+/* Note: these utility functions all assume that vfl_type is in the range
+ [0, VFL_TYPE_MAX-1]. */
+
+#ifdef CONFIG_VIDEO_FIXED_MINOR_RANGES
+/* Return the bitmap corresponding to vfl_type. */
+static inline unsigned long *devnode_bits(enum vfl_devnode_type vfl_type)
+{
+ /* Any types not assigned to fixed minor ranges must be mapped to
+ one single bitmap for the purposes of finding a free node number
+ since all those unassigned types use the same minor range. */
+ int idx = (vfl_type > VFL_TYPE_RADIO) ? VFL_TYPE_MAX - 1 : vfl_type;
+
+ return devnode_nums[idx];
+}
+#else
+/* Return the bitmap corresponding to vfl_type. */
+static inline unsigned long *devnode_bits(enum vfl_devnode_type vfl_type)
+{
+ return devnode_nums[vfl_type];
+}
+#endif
+
+/* Mark device node number vdev->num as used */
+static inline void devnode_set(struct video_device *vdev)
+{
+ set_bit(vdev->num, devnode_bits(vdev->vfl_type));
+}
+
+/* Mark device node number vdev->num as unused */
+static inline void devnode_clear(struct video_device *vdev)
+{
+ clear_bit(vdev->num, devnode_bits(vdev->vfl_type));
+}
+
+/* Try to find a free device node number in the range [from, to) */
+static inline int devnode_find(struct video_device *vdev, int from, int to)
+{
+ return find_next_zero_bit(devnode_bits(vdev->vfl_type), to, from);
+}
+
+struct video_device *video_device_alloc(void)
+{
+ return kzalloc(sizeof(struct video_device), GFP_KERNEL);
+}
+EXPORT_SYMBOL(video_device_alloc);
+
+void video_device_release(struct video_device *vdev)
+{
+ kfree(vdev);
+}
+EXPORT_SYMBOL(video_device_release);
+
+void video_device_release_empty(struct video_device *vdev)
+{
+ /* Do nothing */
+	/* Only valid when the video_device struct is statically allocated. */
+}
+EXPORT_SYMBOL(video_device_release_empty);
+
+static inline void video_get(struct video_device *vdev)
+{
+ get_device(&vdev->dev);
+}
+
+static inline void video_put(struct video_device *vdev)
+{
+ put_device(&vdev->dev);
+}
+
+/* Called when the last user of the video device exits. */
+static void v4l2_device_release(struct device *cd)
+{
+ struct video_device *vdev = to_video_device(cd);
+ struct v4l2_device *v4l2_dev = vdev->v4l2_dev;
+
+ mutex_lock(&videodev_lock);
+ if (WARN_ON(video_devices[vdev->minor] != vdev)) {
+ /* should not happen */
+ mutex_unlock(&videodev_lock);
+ return;
+ }
+
+ /* Free up this device for reuse */
+ video_devices[vdev->minor] = NULL;
+
+ /* Delete the cdev on this minor as well */
+ cdev_del(vdev->cdev);
+ /* Just in case some driver tries to access this from
+ the release() callback. */
+ vdev->cdev = NULL;
+
+ /* Mark device node number as free */
+ devnode_clear(vdev);
+
+ mutex_unlock(&videodev_lock);
+
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ if (v4l2_dev->mdev && vdev->vfl_dir != VFL_DIR_M2M) {
+ /* Remove interfaces and interface links */
+ media_devnode_remove(vdev->intf_devnode);
+ if (vdev->entity.function != MEDIA_ENT_F_UNKNOWN)
+ media_device_unregister_entity(&vdev->entity);
+ }
+#endif
+
+ /* Do not call v4l2_device_put if there is no release callback set.
+ * Drivers that have no v4l2_device release callback might free the
+ * v4l2_dev instance in the video_device release callback below, so we
+ * must perform this check here.
+ *
+ * TODO: In the long run all drivers that use v4l2_device should use the
+ * v4l2_device release callback. This check will then be unnecessary.
+ */
+ if (v4l2_dev->release == NULL)
+ v4l2_dev = NULL;
+
+ /* Release video_device and perform other
+ cleanups as needed. */
+ vdev->release(vdev);
+
+ /* Decrease v4l2_device refcount */
+ if (v4l2_dev)
+ v4l2_device_put(v4l2_dev);
+}
+
+static struct class video_class = {
+ .name = VIDEO_NAME,
+ .dev_groups = video_device_groups,
+};
+
+struct video_device *video_devdata(struct file *file)
+{
+ return video_devices[iminor(file_inode(file))];
+}
+EXPORT_SYMBOL(video_devdata);
+
+
+/* Priority handling */
+
+static inline bool prio_is_valid(enum v4l2_priority prio)
+{
+ return prio == V4L2_PRIORITY_BACKGROUND ||
+ prio == V4L2_PRIORITY_INTERACTIVE ||
+ prio == V4L2_PRIORITY_RECORD;
+}
+
+void v4l2_prio_init(struct v4l2_prio_state *global)
+{
+ memset(global, 0, sizeof(*global));
+}
+EXPORT_SYMBOL(v4l2_prio_init);
+
+int v4l2_prio_change(struct v4l2_prio_state *global, enum v4l2_priority *local,
+ enum v4l2_priority new)
+{
+ if (!prio_is_valid(new))
+ return -EINVAL;
+ if (*local == new)
+ return 0;
+
+ atomic_inc(&global->prios[new]);
+ if (prio_is_valid(*local))
+ atomic_dec(&global->prios[*local]);
+ *local = new;
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_prio_change);
+
+void v4l2_prio_open(struct v4l2_prio_state *global, enum v4l2_priority *local)
+{
+ v4l2_prio_change(global, local, V4L2_PRIORITY_DEFAULT);
+}
+EXPORT_SYMBOL(v4l2_prio_open);
+
+void v4l2_prio_close(struct v4l2_prio_state *global, enum v4l2_priority local)
+{
+ if (prio_is_valid(local))
+ atomic_dec(&global->prios[local]);
+}
+EXPORT_SYMBOL(v4l2_prio_close);
+
+enum v4l2_priority v4l2_prio_max(struct v4l2_prio_state *global)
+{
+ if (atomic_read(&global->prios[V4L2_PRIORITY_RECORD]) > 0)
+ return V4L2_PRIORITY_RECORD;
+ if (atomic_read(&global->prios[V4L2_PRIORITY_INTERACTIVE]) > 0)
+ return V4L2_PRIORITY_INTERACTIVE;
+ if (atomic_read(&global->prios[V4L2_PRIORITY_BACKGROUND]) > 0)
+ return V4L2_PRIORITY_BACKGROUND;
+ return V4L2_PRIORITY_UNSET;
+}
+EXPORT_SYMBOL(v4l2_prio_max);
+
+int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local)
+{
+ return (local < v4l2_prio_max(global)) ? -EBUSY : 0;
+}
+EXPORT_SYMBOL(v4l2_prio_check);
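+
+/*
+ * Usage sketch, for illustration only (the v4l2_fh variable name is an
+ * assumption): the ioctl core wires these helpers to the priority ioctls
+ * roughly as follows. VIDIOC_G_PRIORITY maps to:
+ *
+ *	*p = v4l2_prio_max(vdev->prio);
+ *
+ * and VIDIOC_S_PRIORITY to:
+ *
+ *	return v4l2_prio_change(vdev->prio, &vfh->prio, *p);
+ */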
+
+
+static ssize_t v4l2_read(struct file *filp, char __user *buf,
+ size_t sz, loff_t *off)
+{
+ struct video_device *vdev = video_devdata(filp);
+ int ret = -ENODEV;
+
+ if (!vdev->fops->read)
+ return -EINVAL;
+ if (video_is_registered(vdev))
+ ret = vdev->fops->read(filp, buf, sz, off);
+ if ((vdev->dev_debug & V4L2_DEV_DEBUG_FOP) &&
+ (vdev->dev_debug & V4L2_DEV_DEBUG_STREAMING))
+ dprintk("%s: read: %zd (%d)\n",
+ video_device_node_name(vdev), sz, ret);
+ return ret;
+}
+
+static ssize_t v4l2_write(struct file *filp, const char __user *buf,
+ size_t sz, loff_t *off)
+{
+ struct video_device *vdev = video_devdata(filp);
+ int ret = -ENODEV;
+
+ if (!vdev->fops->write)
+ return -EINVAL;
+ if (video_is_registered(vdev))
+ ret = vdev->fops->write(filp, buf, sz, off);
+ if ((vdev->dev_debug & V4L2_DEV_DEBUG_FOP) &&
+ (vdev->dev_debug & V4L2_DEV_DEBUG_STREAMING))
+ dprintk("%s: write: %zd (%d)\n",
+ video_device_node_name(vdev), sz, ret);
+ return ret;
+}
+
+static __poll_t v4l2_poll(struct file *filp, struct poll_table_struct *poll)
+{
+ struct video_device *vdev = video_devdata(filp);
+ __poll_t res = EPOLLERR | EPOLLHUP | EPOLLPRI;
+
+ if (video_is_registered(vdev)) {
+ if (!vdev->fops->poll)
+ res = DEFAULT_POLLMASK;
+ else
+ res = vdev->fops->poll(filp, poll);
+ }
+ if (vdev->dev_debug & V4L2_DEV_DEBUG_POLL)
+ dprintk("%s: poll: %08x %08x\n",
+ video_device_node_name(vdev), res,
+ poll_requested_events(poll));
+ return res;
+}
+
+static long v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct video_device *vdev = video_devdata(filp);
+ int ret = -ENODEV;
+
+ if (vdev->fops->unlocked_ioctl) {
+ if (video_is_registered(vdev))
+ ret = vdev->fops->unlocked_ioctl(filp, cmd, arg);
+ } else
+ ret = -ENOTTY;
+
+ return ret;
+}
+
+#ifdef CONFIG_MMU
+#define v4l2_get_unmapped_area NULL
+#else
+static unsigned long v4l2_get_unmapped_area(struct file *filp,
+ unsigned long addr, unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ struct video_device *vdev = video_devdata(filp);
+ int ret;
+
+ if (!vdev->fops->get_unmapped_area)
+ return -ENOSYS;
+ if (!video_is_registered(vdev))
+ return -ENODEV;
+ ret = vdev->fops->get_unmapped_area(filp, addr, len, pgoff, flags);
+ if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP)
+ dprintk("%s: get_unmapped_area (%d)\n",
+ video_device_node_name(vdev), ret);
+ return ret;
+}
+#endif
+
+static int v4l2_mmap(struct file *filp, struct vm_area_struct *vm)
+{
+ struct video_device *vdev = video_devdata(filp);
+ int ret = -ENODEV;
+
+ if (!vdev->fops->mmap)
+ return -ENODEV;
+ if (video_is_registered(vdev))
+ ret = vdev->fops->mmap(filp, vm);
+ if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP)
+ dprintk("%s: mmap (%d)\n",
+ video_device_node_name(vdev), ret);
+ return ret;
+}
+
+/* Override for the open function */
+static int v4l2_open(struct inode *inode, struct file *filp)
+{
+ struct video_device *vdev;
+ int ret = 0;
+
+ /* Check if the video device is available */
+ mutex_lock(&videodev_lock);
+ vdev = video_devdata(filp);
+	/* Return -ENODEV if the video device has already been removed. */
+ if (vdev == NULL || !video_is_registered(vdev)) {
+ mutex_unlock(&videodev_lock);
+ return -ENODEV;
+ }
+ /* and increase the device refcount */
+ video_get(vdev);
+ mutex_unlock(&videodev_lock);
+ if (vdev->fops->open) {
+ if (video_is_registered(vdev))
+ ret = vdev->fops->open(filp);
+ else
+ ret = -ENODEV;
+ }
+
+ if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP)
+ dprintk("%s: open (%d)\n",
+ video_device_node_name(vdev), ret);
+ /* decrease the refcount in case of an error */
+ if (ret)
+ video_put(vdev);
+ return ret;
+}
+
+/* Override for the release function */
+static int v4l2_release(struct inode *inode, struct file *filp)
+{
+ struct video_device *vdev = video_devdata(filp);
+ int ret = 0;
+
+ /*
+ * We need to serialize the release() with queueing new requests.
+ * The release() may trigger the cancellation of a streaming
+ * operation, and that should not be mixed with queueing a new
+ * request at the same time.
+ */
+ if (vdev->fops->release) {
+ if (v4l2_device_supports_requests(vdev->v4l2_dev)) {
+ mutex_lock(&vdev->v4l2_dev->mdev->req_queue_mutex);
+ ret = vdev->fops->release(filp);
+ mutex_unlock(&vdev->v4l2_dev->mdev->req_queue_mutex);
+ } else {
+ ret = vdev->fops->release(filp);
+ }
+ }
+
+ if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP)
+ dprintk("%s: release\n",
+ video_device_node_name(vdev));
+
+ /* decrease the refcount unconditionally since the release()
+ return value is ignored. */
+ video_put(vdev);
+ return ret;
+}
+
+static const struct file_operations v4l2_fops = {
+ .owner = THIS_MODULE,
+ .read = v4l2_read,
+ .write = v4l2_write,
+ .open = v4l2_open,
+ .get_unmapped_area = v4l2_get_unmapped_area,
+ .mmap = v4l2_mmap,
+ .unlocked_ioctl = v4l2_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = v4l2_compat_ioctl32,
+#endif
+ .release = v4l2_release,
+ .poll = v4l2_poll,
+ .llseek = no_llseek,
+};
+
+/**
+ * get_index - assign stream index number based on v4l2_dev
+ * @vdev: video_device to assign index number to, vdev->v4l2_dev should be assigned
+ *
+ * Note that when this is called the new device has not yet been registered
+ * in the video_device array, but it was able to obtain a minor number.
+ *
+ * This means that we can always obtain a free stream index number, since
+ * in the worst case at most VIDEO_NUM_DEVICES - 1 slots of the
+ * video_device array are in use.
+ *
+ * Returns a free index number.
+ */
+static int get_index(struct video_device *vdev)
+{
+ /* This can be static since this function is called with the global
+ videodev_lock held. */
+ static DECLARE_BITMAP(used, VIDEO_NUM_DEVICES);
+ int i;
+
+ bitmap_zero(used, VIDEO_NUM_DEVICES);
+
+ for (i = 0; i < VIDEO_NUM_DEVICES; i++) {
+ if (video_devices[i] != NULL &&
+ video_devices[i]->v4l2_dev == vdev->v4l2_dev) {
+ __set_bit(video_devices[i]->index, used);
+ }
+ }
+
+ return find_first_zero_bit(used, VIDEO_NUM_DEVICES);
+}
+
+#define SET_VALID_IOCTL(ops, cmd, op) \
+ do { if ((ops)->op) __set_bit(_IOC_NR(cmd), valid_ioctls); } while (0)
+
+/* This determines which ioctls are actually implemented in the driver.
+ It's a one-time thing which simplifies video_ioctl2 as it can just do
+ a bit test.
+
+ Note that drivers can override this by setting bits to 1 in
+ vdev->valid_ioctls. If an ioctl is marked as 1 when this function is
+ called, then that ioctl will actually be marked as unimplemented.
+
+ It does that by first setting up the local valid_ioctls bitmap, and
+   at the end doing a:
+
+ vdev->valid_ioctls = valid_ioctls & ~(vdev->valid_ioctls)
+ */
+static void determine_valid_ioctls(struct video_device *vdev)
+{
+ const u32 vid_caps = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+ V4L2_CAP_VIDEO_OUTPUT |
+ V4L2_CAP_VIDEO_OUTPUT_MPLANE |
+ V4L2_CAP_VIDEO_M2M | V4L2_CAP_VIDEO_M2M_MPLANE;
+ const u32 meta_caps = V4L2_CAP_META_CAPTURE |
+ V4L2_CAP_META_OUTPUT;
+ DECLARE_BITMAP(valid_ioctls, BASE_VIDIOC_PRIVATE);
+ const struct v4l2_ioctl_ops *ops = vdev->ioctl_ops;
+ bool is_vid = vdev->vfl_type == VFL_TYPE_VIDEO &&
+ (vdev->device_caps & vid_caps);
+ bool is_vbi = vdev->vfl_type == VFL_TYPE_VBI;
+ bool is_radio = vdev->vfl_type == VFL_TYPE_RADIO;
+ bool is_sdr = vdev->vfl_type == VFL_TYPE_SDR;
+ bool is_tch = vdev->vfl_type == VFL_TYPE_TOUCH;
+ bool is_meta = vdev->vfl_type == VFL_TYPE_VIDEO &&
+ (vdev->device_caps & meta_caps);
+ bool is_rx = vdev->vfl_dir != VFL_DIR_TX;
+ bool is_tx = vdev->vfl_dir != VFL_DIR_RX;
+ bool is_io_mc = vdev->device_caps & V4L2_CAP_IO_MC;
+ bool has_streaming = vdev->device_caps & V4L2_CAP_STREAMING;
+
+ bitmap_zero(valid_ioctls, BASE_VIDIOC_PRIVATE);
+
+ /* vfl_type and vfl_dir independent ioctls */
+
+ SET_VALID_IOCTL(ops, VIDIOC_QUERYCAP, vidioc_querycap);
+ __set_bit(_IOC_NR(VIDIOC_G_PRIORITY), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_S_PRIORITY), valid_ioctls);
+
+	/* Note: the control handler can also be passed through the filehandle,
+	   and that can't be tested here. If the bit for these control ioctls
+	   is set, then the ioctl is valid. But if it is 0, then it can still
+	   be valid if the control handler was passed through the filehandle. */
+ if (vdev->ctrl_handler || ops->vidioc_queryctrl)
+ __set_bit(_IOC_NR(VIDIOC_QUERYCTRL), valid_ioctls);
+ if (vdev->ctrl_handler || ops->vidioc_query_ext_ctrl)
+ __set_bit(_IOC_NR(VIDIOC_QUERY_EXT_CTRL), valid_ioctls);
+ if (vdev->ctrl_handler || ops->vidioc_g_ctrl || ops->vidioc_g_ext_ctrls)
+ __set_bit(_IOC_NR(VIDIOC_G_CTRL), valid_ioctls);
+ if (vdev->ctrl_handler || ops->vidioc_s_ctrl || ops->vidioc_s_ext_ctrls)
+ __set_bit(_IOC_NR(VIDIOC_S_CTRL), valid_ioctls);
+ if (vdev->ctrl_handler || ops->vidioc_g_ext_ctrls)
+ __set_bit(_IOC_NR(VIDIOC_G_EXT_CTRLS), valid_ioctls);
+ if (vdev->ctrl_handler || ops->vidioc_s_ext_ctrls)
+ __set_bit(_IOC_NR(VIDIOC_S_EXT_CTRLS), valid_ioctls);
+ if (vdev->ctrl_handler || ops->vidioc_try_ext_ctrls)
+ __set_bit(_IOC_NR(VIDIOC_TRY_EXT_CTRLS), valid_ioctls);
+ if (vdev->ctrl_handler || ops->vidioc_querymenu)
+ __set_bit(_IOC_NR(VIDIOC_QUERYMENU), valid_ioctls);
+ if (!is_tch) {
+ SET_VALID_IOCTL(ops, VIDIOC_G_FREQUENCY, vidioc_g_frequency);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FREQUENCY, vidioc_s_frequency);
+ }
+ SET_VALID_IOCTL(ops, VIDIOC_LOG_STATUS, vidioc_log_status);
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ __set_bit(_IOC_NR(VIDIOC_DBG_G_CHIP_INFO), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_DBG_G_REGISTER), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_DBG_S_REGISTER), valid_ioctls);
+#endif
+	/*
+	 * Yes, really vidioc_subscribe_event: if a driver can subscribe to
+	 * events, then it can also dequeue them.
+	 */
+ SET_VALID_IOCTL(ops, VIDIOC_DQEVENT, vidioc_subscribe_event);
+ SET_VALID_IOCTL(ops, VIDIOC_SUBSCRIBE_EVENT, vidioc_subscribe_event);
+ SET_VALID_IOCTL(ops, VIDIOC_UNSUBSCRIBE_EVENT, vidioc_unsubscribe_event);
+ if (ops->vidioc_enum_freq_bands || ops->vidioc_g_tuner || ops->vidioc_g_modulator)
+ __set_bit(_IOC_NR(VIDIOC_ENUM_FREQ_BANDS), valid_ioctls);
+
+ if (is_vid) {
+ /* video specific ioctls */
+ if ((is_rx && (ops->vidioc_enum_fmt_vid_cap ||
+ ops->vidioc_enum_fmt_vid_overlay)) ||
+ (is_tx && ops->vidioc_enum_fmt_vid_out))
+ __set_bit(_IOC_NR(VIDIOC_ENUM_FMT), valid_ioctls);
+ if ((is_rx && (ops->vidioc_g_fmt_vid_cap ||
+ ops->vidioc_g_fmt_vid_cap_mplane ||
+ ops->vidioc_g_fmt_vid_overlay)) ||
+ (is_tx && (ops->vidioc_g_fmt_vid_out ||
+ ops->vidioc_g_fmt_vid_out_mplane ||
+ ops->vidioc_g_fmt_vid_out_overlay)))
+ __set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls);
+ if ((is_rx && (ops->vidioc_s_fmt_vid_cap ||
+ ops->vidioc_s_fmt_vid_cap_mplane ||
+ ops->vidioc_s_fmt_vid_overlay)) ||
+ (is_tx && (ops->vidioc_s_fmt_vid_out ||
+ ops->vidioc_s_fmt_vid_out_mplane ||
+ ops->vidioc_s_fmt_vid_out_overlay)))
+ __set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls);
+ if ((is_rx && (ops->vidioc_try_fmt_vid_cap ||
+ ops->vidioc_try_fmt_vid_cap_mplane ||
+ ops->vidioc_try_fmt_vid_overlay)) ||
+ (is_tx && (ops->vidioc_try_fmt_vid_out ||
+ ops->vidioc_try_fmt_vid_out_mplane ||
+ ops->vidioc_try_fmt_vid_out_overlay)))
+ __set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
+ SET_VALID_IOCTL(ops, VIDIOC_OVERLAY, vidioc_overlay);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FBUF, vidioc_g_fbuf);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FBUF, vidioc_s_fbuf);
+ SET_VALID_IOCTL(ops, VIDIOC_G_JPEGCOMP, vidioc_g_jpegcomp);
+ SET_VALID_IOCTL(ops, VIDIOC_S_JPEGCOMP, vidioc_s_jpegcomp);
+ SET_VALID_IOCTL(ops, VIDIOC_G_ENC_INDEX, vidioc_g_enc_index);
+ SET_VALID_IOCTL(ops, VIDIOC_ENCODER_CMD, vidioc_encoder_cmd);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_ENCODER_CMD, vidioc_try_encoder_cmd);
+ SET_VALID_IOCTL(ops, VIDIOC_DECODER_CMD, vidioc_decoder_cmd);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_DECODER_CMD, vidioc_try_decoder_cmd);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMESIZES, vidioc_enum_framesizes);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMEINTERVALS, vidioc_enum_frameintervals);
+ if (ops->vidioc_g_selection) {
+ __set_bit(_IOC_NR(VIDIOC_G_CROP), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_CROPCAP), valid_ioctls);
+ }
+ if (ops->vidioc_s_selection)
+ __set_bit(_IOC_NR(VIDIOC_S_CROP), valid_ioctls);
+ SET_VALID_IOCTL(ops, VIDIOC_G_SELECTION, vidioc_g_selection);
+ SET_VALID_IOCTL(ops, VIDIOC_S_SELECTION, vidioc_s_selection);
+ }
+ if (is_meta && is_rx) {
+ /* metadata capture specific ioctls */
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_meta_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_meta_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_meta_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_meta_cap);
+ } else if (is_meta && is_tx) {
+ /* metadata output specific ioctls */
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_meta_out);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_meta_out);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_meta_out);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_meta_out);
+ }
+ if (is_vbi) {
+ /* vbi specific ioctls */
+ if ((is_rx && (ops->vidioc_g_fmt_vbi_cap ||
+ ops->vidioc_g_fmt_sliced_vbi_cap)) ||
+ (is_tx && (ops->vidioc_g_fmt_vbi_out ||
+ ops->vidioc_g_fmt_sliced_vbi_out)))
+ __set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls);
+ if ((is_rx && (ops->vidioc_s_fmt_vbi_cap ||
+ ops->vidioc_s_fmt_sliced_vbi_cap)) ||
+ (is_tx && (ops->vidioc_s_fmt_vbi_out ||
+ ops->vidioc_s_fmt_sliced_vbi_out)))
+ __set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls);
+ if ((is_rx && (ops->vidioc_try_fmt_vbi_cap ||
+ ops->vidioc_try_fmt_sliced_vbi_cap)) ||
+ (is_tx && (ops->vidioc_try_fmt_vbi_out ||
+ ops->vidioc_try_fmt_sliced_vbi_out)))
+ __set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
+ SET_VALID_IOCTL(ops, VIDIOC_G_SLICED_VBI_CAP, vidioc_g_sliced_vbi_cap);
+ } else if (is_tch) {
+ /* touch specific ioctls */
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_vid_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_vid_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_vid_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_vid_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMESIZES, vidioc_enum_framesizes);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMEINTERVALS, vidioc_enum_frameintervals);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUMINPUT, vidioc_enum_input);
+ SET_VALID_IOCTL(ops, VIDIOC_G_INPUT, vidioc_g_input);
+ SET_VALID_IOCTL(ops, VIDIOC_S_INPUT, vidioc_s_input);
+ SET_VALID_IOCTL(ops, VIDIOC_G_PARM, vidioc_g_parm);
+ SET_VALID_IOCTL(ops, VIDIOC_S_PARM, vidioc_s_parm);
+ } else if (is_sdr && is_rx) {
+ /* SDR receiver specific ioctls */
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_sdr_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_sdr_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_sdr_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_sdr_cap);
+ } else if (is_sdr && is_tx) {
+ /* SDR transmitter specific ioctls */
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_sdr_out);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_sdr_out);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_sdr_out);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_sdr_out);
+ }
+
+ if (has_streaming) {
+ /* ioctls valid for streaming I/O */
+ SET_VALID_IOCTL(ops, VIDIOC_REQBUFS, vidioc_reqbufs);
+ SET_VALID_IOCTL(ops, VIDIOC_QUERYBUF, vidioc_querybuf);
+ SET_VALID_IOCTL(ops, VIDIOC_QBUF, vidioc_qbuf);
+ SET_VALID_IOCTL(ops, VIDIOC_EXPBUF, vidioc_expbuf);
+ SET_VALID_IOCTL(ops, VIDIOC_DQBUF, vidioc_dqbuf);
+ SET_VALID_IOCTL(ops, VIDIOC_CREATE_BUFS, vidioc_create_bufs);
+ SET_VALID_IOCTL(ops, VIDIOC_PREPARE_BUF, vidioc_prepare_buf);
+ SET_VALID_IOCTL(ops, VIDIOC_STREAMON, vidioc_streamon);
+ SET_VALID_IOCTL(ops, VIDIOC_STREAMOFF, vidioc_streamoff);
+ }
+
+ if (is_vid || is_vbi || is_meta) {
+ /* ioctls valid for video, vbi and metadata */
+ if (ops->vidioc_s_std)
+ __set_bit(_IOC_NR(VIDIOC_ENUMSTD), valid_ioctls);
+ SET_VALID_IOCTL(ops, VIDIOC_S_STD, vidioc_s_std);
+ SET_VALID_IOCTL(ops, VIDIOC_G_STD, vidioc_g_std);
+ if (is_rx) {
+ SET_VALID_IOCTL(ops, VIDIOC_QUERYSTD, vidioc_querystd);
+ if (is_io_mc) {
+ __set_bit(_IOC_NR(VIDIOC_ENUMINPUT), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_G_INPUT), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_S_INPUT), valid_ioctls);
+ } else {
+ SET_VALID_IOCTL(ops, VIDIOC_ENUMINPUT, vidioc_enum_input);
+ SET_VALID_IOCTL(ops, VIDIOC_G_INPUT, vidioc_g_input);
+ SET_VALID_IOCTL(ops, VIDIOC_S_INPUT, vidioc_s_input);
+ }
+ SET_VALID_IOCTL(ops, VIDIOC_ENUMAUDIO, vidioc_enumaudio);
+ SET_VALID_IOCTL(ops, VIDIOC_G_AUDIO, vidioc_g_audio);
+ SET_VALID_IOCTL(ops, VIDIOC_S_AUDIO, vidioc_s_audio);
+ SET_VALID_IOCTL(ops, VIDIOC_QUERY_DV_TIMINGS, vidioc_query_dv_timings);
+ SET_VALID_IOCTL(ops, VIDIOC_S_EDID, vidioc_s_edid);
+ }
+ if (is_tx) {
+ if (is_io_mc) {
+ __set_bit(_IOC_NR(VIDIOC_ENUMOUTPUT), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_G_OUTPUT), valid_ioctls);
+ __set_bit(_IOC_NR(VIDIOC_S_OUTPUT), valid_ioctls);
+ } else {
+ SET_VALID_IOCTL(ops, VIDIOC_ENUMOUTPUT, vidioc_enum_output);
+ SET_VALID_IOCTL(ops, VIDIOC_G_OUTPUT, vidioc_g_output);
+ SET_VALID_IOCTL(ops, VIDIOC_S_OUTPUT, vidioc_s_output);
+ }
+ SET_VALID_IOCTL(ops, VIDIOC_ENUMAUDOUT, vidioc_enumaudout);
+ SET_VALID_IOCTL(ops, VIDIOC_G_AUDOUT, vidioc_g_audout);
+ SET_VALID_IOCTL(ops, VIDIOC_S_AUDOUT, vidioc_s_audout);
+ }
+ if (ops->vidioc_g_parm || ops->vidioc_g_std)
+ __set_bit(_IOC_NR(VIDIOC_G_PARM), valid_ioctls);
+ SET_VALID_IOCTL(ops, VIDIOC_S_PARM, vidioc_s_parm);
+ SET_VALID_IOCTL(ops, VIDIOC_S_DV_TIMINGS, vidioc_s_dv_timings);
+ SET_VALID_IOCTL(ops, VIDIOC_G_DV_TIMINGS, vidioc_g_dv_timings);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_DV_TIMINGS, vidioc_enum_dv_timings);
+ SET_VALID_IOCTL(ops, VIDIOC_DV_TIMINGS_CAP, vidioc_dv_timings_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_G_EDID, vidioc_g_edid);
+ }
+ if (is_tx && (is_radio || is_sdr)) {
+ /* radio transmitter only ioctls */
+ SET_VALID_IOCTL(ops, VIDIOC_G_MODULATOR, vidioc_g_modulator);
+ SET_VALID_IOCTL(ops, VIDIOC_S_MODULATOR, vidioc_s_modulator);
+ }
+ if (is_rx && !is_tch) {
+ /* receiver only ioctls */
+ SET_VALID_IOCTL(ops, VIDIOC_G_TUNER, vidioc_g_tuner);
+ SET_VALID_IOCTL(ops, VIDIOC_S_TUNER, vidioc_s_tuner);
+ SET_VALID_IOCTL(ops, VIDIOC_S_HW_FREQ_SEEK, vidioc_s_hw_freq_seek);
+ }
+
+ bitmap_andnot(vdev->valid_ioctls, valid_ioctls, vdev->valid_ioctls,
+ BASE_VIDIOC_PRIVATE);
+}
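+
+/*
+ * Usage sketch, for illustration only: since bits that are already set in
+ * vdev->valid_ioctls are treated as "unimplemented" by the bitmap_andnot()
+ * above, a driver can mask out an ioctl before registration with the
+ * v4l2_disable_ioctl() helper:
+ *
+ *	v4l2_disable_ioctl(vdev, VIDIOC_S_STD);
+ *	video_register_device(vdev, VFL_TYPE_VIDEO, -1);
+ */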
+
+static int video_register_media_controller(struct video_device *vdev)
+{
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ u32 intf_type;
+ int ret;
+
+	/* Memory-to-memory devices are more complex and use
+	 * their own function to register their MC entities.
+	 */
+ if (!vdev->v4l2_dev->mdev || vdev->vfl_dir == VFL_DIR_M2M)
+ return 0;
+
+ vdev->entity.obj_type = MEDIA_ENTITY_TYPE_VIDEO_DEVICE;
+ vdev->entity.function = MEDIA_ENT_F_UNKNOWN;
+
+ switch (vdev->vfl_type) {
+ case VFL_TYPE_VIDEO:
+ intf_type = MEDIA_INTF_T_V4L_VIDEO;
+ vdev->entity.function = MEDIA_ENT_F_IO_V4L;
+ break;
+ case VFL_TYPE_VBI:
+ intf_type = MEDIA_INTF_T_V4L_VBI;
+ vdev->entity.function = MEDIA_ENT_F_IO_VBI;
+ break;
+ case VFL_TYPE_SDR:
+ intf_type = MEDIA_INTF_T_V4L_SWRADIO;
+ vdev->entity.function = MEDIA_ENT_F_IO_SWRADIO;
+ break;
+ case VFL_TYPE_TOUCH:
+ intf_type = MEDIA_INTF_T_V4L_TOUCH;
+ vdev->entity.function = MEDIA_ENT_F_IO_V4L;
+ break;
+ case VFL_TYPE_RADIO:
+ intf_type = MEDIA_INTF_T_V4L_RADIO;
+ /*
+ * Radio doesn't have an entity at the V4L2 side to represent
+ * radio input or output. Instead, the audio input/output goes
+ * via either physical wires or ALSA.
+ */
+ break;
+ case VFL_TYPE_SUBDEV:
+ intf_type = MEDIA_INTF_T_V4L_SUBDEV;
+ /* Entity will be created via v4l2_device_register_subdev() */
+ break;
+ default:
+ return 0;
+ }
+
+ if (vdev->entity.function != MEDIA_ENT_F_UNKNOWN) {
+ vdev->entity.name = vdev->name;
+
+ /* Needed just for backward compatibility with legacy MC API */
+ vdev->entity.info.dev.major = VIDEO_MAJOR;
+ vdev->entity.info.dev.minor = vdev->minor;
+
+ ret = media_device_register_entity(vdev->v4l2_dev->mdev,
+ &vdev->entity);
+ if (ret < 0) {
+ pr_warn("%s: media_device_register_entity failed\n",
+ __func__);
+ return ret;
+ }
+ }
+
+ vdev->intf_devnode = media_devnode_create(vdev->v4l2_dev->mdev,
+ intf_type,
+ 0, VIDEO_MAJOR,
+ vdev->minor);
+ if (!vdev->intf_devnode) {
+ media_device_unregister_entity(&vdev->entity);
+ return -ENOMEM;
+ }
+
+ if (vdev->entity.function != MEDIA_ENT_F_UNKNOWN) {
+ struct media_link *link;
+
+ link = media_create_intf_link(&vdev->entity,
+ &vdev->intf_devnode->intf,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (!link) {
+ media_devnode_remove(vdev->intf_devnode);
+ media_device_unregister_entity(&vdev->entity);
+ return -ENOMEM;
+ }
+ }
+
+ /* FIXME: how to create the other interface links? */
+
+#endif
+ return 0;
+}
+
+int __video_register_device(struct video_device *vdev,
+ enum vfl_devnode_type type,
+ int nr, int warn_if_nr_in_use,
+ struct module *owner)
+{
+ int i = 0;
+ int ret;
+ int minor_offset = 0;
+ int minor_cnt = VIDEO_NUM_DEVICES;
+ const char *name_base;
+
+ /* A minor value of -1 marks this video device as never
+ having been registered */
+ vdev->minor = -1;
+
+ /* the release callback MUST be present */
+ if (WARN_ON(!vdev->release))
+ return -EINVAL;
+ /* the v4l2_dev pointer MUST be present */
+ if (WARN_ON(!vdev->v4l2_dev))
+ return -EINVAL;
+ /* the device_caps field MUST be set for all but subdevs */
+ if (WARN_ON(type != VFL_TYPE_SUBDEV && !vdev->device_caps))
+ return -EINVAL;
+
+ /* v4l2_fh support */
+ spin_lock_init(&vdev->fh_lock);
+ INIT_LIST_HEAD(&vdev->fh_list);
+
+ /* Part 1: check device type */
+ switch (type) {
+ case VFL_TYPE_VIDEO:
+ name_base = "video";
+ break;
+ case VFL_TYPE_VBI:
+ name_base = "vbi";
+ break;
+ case VFL_TYPE_RADIO:
+ name_base = "radio";
+ break;
+ case VFL_TYPE_SUBDEV:
+ name_base = "v4l-subdev";
+ break;
+ case VFL_TYPE_SDR:
+ /* Use device name 'swradio' because 'sdr' was already taken. */
+ name_base = "swradio";
+ break;
+ case VFL_TYPE_TOUCH:
+ name_base = "v4l-touch";
+ break;
+ default:
+ pr_err("%s called with unknown type: %d\n",
+ __func__, type);
+ return -EINVAL;
+ }
+
+ vdev->vfl_type = type;
+ vdev->cdev = NULL;
+ if (vdev->dev_parent == NULL)
+ vdev->dev_parent = vdev->v4l2_dev->dev;
+ if (vdev->ctrl_handler == NULL)
+ vdev->ctrl_handler = vdev->v4l2_dev->ctrl_handler;
+ /* If the prio state pointer is NULL, then use the v4l2_device
+ prio state. */
+ if (vdev->prio == NULL)
+ vdev->prio = &vdev->v4l2_dev->prio;
+
+ /* Part 2: find a free minor, device node number and device index. */
+#ifdef CONFIG_VIDEO_FIXED_MINOR_RANGES
+ /* Keep the ranges for the first four types for historical
+ * reasons.
+ * Newer devices (not yet in place) should use the range
+ * of 128-191 and just pick the first free minor there
+ * (new style). */
+ switch (type) {
+ case VFL_TYPE_VIDEO:
+ minor_offset = 0;
+ minor_cnt = 64;
+ break;
+ case VFL_TYPE_RADIO:
+ minor_offset = 64;
+ minor_cnt = 64;
+ break;
+ case VFL_TYPE_VBI:
+ minor_offset = 224;
+ minor_cnt = 32;
+ break;
+ default:
+ minor_offset = 128;
+ minor_cnt = 64;
+ break;
+ }
+#endif
+
+ /* Pick a device node number */
+ mutex_lock(&videodev_lock);
+ nr = devnode_find(vdev, nr == -1 ? 0 : nr, minor_cnt);
+ if (nr == minor_cnt)
+ nr = devnode_find(vdev, 0, minor_cnt);
+ if (nr == minor_cnt) {
+ pr_err("could not get a free device node number\n");
+ mutex_unlock(&videodev_lock);
+ return -ENFILE;
+ }
+#ifdef CONFIG_VIDEO_FIXED_MINOR_RANGES
+ /* 1-on-1 mapping of device node number to minor number */
+ i = nr;
+#else
+ /* The device node number and minor numbers are independent, so
+ we just find the first free minor number. */
+ for (i = 0; i < VIDEO_NUM_DEVICES; i++)
+ if (video_devices[i] == NULL)
+ break;
+ if (i == VIDEO_NUM_DEVICES) {
+ mutex_unlock(&videodev_lock);
+ pr_err("could not get a free minor\n");
+ return -ENFILE;
+ }
+#endif
+ vdev->minor = i + minor_offset;
+ vdev->num = nr;
+
+ /* Should not happen since we thought this minor was free */
+ if (WARN_ON(video_devices[vdev->minor])) {
+ mutex_unlock(&videodev_lock);
+ pr_err("video_device not empty!\n");
+ return -ENFILE;
+ }
+ devnode_set(vdev);
+ vdev->index = get_index(vdev);
+ video_devices[vdev->minor] = vdev;
+ mutex_unlock(&videodev_lock);
+
+ if (vdev->ioctl_ops)
+ determine_valid_ioctls(vdev);
+
+ /* Part 3: Initialize the character device */
+ vdev->cdev = cdev_alloc();
+ if (vdev->cdev == NULL) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ vdev->cdev->ops = &v4l2_fops;
+ vdev->cdev->owner = owner;
+ ret = cdev_add(vdev->cdev, MKDEV(VIDEO_MAJOR, vdev->minor), 1);
+ if (ret < 0) {
+ pr_err("%s: cdev_add failed\n", __func__);
+ kfree(vdev->cdev);
+ vdev->cdev = NULL;
+ goto cleanup;
+ }
+
+ /* Part 4: register the device with sysfs */
+ vdev->dev.class = &video_class;
+ vdev->dev.devt = MKDEV(VIDEO_MAJOR, vdev->minor);
+ vdev->dev.parent = vdev->dev_parent;
+ dev_set_name(&vdev->dev, "%s%d", name_base, vdev->num);
+ ret = device_register(&vdev->dev);
+ if (ret < 0) {
+ pr_err("%s: device_register failed\n", __func__);
+ goto cleanup;
+ }
+ /* Register the release callback that will be called when the last
+ reference to the device goes away. */
+ vdev->dev.release = v4l2_device_release;
+
+ if (nr != -1 && nr != vdev->num && warn_if_nr_in_use)
+ pr_warn("%s: requested %s%d, got %s\n", __func__,
+ name_base, nr, video_device_node_name(vdev));
+
+ /* Increase v4l2_device refcount */
+ v4l2_device_get(vdev->v4l2_dev);
+
+ /* Part 5: Register the entity. */
+ ret = video_register_media_controller(vdev);
+
+ /* Part 6: Activate this minor. The char device can now be used. */
+ set_bit(V4L2_FL_REGISTERED, &vdev->flags);
+
+ return 0;
+
+cleanup:
+ mutex_lock(&videodev_lock);
+ if (vdev->cdev)
+ cdev_del(vdev->cdev);
+ video_devices[vdev->minor] = NULL;
+ devnode_clear(vdev);
+ mutex_unlock(&videodev_lock);
+ /* Mark this video device as never having been registered. */
+ vdev->minor = -1;
+ return ret;
+}
+EXPORT_SYMBOL(__video_register_device);
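+
+/*
+ * Editorial illustration, not part of this commit: a minimal sketch of
+ * how a driver typically uses the registration API above, via the
+ * video_register_device() wrapper from media/v4l2-dev.h. All "my_"
+ * names are hypothetical driver code.
+ */
+#if 0
+static void my_vdev_release(struct video_device *vdev)
+{
+ /* free driver state tied to the video device lifetime */
+}
+
+static int my_register(struct my_driver *drv)
+{
+ struct video_device *vdev = &drv->vdev;
+
+ strscpy(vdev->name, "my-capture", sizeof(vdev->name));
+ vdev->v4l2_dev = &drv->v4l2_dev; /* MUST be set */
+ vdev->release = my_vdev_release; /* MUST be set */
+ vdev->fops = &my_v4l2_fops;
+ vdev->ioctl_ops = &my_ioctl_ops;
+ vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+
+ /* nr == -1: pick the first free /dev/videoX node */
+ return video_register_device(vdev, VFL_TYPE_VIDEO, -1);
+}
+#endif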
+
+/**
+ * video_unregister_device - unregister a video4linux device
+ * @vdev: the device to unregister
+ *
+ * This unregisters the passed device. Future open calls will
+ * be met with errors.
+ */
+void video_unregister_device(struct video_device *vdev)
+{
+ /* Check if vdev was ever registered at all */
+ if (!vdev || !video_is_registered(vdev))
+ return;
+
+ mutex_lock(&videodev_lock);
+ /* This must be in a critical section to prevent a race with v4l2_open.
+ * Once this bit has been cleared video_get may never be called again.
+ */
+ clear_bit(V4L2_FL_REGISTERED, &vdev->flags);
+ mutex_unlock(&videodev_lock);
+ if (test_bit(V4L2_FL_USES_V4L2_FH, &vdev->flags))
+ v4l2_event_wake_all(vdev);
+ device_unregister(&vdev->dev);
+}
+EXPORT_SYMBOL(video_unregister_device);
+
+#if defined(CONFIG_MEDIA_CONTROLLER)
+
+__must_check int video_device_pipeline_start(struct video_device *vdev,
+ struct media_pipeline *pipe)
+{
+ struct media_entity *entity = &vdev->entity;
+
+ if (entity->num_pads != 1)
+ return -ENODEV;
+
+ return media_pipeline_start(&entity->pads[0], pipe);
+}
+EXPORT_SYMBOL_GPL(video_device_pipeline_start);
+
+__must_check int __video_device_pipeline_start(struct video_device *vdev,
+ struct media_pipeline *pipe)
+{
+ struct media_entity *entity = &vdev->entity;
+
+ if (entity->num_pads != 1)
+ return -ENODEV;
+
+ return __media_pipeline_start(&entity->pads[0], pipe);
+}
+EXPORT_SYMBOL_GPL(__video_device_pipeline_start);
+
+void video_device_pipeline_stop(struct video_device *vdev)
+{
+ struct media_entity *entity = &vdev->entity;
+
+ if (WARN_ON(entity->num_pads != 1))
+ return;
+
+ media_pipeline_stop(&entity->pads[0]);
+}
+EXPORT_SYMBOL_GPL(video_device_pipeline_stop);
+
+void __video_device_pipeline_stop(struct video_device *vdev)
+{
+ struct media_entity *entity = &vdev->entity;
+
+ if (WARN_ON(entity->num_pads != 1))
+ return;
+
+ __media_pipeline_stop(&entity->pads[0]);
+}
+EXPORT_SYMBOL_GPL(__video_device_pipeline_stop);
+
+__must_check int video_device_pipeline_alloc_start(struct video_device *vdev)
+{
+ struct media_entity *entity = &vdev->entity;
+
+ if (entity->num_pads != 1)
+ return -ENODEV;
+
+ return media_pipeline_alloc_start(&entity->pads[0]);
+}
+EXPORT_SYMBOL_GPL(video_device_pipeline_alloc_start);
+
+struct media_pipeline *video_device_pipeline(struct video_device *vdev)
+{
+ struct media_entity *entity = &vdev->entity;
+
+ if (WARN_ON(entity->num_pads != 1))
+ return NULL;
+
+ return media_pad_pipeline(&entity->pads[0]);
+}
+EXPORT_SYMBOL_GPL(video_device_pipeline);
+
+#endif /* CONFIG_MEDIA_CONTROLLER */
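+
+/*
+ * Editorial illustration, not part of this commit: the helpers above
+ * wrap media_pipeline_*() for the common single-pad video device case.
+ * A hypothetical capture driver would bracket streaming with them:
+ */
+#if 0
+static int my_start_streaming(struct my_driver *drv)
+{
+ int ret;
+
+ ret = video_device_pipeline_alloc_start(&drv->vdev);
+ if (ret)
+ return ret;
+ /* ... program the hardware and start DMA ... */
+ return 0;
+}
+
+static void my_stop_streaming(struct my_driver *drv)
+{
+ /* ... stop DMA ... */
+ video_device_pipeline_stop(&drv->vdev);
+}
+#endif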
+
+/*
+ * Initialise video for linux
+ */
+static int __init videodev_init(void)
+{
+ dev_t dev = MKDEV(VIDEO_MAJOR, 0);
+ int ret;
+
+ pr_info("Linux video capture interface: v2.00\n");
+ ret = register_chrdev_region(dev, VIDEO_NUM_DEVICES, VIDEO_NAME);
+ if (ret < 0) {
+ pr_warn("videodev: unable to get major %d\n",
+ VIDEO_MAJOR);
+ return ret;
+ }
+
+ ret = class_register(&video_class);
+ if (ret < 0) {
+ unregister_chrdev_region(dev, VIDEO_NUM_DEVICES);
+ pr_warn("video_dev: class_register failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void __exit videodev_exit(void)
+{
+ dev_t dev = MKDEV(VIDEO_MAJOR, 0);
+
+ class_unregister(&video_class);
+ unregister_chrdev_region(dev, VIDEO_NUM_DEVICES);
+}
+
+subsys_initcall(videodev_init);
+module_exit(videodev_exit);
+
+MODULE_AUTHOR("Alan Cox, Mauro Carvalho Chehab <mchehab@kernel.org>, Bill Dirks, Justin Schoeman, Gerd Knorr");
+MODULE_DESCRIPTION("Video4Linux2 core driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CHARDEV_MAJOR(VIDEO_MAJOR);
diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
new file mode 100644
index 0000000000..d2e58ae91f
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-device.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ V4L2 device support.
+
+ Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl>
+
+ */
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+
+int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
+{
+ if (v4l2_dev == NULL)
+ return -EINVAL;
+
+ INIT_LIST_HEAD(&v4l2_dev->subdevs);
+ spin_lock_init(&v4l2_dev->lock);
+ v4l2_prio_init(&v4l2_dev->prio);
+ kref_init(&v4l2_dev->ref);
+ get_device(dev);
+ v4l2_dev->dev = dev;
+ if (dev == NULL) {
+ /* If dev == NULL, then name must be filled in by the caller */
+ if (WARN_ON(!v4l2_dev->name[0]))
+ return -EINVAL;
+ return 0;
+ }
+
+ /* Set name to driver name + device name if it is empty. */
+ if (!v4l2_dev->name[0])
+ snprintf(v4l2_dev->name, sizeof(v4l2_dev->name), "%s %s",
+ dev->driver->name, dev_name(dev));
+ if (!dev_get_drvdata(dev))
+ dev_set_drvdata(dev, v4l2_dev);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_device_register);
+
+static void v4l2_device_release(struct kref *ref)
+{
+ struct v4l2_device *v4l2_dev =
+ container_of(ref, struct v4l2_device, ref);
+
+ if (v4l2_dev->release)
+ v4l2_dev->release(v4l2_dev);
+}
+
+int v4l2_device_put(struct v4l2_device *v4l2_dev)
+{
+ return kref_put(&v4l2_dev->ref, v4l2_device_release);
+}
+EXPORT_SYMBOL_GPL(v4l2_device_put);
+
+int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
+ atomic_t *instance)
+{
+ int num = atomic_inc_return(instance) - 1;
+ int len = strlen(basename);
+
+ if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
+ snprintf(v4l2_dev->name, sizeof(v4l2_dev->name),
+ "%s-%d", basename, num);
+ else
+ snprintf(v4l2_dev->name, sizeof(v4l2_dev->name),
+ "%s%d", basename, num);
+ return num;
+}
+EXPORT_SYMBOL_GPL(v4l2_device_set_name);
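+
+/*
+ * Editorial illustration, not part of this commit: v4l2_device_set_name()
+ * appends a per-driver instance number, adding a '-' separator only when
+ * the basename already ends in a digit. "mydrv" is hypothetical.
+ */
+#if 0
+static atomic_t my_instance = ATOMIC_INIT(0);
+
+/* the first call yields "mydrv0", the second "mydrv1", ... */
+v4l2_device_set_name(&drv->v4l2_dev, "mydrv", &my_instance);
+/* a basename like "drv2" yields "drv2-0", "drv2-1", ... instead */
+#endif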
+
+void v4l2_device_disconnect(struct v4l2_device *v4l2_dev)
+{
+ if (v4l2_dev->dev == NULL)
+ return;
+
+ if (dev_get_drvdata(v4l2_dev->dev) == v4l2_dev)
+ dev_set_drvdata(v4l2_dev->dev, NULL);
+ put_device(v4l2_dev->dev);
+ v4l2_dev->dev = NULL;
+}
+EXPORT_SYMBOL_GPL(v4l2_device_disconnect);
+
+void v4l2_device_unregister(struct v4l2_device *v4l2_dev)
+{
+ struct v4l2_subdev *sd, *next;
+
+ /*
+ * Just return if v4l2_dev is NULL or if it was already
+ * unregistered.
+ */
+ if (v4l2_dev == NULL || !v4l2_dev->name[0])
+ return;
+ v4l2_device_disconnect(v4l2_dev);
+
+ /* Unregister subdevs */
+ list_for_each_entry_safe(sd, next, &v4l2_dev->subdevs, list) {
+ v4l2_device_unregister_subdev(sd);
+ if (sd->flags & V4L2_SUBDEV_FL_IS_I2C)
+ v4l2_i2c_subdev_unregister(sd);
+ else if (sd->flags & V4L2_SUBDEV_FL_IS_SPI)
+ v4l2_spi_subdev_unregister(sd);
+ }
+ /* Mark as unregistered, thus preventing duplicate unregistrations */
+ v4l2_dev->name[0] = '\0';
+}
+EXPORT_SYMBOL_GPL(v4l2_device_unregister);
+
+int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
+ struct v4l2_subdev *sd)
+{
+ int err;
+
+ /* Check for valid input */
+ if (!v4l2_dev || !sd || sd->v4l2_dev || !sd->name[0])
+ return -EINVAL;
+
+ /*
+ * The reason to acquire the module here is to avoid unloading
+ * a sub-device module while it is still registered to a media
+ * device. To make it possible to unload modules for media
+ * devices that also register sub-devices, do not
+ * try_module_get() such sub-device owners.
+ */
+ sd->owner_v4l2_dev = v4l2_dev->dev && v4l2_dev->dev->driver &&
+ sd->owner == v4l2_dev->dev->driver->owner;
+
+ if (!sd->owner_v4l2_dev && !try_module_get(sd->owner))
+ return -ENODEV;
+
+ sd->v4l2_dev = v4l2_dev;
+ /* This just returns 0 if either of the two args is NULL */
+ err = v4l2_ctrl_add_handler(v4l2_dev->ctrl_handler, sd->ctrl_handler,
+ NULL, true);
+ if (err)
+ goto error_module;
+
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ /* Register the entity. */
+ if (v4l2_dev->mdev) {
+ err = media_device_register_entity(v4l2_dev->mdev, &sd->entity);
+ if (err < 0)
+ goto error_module;
+ }
+#endif
+
+ if (sd->internal_ops && sd->internal_ops->registered) {
+ err = sd->internal_ops->registered(sd);
+ if (err)
+ goto error_unregister;
+ }
+
+ spin_lock(&v4l2_dev->lock);
+ list_add_tail(&sd->list, &v4l2_dev->subdevs);
+ spin_unlock(&v4l2_dev->lock);
+
+ return 0;
+
+error_unregister:
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ media_device_unregister_entity(&sd->entity);
+#endif
+error_module:
+ if (!sd->owner_v4l2_dev)
+ module_put(sd->owner);
+ sd->v4l2_dev = NULL;
+ return err;
+}
+EXPORT_SYMBOL_GPL(v4l2_device_register_subdev);
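+
+/*
+ * Editorial illustration, not part of this commit: a bridge driver
+ * registers an initialised sub-device with its v4l2_device. The "my_"
+ * names are hypothetical.
+ */
+#if 0
+ v4l2_subdev_init(sd, &my_subdev_ops);
+ snprintf(sd->name, sizeof(sd->name), "my-sensor %d", id);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ ret = v4l2_device_register_subdev(&drv->v4l2_dev, sd);
+ if (ret)
+ return ret; /* e.g. -EINVAL if sd->name was left empty */
+#endif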
+
+static void v4l2_subdev_release(struct v4l2_subdev *sd)
+{
+ struct module *owner = !sd->owner_v4l2_dev ? sd->owner : NULL;
+
+ if (sd->internal_ops && sd->internal_ops->release)
+ sd->internal_ops->release(sd);
+ sd->devnode = NULL;
+ module_put(owner);
+}
+
+static void v4l2_device_release_subdev_node(struct video_device *vdev)
+{
+ v4l2_subdev_release(video_get_drvdata(vdev));
+ kfree(vdev);
+}
+
+int __v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev,
+ bool read_only)
+{
+ struct video_device *vdev;
+ struct v4l2_subdev *sd;
+ int err;
+
+ /*
+ * Register a device node for every subdev marked with the
+ * V4L2_SUBDEV_FL_HAS_DEVNODE flag.
+ */
+ list_for_each_entry(sd, &v4l2_dev->subdevs, list) {
+ if (!(sd->flags & V4L2_SUBDEV_FL_HAS_DEVNODE))
+ continue;
+
+ if (sd->devnode)
+ continue;
+
+ vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
+ if (!vdev) {
+ err = -ENOMEM;
+ goto clean_up;
+ }
+
+ video_set_drvdata(vdev, sd);
+ strscpy(vdev->name, sd->name, sizeof(vdev->name));
+ vdev->dev_parent = sd->dev;
+ vdev->v4l2_dev = v4l2_dev;
+ vdev->fops = &v4l2_subdev_fops;
+ vdev->release = v4l2_device_release_subdev_node;
+ vdev->ctrl_handler = sd->ctrl_handler;
+ if (read_only)
+ set_bit(V4L2_FL_SUBDEV_RO_DEVNODE, &vdev->flags);
+ sd->devnode = vdev;
+ err = __video_register_device(vdev, VFL_TYPE_SUBDEV, -1, 1,
+ sd->owner);
+ if (err < 0) {
+ sd->devnode = NULL;
+ kfree(vdev);
+ goto clean_up;
+ }
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ sd->entity.info.dev.major = VIDEO_MAJOR;
+ sd->entity.info.dev.minor = vdev->minor;
+
+ /* Interface is created by __video_register_device() */
+ if (vdev->v4l2_dev->mdev) {
+ struct media_link *link;
+
+ link = media_create_intf_link(&sd->entity,
+ &vdev->intf_devnode->intf,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (!link) {
+ err = -ENOMEM;
+ goto clean_up;
+ }
+ }
+#endif
+ }
+ return 0;
+
+clean_up:
+ list_for_each_entry(sd, &v4l2_dev->subdevs, list) {
+ if (!sd->devnode)
+ break;
+ video_unregister_device(sd->devnode);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(__v4l2_device_register_subdev_nodes);
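+
+/*
+ * Editorial illustration, not part of this commit: once all sub-devices
+ * are registered, bridge drivers call the wrappers from
+ * media/v4l2-device.h to create a /dev/v4l-subdevX node for every
+ * sub-device that set V4L2_SUBDEV_FL_HAS_DEVNODE.
+ */
+#if 0
+ /* read-write device nodes */
+ ret = v4l2_device_register_subdev_nodes(&drv->v4l2_dev);
+ /* or read-only nodes, if userspace must not reconfigure the subdevs */
+ ret = v4l2_device_register_ro_subdev_nodes(&drv->v4l2_dev);
+#endif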
+
+void v4l2_device_unregister_subdev(struct v4l2_subdev *sd)
+{
+ struct v4l2_device *v4l2_dev;
+
+ /* return if it isn't registered */
+ if (sd == NULL || sd->v4l2_dev == NULL)
+ return;
+
+ v4l2_dev = sd->v4l2_dev;
+
+ spin_lock(&v4l2_dev->lock);
+ list_del(&sd->list);
+ spin_unlock(&v4l2_dev->lock);
+
+ if (sd->internal_ops && sd->internal_ops->unregistered)
+ sd->internal_ops->unregistered(sd);
+ sd->v4l2_dev = NULL;
+
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ if (v4l2_dev->mdev) {
+ /*
+ * No need to explicitly remove links, as both pads and
+ * links are removed by the function below, in the right order
+ */
+ media_device_unregister_entity(&sd->entity);
+ }
+#endif
+ if (sd->devnode)
+ video_unregister_device(sd->devnode);
+ else
+ v4l2_subdev_release(sd);
+}
+EXPORT_SYMBOL_GPL(v4l2_device_unregister_subdev);
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
new file mode 100644
index 0000000000..942d0005c5
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -0,0 +1,1156 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * v4l2-dv-timings - dv-timings helper functions
+ *
+ * Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/rational.h>
+#include <linux/videodev2.h>
+#include <linux/v4l2-dv-timings.h>
+#include <media/v4l2-dv-timings.h>
+#include <linux/math64.h>
+#include <linux/hdmi.h>
+#include <media/cec.h>
+
+MODULE_AUTHOR("Hans Verkuil");
+MODULE_DESCRIPTION("V4L2 DV Timings Helper Functions");
+MODULE_LICENSE("GPL");
+
+const struct v4l2_dv_timings v4l2_dv_timings_presets[] = {
+ V4L2_DV_BT_CEA_640X480P59_94,
+ V4L2_DV_BT_CEA_720X480I59_94,
+ V4L2_DV_BT_CEA_720X480P59_94,
+ V4L2_DV_BT_CEA_720X576I50,
+ V4L2_DV_BT_CEA_720X576P50,
+ V4L2_DV_BT_CEA_1280X720P24,
+ V4L2_DV_BT_CEA_1280X720P25,
+ V4L2_DV_BT_CEA_1280X720P30,
+ V4L2_DV_BT_CEA_1280X720P50,
+ V4L2_DV_BT_CEA_1280X720P60,
+ V4L2_DV_BT_CEA_1920X1080P24,
+ V4L2_DV_BT_CEA_1920X1080P25,
+ V4L2_DV_BT_CEA_1920X1080P30,
+ V4L2_DV_BT_CEA_1920X1080I50,
+ V4L2_DV_BT_CEA_1920X1080P50,
+ V4L2_DV_BT_CEA_1920X1080I60,
+ V4L2_DV_BT_CEA_1920X1080P60,
+ V4L2_DV_BT_DMT_640X350P85,
+ V4L2_DV_BT_DMT_640X400P85,
+ V4L2_DV_BT_DMT_720X400P85,
+ V4L2_DV_BT_DMT_640X480P72,
+ V4L2_DV_BT_DMT_640X480P75,
+ V4L2_DV_BT_DMT_640X480P85,
+ V4L2_DV_BT_DMT_800X600P56,
+ V4L2_DV_BT_DMT_800X600P60,
+ V4L2_DV_BT_DMT_800X600P72,
+ V4L2_DV_BT_DMT_800X600P75,
+ V4L2_DV_BT_DMT_800X600P85,
+ V4L2_DV_BT_DMT_800X600P120_RB,
+ V4L2_DV_BT_DMT_848X480P60,
+ V4L2_DV_BT_DMT_1024X768I43,
+ V4L2_DV_BT_DMT_1024X768P60,
+ V4L2_DV_BT_DMT_1024X768P70,
+ V4L2_DV_BT_DMT_1024X768P75,
+ V4L2_DV_BT_DMT_1024X768P85,
+ V4L2_DV_BT_DMT_1024X768P120_RB,
+ V4L2_DV_BT_DMT_1152X864P75,
+ V4L2_DV_BT_DMT_1280X768P60_RB,
+ V4L2_DV_BT_DMT_1280X768P60,
+ V4L2_DV_BT_DMT_1280X768P75,
+ V4L2_DV_BT_DMT_1280X768P85,
+ V4L2_DV_BT_DMT_1280X768P120_RB,
+ V4L2_DV_BT_DMT_1280X800P60_RB,
+ V4L2_DV_BT_DMT_1280X800P60,
+ V4L2_DV_BT_DMT_1280X800P75,
+ V4L2_DV_BT_DMT_1280X800P85,
+ V4L2_DV_BT_DMT_1280X800P120_RB,
+ V4L2_DV_BT_DMT_1280X960P60,
+ V4L2_DV_BT_DMT_1280X960P85,
+ V4L2_DV_BT_DMT_1280X960P120_RB,
+ V4L2_DV_BT_DMT_1280X1024P60,
+ V4L2_DV_BT_DMT_1280X1024P75,
+ V4L2_DV_BT_DMT_1280X1024P85,
+ V4L2_DV_BT_DMT_1280X1024P120_RB,
+ V4L2_DV_BT_DMT_1360X768P60,
+ V4L2_DV_BT_DMT_1360X768P120_RB,
+ V4L2_DV_BT_DMT_1366X768P60,
+ V4L2_DV_BT_DMT_1366X768P60_RB,
+ V4L2_DV_BT_DMT_1400X1050P60_RB,
+ V4L2_DV_BT_DMT_1400X1050P60,
+ V4L2_DV_BT_DMT_1400X1050P75,
+ V4L2_DV_BT_DMT_1400X1050P85,
+ V4L2_DV_BT_DMT_1400X1050P120_RB,
+ V4L2_DV_BT_DMT_1440X900P60_RB,
+ V4L2_DV_BT_DMT_1440X900P60,
+ V4L2_DV_BT_DMT_1440X900P75,
+ V4L2_DV_BT_DMT_1440X900P85,
+ V4L2_DV_BT_DMT_1440X900P120_RB,
+ V4L2_DV_BT_DMT_1600X900P60_RB,
+ V4L2_DV_BT_DMT_1600X1200P60,
+ V4L2_DV_BT_DMT_1600X1200P65,
+ V4L2_DV_BT_DMT_1600X1200P70,
+ V4L2_DV_BT_DMT_1600X1200P75,
+ V4L2_DV_BT_DMT_1600X1200P85,
+ V4L2_DV_BT_DMT_1600X1200P120_RB,
+ V4L2_DV_BT_DMT_1680X1050P60_RB,
+ V4L2_DV_BT_DMT_1680X1050P60,
+ V4L2_DV_BT_DMT_1680X1050P75,
+ V4L2_DV_BT_DMT_1680X1050P85,
+ V4L2_DV_BT_DMT_1680X1050P120_RB,
+ V4L2_DV_BT_DMT_1792X1344P60,
+ V4L2_DV_BT_DMT_1792X1344P75,
+ V4L2_DV_BT_DMT_1792X1344P120_RB,
+ V4L2_DV_BT_DMT_1856X1392P60,
+ V4L2_DV_BT_DMT_1856X1392P75,
+ V4L2_DV_BT_DMT_1856X1392P120_RB,
+ V4L2_DV_BT_DMT_1920X1200P60_RB,
+ V4L2_DV_BT_DMT_1920X1200P60,
+ V4L2_DV_BT_DMT_1920X1200P75,
+ V4L2_DV_BT_DMT_1920X1200P85,
+ V4L2_DV_BT_DMT_1920X1200P120_RB,
+ V4L2_DV_BT_DMT_1920X1440P60,
+ V4L2_DV_BT_DMT_1920X1440P75,
+ V4L2_DV_BT_DMT_1920X1440P120_RB,
+ V4L2_DV_BT_DMT_2048X1152P60_RB,
+ V4L2_DV_BT_DMT_2560X1600P60_RB,
+ V4L2_DV_BT_DMT_2560X1600P60,
+ V4L2_DV_BT_DMT_2560X1600P75,
+ V4L2_DV_BT_DMT_2560X1600P85,
+ V4L2_DV_BT_DMT_2560X1600P120_RB,
+ V4L2_DV_BT_CEA_3840X2160P24,
+ V4L2_DV_BT_CEA_3840X2160P25,
+ V4L2_DV_BT_CEA_3840X2160P30,
+ V4L2_DV_BT_CEA_3840X2160P50,
+ V4L2_DV_BT_CEA_3840X2160P60,
+ V4L2_DV_BT_CEA_4096X2160P24,
+ V4L2_DV_BT_CEA_4096X2160P25,
+ V4L2_DV_BT_CEA_4096X2160P30,
+ V4L2_DV_BT_CEA_4096X2160P50,
+ V4L2_DV_BT_DMT_4096X2160P59_94_RB,
+ V4L2_DV_BT_CEA_4096X2160P60,
+ { }
+};
+EXPORT_SYMBOL_GPL(v4l2_dv_timings_presets);
+
+bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t,
+ const struct v4l2_dv_timings_cap *dvcap,
+ v4l2_check_dv_timings_fnc fnc,
+ void *fnc_handle)
+{
+ const struct v4l2_bt_timings *bt = &t->bt;
+ const struct v4l2_bt_timings_cap *cap = &dvcap->bt;
+ u32 caps = cap->capabilities;
+ const u32 max_vert = 10240;
+ u32 max_hor = 3 * bt->width;
+
+ if (t->type != V4L2_DV_BT_656_1120)
+ return false;
+ if (t->type != dvcap->type ||
+ bt->height < cap->min_height ||
+ bt->height > cap->max_height ||
+ bt->width < cap->min_width ||
+ bt->width > cap->max_width ||
+ bt->pixelclock < cap->min_pixelclock ||
+ bt->pixelclock > cap->max_pixelclock ||
+ (!(caps & V4L2_DV_BT_CAP_CUSTOM) &&
+ cap->standards && bt->standards &&
+ !(bt->standards & cap->standards)) ||
+ (bt->interlaced && !(caps & V4L2_DV_BT_CAP_INTERLACED)) ||
+ (!bt->interlaced && !(caps & V4L2_DV_BT_CAP_PROGRESSIVE)))
+ return false;
+
+ /* sanity checks for the blanking timings */
+ if (!bt->interlaced &&
+ (bt->il_vbackporch || bt->il_vsync || bt->il_vfrontporch))
+ return false;
+ /*
+ * Some video receivers cannot properly separate the frontporch,
+ * backporch and sync values, and instead they only have the total
+ * blanking. That can be assigned to any of these three fields.
+ * So just check that none of these are way out of range.
+ */
+ if (bt->hfrontporch > max_hor ||
+ bt->hsync > max_hor || bt->hbackporch > max_hor)
+ return false;
+ if (bt->vfrontporch > max_vert ||
+ bt->vsync > max_vert || bt->vbackporch > max_vert)
+ return false;
+ if (bt->interlaced && (bt->il_vfrontporch > max_vert ||
+ bt->il_vsync > max_vert || bt->il_vbackporch > max_vert))
+ return false;
+ return fnc == NULL || fnc(t, fnc_handle);
+}
+EXPORT_SYMBOL_GPL(v4l2_valid_dv_timings);
+
+int v4l2_enum_dv_timings_cap(struct v4l2_enum_dv_timings *t,
+ const struct v4l2_dv_timings_cap *cap,
+ v4l2_check_dv_timings_fnc fnc,
+ void *fnc_handle)
+{
+ u32 i, idx;
+
+ memset(t->reserved, 0, sizeof(t->reserved));
+ for (i = idx = 0; v4l2_dv_timings_presets[i].bt.width; i++) {
+ if (v4l2_valid_dv_timings(v4l2_dv_timings_presets + i, cap,
+ fnc, fnc_handle) &&
+ idx++ == t->index) {
+ t->timings = v4l2_dv_timings_presets[i];
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(v4l2_enum_dv_timings_cap);
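+
+/*
+ * Editorial illustration, not part of this commit: a driver exposes the
+ * preset table through VIDIOC_ENUM_DV_TIMINGS by filtering it against
+ * its own capabilities. "my_timings_cap" and the limits shown are
+ * hypothetical.
+ */
+#if 0
+static const struct v4l2_dv_timings_cap my_timings_cap = {
+ .type = V4L2_DV_BT_656_1120,
+ .bt = {
+ .min_width = 640,
+ .max_width = 1920,
+ .min_height = 480,
+ .max_height = 1080,
+ .min_pixelclock = 25000000,
+ .max_pixelclock = 148500000,
+ .standards = V4L2_DV_BT_STD_CEA861,
+ .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE |
+ V4L2_DV_BT_CAP_INTERLACED,
+ },
+};
+
+static int my_enum_dv_timings(struct file *file, void *fh,
+ struct v4l2_enum_dv_timings *timings)
+{
+ return v4l2_enum_dv_timings_cap(timings, &my_timings_cap, NULL, NULL);
+}
+#endif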
+
+bool v4l2_find_dv_timings_cap(struct v4l2_dv_timings *t,
+ const struct v4l2_dv_timings_cap *cap,
+ unsigned pclock_delta,
+ v4l2_check_dv_timings_fnc fnc,
+ void *fnc_handle)
+{
+ int i;
+
+ if (!v4l2_valid_dv_timings(t, cap, fnc, fnc_handle))
+ return false;
+
+ for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) {
+ if (v4l2_valid_dv_timings(v4l2_dv_timings_presets + i, cap,
+ fnc, fnc_handle) &&
+ v4l2_match_dv_timings(t, v4l2_dv_timings_presets + i,
+ pclock_delta, false)) {
+ u32 flags = t->bt.flags & V4L2_DV_FL_REDUCED_FPS;
+
+ *t = v4l2_dv_timings_presets[i];
+ if (can_reduce_fps(&t->bt))
+ t->bt.flags |= flags;
+
+ return true;
+ }
+ }
+ return false;
+}
+EXPORT_SYMBOL_GPL(v4l2_find_dv_timings_cap);
+
+bool v4l2_find_dv_timings_cea861_vic(struct v4l2_dv_timings *t, u8 vic)
+{
+ unsigned int i;
+
+ for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) {
+ const struct v4l2_bt_timings *bt =
+ &v4l2_dv_timings_presets[i].bt;
+
+ if ((bt->flags & V4L2_DV_FL_HAS_CEA861_VIC) &&
+ bt->cea861_vic == vic) {
+ *t = v4l2_dv_timings_presets[i];
+ return true;
+ }
+ }
+ return false;
+}
+EXPORT_SYMBOL_GPL(v4l2_find_dv_timings_cea861_vic);
+
+/**
+ * v4l2_match_dv_timings - check if two timings match
+ * @t1: compare this v4l2_dv_timings struct...
+ * @t2: with this struct.
+ * @pclock_delta: the allowed pixelclock deviation.
+ * @match_reduced_fps: if true, then fail if V4L2_DV_FL_REDUCED_FPS does not
+ * match.
+ *
+ * Compare t1 with t2 with a given margin of error for the pixelclock.
+ */
+bool v4l2_match_dv_timings(const struct v4l2_dv_timings *t1,
+ const struct v4l2_dv_timings *t2,
+ unsigned pclock_delta, bool match_reduced_fps)
+{
+ if (t1->type != t2->type || t1->type != V4L2_DV_BT_656_1120)
+ return false;
+ if (t1->bt.width == t2->bt.width &&
+ t1->bt.height == t2->bt.height &&
+ t1->bt.interlaced == t2->bt.interlaced &&
+ t1->bt.polarities == t2->bt.polarities &&
+ t1->bt.pixelclock >= t2->bt.pixelclock - pclock_delta &&
+ t1->bt.pixelclock <= t2->bt.pixelclock + pclock_delta &&
+ t1->bt.hfrontporch == t2->bt.hfrontporch &&
+ t1->bt.hsync == t2->bt.hsync &&
+ t1->bt.hbackporch == t2->bt.hbackporch &&
+ t1->bt.vfrontporch == t2->bt.vfrontporch &&
+ t1->bt.vsync == t2->bt.vsync &&
+ t1->bt.vbackporch == t2->bt.vbackporch &&
+ (!match_reduced_fps ||
+ (t1->bt.flags & V4L2_DV_FL_REDUCED_FPS) ==
+ (t2->bt.flags & V4L2_DV_FL_REDUCED_FPS)) &&
+ (!t1->bt.interlaced ||
+ (t1->bt.il_vfrontporch == t2->bt.il_vfrontporch &&
+ t1->bt.il_vsync == t2->bt.il_vsync &&
+ t1->bt.il_vbackporch == t2->bt.il_vbackporch)))
+ return true;
+ return false;
+}
+EXPORT_SYMBOL_GPL(v4l2_match_dv_timings);
+
+void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
+ const struct v4l2_dv_timings *t, bool detailed)
+{
+ const struct v4l2_bt_timings *bt = &t->bt;
+ u32 htot, vtot;
+ u32 fps;
+
+ if (t->type != V4L2_DV_BT_656_1120)
+ return;
+
+ htot = V4L2_DV_BT_FRAME_WIDTH(bt);
+ vtot = V4L2_DV_BT_FRAME_HEIGHT(bt);
+ if (bt->interlaced)
+ vtot /= 2;
+
+ fps = (htot * vtot) > 0 ? div_u64((100 * (u64)bt->pixelclock),
+ (htot * vtot)) : 0;
+
+ if (prefix == NULL)
+ prefix = "";
+
+ pr_info("%s: %s%ux%u%s%u.%02u (%ux%u)\n", dev_prefix, prefix,
+ bt->width, bt->height, bt->interlaced ? "i" : "p",
+ fps / 100, fps % 100, htot, vtot);
+
+ if (!detailed)
+ return;
+
+ pr_info("%s: horizontal: fp = %u, %ssync = %u, bp = %u\n",
+ dev_prefix, bt->hfrontporch,
+ (bt->polarities & V4L2_DV_HSYNC_POS_POL) ? "+" : "-",
+ bt->hsync, bt->hbackporch);
+ pr_info("%s: vertical: fp = %u, %ssync = %u, bp = %u\n",
+ dev_prefix, bt->vfrontporch,
+ (bt->polarities & V4L2_DV_VSYNC_POS_POL) ? "+" : "-",
+ bt->vsync, bt->vbackporch);
+ if (bt->interlaced)
+ pr_info("%s: vertical bottom field: fp = %u, %ssync = %u, bp = %u\n",
+ dev_prefix, bt->il_vfrontporch,
+ (bt->polarities & V4L2_DV_VSYNC_POS_POL) ? "+" : "-",
+ bt->il_vsync, bt->il_vbackporch);
+ pr_info("%s: pixelclock: %llu\n", dev_prefix, bt->pixelclock);
+ pr_info("%s: flags (0x%x):%s%s%s%s%s%s%s%s%s%s\n",
+ dev_prefix, bt->flags,
+ (bt->flags & V4L2_DV_FL_REDUCED_BLANKING) ?
+ " REDUCED_BLANKING" : "",
+ ((bt->flags & V4L2_DV_FL_REDUCED_BLANKING) &&
+ bt->vsync == 8) ? " (V2)" : "",
+ (bt->flags & V4L2_DV_FL_CAN_REDUCE_FPS) ?
+ " CAN_REDUCE_FPS" : "",
+ (bt->flags & V4L2_DV_FL_REDUCED_FPS) ?
+ " REDUCED_FPS" : "",
+ (bt->flags & V4L2_DV_FL_HALF_LINE) ?
+ " HALF_LINE" : "",
+ (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) ?
+ " CE_VIDEO" : "",
+ (bt->flags & V4L2_DV_FL_FIRST_FIELD_EXTRA_LINE) ?
+ " FIRST_FIELD_EXTRA_LINE" : "",
+ (bt->flags & V4L2_DV_FL_HAS_PICTURE_ASPECT) ?
+ " HAS_PICTURE_ASPECT" : "",
+ (bt->flags & V4L2_DV_FL_HAS_CEA861_VIC) ?
+ " HAS_CEA861_VIC" : "",
+ (bt->flags & V4L2_DV_FL_HAS_HDMI_VIC) ?
+ " HAS_HDMI_VIC" : "");
+ pr_info("%s: standards (0x%x):%s%s%s%s%s\n", dev_prefix, bt->standards,
+ (bt->standards & V4L2_DV_BT_STD_CEA861) ? " CEA" : "",
+ (bt->standards & V4L2_DV_BT_STD_DMT) ? " DMT" : "",
+ (bt->standards & V4L2_DV_BT_STD_CVT) ? " CVT" : "",
+ (bt->standards & V4L2_DV_BT_STD_GTF) ? " GTF" : "",
+ (bt->standards & V4L2_DV_BT_STD_SDI) ? " SDI" : "");
+ if (bt->flags & V4L2_DV_FL_HAS_PICTURE_ASPECT)
+ pr_info("%s: picture aspect (hor:vert): %u:%u\n", dev_prefix,
+ bt->picture_aspect.numerator,
+ bt->picture_aspect.denominator);
+ if (bt->flags & V4L2_DV_FL_HAS_CEA861_VIC)
+ pr_info("%s: CEA-861 VIC: %u\n", dev_prefix, bt->cea861_vic);
+ if (bt->flags & V4L2_DV_FL_HAS_HDMI_VIC)
+ pr_info("%s: HDMI VIC: %u\n", dev_prefix, bt->hdmi_vic);
+}
+EXPORT_SYMBOL_GPL(v4l2_print_dv_timings);
+
+struct v4l2_fract v4l2_dv_timings_aspect_ratio(const struct v4l2_dv_timings *t)
+{
+ struct v4l2_fract ratio = { 1, 1 };
+ unsigned long n, d;
+
+ if (t->type != V4L2_DV_BT_656_1120)
+ return ratio;
+ if (!(t->bt.flags & V4L2_DV_FL_HAS_PICTURE_ASPECT))
+ return ratio;
+
+ ratio.numerator = t->bt.width * t->bt.picture_aspect.denominator;
+ ratio.denominator = t->bt.height * t->bt.picture_aspect.numerator;
+
+ rational_best_approximation(ratio.numerator, ratio.denominator,
+ ratio.numerator, ratio.denominator, &n, &d);
+ ratio.numerator = n;
+ ratio.denominator = d;
+ return ratio;
+}
+EXPORT_SYMBOL_GPL(v4l2_dv_timings_aspect_ratio);
+
+/**
+ * v4l2_calc_timeperframe - helper function to calculate timeperframe based
+ * on v4l2_dv_timings fields.
+ * @t: Timings for the video mode.
+ *
+ * Calculates the expected timeperframe using the pixel clock value and
+ * horizontal/vertical measures. This means that the v4l2_dv_timings structure
+ * must be correctly and fully filled in.
+ */
+struct v4l2_fract v4l2_calc_timeperframe(const struct v4l2_dv_timings *t)
+{
+ const struct v4l2_bt_timings *bt = &t->bt;
+ struct v4l2_fract fps_fract = { 1, 1 };
+ unsigned long n, d;
+ u32 htot, vtot, fps;
+ u64 pclk;
+
+ if (t->type != V4L2_DV_BT_656_1120)
+ return fps_fract;
+
+ htot = V4L2_DV_BT_FRAME_WIDTH(bt);
+ vtot = V4L2_DV_BT_FRAME_HEIGHT(bt);
+ pclk = bt->pixelclock;
+
+ if ((bt->flags & V4L2_DV_FL_CAN_DETECT_REDUCED_FPS) &&
+ (bt->flags & V4L2_DV_FL_REDUCED_FPS))
+ pclk = div_u64(pclk * 1000ULL, 1001);
+
+ fps = (htot * vtot) > 0 ? div_u64((100 * pclk), (htot * vtot)) : 0;
+ if (!fps)
+ return fps_fract;
+
+ rational_best_approximation(fps, 100, fps, 100, &n, &d);
+
+ fps_fract.numerator = d;
+ fps_fract.denominator = n;
+ return fps_fract;
+}
+EXPORT_SYMBOL_GPL(v4l2_calc_timeperframe);
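+
+/*
+ * Worked example (editorial, not part of this commit): for CEA 1080p60
+ * the full frame is 2200x1125 pixels at a 148.5 MHz pixel clock, so
+ * fps = 148500000 / (2200 * 1125) = 60.00 and the function returns a
+ * timeperframe of 1/60 s. With V4L2_DV_FL_REDUCED_FPS set, the clock is
+ * first scaled by 1000/1001, giving the NTSC-style 59.94 Hz variant
+ * (timeperframe 50/2997).
+ */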
+
+/*
+ * CVT defines
+ * Based on Coordinated Video Timings Standard
+ * version 1.1 September 10, 2003
+ */
+
+#define CVT_PXL_CLK_GRAN 250000 /* pixel clock granularity */
+#define CVT_PXL_CLK_GRAN_RB_V2 1000 /* granularity for reduced blanking v2*/
+
+/* Normal blanking */
+#define CVT_MIN_V_BPORCH 7 /* lines */
+#define CVT_MIN_V_PORCH_RND 3 /* lines */
+#define CVT_MIN_VSYNC_BP 550 /* min time of vsync + back porch (us) */
+#define CVT_HSYNC_PERCENT 8 /* nominal hsync as percentage of line */
+
+/* Normal blanking for CVT uses GTF to calculate horizontal blanking */
+#define CVT_CELL_GRAN 8 /* character cell granularity */
+#define CVT_M 600 /* blanking formula gradient */
+#define CVT_C 40 /* blanking formula offset */
+#define CVT_K 128 /* blanking formula scaling factor */
+#define CVT_J 20 /* blanking formula scaling factor */
+#define CVT_C_PRIME (((CVT_C - CVT_J) * CVT_K / 256) + CVT_J)
+#define CVT_M_PRIME (CVT_K * CVT_M / 256)
+
+/* Reduced Blanking */
+#define CVT_RB_MIN_V_BPORCH 7 /* lines */
+#define CVT_RB_V_FPORCH 3 /* lines */
+#define CVT_RB_MIN_V_BLANK 460 /* us */
+#define CVT_RB_H_SYNC 32 /* pixels */
+#define CVT_RB_H_BLANK 160 /* pixels */
+/* Reduce blanking Version 2 */
+#define CVT_RB_V2_H_BLANK 80 /* pixels */
+#define CVT_RB_MIN_V_FPORCH 3 /* lines */
+#define CVT_RB_V2_MIN_V_FPORCH 1 /* lines */
+#define CVT_RB_V_BPORCH 6 /* lines */
+
+/**
+ * v4l2_detect_cvt - detect if the given timings follow the CVT standard
+ * @frame_height: the total height of the frame (including blanking) in lines.
+ * @hfreq: the horizontal frequency in Hz.
+ * @vsync: the height of the vertical sync in lines.
+ * @active_width: active width of image (does not include blanking). This
+ * information is needed only in case of version 2 of reduced blanking.
+ * In other cases, this parameter does not have any effect on timings.
+ * @polarities: the horizontal and vertical polarities (same as struct
+ * v4l2_bt_timings polarities).
+ * @interlaced: if this flag is true, it indicates interlaced format.
+ * @fmt: the resulting timings.
+ *
+ * This function will attempt to detect if the given values correspond to a
+ * valid CVT format. If so, then it will return true, and fmt will be filled
+ * in with the found CVT timings.
+ */
+bool v4l2_detect_cvt(unsigned frame_height,
+ unsigned hfreq,
+ unsigned vsync,
+ unsigned active_width,
+ u32 polarities,
+ bool interlaced,
+ struct v4l2_dv_timings *fmt)
+{
+ int v_fp, v_bp, h_fp, h_bp, hsync;
+ int frame_width, image_height, image_width;
+ bool reduced_blanking;
+ bool rb_v2 = false;
+ unsigned pix_clk;
+
+ if (vsync < 4 || vsync > 8)
+ return false;
+
+ if (polarities == V4L2_DV_VSYNC_POS_POL)
+ reduced_blanking = false;
+ else if (polarities == V4L2_DV_HSYNC_POS_POL)
+ reduced_blanking = true;
+ else
+ return false;
+
+ if (reduced_blanking && vsync == 8)
+ rb_v2 = true;
+
+ if (rb_v2 && active_width == 0)
+ return false;
+
+ if (!rb_v2 && vsync > 7)
+ return false;
+
+ if (hfreq == 0)
+ return false;
+
+ /* Vertical */
+ if (reduced_blanking) {
+ if (rb_v2) {
+ v_bp = CVT_RB_V_BPORCH;
+ v_fp = (CVT_RB_MIN_V_BLANK * hfreq) / 1000000 + 1;
+ v_fp -= vsync + v_bp;
+
+ if (v_fp < CVT_RB_V2_MIN_V_FPORCH)
+ v_fp = CVT_RB_V2_MIN_V_FPORCH;
+ } else {
+ v_fp = CVT_RB_V_FPORCH;
+ v_bp = (CVT_RB_MIN_V_BLANK * hfreq) / 1000000 + 1;
+ v_bp -= vsync + v_fp;
+
+ if (v_bp < CVT_RB_MIN_V_BPORCH)
+ v_bp = CVT_RB_MIN_V_BPORCH;
+ }
+ } else {
+ v_fp = CVT_MIN_V_PORCH_RND;
+ v_bp = (CVT_MIN_VSYNC_BP * hfreq) / 1000000 + 1 - vsync;
+
+ if (v_bp < CVT_MIN_V_BPORCH)
+ v_bp = CVT_MIN_V_BPORCH;
+ }
+
+ if (interlaced)
+ image_height = (frame_height - 2 * v_fp - 2 * vsync - 2 * v_bp) & ~0x1;
+ else
+ image_height = (frame_height - v_fp - vsync - v_bp + 1) & ~0x1;
+
+ if (image_height < 0)
+ return false;
+
+ /* Aspect ratio based on vsync */
+ switch (vsync) {
+ case 4:
+ image_width = (image_height * 4) / 3;
+ break;
+ case 5:
+ image_width = (image_height * 16) / 9;
+ break;
+ case 6:
+ image_width = (image_height * 16) / 10;
+ break;
+ case 7:
+ /* special case */
+ if (image_height == 1024)
+ image_width = (image_height * 5) / 4;
+ else if (image_height == 768)
+ image_width = (image_height * 15) / 9;
+ else
+ return false;
+ break;
+ case 8:
+ image_width = active_width;
+ break;
+ default:
+ return false;
+ }
+
+ if (!rb_v2)
+ image_width = image_width & ~7;
+
+ /* Horizontal */
+ if (reduced_blanking) {
+ int h_blank;
+ int clk_gran;
+
+ h_blank = rb_v2 ? CVT_RB_V2_H_BLANK : CVT_RB_H_BLANK;
+ clk_gran = rb_v2 ? CVT_PXL_CLK_GRAN_RB_V2 : CVT_PXL_CLK_GRAN;
+
+ pix_clk = (image_width + h_blank) * hfreq;
+ pix_clk = (pix_clk / clk_gran) * clk_gran;
+
+ h_bp = h_blank / 2;
+ hsync = CVT_RB_H_SYNC;
+ h_fp = h_blank - h_bp - hsync;
+
+ frame_width = image_width + h_blank;
+ } else {
+ unsigned ideal_duty_cycle_per_myriad =
+ 100 * CVT_C_PRIME - (CVT_M_PRIME * 100000) / hfreq;
+ int h_blank;
+
+ if (ideal_duty_cycle_per_myriad < 2000)
+ ideal_duty_cycle_per_myriad = 2000;
+
+ h_blank = image_width * ideal_duty_cycle_per_myriad /
+ (10000 - ideal_duty_cycle_per_myriad);
+ h_blank = (h_blank / (2 * CVT_CELL_GRAN)) * 2 * CVT_CELL_GRAN;
+
+ pix_clk = (image_width + h_blank) * hfreq;
+ pix_clk = (pix_clk / CVT_PXL_CLK_GRAN) * CVT_PXL_CLK_GRAN;
+
+ h_bp = h_blank / 2;
+ frame_width = image_width + h_blank;
+
+ hsync = frame_width * CVT_HSYNC_PERCENT / 100;
+ hsync = (hsync / CVT_CELL_GRAN) * CVT_CELL_GRAN;
+ h_fp = h_blank - hsync - h_bp;
+ }
+
+ fmt->type = V4L2_DV_BT_656_1120;
+ fmt->bt.polarities = polarities;
+ fmt->bt.width = image_width;
+ fmt->bt.height = image_height;
+ fmt->bt.hfrontporch = h_fp;
+ fmt->bt.vfrontporch = v_fp;
+ fmt->bt.hsync = hsync;
+ fmt->bt.vsync = vsync;
+ fmt->bt.hbackporch = frame_width - image_width - h_fp - hsync;
+
+ if (!interlaced) {
+ fmt->bt.vbackporch = frame_height - image_height - v_fp - vsync;
+ fmt->bt.interlaced = V4L2_DV_PROGRESSIVE;
+ } else {
+ fmt->bt.vbackporch = (frame_height - image_height - 2 * v_fp -
+ 2 * vsync) / 2;
+ fmt->bt.il_vbackporch = frame_height - image_height - 2 * v_fp -
+ 2 * vsync - fmt->bt.vbackporch;
+ fmt->bt.il_vfrontporch = v_fp;
+ fmt->bt.il_vsync = vsync;
+ fmt->bt.flags |= V4L2_DV_FL_HALF_LINE;
+ fmt->bt.interlaced = V4L2_DV_INTERLACED;
+ }
+
+ fmt->bt.pixelclock = pix_clk;
+ fmt->bt.standards = V4L2_DV_BT_STD_CVT;
+
+ if (reduced_blanking)
+ fmt->bt.flags |= V4L2_DV_FL_REDUCED_BLANKING;
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(v4l2_detect_cvt);
+
+/*
+ * GTF defines
+ * Based on Generalized Timing Formula Standard
+ * Version 1.1 September 2, 1999
+ */
+
+#define GTF_PXL_CLK_GRAN 250000 /* pixel clock granularity */
+
+#define GTF_MIN_VSYNC_BP 550 /* min time of vsync + back porch (us) */
+#define GTF_V_FP 1 /* vertical front porch (lines) */
+#define GTF_CELL_GRAN 8 /* character cell granularity */
+
+/* Default */
+#define GTF_D_M 600 /* blanking formula gradient */
+#define GTF_D_C 40 /* blanking formula offset */
+#define GTF_D_K 128 /* blanking formula scaling factor */
+#define GTF_D_J 20 /* blanking formula scaling factor */
+#define GTF_D_C_PRIME ((((GTF_D_C - GTF_D_J) * GTF_D_K) / 256) + GTF_D_J)
+#define GTF_D_M_PRIME ((GTF_D_K * GTF_D_M) / 256)
+
+/* Secondary */
+#define GTF_S_M 3600 /* blanking formula gradient */
+#define GTF_S_C 40 /* blanking formula offset */
+#define GTF_S_K 128 /* blanking formula scaling factor */
+#define GTF_S_J 35 /* blanking formula scaling factor */
+#define GTF_S_C_PRIME ((((GTF_S_C - GTF_S_J) * GTF_S_K) / 256) + GTF_S_J)
+#define GTF_S_M_PRIME ((GTF_S_K * GTF_S_M) / 256)
+
+/**
+ * v4l2_detect_gtf - detect if the given timings follow the GTF standard
+ * @frame_height: the total height of the frame (including blanking) in lines.
+ * @hfreq: the horizontal frequency in Hz.
+ * @vsync: the height of the vertical sync in lines.
+ * @polarities: the horizontal and vertical polarities (same as struct
+ * v4l2_bt_timings polarities).
+ * @interlaced: if this flag is true, it indicates interlaced format.
+ * @aspect: preferred aspect ratio. GTF has no method of determining the
+ * aspect ratio in order to derive the image width from the
+ * image height, so it has to be passed explicitly. Usually
+ * the native screen aspect ratio is used for this. If it
+ * is not filled in correctly, then 16:9 will be assumed.
+ * @fmt: the resulting timings.
+ *
+ * This function will attempt to detect if the given values correspond to a
+ * valid GTF format. If so, then it will return true, and fmt will be filled
+ * in with the found GTF timings.
+ */
+bool v4l2_detect_gtf(unsigned frame_height,
+ unsigned hfreq,
+ unsigned vsync,
+ u32 polarities,
+ bool interlaced,
+ struct v4l2_fract aspect,
+ struct v4l2_dv_timings *fmt)
+{
+ int pix_clk;
+ int v_fp, v_bp, h_fp, hsync;
+ int frame_width, image_height, image_width;
+ bool default_gtf;
+ int h_blank;
+
+ if (vsync != 3)
+ return false;
+
+ if (polarities == V4L2_DV_VSYNC_POS_POL)
+ default_gtf = true;
+ else if (polarities == V4L2_DV_HSYNC_POS_POL)
+ default_gtf = false;
+ else
+ return false;
+
+ if (hfreq == 0)
+ return false;
+
+ /* Vertical */
+ v_fp = GTF_V_FP;
+ v_bp = (GTF_MIN_VSYNC_BP * hfreq + 500000) / 1000000 - vsync;
+ if (interlaced)
+ image_height = (frame_height - 2 * v_fp - 2 * vsync - 2 * v_bp) & ~0x1;
+ else
+ image_height = (frame_height - v_fp - vsync - v_bp + 1) & ~0x1;
+
+ if (image_height < 0)
+ return false;
+
+ if (aspect.numerator == 0 || aspect.denominator == 0) {
+ aspect.numerator = 16;
+ aspect.denominator = 9;
+ }
+ image_width = ((image_height * aspect.numerator) / aspect.denominator);
+ image_width = (image_width + GTF_CELL_GRAN/2) & ~(GTF_CELL_GRAN - 1);
+
+ /* Horizontal */
+ if (default_gtf) {
+ u64 num;
+ u32 den;
+
+ num = ((image_width * GTF_D_C_PRIME * (u64)hfreq) -
+ ((u64)image_width * GTF_D_M_PRIME * 1000));
+ den = (hfreq * (100 - GTF_D_C_PRIME) + GTF_D_M_PRIME * 1000) *
+ (2 * GTF_CELL_GRAN);
+ h_blank = div_u64((num + (den >> 1)), den);
+ h_blank *= (2 * GTF_CELL_GRAN);
+ } else {
+ u64 num;
+ u32 den;
+
+ num = ((image_width * GTF_S_C_PRIME * (u64)hfreq) -
+ ((u64)image_width * GTF_S_M_PRIME * 1000));
+ den = (hfreq * (100 - GTF_S_C_PRIME) + GTF_S_M_PRIME * 1000) *
+ (2 * GTF_CELL_GRAN);
+ h_blank = div_u64((num + (den >> 1)), den);
+ h_blank *= (2 * GTF_CELL_GRAN);
+ }
+
+ frame_width = image_width + h_blank;
+
+ pix_clk = (image_width + h_blank) * hfreq;
+ pix_clk = pix_clk / GTF_PXL_CLK_GRAN * GTF_PXL_CLK_GRAN;
+
+ hsync = (frame_width * 8 + 50) / 100;
+ hsync = DIV_ROUND_CLOSEST(hsync, GTF_CELL_GRAN) * GTF_CELL_GRAN;
+
+ h_fp = h_blank / 2 - hsync;
+
+ fmt->type = V4L2_DV_BT_656_1120;
+ fmt->bt.polarities = polarities;
+ fmt->bt.width = image_width;
+ fmt->bt.height = image_height;
+ fmt->bt.hfrontporch = h_fp;
+ fmt->bt.vfrontporch = v_fp;
+ fmt->bt.hsync = hsync;
+ fmt->bt.vsync = vsync;
+ fmt->bt.hbackporch = frame_width - image_width - h_fp - hsync;
+
+ if (!interlaced) {
+ fmt->bt.vbackporch = frame_height - image_height - v_fp - vsync;
+ fmt->bt.interlaced = V4L2_DV_PROGRESSIVE;
+ } else {
+ fmt->bt.vbackporch = (frame_height - image_height - 2 * v_fp -
+ 2 * vsync) / 2;
+ fmt->bt.il_vbackporch = frame_height - image_height - 2 * v_fp -
+ 2 * vsync - fmt->bt.vbackporch;
+ fmt->bt.il_vfrontporch = v_fp;
+ fmt->bt.il_vsync = vsync;
+ fmt->bt.flags |= V4L2_DV_FL_HALF_LINE;
+ fmt->bt.interlaced = V4L2_DV_INTERLACED;
+ }
+
+ fmt->bt.pixelclock = pix_clk;
+ fmt->bt.standards = V4L2_DV_BT_STD_GTF;
+
+ if (!default_gtf)
+ fmt->bt.flags |= V4L2_DV_FL_REDUCED_BLANKING;
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(v4l2_detect_gtf);
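+
+/*
+ * Editorial illustration, not part of this commit: a receiver driver
+ * typically measures the frame height, horizontal frequency, vsync
+ * width and sync polarities in hardware, then asks the CVT and GTF
+ * detectors in turn to label the signal. All variables shown are
+ * hypothetical driver state.
+ */
+#if 0
+ struct v4l2_dv_timings t = { };
+ struct v4l2_fract aspect = { 16, 9 }; /* native screen ratio */
+
+ if (!v4l2_detect_cvt(frame_height, hfreq, vsync_lines, active_width,
+ pol, interlaced, &t) &&
+ !v4l2_detect_gtf(frame_height, hfreq, vsync_lines,
+ pol, interlaced, aspect, &t))
+ return -ERANGE; /* VIDIOC_QUERY_DV_TIMINGS: unrecognized signal */
+#endif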
+
+/**
+ * v4l2_calc_aspect_ratio - calculate the aspect ratio based on bytes
+ * 0x15 and 0x16 from the EDID.
+ * @hor_landscape: byte 0x15 from the EDID.
+ * @vert_portrait: byte 0x16 from the EDID.
+ *
+ * Determines the aspect ratio from the EDID.
+ * See VESA Enhanced EDID standard, release A, rev 2, section 3.6.2:
+ * "Horizontal and Vertical Screen Size or Aspect Ratio"
+ */
+struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait)
+{
+ struct v4l2_fract aspect = { 16, 9 };
+ u8 ratio;
+
+ /* Nothing filled in, fallback to 16:9 */
+ if (!hor_landscape && !vert_portrait)
+ return aspect;
+ /* Both filled in, so they are interpreted as the screen size in cm */
+ if (hor_landscape && vert_portrait) {
+ aspect.numerator = hor_landscape;
+ aspect.denominator = vert_portrait;
+ return aspect;
+ }
+ /*
+ * Only one is filled in, so interpret it as a ratio:
+ * (val + 99) / 100
+ */
+ ratio = hor_landscape | vert_portrait;
+ /* Change some rounded values into the exact aspect ratio */
+ if (ratio == 79) {
+ aspect.numerator = 16;
+ aspect.denominator = 9;
+ } else if (ratio == 34) {
+ aspect.numerator = 4;
+ aspect.denominator = 3;
+ } else if (ratio == 68) {
+ aspect.numerator = 15;
+ aspect.denominator = 9;
+ } else {
+ aspect.numerator = ratio + 99;
+ aspect.denominator = 100;
+ }
+ if (hor_landscape)
+ return aspect;
+ /* The aspect ratio is for portrait, so swap numerator and denominator */
+ swap(aspect.denominator, aspect.numerator);
+ return aspect;
+}
+EXPORT_SYMBOL_GPL(v4l2_calc_aspect_ratio);
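+
+/*
+ * Worked examples (editorial, not part of this commit):
+ * hor_landscape = 16, vert_portrait = 9 -> 16:9 (both set: sizes in cm)
+ * hor_landscape = 79, vert_portrait = 0 -> (79 + 99)/100, rounded to
+ * the exact 16:9 ratio
+ * hor_landscape = 0, vert_portrait = 79 -> 9:16 (swapped for portrait)
+ */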
+
+/**
+ * v4l2_hdmi_rx_colorimetry - determine HDMI colorimetry information
+ * based on various InfoFrames.
+ * @avi: the AVI InfoFrame
+ * @hdmi: the HDMI Vendor InfoFrame, may be NULL
+ * @height: the frame height
+ *
+ * Determines the HDMI colorimetry information, i.e. how the HDMI
+ * pixel color data should be interpreted.
+ *
+ * Note that some of the newer features (DCI-P3, HDR) are not yet
+ * implemented: the hdmi.h header needs to be updated to the HDMI 2.0
+ * and CTA-861-G standards.
+ */
+struct v4l2_hdmi_colorimetry
+v4l2_hdmi_rx_colorimetry(const struct hdmi_avi_infoframe *avi,
+ const struct hdmi_vendor_infoframe *hdmi,
+ unsigned int height)
+{
+ struct v4l2_hdmi_colorimetry c = {
+ V4L2_COLORSPACE_SRGB,
+ V4L2_YCBCR_ENC_DEFAULT,
+ V4L2_QUANTIZATION_FULL_RANGE,
+ V4L2_XFER_FUNC_SRGB
+ };
+ bool is_ce = avi->video_code || (hdmi && hdmi->vic);
+ bool is_sdtv = height <= 576;
+ bool default_is_lim_range_rgb = avi->video_code > 1;
+
+ switch (avi->colorspace) {
+ case HDMI_COLORSPACE_RGB:
+ /* RGB pixel encoding */
+ switch (avi->colorimetry) {
+ case HDMI_COLORIMETRY_EXTENDED:
+ switch (avi->extended_colorimetry) {
+ case HDMI_EXTENDED_COLORIMETRY_OPRGB:
+ c.colorspace = V4L2_COLORSPACE_OPRGB;
+ c.xfer_func = V4L2_XFER_FUNC_OPRGB;
+ break;
+ case HDMI_EXTENDED_COLORIMETRY_BT2020:
+ c.colorspace = V4L2_COLORSPACE_BT2020;
+ c.xfer_func = V4L2_XFER_FUNC_709;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ switch (avi->quantization_range) {
+ case HDMI_QUANTIZATION_RANGE_LIMITED:
+ c.quantization = V4L2_QUANTIZATION_LIM_RANGE;
+ break;
+ case HDMI_QUANTIZATION_RANGE_FULL:
+ break;
+ default:
+ if (default_is_lim_range_rgb)
+ c.quantization = V4L2_QUANTIZATION_LIM_RANGE;
+ break;
+ }
+ break;
+
+ default:
+ /* YCbCr pixel encoding */
+ c.quantization = V4L2_QUANTIZATION_LIM_RANGE;
+ switch (avi->colorimetry) {
+ case HDMI_COLORIMETRY_NONE:
+ if (!is_ce)
+ break;
+ if (is_sdtv) {
+ c.colorspace = V4L2_COLORSPACE_SMPTE170M;
+ c.ycbcr_enc = V4L2_YCBCR_ENC_601;
+ } else {
+ c.colorspace = V4L2_COLORSPACE_REC709;
+ c.ycbcr_enc = V4L2_YCBCR_ENC_709;
+ }
+ c.xfer_func = V4L2_XFER_FUNC_709;
+ break;
+ case HDMI_COLORIMETRY_ITU_601:
+ c.colorspace = V4L2_COLORSPACE_SMPTE170M;
+ c.ycbcr_enc = V4L2_YCBCR_ENC_601;
+ c.xfer_func = V4L2_XFER_FUNC_709;
+ break;
+ case HDMI_COLORIMETRY_ITU_709:
+ c.colorspace = V4L2_COLORSPACE_REC709;
+ c.ycbcr_enc = V4L2_YCBCR_ENC_709;
+ c.xfer_func = V4L2_XFER_FUNC_709;
+ break;
+ case HDMI_COLORIMETRY_EXTENDED:
+ switch (avi->extended_colorimetry) {
+ case HDMI_EXTENDED_COLORIMETRY_XV_YCC_601:
+ c.colorspace = V4L2_COLORSPACE_REC709;
+ c.ycbcr_enc = V4L2_YCBCR_ENC_XV601;
+ c.xfer_func = V4L2_XFER_FUNC_709;
+ break;
+ case HDMI_EXTENDED_COLORIMETRY_XV_YCC_709:
+ c.colorspace = V4L2_COLORSPACE_REC709;
+ c.ycbcr_enc = V4L2_YCBCR_ENC_XV709;
+ c.xfer_func = V4L2_XFER_FUNC_709;
+ break;
+ case HDMI_EXTENDED_COLORIMETRY_S_YCC_601:
+ c.colorspace = V4L2_COLORSPACE_SRGB;
+ c.ycbcr_enc = V4L2_YCBCR_ENC_601;
+ c.xfer_func = V4L2_XFER_FUNC_SRGB;
+ break;
+ case HDMI_EXTENDED_COLORIMETRY_OPYCC_601:
+ c.colorspace = V4L2_COLORSPACE_OPRGB;
+ c.ycbcr_enc = V4L2_YCBCR_ENC_601;
+ c.xfer_func = V4L2_XFER_FUNC_OPRGB;
+ break;
+ case HDMI_EXTENDED_COLORIMETRY_BT2020:
+ c.colorspace = V4L2_COLORSPACE_BT2020;
+ c.ycbcr_enc = V4L2_YCBCR_ENC_BT2020;
+ c.xfer_func = V4L2_XFER_FUNC_709;
+ break;
+ case HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM:
+ c.colorspace = V4L2_COLORSPACE_BT2020;
+ c.ycbcr_enc = V4L2_YCBCR_ENC_BT2020_CONST_LUM;
+ c.xfer_func = V4L2_XFER_FUNC_709;
+ break;
+ default: /* fall back to ITU_709 */
+ c.colorspace = V4L2_COLORSPACE_REC709;
+ c.ycbcr_enc = V4L2_YCBCR_ENC_709;
+ c.xfer_func = V4L2_XFER_FUNC_709;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ /*
+ * YCC Quantization Range signaling is more-or-less broken,
+ * let's just ignore this.
+ */
+ break;
+ }
+ return c;
+}
+EXPORT_SYMBOL_GPL(v4l2_hdmi_rx_colorimetry);
+
+/**
+ * v4l2_get_edid_phys_addr() - find and return the physical address
+ *
+ * @edid: pointer to the EDID data
+ * @size: size in bytes of the EDID data
+ * @offset: If not %NULL then the location of the physical address
+ * bytes in the EDID will be returned here. This is set to 0
+ * if there is no physical address found.
+ *
+ * Return: the physical address or CEC_PHYS_ADDR_INVALID if there is none.
+ */
+u16 v4l2_get_edid_phys_addr(const u8 *edid, unsigned int size,
+ unsigned int *offset)
+{
+ unsigned int loc = cec_get_edid_spa_location(edid, size);
+
+ if (offset)
+ *offset = loc;
+ if (loc == 0)
+ return CEC_PHYS_ADDR_INVALID;
+ return (edid[loc] << 8) | edid[loc + 1];
+}
+EXPORT_SYMBOL_GPL(v4l2_get_edid_phys_addr);
+
+/**
+ * v4l2_set_edid_phys_addr() - find and set the physical address
+ *
+ * @edid: pointer to the EDID data
+ * @size: size in bytes of the EDID data
+ * @phys_addr: the new physical address
+ *
+ * This function finds the location of the physical address in the EDID
+ * and fills in the given physical address and updates the checksum
+ * at the end of the EDID block. It does nothing if the EDID doesn't
+ * contain a physical address.
+ */
+void v4l2_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr)
+{
+ unsigned int loc = cec_get_edid_spa_location(edid, size);
+ u8 sum = 0;
+ unsigned int i;
+
+ if (loc == 0)
+ return;
+ edid[loc] = phys_addr >> 8;
+ edid[loc + 1] = phys_addr & 0xff;
+ loc &= ~0x7f;
+
+ /* update the checksum */
+ for (i = loc; i < loc + 127; i++)
+ sum += edid[i];
+ edid[i] = 256 - sum;
+}
+EXPORT_SYMBOL_GPL(v4l2_set_edid_phys_addr);
+
+/**
+ * v4l2_phys_addr_for_input() - calculate the PA for an input
+ *
+ * @phys_addr: the physical address of the parent
+ * @input: the number of the input port, must be between 1 and 15
+ *
+ * This function calculates a new physical address based on the input
+ * port number. For example:
+ *
+ * PA = 0.0.0.0 and input = 2 becomes 2.0.0.0
+ *
+ * PA = 3.0.0.0 and input = 1 becomes 3.1.0.0
+ *
+ * PA = 3.2.1.0 and input = 5 becomes 3.2.1.5
+ *
+ * PA = 3.2.1.3 and input = 5 becomes f.f.f.f since it maxed out the depth.
+ *
+ * Return: the new physical address or CEC_PHYS_ADDR_INVALID.
+ */
+u16 v4l2_phys_addr_for_input(u16 phys_addr, u8 input)
+{
+ /* Check if input is sane */
+ if (WARN_ON(input == 0 || input > 0xf))
+ return CEC_PHYS_ADDR_INVALID;
+
+ if (phys_addr == 0)
+ return input << 12;
+
+ if ((phys_addr & 0x0fff) == 0)
+ return phys_addr | (input << 8);
+
+ if ((phys_addr & 0x00ff) == 0)
+ return phys_addr | (input << 4);
+
+ if ((phys_addr & 0x000f) == 0)
+ return phys_addr | input;
+
+ /*
+ * All nibbles are used so no valid physical addresses can be assigned
+ * to the input.
+ */
+ return CEC_PHYS_ADDR_INVALID;
+}
+EXPORT_SYMBOL_GPL(v4l2_phys_addr_for_input);
+
+/**
+ * v4l2_phys_addr_validate() - validate a physical address from an EDID
+ *
+ * @phys_addr: the physical address to validate
+ * @parent: if not %NULL, then this is filled with the parents PA.
+ * @port: if not %NULL, then this is filled with the input port.
+ *
+ * This validates a physical address as read from an EDID. If the
+ * PA is invalid (such as 1.0.1.0 since '0' is only allowed at the end),
+ * then it will return -EINVAL.
+ *
+ * The parent PA is passed into %parent and the input port is passed into
+ * %port. For example:
+ *
+ * PA = 0.0.0.0: has parent 0.0.0.0 and input port 0.
+ *
+ * PA = 1.0.0.0: has parent 0.0.0.0 and input port 1.
+ *
+ * PA = 3.2.0.0: has parent 3.0.0.0 and input port 2.
+ *
+ * PA = f.f.f.f: has parent f.f.f.f and input port 0.
+ *
+ * Return: 0 if the PA is valid, -EINVAL if not.
+ */
+int v4l2_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port)
+{
+ int i;
+
+ if (parent)
+ *parent = phys_addr;
+ if (port)
+ *port = 0;
+ if (phys_addr == CEC_PHYS_ADDR_INVALID)
+ return 0;
+ for (i = 0; i < 16; i += 4)
+ if (phys_addr & (0xf << i))
+ break;
+ if (i == 16)
+ return 0;
+ if (parent)
+ *parent = phys_addr & (0xfff0 << i);
+ if (port)
+ *port = (phys_addr >> i) & 0xf;
+ for (i += 4; i < 16; i += 4)
+ if ((phys_addr & (0xf << i)) == 0)
+ return -EINVAL;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_phys_addr_validate);
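+
+/*
+ * Editorial illustration, not part of this commit: an HDMI receiver
+ * with several inputs parses the physical address from its EDID,
+ * validates it, and derives per-input addresses. The variables are
+ * hypothetical.
+ */
+#if 0
+ u16 pa = v4l2_get_edid_phys_addr(edid, size, NULL);
+ u16 parent, port;
+
+ if (!v4l2_phys_addr_validate(pa, &parent, &port)) {
+ /* e.g. pa == 0x3200 -> parent 0x3000, port 2 */
+ u16 input_pa = v4l2_phys_addr_for_input(pa, 1);
+ }
+#endif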
diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
new file mode 100644
index 0000000000..c5ce9f11ad
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-event.c
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * v4l2-event.c
+ *
+ * V4L2 events.
+ *
+ * Copyright (C) 2009--2010 Nokia Corporation.
+ *
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
+ */
+
+#include <media/v4l2-dev.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+
+static unsigned int sev_pos(const struct v4l2_subscribed_event *sev, unsigned int idx)
+{
+ idx += sev->first;
+ return idx >= sev->elems ? idx - sev->elems : idx;
+}
+
+static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
+{
+ struct v4l2_kevent *kev;
+ struct timespec64 ts;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+
+ if (list_empty(&fh->available)) {
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+ return -ENOENT;
+ }
+
+ WARN_ON(fh->navailable == 0);
+
+ kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
+ list_del(&kev->list);
+ fh->navailable--;
+
+ kev->event.pending = fh->navailable;
+ *event = kev->event;
+ ts = ns_to_timespec64(kev->ts);
+ event->timestamp.tv_sec = ts.tv_sec;
+ event->timestamp.tv_nsec = ts.tv_nsec;
+ kev->sev->first = sev_pos(kev->sev, 1);
+ kev->sev->in_use--;
+
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+
+ return 0;
+}
+
+int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
+ int nonblocking)
+{
+ int ret;
+
+ if (nonblocking)
+ return __v4l2_event_dequeue(fh, event);
+
+ /* Release the vdev lock while waiting */
+ if (fh->vdev->lock)
+ mutex_unlock(fh->vdev->lock);
+
+ do {
+ ret = wait_event_interruptible(fh->wait,
+ fh->navailable != 0);
+ if (ret < 0)
+ break;
+
+ ret = __v4l2_event_dequeue(fh, event);
+ } while (ret == -ENOENT);
+
+ if (fh->vdev->lock)
+ mutex_lock(fh->vdev->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_event_dequeue);
+
+/* Caller must hold fh->vdev->fh_lock! */
+static struct v4l2_subscribed_event *v4l2_event_subscribed(
+ struct v4l2_fh *fh, u32 type, u32 id)
+{
+ struct v4l2_subscribed_event *sev;
+
+ assert_spin_locked(&fh->vdev->fh_lock);
+
+ list_for_each_entry(sev, &fh->subscribed, list)
+ if (sev->type == type && sev->id == id)
+ return sev;
+
+ return NULL;
+}
+
+static void __v4l2_event_queue_fh(struct v4l2_fh *fh,
+ const struct v4l2_event *ev, u64 ts)
+{
+ struct v4l2_subscribed_event *sev;
+ struct v4l2_kevent *kev;
+ bool copy_payload = true;
+
+ /* Are we subscribed? */
+ sev = v4l2_event_subscribed(fh, ev->type, ev->id);
+ if (sev == NULL)
+ return;
+
+ /* Increase event sequence number on fh. */
+ fh->sequence++;
+
+ /* Do we have any free events? */
+ if (sev->in_use == sev->elems) {
+ /* no, remove the oldest one */
+ kev = sev->events + sev_pos(sev, 0);
+ list_del(&kev->list);
+ sev->in_use--;
+ sev->first = sev_pos(sev, 1);
+ fh->navailable--;
+ if (sev->elems == 1) {
+ if (sev->ops && sev->ops->replace) {
+ sev->ops->replace(&kev->event, ev);
+ copy_payload = false;
+ }
+ } else if (sev->ops && sev->ops->merge) {
+ struct v4l2_kevent *second_oldest =
+ sev->events + sev_pos(sev, 0);
+ sev->ops->merge(&kev->event, &second_oldest->event);
+ }
+ }
+
+ /* Take one and fill it. */
+ kev = sev->events + sev_pos(sev, sev->in_use);
+ kev->event.type = ev->type;
+ if (copy_payload)
+ kev->event.u = ev->u;
+ kev->event.id = ev->id;
+ kev->ts = ts;
+ kev->event.sequence = fh->sequence;
+ sev->in_use++;
+ list_add_tail(&kev->list, &fh->available);
+
+ fh->navailable++;
+
+ wake_up_all(&fh->wait);
+}
+
+void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
+{
+ struct v4l2_fh *fh;
+ unsigned long flags;
+ u64 ts;
+
+ if (vdev == NULL)
+ return;
+
+ ts = ktime_get_ns();
+
+ spin_lock_irqsave(&vdev->fh_lock, flags);
+
+ list_for_each_entry(fh, &vdev->fh_list, list)
+ __v4l2_event_queue_fh(fh, ev, ts);
+
+ spin_unlock_irqrestore(&vdev->fh_lock, flags);
+}
+EXPORT_SYMBOL_GPL(v4l2_event_queue);
+
+void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
+{
+ unsigned long flags;
+ u64 ts = ktime_get_ns();
+
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ __v4l2_event_queue_fh(fh, ev, ts);
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+}
+EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);
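+
+/*
+ * Illustrative sketch: a codec driver signalling end-of-stream to a
+ * single file handle. Only the event type matters in this example.
+ *
+ *	static const struct v4l2_event eos_event = {
+ *		.type = V4L2_EVENT_EOS,
+ *	};
+ *
+ *	v4l2_event_queue_fh(fh, &eos_event);
+ */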
+
+int v4l2_event_pending(struct v4l2_fh *fh)
+{
+ return fh->navailable;
+}
+EXPORT_SYMBOL_GPL(v4l2_event_pending);
+
+void v4l2_event_wake_all(struct video_device *vdev)
+{
+ struct v4l2_fh *fh;
+ unsigned long flags;
+
+ if (!vdev)
+ return;
+
+ spin_lock_irqsave(&vdev->fh_lock, flags);
+
+ list_for_each_entry(fh, &vdev->fh_list, list)
+ wake_up_all(&fh->wait);
+
+ spin_unlock_irqrestore(&vdev->fh_lock, flags);
+}
+EXPORT_SYMBOL_GPL(v4l2_event_wake_all);
+
+static void __v4l2_event_unsubscribe(struct v4l2_subscribed_event *sev)
+{
+ struct v4l2_fh *fh = sev->fh;
+ unsigned int i;
+
+ lockdep_assert_held(&fh->subscribe_lock);
+ assert_spin_locked(&fh->vdev->fh_lock);
+
+ /* Remove any pending events for this subscription */
+ for (i = 0; i < sev->in_use; i++) {
+ list_del(&sev->events[sev_pos(sev, i)].list);
+ fh->navailable--;
+ }
+ list_del(&sev->list);
+}
+
+int v4l2_event_subscribe(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub, unsigned int elems,
+ const struct v4l2_subscribed_event_ops *ops)
+{
+ struct v4l2_subscribed_event *sev, *found_ev;
+ unsigned long flags;
+ unsigned int i;
+ int ret = 0;
+
+ if (sub->type == V4L2_EVENT_ALL)
+ return -EINVAL;
+
+ if (elems < 1)
+ elems = 1;
+
+ sev = kvzalloc(struct_size(sev, events, elems), GFP_KERNEL);
+ if (!sev)
+ return -ENOMEM;
+ for (i = 0; i < elems; i++)
+ sev->events[i].sev = sev;
+ sev->type = sub->type;
+ sev->id = sub->id;
+ sev->flags = sub->flags;
+ sev->fh = fh;
+ sev->ops = ops;
+ sev->elems = elems;
+
+ mutex_lock(&fh->subscribe_lock);
+
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
+ if (!found_ev)
+ list_add(&sev->list, &fh->subscribed);
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+
+ if (found_ev) {
+ /* Already listening */
+ kvfree(sev);
+ } else if (sev->ops && sev->ops->add) {
+ ret = sev->ops->add(sev, elems);
+ if (ret) {
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ __v4l2_event_unsubscribe(sev);
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+ kvfree(sev);
+ }
+ }
+
+ mutex_unlock(&fh->subscribe_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
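+
+/*
+ * Illustrative sketch: a driver's .subscribe_event ioctl op routing
+ * control events to the control framework and source changes to the
+ * helper defined below; my_subscribe_event and the queue depth of 4
+ * are examples only.
+ *
+ *	static int my_subscribe_event(struct v4l2_fh *fh,
+ *				      const struct v4l2_event_subscription *sub)
+ *	{
+ *		switch (sub->type) {
+ *		case V4L2_EVENT_CTRL:
+ *			return v4l2_ctrl_subscribe_event(fh, sub);
+ *		case V4L2_EVENT_SOURCE_CHANGE:
+ *			return v4l2_src_change_event_subscribe(fh, sub);
+ *		default:
+ *			return v4l2_event_subscribe(fh, sub, 4, NULL);
+ *		}
+ *	}
+ */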
+
+void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
+{
+ struct v4l2_event_subscription sub;
+ struct v4l2_subscribed_event *sev;
+ unsigned long flags;
+
+ do {
+ sev = NULL;
+
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ if (!list_empty(&fh->subscribed)) {
+ sev = list_first_entry(&fh->subscribed,
+ struct v4l2_subscribed_event, list);
+ sub.type = sev->type;
+ sub.id = sev->id;
+ }
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+ if (sev)
+ v4l2_event_unsubscribe(fh, &sub);
+ } while (sev);
+}
+EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);
+
+int v4l2_event_unsubscribe(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ struct v4l2_subscribed_event *sev;
+ unsigned long flags;
+
+ if (sub->type == V4L2_EVENT_ALL) {
+ v4l2_event_unsubscribe_all(fh);
+ return 0;
+ }
+
+ mutex_lock(&fh->subscribe_lock);
+
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+
+ sev = v4l2_event_subscribed(fh, sub->type, sub->id);
+ if (sev != NULL)
+ __v4l2_event_unsubscribe(sev);
+
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+
+ if (sev && sev->ops && sev->ops->del)
+ sev->ops->del(sev);
+
+ mutex_unlock(&fh->subscribe_lock);
+
+ kvfree(sev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);
+
+int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ return v4l2_event_unsubscribe(fh, sub);
+}
+EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe);
+
+static void v4l2_event_src_replace(struct v4l2_event *old,
+ const struct v4l2_event *new)
+{
+ u32 old_changes = old->u.src_change.changes;
+
+ old->u.src_change = new->u.src_change;
+ old->u.src_change.changes |= old_changes;
+}
+
+static void v4l2_event_src_merge(const struct v4l2_event *old,
+ struct v4l2_event *new)
+{
+ new->u.src_change.changes |= old->u.src_change.changes;
+}
+
+static const struct v4l2_subscribed_event_ops v4l2_event_src_ch_ops = {
+ .replace = v4l2_event_src_replace,
+ .merge = v4l2_event_src_merge,
+};
+
+int v4l2_src_change_event_subscribe(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
+ return v4l2_event_subscribe(fh, sub, 0, &v4l2_event_src_ch_ops);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(v4l2_src_change_event_subscribe);
+
+int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd,
+ struct v4l2_fh *fh, struct v4l2_event_subscription *sub)
+{
+ return v4l2_src_change_event_subscribe(fh, sub);
+}
+EXPORT_SYMBOL_GPL(v4l2_src_change_event_subdev_subscribe);
diff --git a/drivers/media/v4l2-core/v4l2-fh.c b/drivers/media/v4l2-core/v4l2-fh.c
new file mode 100644
index 0000000000..90eec79ee9
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-fh.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * v4l2-fh.c
+ *
+ * V4L2 file handles.
+ *
+ * Copyright (C) 2009--2010 Nokia Corporation.
+ *
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
+ */
+
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
+
+void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
+{
+ fh->vdev = vdev;
+ /* Inherit from video_device. May be overridden by the driver. */
+ fh->ctrl_handler = vdev->ctrl_handler;
+ INIT_LIST_HEAD(&fh->list);
+ set_bit(V4L2_FL_USES_V4L2_FH, &fh->vdev->flags);
+ /*
+ * determine_valid_ioctls() does not know if struct v4l2_fh
+ * is used by this driver, but here we do. So enable the
+ * prio ioctls here.
+ */
+ set_bit(_IOC_NR(VIDIOC_G_PRIORITY), vdev->valid_ioctls);
+ set_bit(_IOC_NR(VIDIOC_S_PRIORITY), vdev->valid_ioctls);
+ fh->prio = V4L2_PRIORITY_UNSET;
+ init_waitqueue_head(&fh->wait);
+ INIT_LIST_HEAD(&fh->available);
+ INIT_LIST_HEAD(&fh->subscribed);
+ fh->sequence = -1;
+ mutex_init(&fh->subscribe_lock);
+}
+EXPORT_SYMBOL_GPL(v4l2_fh_init);
+
+void v4l2_fh_add(struct v4l2_fh *fh)
+{
+ unsigned long flags;
+
+ v4l2_prio_open(fh->vdev->prio, &fh->prio);
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ list_add(&fh->list, &fh->vdev->fh_list);
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+}
+EXPORT_SYMBOL_GPL(v4l2_fh_add);
+
+int v4l2_fh_open(struct file *filp)
+{
+ struct video_device *vdev = video_devdata(filp);
+ struct v4l2_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);
+
+ filp->private_data = fh;
+ if (fh == NULL)
+ return -ENOMEM;
+ v4l2_fh_init(fh, vdev);
+ v4l2_fh_add(fh);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_fh_open);
+
+void v4l2_fh_del(struct v4l2_fh *fh)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ list_del_init(&fh->list);
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+ v4l2_prio_close(fh->vdev->prio, fh->prio);
+}
+EXPORT_SYMBOL_GPL(v4l2_fh_del);
+
+void v4l2_fh_exit(struct v4l2_fh *fh)
+{
+ if (fh->vdev == NULL)
+ return;
+ v4l_disable_media_source(fh->vdev);
+ v4l2_event_unsubscribe_all(fh);
+ mutex_destroy(&fh->subscribe_lock);
+ fh->vdev = NULL;
+}
+EXPORT_SYMBOL_GPL(v4l2_fh_exit);
+
+int v4l2_fh_release(struct file *filp)
+{
+ struct v4l2_fh *fh = filp->private_data;
+
+ if (fh) {
+ v4l2_fh_del(fh);
+ v4l2_fh_exit(fh);
+ kfree(fh);
+ filp->private_data = NULL;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_fh_release);
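+
+/*
+ * Illustrative sketch: drivers with no per-open state beyond the file
+ * handle can plug the two helpers above directly into their file
+ * operations; 'my_fops' is a hypothetical name and the .poll choice
+ * is driver specific.
+ *
+ *	static const struct v4l2_file_operations my_fops = {
+ *		.owner		= THIS_MODULE,
+ *		.open		= v4l2_fh_open,
+ *		.release	= v4l2_fh_release,
+ *		.poll		= v4l2_ctrl_poll,
+ *		.unlocked_ioctl	= video_ioctl2,
+ *	};
+ */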
+
+int v4l2_fh_is_singular(struct v4l2_fh *fh)
+{
+ unsigned long flags;
+ int is_singular;
+
+ if (fh == NULL || fh->vdev == NULL)
+ return 0;
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ is_singular = list_is_singular(&fh->list);
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+ return is_singular;
+}
+EXPORT_SYMBOL_GPL(v4l2_fh_is_singular);
diff --git a/drivers/media/v4l2-core/v4l2-flash-led-class.c b/drivers/media/v4l2-core/v4l2-flash-led-class.c
new file mode 100644
index 0000000000..355595a0fe
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-flash-led-class.c
@@ -0,0 +1,746 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * V4L2 flash LED sub-device registration helpers.
+ *
+ * Copyright (C) 2015 Samsung Electronics Co., Ltd
+ * Author: Jacek Anaszewski <j.anaszewski@samsung.com>
+ */
+
+#include <linux/led-class-flash.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <media/v4l2-flash-led-class.h>
+
+#define has_flash_op(v4l2_flash, op) \
+ (v4l2_flash && v4l2_flash->ops && v4l2_flash->ops->op)
+
+#define call_flash_op(v4l2_flash, op, arg) \
+ (has_flash_op(v4l2_flash, op) ? \
+ v4l2_flash->ops->op(v4l2_flash, arg) : \
+ -EINVAL)
+
+enum ctrl_init_data_id {
+ LED_MODE,
+ TORCH_INTENSITY,
+ FLASH_INTENSITY,
+ INDICATOR_INTENSITY,
+ FLASH_TIMEOUT,
+ STROBE_SOURCE,
+ /*
+	 * Only the values above are applicable to
+	 * the 'ctrls' array in struct v4l2_flash.
+ */
+ FLASH_STROBE,
+ STROBE_STOP,
+ STROBE_STATUS,
+ FLASH_FAULT,
+ NUM_FLASH_CTRLS,
+};
+
+static enum led_brightness __intensity_to_led_brightness(
+ struct v4l2_ctrl *ctrl, s32 intensity)
+{
+ intensity -= ctrl->minimum;
+ intensity /= (u32) ctrl->step;
+
+ /*
+	 * Indicator LEDs, unlike torch LEDs, are turned on/off based on
+	 * the state of the V4L2_CID_FLASH_INDICATOR_INTENSITY control only.
+	 * Therefore it must be possible to set it to level 0, which in
+	 * the LED subsystem reflects the LED_OFF state.
+ */
+ if (ctrl->minimum)
+ ++intensity;
+
+ return intensity;
+}
+
+static s32 __led_brightness_to_intensity(struct v4l2_ctrl *ctrl,
+ enum led_brightness brightness)
+{
+ /*
+	 * Indicator LEDs, unlike torch LEDs, are turned on/off based on
+	 * the state of the V4L2_CID_FLASH_INDICATOR_INTENSITY control only.
+	 * Do not decrement the brightness read from the LED subsystem for
+	 * an indicator LED, as it may equal 0. For torch LEDs this function
+	 * is called only when V4L2_FLASH_LED_MODE_TORCH is set and the
+	 * brightness read is guaranteed to be greater than 0. In the
+	 * V4L2_FLASH_LED_MODE_NONE mode the cached torch intensity is used.
+ */
+ if (ctrl->id != V4L2_CID_FLASH_INDICATOR_INTENSITY)
+ --brightness;
+
+ return (brightness * ctrl->step) + ctrl->minimum;
+}
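+
+/*
+ * Worked example (illustrative values): with ctrl->minimum == 15625 and
+ * ctrl->step == 15625 (microamperes), a torch intensity of 46875 maps to
+ * LED brightness (46875 - 15625) / 15625 + 1 == 3, and brightness 3 maps
+ * back to (3 - 1) * 15625 + 15625 == 46875.
+ */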
+
+static int v4l2_flash_set_led_brightness(struct v4l2_flash *v4l2_flash,
+ struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_ctrl **ctrls = v4l2_flash->ctrls;
+ struct led_classdev *led_cdev;
+ enum led_brightness brightness;
+
+ if (has_flash_op(v4l2_flash, intensity_to_led_brightness))
+ brightness = call_flash_op(v4l2_flash,
+ intensity_to_led_brightness,
+ ctrl->val);
+ else
+ brightness = __intensity_to_led_brightness(ctrl, ctrl->val);
+ /*
+	 * If a LED Flash class driver provides ops for custom
+	 * brightness <-> intensity conversion, it must also have defined
+	 * the related v4l2 control with step == 1. In that case a backward
+	 * conversion from LED brightness to v4l2 intensity is required to
+	 * find the aligned intensity value.
+ */
+ if (has_flash_op(v4l2_flash, led_brightness_to_intensity))
+ ctrl->val = call_flash_op(v4l2_flash,
+ led_brightness_to_intensity,
+ brightness);
+
+ if (ctrl == ctrls[TORCH_INTENSITY]) {
+ if (ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_TORCH)
+ return 0;
+
+ if (WARN_ON_ONCE(!v4l2_flash->fled_cdev))
+ return -EINVAL;
+
+ led_cdev = &v4l2_flash->fled_cdev->led_cdev;
+ } else {
+ if (WARN_ON_ONCE(!v4l2_flash->iled_cdev))
+ return -EINVAL;
+
+ led_cdev = v4l2_flash->iled_cdev;
+ }
+
+ return led_set_brightness_sync(led_cdev, brightness);
+}
+
+static int v4l2_flash_update_led_brightness(struct v4l2_flash *v4l2_flash,
+ struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_ctrl **ctrls = v4l2_flash->ctrls;
+ struct led_classdev *led_cdev;
+ int ret;
+
+ if (ctrl == ctrls[TORCH_INTENSITY]) {
+ /*
+		 * Update the torch brightness only in torch mode. In other
+		 * modes the torch LED is turned off, so reading it would
+		 * spuriously inform user space that the value of the
+		 * V4L2_CID_FLASH_TORCH_INTENSITY control has changed to 0.
+ */
+ if (ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_TORCH)
+ return 0;
+
+ if (WARN_ON_ONCE(!v4l2_flash->fled_cdev))
+ return -EINVAL;
+
+ led_cdev = &v4l2_flash->fled_cdev->led_cdev;
+ } else {
+ if (WARN_ON_ONCE(!v4l2_flash->iled_cdev))
+ return -EINVAL;
+
+ led_cdev = v4l2_flash->iled_cdev;
+ }
+
+ ret = led_update_brightness(led_cdev);
+ if (ret < 0)
+ return ret;
+
+ if (has_flash_op(v4l2_flash, led_brightness_to_intensity))
+ ctrl->val = call_flash_op(v4l2_flash,
+ led_brightness_to_intensity,
+ led_cdev->brightness);
+ else
+ ctrl->val = __led_brightness_to_intensity(ctrl,
+ led_cdev->brightness);
+
+ return 0;
+}
+
+static int v4l2_flash_g_volatile_ctrl(struct v4l2_ctrl *c)
+{
+ struct v4l2_flash *v4l2_flash = v4l2_ctrl_to_v4l2_flash(c);
+ struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev;
+ bool is_strobing;
+ int ret;
+
+ switch (c->id) {
+ case V4L2_CID_FLASH_TORCH_INTENSITY:
+ case V4L2_CID_FLASH_INDICATOR_INTENSITY:
+ return v4l2_flash_update_led_brightness(v4l2_flash, c);
+ }
+
+ if (!fled_cdev)
+ return -EINVAL;
+
+ switch (c->id) {
+ case V4L2_CID_FLASH_INTENSITY:
+ ret = led_update_flash_brightness(fled_cdev);
+ if (ret < 0)
+ return ret;
+ /*
+ * No conversion is needed as LED Flash class also uses
+ * microamperes for flash intensity units.
+ */
+ c->val = fled_cdev->brightness.val;
+ return 0;
+ case V4L2_CID_FLASH_STROBE_STATUS:
+ ret = led_get_flash_strobe(fled_cdev, &is_strobing);
+ if (ret < 0)
+ return ret;
+ c->val = is_strobing;
+ return 0;
+ case V4L2_CID_FLASH_FAULT:
+ /* LED faults map directly to V4L2 flash faults */
+ return led_get_flash_fault(fled_cdev, &c->val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static bool __software_strobe_mode_inactive(struct v4l2_ctrl **ctrls)
+{
+ return ((ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_FLASH) ||
+ (ctrls[STROBE_SOURCE] && (ctrls[STROBE_SOURCE]->val !=
+ V4L2_FLASH_STROBE_SOURCE_SOFTWARE)));
+}
+
+static int v4l2_flash_s_ctrl(struct v4l2_ctrl *c)
+{
+ struct v4l2_flash *v4l2_flash = v4l2_ctrl_to_v4l2_flash(c);
+ struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev;
+ struct led_classdev *led_cdev;
+ struct v4l2_ctrl **ctrls = v4l2_flash->ctrls;
+ bool external_strobe;
+ int ret = 0;
+
+ switch (c->id) {
+ case V4L2_CID_FLASH_TORCH_INTENSITY:
+ case V4L2_CID_FLASH_INDICATOR_INTENSITY:
+ return v4l2_flash_set_led_brightness(v4l2_flash, c);
+ }
+
+ if (!fled_cdev)
+ return -EINVAL;
+
+ led_cdev = &fled_cdev->led_cdev;
+
+ switch (c->id) {
+ case V4L2_CID_FLASH_LED_MODE:
+ switch (c->val) {
+ case V4L2_FLASH_LED_MODE_NONE:
+ led_set_brightness_sync(led_cdev, LED_OFF);
+ return led_set_flash_strobe(fled_cdev, false);
+ case V4L2_FLASH_LED_MODE_FLASH:
+ /* Turn the torch LED off */
+ led_set_brightness_sync(led_cdev, LED_OFF);
+ if (ctrls[STROBE_SOURCE]) {
+ external_strobe = (ctrls[STROBE_SOURCE]->val ==
+ V4L2_FLASH_STROBE_SOURCE_EXTERNAL);
+
+ ret = call_flash_op(v4l2_flash,
+ external_strobe_set,
+ external_strobe);
+ }
+ return ret;
+ case V4L2_FLASH_LED_MODE_TORCH:
+ if (ctrls[STROBE_SOURCE]) {
+ ret = call_flash_op(v4l2_flash,
+ external_strobe_set,
+ false);
+ if (ret < 0)
+ return ret;
+ }
+ /* Stop flash strobing */
+ ret = led_set_flash_strobe(fled_cdev, false);
+ if (ret < 0)
+ return ret;
+
+ return v4l2_flash_set_led_brightness(v4l2_flash,
+ ctrls[TORCH_INTENSITY]);
+ }
+ break;
+ case V4L2_CID_FLASH_STROBE_SOURCE:
+ external_strobe = (c->val == V4L2_FLASH_STROBE_SOURCE_EXTERNAL);
+ /*
+		 * On some hardware arrangements setting the strobe source may
+		 * affect torch mode. Therefore, if not in flash mode, only
+		 * cache this setting; it will be applied upon switching
+		 * to flash mode.
+ */
+ if (ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_FLASH)
+ return 0;
+
+ return call_flash_op(v4l2_flash, external_strobe_set,
+ external_strobe);
+ case V4L2_CID_FLASH_STROBE:
+ if (__software_strobe_mode_inactive(ctrls))
+ return -EBUSY;
+ return led_set_flash_strobe(fled_cdev, true);
+ case V4L2_CID_FLASH_STROBE_STOP:
+ if (__software_strobe_mode_inactive(ctrls))
+ return -EBUSY;
+ return led_set_flash_strobe(fled_cdev, false);
+ case V4L2_CID_FLASH_TIMEOUT:
+ /*
+ * No conversion is needed as LED Flash class also uses
+ * microseconds for flash timeout units.
+ */
+ return led_set_flash_timeout(fled_cdev, c->val);
+ case V4L2_CID_FLASH_INTENSITY:
+ /*
+ * No conversion is needed as LED Flash class also uses
+ * microamperes for flash intensity units.
+ */
+ return led_set_flash_brightness(fled_cdev, c->val);
+ }
+
+ return -EINVAL;
+}
+
+static const struct v4l2_ctrl_ops v4l2_flash_ctrl_ops = {
+ .g_volatile_ctrl = v4l2_flash_g_volatile_ctrl,
+ .s_ctrl = v4l2_flash_s_ctrl,
+};
+
+static void __lfs_to_v4l2_ctrl_config(struct led_flash_setting *s,
+ struct v4l2_ctrl_config *c)
+{
+ c->min = s->min;
+ c->max = s->max;
+ c->step = s->step;
+ c->def = s->val;
+}
+
+static void __fill_ctrl_init_data(struct v4l2_flash *v4l2_flash,
+ struct v4l2_flash_config *flash_cfg,
+ struct v4l2_flash_ctrl_data *ctrl_init_data)
+{
+ struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev;
+ struct led_classdev *led_cdev = fled_cdev ? &fled_cdev->led_cdev : NULL;
+ struct v4l2_ctrl_config *ctrl_cfg;
+ u32 mask;
+
+ /* Init INDICATOR_INTENSITY ctrl data */
+ if (v4l2_flash->iled_cdev) {
+ ctrl_init_data[INDICATOR_INTENSITY].cid =
+ V4L2_CID_FLASH_INDICATOR_INTENSITY;
+ ctrl_cfg = &ctrl_init_data[INDICATOR_INTENSITY].config;
+ __lfs_to_v4l2_ctrl_config(&flash_cfg->intensity,
+ ctrl_cfg);
+ ctrl_cfg->id = V4L2_CID_FLASH_INDICATOR_INTENSITY;
+ ctrl_cfg->min = 0;
+ ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE |
+ V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
+ }
+
+ if (!led_cdev || WARN_ON(!(led_cdev->flags & LED_DEV_CAP_FLASH)))
+ return;
+
+ /* Init FLASH_FAULT ctrl data */
+ if (flash_cfg->flash_faults) {
+ ctrl_init_data[FLASH_FAULT].cid = V4L2_CID_FLASH_FAULT;
+ ctrl_cfg = &ctrl_init_data[FLASH_FAULT].config;
+ ctrl_cfg->id = V4L2_CID_FLASH_FAULT;
+ ctrl_cfg->max = flash_cfg->flash_faults;
+ ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE |
+ V4L2_CTRL_FLAG_READ_ONLY;
+ }
+
+ /* Init FLASH_LED_MODE ctrl data */
+ mask = 1 << V4L2_FLASH_LED_MODE_NONE |
+ 1 << V4L2_FLASH_LED_MODE_TORCH;
+ if (led_cdev->flags & LED_DEV_CAP_FLASH)
+ mask |= 1 << V4L2_FLASH_LED_MODE_FLASH;
+
+ ctrl_init_data[LED_MODE].cid = V4L2_CID_FLASH_LED_MODE;
+ ctrl_cfg = &ctrl_init_data[LED_MODE].config;
+ ctrl_cfg->id = V4L2_CID_FLASH_LED_MODE;
+ ctrl_cfg->max = V4L2_FLASH_LED_MODE_TORCH;
+ ctrl_cfg->menu_skip_mask = ~mask;
+ ctrl_cfg->def = V4L2_FLASH_LED_MODE_NONE;
+ ctrl_cfg->flags = 0;
+
+ /* Init TORCH_INTENSITY ctrl data */
+ ctrl_init_data[TORCH_INTENSITY].cid = V4L2_CID_FLASH_TORCH_INTENSITY;
+ ctrl_cfg = &ctrl_init_data[TORCH_INTENSITY].config;
+ __lfs_to_v4l2_ctrl_config(&flash_cfg->intensity, ctrl_cfg);
+ ctrl_cfg->id = V4L2_CID_FLASH_TORCH_INTENSITY;
+ ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE |
+ V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
+
+ /* Init FLASH_STROBE ctrl data */
+ ctrl_init_data[FLASH_STROBE].cid = V4L2_CID_FLASH_STROBE;
+ ctrl_cfg = &ctrl_init_data[FLASH_STROBE].config;
+ ctrl_cfg->id = V4L2_CID_FLASH_STROBE;
+
+ /* Init STROBE_STOP ctrl data */
+ ctrl_init_data[STROBE_STOP].cid = V4L2_CID_FLASH_STROBE_STOP;
+ ctrl_cfg = &ctrl_init_data[STROBE_STOP].config;
+ ctrl_cfg->id = V4L2_CID_FLASH_STROBE_STOP;
+
+ /* Init FLASH_STROBE_SOURCE ctrl data */
+ if (flash_cfg->has_external_strobe) {
+ mask = (1 << V4L2_FLASH_STROBE_SOURCE_SOFTWARE) |
+ (1 << V4L2_FLASH_STROBE_SOURCE_EXTERNAL);
+ ctrl_init_data[STROBE_SOURCE].cid =
+ V4L2_CID_FLASH_STROBE_SOURCE;
+ ctrl_cfg = &ctrl_init_data[STROBE_SOURCE].config;
+ ctrl_cfg->id = V4L2_CID_FLASH_STROBE_SOURCE;
+ ctrl_cfg->max = V4L2_FLASH_STROBE_SOURCE_EXTERNAL;
+ ctrl_cfg->menu_skip_mask = ~mask;
+ ctrl_cfg->def = V4L2_FLASH_STROBE_SOURCE_SOFTWARE;
+ }
+
+ /* Init STROBE_STATUS ctrl data */
+ if (has_flash_op(fled_cdev, strobe_get)) {
+ ctrl_init_data[STROBE_STATUS].cid =
+ V4L2_CID_FLASH_STROBE_STATUS;
+ ctrl_cfg = &ctrl_init_data[STROBE_STATUS].config;
+ ctrl_cfg->id = V4L2_CID_FLASH_STROBE_STATUS;
+ ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE |
+ V4L2_CTRL_FLAG_READ_ONLY;
+ }
+
+ /* Init FLASH_TIMEOUT ctrl data */
+ if (has_flash_op(fled_cdev, timeout_set)) {
+ ctrl_init_data[FLASH_TIMEOUT].cid = V4L2_CID_FLASH_TIMEOUT;
+ ctrl_cfg = &ctrl_init_data[FLASH_TIMEOUT].config;
+ __lfs_to_v4l2_ctrl_config(&fled_cdev->timeout, ctrl_cfg);
+ ctrl_cfg->id = V4L2_CID_FLASH_TIMEOUT;
+ }
+
+ /* Init FLASH_INTENSITY ctrl data */
+ if (has_flash_op(fled_cdev, flash_brightness_set)) {
+ ctrl_init_data[FLASH_INTENSITY].cid = V4L2_CID_FLASH_INTENSITY;
+ ctrl_cfg = &ctrl_init_data[FLASH_INTENSITY].config;
+ __lfs_to_v4l2_ctrl_config(&fled_cdev->brightness, ctrl_cfg);
+ ctrl_cfg->id = V4L2_CID_FLASH_INTENSITY;
+ ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE |
+ V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
+ }
+}
+
+static int v4l2_flash_init_controls(struct v4l2_flash *v4l2_flash,
+ struct v4l2_flash_config *flash_cfg)
+{
+ struct v4l2_flash_ctrl_data *ctrl_init_data;
+ struct v4l2_ctrl *ctrl;
+ struct v4l2_ctrl_config *ctrl_cfg;
+ int i, ret, num_ctrls = 0;
+
+ v4l2_flash->ctrls = devm_kcalloc(v4l2_flash->sd.dev,
+ STROBE_SOURCE + 1,
+ sizeof(*v4l2_flash->ctrls),
+ GFP_KERNEL);
+ if (!v4l2_flash->ctrls)
+ return -ENOMEM;
+
+ /* allocate memory dynamically so as not to exceed stack frame size */
+ ctrl_init_data = kcalloc(NUM_FLASH_CTRLS, sizeof(*ctrl_init_data),
+ GFP_KERNEL);
+ if (!ctrl_init_data)
+ return -ENOMEM;
+
+ __fill_ctrl_init_data(v4l2_flash, flash_cfg, ctrl_init_data);
+
+ for (i = 0; i < NUM_FLASH_CTRLS; ++i)
+ if (ctrl_init_data[i].cid)
+ ++num_ctrls;
+
+ v4l2_ctrl_handler_init(&v4l2_flash->hdl, num_ctrls);
+
+ for (i = 0; i < NUM_FLASH_CTRLS; ++i) {
+ ctrl_cfg = &ctrl_init_data[i].config;
+ if (!ctrl_init_data[i].cid)
+ continue;
+
+ if (ctrl_cfg->id == V4L2_CID_FLASH_LED_MODE ||
+ ctrl_cfg->id == V4L2_CID_FLASH_STROBE_SOURCE)
+ ctrl = v4l2_ctrl_new_std_menu(&v4l2_flash->hdl,
+ &v4l2_flash_ctrl_ops,
+ ctrl_cfg->id,
+ ctrl_cfg->max,
+ ctrl_cfg->menu_skip_mask,
+ ctrl_cfg->def);
+ else
+ ctrl = v4l2_ctrl_new_std(&v4l2_flash->hdl,
+ &v4l2_flash_ctrl_ops,
+ ctrl_cfg->id,
+ ctrl_cfg->min,
+ ctrl_cfg->max,
+ ctrl_cfg->step,
+ ctrl_cfg->def);
+
+ if (ctrl)
+ ctrl->flags |= ctrl_cfg->flags;
+
+ if (i <= STROBE_SOURCE)
+ v4l2_flash->ctrls[i] = ctrl;
+ }
+
+ kfree(ctrl_init_data);
+
+ if (v4l2_flash->hdl.error) {
+ ret = v4l2_flash->hdl.error;
+ goto error_free_handler;
+ }
+
+ v4l2_ctrl_handler_setup(&v4l2_flash->hdl);
+
+ v4l2_flash->sd.ctrl_handler = &v4l2_flash->hdl;
+
+ return 0;
+
+error_free_handler:
+ v4l2_ctrl_handler_free(&v4l2_flash->hdl);
+ return ret;
+}
+
+static int __sync_device_with_v4l2_controls(struct v4l2_flash *v4l2_flash)
+{
+ struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev;
+ struct v4l2_ctrl **ctrls = v4l2_flash->ctrls;
+ int ret = 0;
+
+ if (ctrls[TORCH_INTENSITY]) {
+ ret = v4l2_flash_set_led_brightness(v4l2_flash,
+ ctrls[TORCH_INTENSITY]);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (ctrls[INDICATOR_INTENSITY]) {
+ ret = v4l2_flash_set_led_brightness(v4l2_flash,
+ ctrls[INDICATOR_INTENSITY]);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (ctrls[FLASH_TIMEOUT]) {
+ if (WARN_ON_ONCE(!fled_cdev))
+ return -EINVAL;
+
+ ret = led_set_flash_timeout(fled_cdev,
+ ctrls[FLASH_TIMEOUT]->val);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (ctrls[FLASH_INTENSITY]) {
+ if (WARN_ON_ONCE(!fled_cdev))
+ return -EINVAL;
+
+ ret = led_set_flash_brightness(fled_cdev,
+ ctrls[FLASH_INTENSITY]->val);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+	 * On some hardware arrangements setting the strobe source may affect
+	 * torch mode. Synchronize the strobe source setting only if not in
+	 * torch mode; in the torch mode case it will be synchronized upon
+	 * switching to flash mode.
+ */
+ if (ctrls[STROBE_SOURCE] &&
+ ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_TORCH)
+ ret = call_flash_op(v4l2_flash, external_strobe_set,
+ ctrls[STROBE_SOURCE]->val);
+
+ return ret;
+}
+
+/*
+ * V4L2 subdev internal operations
+ */
+
+static int v4l2_flash_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_flash *v4l2_flash = v4l2_subdev_to_v4l2_flash(sd);
+ struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev;
+ struct led_classdev *led_cdev = fled_cdev ? &fled_cdev->led_cdev : NULL;
+ struct led_classdev *led_cdev_ind = v4l2_flash->iled_cdev;
+ int ret = 0;
+
+ if (!v4l2_fh_is_singular(&fh->vfh))
+ return 0;
+
+ if (led_cdev) {
+ mutex_lock(&led_cdev->led_access);
+
+ led_sysfs_disable(led_cdev);
+ led_trigger_remove(led_cdev);
+
+ mutex_unlock(&led_cdev->led_access);
+ }
+
+ if (led_cdev_ind) {
+ mutex_lock(&led_cdev_ind->led_access);
+
+ led_sysfs_disable(led_cdev_ind);
+ led_trigger_remove(led_cdev_ind);
+
+ mutex_unlock(&led_cdev_ind->led_access);
+ }
+
+ ret = __sync_device_with_v4l2_controls(v4l2_flash);
+ if (ret < 0)
+ goto out_sync_device;
+
+ return 0;
+out_sync_device:
+ if (led_cdev) {
+ mutex_lock(&led_cdev->led_access);
+ led_sysfs_enable(led_cdev);
+ mutex_unlock(&led_cdev->led_access);
+ }
+
+ if (led_cdev_ind) {
+ mutex_lock(&led_cdev_ind->led_access);
+ led_sysfs_enable(led_cdev_ind);
+ mutex_unlock(&led_cdev_ind->led_access);
+ }
+
+ return ret;
+}
+
+static int v4l2_flash_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_flash *v4l2_flash = v4l2_subdev_to_v4l2_flash(sd);
+ struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev;
+ struct led_classdev *led_cdev = fled_cdev ? &fled_cdev->led_cdev : NULL;
+ struct led_classdev *led_cdev_ind = v4l2_flash->iled_cdev;
+ int ret = 0;
+
+ if (!v4l2_fh_is_singular(&fh->vfh))
+ return 0;
+
+ if (led_cdev) {
+ mutex_lock(&led_cdev->led_access);
+
+ if (v4l2_flash->ctrls[STROBE_SOURCE])
+ ret = v4l2_ctrl_s_ctrl(
+ v4l2_flash->ctrls[STROBE_SOURCE],
+ V4L2_FLASH_STROBE_SOURCE_SOFTWARE);
+ led_sysfs_enable(led_cdev);
+
+ mutex_unlock(&led_cdev->led_access);
+ }
+
+ if (led_cdev_ind) {
+ mutex_lock(&led_cdev_ind->led_access);
+ led_sysfs_enable(led_cdev_ind);
+ mutex_unlock(&led_cdev_ind->led_access);
+ }
+
+ return ret;
+}
+
+static const struct v4l2_subdev_internal_ops v4l2_flash_subdev_internal_ops = {
+ .open = v4l2_flash_open,
+ .close = v4l2_flash_close,
+};
+
+static const struct v4l2_subdev_ops v4l2_flash_subdev_ops;
+
+static struct v4l2_flash *__v4l2_flash_init(
+ struct device *dev, struct fwnode_handle *fwn,
+ struct led_classdev_flash *fled_cdev, struct led_classdev *iled_cdev,
+ const struct v4l2_flash_ops *ops, struct v4l2_flash_config *config)
+{
+ struct v4l2_flash *v4l2_flash;
+ struct v4l2_subdev *sd;
+ int ret;
+
+ if (!config)
+ return ERR_PTR(-EINVAL);
+
+ v4l2_flash = devm_kzalloc(dev, sizeof(*v4l2_flash), GFP_KERNEL);
+ if (!v4l2_flash)
+ return ERR_PTR(-ENOMEM);
+
+ sd = &v4l2_flash->sd;
+ v4l2_flash->fled_cdev = fled_cdev;
+ v4l2_flash->iled_cdev = iled_cdev;
+ v4l2_flash->ops = ops;
+ sd->dev = dev;
+ sd->fwnode = fwn ? fwn : dev_fwnode(dev);
+ v4l2_subdev_init(sd, &v4l2_flash_subdev_ops);
+ sd->internal_ops = &v4l2_flash_subdev_internal_ops;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ strscpy(sd->name, config->dev_name, sizeof(sd->name));
+
+ ret = media_entity_pads_init(&sd->entity, 0, NULL);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ sd->entity.function = MEDIA_ENT_F_FLASH;
+
+ ret = v4l2_flash_init_controls(v4l2_flash, config);
+ if (ret < 0)
+ goto err_init_controls;
+
+ fwnode_handle_get(sd->fwnode);
+
+ ret = v4l2_async_register_subdev(sd);
+ if (ret < 0)
+ goto err_async_register_sd;
+
+ return v4l2_flash;
+
+err_async_register_sd:
+ fwnode_handle_put(sd->fwnode);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+err_init_controls:
+ media_entity_cleanup(&sd->entity);
+
+ return ERR_PTR(ret);
+}
+
+struct v4l2_flash *v4l2_flash_init(
+ struct device *dev, struct fwnode_handle *fwn,
+ struct led_classdev_flash *fled_cdev,
+ const struct v4l2_flash_ops *ops,
+ struct v4l2_flash_config *config)
+{
+ return __v4l2_flash_init(dev, fwn, fled_cdev, NULL, ops, config);
+}
+EXPORT_SYMBOL_GPL(v4l2_flash_init);
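+
+/*
+ * Illustrative sketch (hypothetical names and values): registering the
+ * sub-device from a LED flash driver's probe. 'led', 'my_flash_ops' and
+ * the config fields are examples, not requirements.
+ *
+ *	struct v4l2_flash_config cfg = {
+ *		.has_external_strobe = true,
+ *	};
+ *
+ *	strscpy(cfg.dev_name, "my-flash", sizeof(cfg.dev_name));
+ *	led->v4l2_flash = v4l2_flash_init(dev, NULL, &led->fled_cdev,
+ *					  &my_flash_ops, &cfg);
+ *	if (IS_ERR(led->v4l2_flash))
+ *		return PTR_ERR(led->v4l2_flash);
+ */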
+
+struct v4l2_flash *v4l2_flash_indicator_init(
+ struct device *dev, struct fwnode_handle *fwn,
+ struct led_classdev *iled_cdev,
+ struct v4l2_flash_config *config)
+{
+ return __v4l2_flash_init(dev, fwn, NULL, iled_cdev, NULL, config);
+}
+EXPORT_SYMBOL_GPL(v4l2_flash_indicator_init);
+
+void v4l2_flash_release(struct v4l2_flash *v4l2_flash)
+{
+ struct v4l2_subdev *sd;
+
+ if (IS_ERR_OR_NULL(v4l2_flash))
+ return;
+
+ sd = &v4l2_flash->sd;
+
+ v4l2_async_unregister_subdev(sd);
+
+ fwnode_handle_put(sd->fwnode);
+
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ media_entity_cleanup(&sd->entity);
+}
+EXPORT_SYMBOL_GPL(v4l2_flash_release);
+
+MODULE_AUTHOR("Jacek Anaszewski <j.anaszewski@samsung.com>");
+MODULE_DESCRIPTION("V4L2 Flash sub-device helpers");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
new file mode 100644
index 0000000000..7f181fbbb1
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-fwnode.c
@@ -0,0 +1,1255 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * V4L2 fwnode binding parsing library
+ *
+ * The origins of the V4L2 fwnode library are in the V4L2 OF library that
+ * was formerly located in v4l2-of.c.
+ *
+ * Copyright (c) 2016 Intel Corporation.
+ * Author: Sakari Ailus <sakari.ailus@linux.intel.com>
+ *
+ * Copyright (C) 2012 - 2013 Samsung Electronics Co., Ltd.
+ * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * Copyright (C) 2012 Renesas Electronics Corp.
+ * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ */
+#include <linux/acpi.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+
+#include "v4l2-subdev-priv.h"
+
+static const struct v4l2_fwnode_bus_conv {
+ enum v4l2_fwnode_bus_type fwnode_bus_type;
+ enum v4l2_mbus_type mbus_type;
+ const char *name;
+} buses[] = {
+ {
+ V4L2_FWNODE_BUS_TYPE_GUESS,
+ V4L2_MBUS_UNKNOWN,
+ "not specified",
+ }, {
+ V4L2_FWNODE_BUS_TYPE_CSI2_CPHY,
+ V4L2_MBUS_CSI2_CPHY,
+ "MIPI CSI-2 C-PHY",
+ }, {
+ V4L2_FWNODE_BUS_TYPE_CSI1,
+ V4L2_MBUS_CSI1,
+ "MIPI CSI-1",
+ }, {
+ V4L2_FWNODE_BUS_TYPE_CCP2,
+ V4L2_MBUS_CCP2,
+ "compact camera port 2",
+ }, {
+ V4L2_FWNODE_BUS_TYPE_CSI2_DPHY,
+ V4L2_MBUS_CSI2_DPHY,
+ "MIPI CSI-2 D-PHY",
+ }, {
+ V4L2_FWNODE_BUS_TYPE_PARALLEL,
+ V4L2_MBUS_PARALLEL,
+ "parallel",
+ }, {
+ V4L2_FWNODE_BUS_TYPE_BT656,
+ V4L2_MBUS_BT656,
+ "Bt.656",
+ }, {
+ V4L2_FWNODE_BUS_TYPE_DPI,
+ V4L2_MBUS_DPI,
+ "DPI",
+ }
+};
+
+static const struct v4l2_fwnode_bus_conv *
+get_v4l2_fwnode_bus_conv_by_fwnode_bus(enum v4l2_fwnode_bus_type type)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(buses); i++)
+ if (buses[i].fwnode_bus_type == type)
+ return &buses[i];
+
+ return NULL;
+}
+
+static enum v4l2_mbus_type
+v4l2_fwnode_bus_type_to_mbus(enum v4l2_fwnode_bus_type type)
+{
+ const struct v4l2_fwnode_bus_conv *conv =
+ get_v4l2_fwnode_bus_conv_by_fwnode_bus(type);
+
+ return conv ? conv->mbus_type : V4L2_MBUS_INVALID;
+}
+
+static const char *
+v4l2_fwnode_bus_type_to_string(enum v4l2_fwnode_bus_type type)
+{
+ const struct v4l2_fwnode_bus_conv *conv =
+ get_v4l2_fwnode_bus_conv_by_fwnode_bus(type);
+
+ return conv ? conv->name : "not found";
+}
+
+static const struct v4l2_fwnode_bus_conv *
+get_v4l2_fwnode_bus_conv_by_mbus(enum v4l2_mbus_type type)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(buses); i++)
+ if (buses[i].mbus_type == type)
+ return &buses[i];
+
+ return NULL;
+}
+
+static const char *
+v4l2_fwnode_mbus_type_to_string(enum v4l2_mbus_type type)
+{
+ const struct v4l2_fwnode_bus_conv *conv =
+ get_v4l2_fwnode_bus_conv_by_mbus(type);
+
+ return conv ? conv->name : "not found";
+}
+
+static int v4l2_fwnode_endpoint_parse_csi2_bus(struct fwnode_handle *fwnode,
+ struct v4l2_fwnode_endpoint *vep,
+ enum v4l2_mbus_type bus_type)
+{
+ struct v4l2_mbus_config_mipi_csi2 *bus = &vep->bus.mipi_csi2;
+ bool have_clk_lane = false, have_data_lanes = false,
+ have_lane_polarities = false;
+ unsigned int flags = 0, lanes_used = 0;
+ u32 array[1 + V4L2_MBUS_CSI2_MAX_DATA_LANES];
+ u32 clock_lane = 0;
+ unsigned int num_data_lanes = 0;
+ bool use_default_lane_mapping = false;
+ unsigned int i;
+ u32 v;
+ int rval;
+
+ if (bus_type == V4L2_MBUS_CSI2_DPHY ||
+ bus_type == V4L2_MBUS_CSI2_CPHY) {
+ use_default_lane_mapping = true;
+
+ num_data_lanes = min_t(u32, bus->num_data_lanes,
+ V4L2_MBUS_CSI2_MAX_DATA_LANES);
+
+ clock_lane = bus->clock_lane;
+ if (clock_lane)
+ use_default_lane_mapping = false;
+
+ for (i = 0; i < num_data_lanes; i++) {
+ array[i] = bus->data_lanes[i];
+ if (array[i])
+ use_default_lane_mapping = false;
+ }
+
+ if (use_default_lane_mapping)
+ pr_debug("no lane mapping given, using defaults\n");
+ }
+
+ rval = fwnode_property_count_u32(fwnode, "data-lanes");
+ if (rval > 0) {
+ num_data_lanes =
+ min_t(int, V4L2_MBUS_CSI2_MAX_DATA_LANES, rval);
+
+ fwnode_property_read_u32_array(fwnode, "data-lanes", array,
+ num_data_lanes);
+
+ have_data_lanes = true;
+ if (use_default_lane_mapping) {
+ pr_debug("data-lanes property exists; disabling default mapping\n");
+ use_default_lane_mapping = false;
+ }
+ }
+
+ for (i = 0; i < num_data_lanes; i++) {
+ if (lanes_used & BIT(array[i])) {
+ if (have_data_lanes || !use_default_lane_mapping)
+ pr_warn("duplicated lane %u in data-lanes, using defaults\n",
+ array[i]);
+ use_default_lane_mapping = true;
+ }
+ lanes_used |= BIT(array[i]);
+
+ if (have_data_lanes)
+ pr_debug("lane %u position %u\n", i, array[i]);
+ }
+
+ rval = fwnode_property_count_u32(fwnode, "lane-polarities");
+ if (rval > 0) {
+ if (rval != 1 + num_data_lanes /* clock+data */) {
+ pr_warn("invalid number of lane-polarities entries (need %u, got %u)\n",
+ 1 + num_data_lanes, rval);
+ return -EINVAL;
+ }
+
+ have_lane_polarities = true;
+ }
+
+ if (!fwnode_property_read_u32(fwnode, "clock-lanes", &v)) {
+ clock_lane = v;
+ pr_debug("clock lane position %u\n", v);
+ have_clk_lane = true;
+ }
+
+ if (have_clk_lane && lanes_used & BIT(clock_lane) &&
+ !use_default_lane_mapping) {
+ pr_warn("duplicated lane %u in clock-lanes, using defaults\n",
+ v);
+ use_default_lane_mapping = true;
+ }
+
+ if (fwnode_property_present(fwnode, "clock-noncontinuous")) {
+ flags |= V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK;
+ pr_debug("non-continuous clock\n");
+ }
+
+ if (bus_type == V4L2_MBUS_CSI2_DPHY ||
+ bus_type == V4L2_MBUS_CSI2_CPHY ||
+ lanes_used || have_clk_lane || flags) {
+ /* Only D-PHY has a clock lane. */
+ unsigned int dfl_data_lane_index =
+ bus_type == V4L2_MBUS_CSI2_DPHY;
+
+ bus->flags = flags;
+ if (bus_type == V4L2_MBUS_UNKNOWN)
+ vep->bus_type = V4L2_MBUS_CSI2_DPHY;
+ bus->num_data_lanes = num_data_lanes;
+
+ if (use_default_lane_mapping) {
+ bus->clock_lane = 0;
+ for (i = 0; i < num_data_lanes; i++)
+ bus->data_lanes[i] = dfl_data_lane_index + i;
+ } else {
+ bus->clock_lane = clock_lane;
+ for (i = 0; i < num_data_lanes; i++)
+ bus->data_lanes[i] = array[i];
+ }
+
+ if (have_lane_polarities) {
+ fwnode_property_read_u32_array(fwnode,
+ "lane-polarities", array,
+ 1 + num_data_lanes);
+
+ for (i = 0; i < 1 + num_data_lanes; i++) {
+ bus->lane_polarities[i] = array[i];
+ pr_debug("lane %u polarity %sinverted",
+ i, array[i] ? "" : "not ");
+ }
+ } else {
+ pr_debug("no lane polarities defined, assuming not inverted\n");
+ }
+ }
+
+ return 0;
+}
+
+#define PARALLEL_MBUS_FLAGS (V4L2_MBUS_HSYNC_ACTIVE_HIGH | \
+ V4L2_MBUS_HSYNC_ACTIVE_LOW | \
+ V4L2_MBUS_VSYNC_ACTIVE_HIGH | \
+ V4L2_MBUS_VSYNC_ACTIVE_LOW | \
+ V4L2_MBUS_FIELD_EVEN_HIGH | \
+ V4L2_MBUS_FIELD_EVEN_LOW)
+
+static void
+v4l2_fwnode_endpoint_parse_parallel_bus(struct fwnode_handle *fwnode,
+ struct v4l2_fwnode_endpoint *vep,
+ enum v4l2_mbus_type bus_type)
+{
+ struct v4l2_mbus_config_parallel *bus = &vep->bus.parallel;
+ unsigned int flags = 0;
+ u32 v;
+
+ if (bus_type == V4L2_MBUS_PARALLEL || bus_type == V4L2_MBUS_BT656)
+ flags = bus->flags;
+
+ if (!fwnode_property_read_u32(fwnode, "hsync-active", &v)) {
+ flags &= ~(V4L2_MBUS_HSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_HSYNC_ACTIVE_LOW);
+ flags |= v ? V4L2_MBUS_HSYNC_ACTIVE_HIGH :
+ V4L2_MBUS_HSYNC_ACTIVE_LOW;
+ pr_debug("hsync-active %s\n", v ? "high" : "low");
+ }
+
+ if (!fwnode_property_read_u32(fwnode, "vsync-active", &v)) {
+ flags &= ~(V4L2_MBUS_VSYNC_ACTIVE_HIGH |
+ V4L2_MBUS_VSYNC_ACTIVE_LOW);
+ flags |= v ? V4L2_MBUS_VSYNC_ACTIVE_HIGH :
+ V4L2_MBUS_VSYNC_ACTIVE_LOW;
+ pr_debug("vsync-active %s\n", v ? "high" : "low");
+ }
+
+ if (!fwnode_property_read_u32(fwnode, "field-even-active", &v)) {
+ flags &= ~(V4L2_MBUS_FIELD_EVEN_HIGH |
+ V4L2_MBUS_FIELD_EVEN_LOW);
+ flags |= v ? V4L2_MBUS_FIELD_EVEN_HIGH :
+ V4L2_MBUS_FIELD_EVEN_LOW;
+ pr_debug("field-even-active %s\n", v ? "high" : "low");
+ }
+
+ if (!fwnode_property_read_u32(fwnode, "pclk-sample", &v)) {
+ flags &= ~(V4L2_MBUS_PCLK_SAMPLE_RISING |
+ V4L2_MBUS_PCLK_SAMPLE_FALLING |
+ V4L2_MBUS_PCLK_SAMPLE_DUALEDGE);
+ switch (v) {
+ case 0:
+ flags |= V4L2_MBUS_PCLK_SAMPLE_FALLING;
+ pr_debug("pclk-sample low\n");
+ break;
+ case 1:
+ flags |= V4L2_MBUS_PCLK_SAMPLE_RISING;
+ pr_debug("pclk-sample high\n");
+ break;
+ case 2:
+ flags |= V4L2_MBUS_PCLK_SAMPLE_DUALEDGE;
+ pr_debug("pclk-sample dual edge\n");
+ break;
+ default:
+ pr_warn("invalid argument for pclk-sample");
+ break;
+ }
+ }
+
+ if (!fwnode_property_read_u32(fwnode, "data-active", &v)) {
+ flags &= ~(V4L2_MBUS_DATA_ACTIVE_HIGH |
+ V4L2_MBUS_DATA_ACTIVE_LOW);
+ flags |= v ? V4L2_MBUS_DATA_ACTIVE_HIGH :
+ V4L2_MBUS_DATA_ACTIVE_LOW;
+ pr_debug("data-active %s\n", v ? "high" : "low");
+ }
+
+ if (fwnode_property_present(fwnode, "slave-mode")) {
+ pr_debug("slave mode\n");
+ flags &= ~V4L2_MBUS_MASTER;
+ flags |= V4L2_MBUS_SLAVE;
+ } else {
+ flags &= ~V4L2_MBUS_SLAVE;
+ flags |= V4L2_MBUS_MASTER;
+ }
+
+ if (!fwnode_property_read_u32(fwnode, "bus-width", &v)) {
+ bus->bus_width = v;
+ pr_debug("bus-width %u\n", v);
+ }
+
+ if (!fwnode_property_read_u32(fwnode, "data-shift", &v)) {
+ bus->data_shift = v;
+ pr_debug("data-shift %u\n", v);
+ }
+
+ if (!fwnode_property_read_u32(fwnode, "sync-on-green-active", &v)) {
+ flags &= ~(V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH |
+ V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW);
+ flags |= v ? V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH :
+ V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW;
+ pr_debug("sync-on-green-active %s\n", v ? "high" : "low");
+ }
+
+ if (!fwnode_property_read_u32(fwnode, "data-enable-active", &v)) {
+ flags &= ~(V4L2_MBUS_DATA_ENABLE_HIGH |
+ V4L2_MBUS_DATA_ENABLE_LOW);
+ flags |= v ? V4L2_MBUS_DATA_ENABLE_HIGH :
+ V4L2_MBUS_DATA_ENABLE_LOW;
+ pr_debug("data-enable-active %s\n", v ? "high" : "low");
+ }
+
+ switch (bus_type) {
+ default:
+ bus->flags = flags;
+ if (flags & PARALLEL_MBUS_FLAGS)
+ vep->bus_type = V4L2_MBUS_PARALLEL;
+ else
+ vep->bus_type = V4L2_MBUS_BT656;
+ break;
+ case V4L2_MBUS_PARALLEL:
+ vep->bus_type = V4L2_MBUS_PARALLEL;
+ bus->flags = flags;
+ break;
+ case V4L2_MBUS_BT656:
+ vep->bus_type = V4L2_MBUS_BT656;
+ bus->flags = flags & ~PARALLEL_MBUS_FLAGS;
+ break;
+ }
+}
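+
+/*
+ * Illustrative firmware snippet (DT syntax) for the parallel-bus
+ * properties parsed above; the values are examples only:
+ *
+ *	endpoint {
+ *		bus-type = <5>;		// V4L2_FWNODE_BUS_TYPE_PARALLEL
+ *		bus-width = <8>;
+ *		hsync-active = <1>;
+ *		vsync-active = <0>;
+ *		pclk-sample = <1>;
+ *	};
+ */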
+
+static void
+v4l2_fwnode_endpoint_parse_csi1_bus(struct fwnode_handle *fwnode,
+ struct v4l2_fwnode_endpoint *vep,
+ enum v4l2_mbus_type bus_type)
+{
+ struct v4l2_mbus_config_mipi_csi1 *bus = &vep->bus.mipi_csi1;
+ u32 v;
+
+ if (!fwnode_property_read_u32(fwnode, "clock-inv", &v)) {
+ bus->clock_inv = v;
+ pr_debug("clock-inv %u\n", v);
+ }
+
+ if (!fwnode_property_read_u32(fwnode, "strobe", &v)) {
+ bus->strobe = v;
+ pr_debug("strobe %u\n", v);
+ }
+
+ if (!fwnode_property_read_u32(fwnode, "data-lanes", &v)) {
+ bus->data_lane = v;
+ pr_debug("data-lanes %u\n", v);
+ }
+
+ if (!fwnode_property_read_u32(fwnode, "clock-lanes", &v)) {
+ bus->clock_lane = v;
+ pr_debug("clock-lanes %u\n", v);
+ }
+
+ if (bus_type == V4L2_MBUS_CCP2)
+ vep->bus_type = V4L2_MBUS_CCP2;
+ else
+ vep->bus_type = V4L2_MBUS_CSI1;
+}
+
+static int __v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode,
+ struct v4l2_fwnode_endpoint *vep)
+{
+ u32 bus_type = V4L2_FWNODE_BUS_TYPE_GUESS;
+ enum v4l2_mbus_type mbus_type;
+ int rval;
+
+ pr_debug("===== begin parsing endpoint %pfw\n", fwnode);
+
+ fwnode_property_read_u32(fwnode, "bus-type", &bus_type);
+ pr_debug("fwnode video bus type %s (%u), mbus type %s (%u)\n",
+ v4l2_fwnode_bus_type_to_string(bus_type), bus_type,
+ v4l2_fwnode_mbus_type_to_string(vep->bus_type),
+ vep->bus_type);
+ mbus_type = v4l2_fwnode_bus_type_to_mbus(bus_type);
+ if (mbus_type == V4L2_MBUS_INVALID) {
+ pr_debug("unsupported bus type %u\n", bus_type);
+ return -EINVAL;
+ }
+
+ if (vep->bus_type != V4L2_MBUS_UNKNOWN) {
+ if (mbus_type != V4L2_MBUS_UNKNOWN &&
+ vep->bus_type != mbus_type) {
+ pr_debug("expecting bus type %s\n",
+ v4l2_fwnode_mbus_type_to_string(vep->bus_type));
+ return -ENXIO;
+ }
+ } else {
+ vep->bus_type = mbus_type;
+ }
+
+ switch (vep->bus_type) {
+ case V4L2_MBUS_UNKNOWN:
+ rval = v4l2_fwnode_endpoint_parse_csi2_bus(fwnode, vep,
+ V4L2_MBUS_UNKNOWN);
+ if (rval)
+ return rval;
+
+ if (vep->bus_type == V4L2_MBUS_UNKNOWN)
+ v4l2_fwnode_endpoint_parse_parallel_bus(fwnode, vep,
+ V4L2_MBUS_UNKNOWN);
+
+ pr_debug("assuming media bus type %s (%u)\n",
+ v4l2_fwnode_mbus_type_to_string(vep->bus_type),
+ vep->bus_type);
+
+ break;
+ case V4L2_MBUS_CCP2:
+ case V4L2_MBUS_CSI1:
+ v4l2_fwnode_endpoint_parse_csi1_bus(fwnode, vep, vep->bus_type);
+
+ break;
+ case V4L2_MBUS_CSI2_DPHY:
+ case V4L2_MBUS_CSI2_CPHY:
+ rval = v4l2_fwnode_endpoint_parse_csi2_bus(fwnode, vep,
+ vep->bus_type);
+ if (rval)
+ return rval;
+
+ break;
+ case V4L2_MBUS_PARALLEL:
+ case V4L2_MBUS_BT656:
+ v4l2_fwnode_endpoint_parse_parallel_bus(fwnode, vep,
+ vep->bus_type);
+
+ break;
+ default:
+ pr_warn("unsupported bus type %u\n", mbus_type);
+ return -EINVAL;
+ }
+
+ fwnode_graph_parse_endpoint(fwnode, &vep->base);
+
+ return 0;
+}
+
+int v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode,
+ struct v4l2_fwnode_endpoint *vep)
+{
+ int ret;
+
+ ret = __v4l2_fwnode_endpoint_parse(fwnode, vep);
+
+ pr_debug("===== end parsing endpoint %pfw\n", fwnode);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_parse);
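+
+/*
+ * Illustrative sketch: a sensor driver that supports only CSI-2 D-PHY
+ * presets vep.bus_type before parsing, so a conflicting 'bus-type'
+ * firmware property is rejected with -ENXIO. 'ep' is a hypothetical
+ * endpoint fwnode.
+ *
+ *	struct v4l2_fwnode_endpoint vep = {
+ *		.bus_type = V4L2_MBUS_CSI2_DPHY,
+ *	};
+ *
+ *	int ret = v4l2_fwnode_endpoint_parse(ep, &vep);
+ */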
+
+void v4l2_fwnode_endpoint_free(struct v4l2_fwnode_endpoint *vep)
+{
+ if (IS_ERR_OR_NULL(vep))
+ return;
+
+ kfree(vep->link_frequencies);
+ vep->link_frequencies = NULL;
+}
+EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_free);
+
+int v4l2_fwnode_endpoint_alloc_parse(struct fwnode_handle *fwnode,
+ struct v4l2_fwnode_endpoint *vep)
+{
+ int rval;
+
+ rval = __v4l2_fwnode_endpoint_parse(fwnode, vep);
+ if (rval < 0)
+ return rval;
+
+ rval = fwnode_property_count_u64(fwnode, "link-frequencies");
+ if (rval > 0) {
+ unsigned int i;
+
+ vep->link_frequencies =
+ kmalloc_array(rval, sizeof(*vep->link_frequencies),
+ GFP_KERNEL);
+ if (!vep->link_frequencies)
+ return -ENOMEM;
+
+ vep->nr_of_link_frequencies = rval;
+
+ rval = fwnode_property_read_u64_array(fwnode,
+ "link-frequencies",
+ vep->link_frequencies,
+ vep->nr_of_link_frequencies);
+ if (rval < 0) {
+ v4l2_fwnode_endpoint_free(vep);
+ return rval;
+ }
+
+ for (i = 0; i < vep->nr_of_link_frequencies; i++)
+ pr_debug("link-frequencies %u value %llu\n", i,
+ vep->link_frequencies[i]);
+ }
+
+ pr_debug("===== end parsing endpoint %pfw\n", fwnode);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_alloc_parse);
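+
+/*
+ * Illustrative sketch: the alloc variant is required when the
+ * 'link-frequencies' property is of interest; the endpoint must then be
+ * released with v4l2_fwnode_endpoint_free(). 'ep' is hypothetical.
+ *
+ *	struct v4l2_fwnode_endpoint vep = { .bus_type = V4L2_MBUS_CSI2_DPHY };
+ *
+ *	if (!v4l2_fwnode_endpoint_alloc_parse(ep, &vep)) {
+ *		if (vep.nr_of_link_frequencies)
+ *			pr_debug("first link frequency: %llu\n",
+ *				 vep.link_frequencies[0]);
+ *		v4l2_fwnode_endpoint_free(&vep);
+ *	}
+ */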
+
+int v4l2_fwnode_parse_link(struct fwnode_handle *fwnode,
+ struct v4l2_fwnode_link *link)
+{
+ struct fwnode_endpoint fwep;
+
+ memset(link, 0, sizeof(*link));
+
+ fwnode_graph_parse_endpoint(fwnode, &fwep);
+ link->local_id = fwep.id;
+ link->local_port = fwep.port;
+ link->local_node = fwnode_graph_get_port_parent(fwnode);
+ if (!link->local_node)
+ return -ENOLINK;
+
+ fwnode = fwnode_graph_get_remote_endpoint(fwnode);
+ if (!fwnode)
+ goto err_put_local_node;
+
+ fwnode_graph_parse_endpoint(fwnode, &fwep);
+ link->remote_id = fwep.id;
+ link->remote_port = fwep.port;
+ link->remote_node = fwnode_graph_get_port_parent(fwnode);
+ if (!link->remote_node)
+ goto err_put_remote_endpoint;
+
+ return 0;
+
+err_put_remote_endpoint:
+ fwnode_handle_put(fwnode);
+
+err_put_local_node:
+ fwnode_handle_put(link->local_node);
+
+ return -ENOLINK;
+}
+EXPORT_SYMBOL_GPL(v4l2_fwnode_parse_link);
+
+void v4l2_fwnode_put_link(struct v4l2_fwnode_link *link)
+{
+ fwnode_handle_put(link->local_node);
+ fwnode_handle_put(link->remote_node);
+}
+EXPORT_SYMBOL_GPL(v4l2_fwnode_put_link);
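+
+/*
+ * Illustrative sketch: resolving both ends of a graph link from a local
+ * endpoint and dropping the acquired fwnode references afterwards.
+ * 'ep' is a hypothetical endpoint fwnode.
+ *
+ *	struct v4l2_fwnode_link link;
+ *
+ *	if (!v4l2_fwnode_parse_link(ep, &link)) {
+ *		pr_debug("remote port %u\n", link.remote_port);
+ *		v4l2_fwnode_put_link(&link);
+ *	}
+ */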
+
+static const struct v4l2_fwnode_connector_conv {
+ enum v4l2_connector_type type;
+ const char *compatible;
+} connectors[] = {
+ {
+ .type = V4L2_CONN_COMPOSITE,
+ .compatible = "composite-video-connector",
+ }, {
+ .type = V4L2_CONN_SVIDEO,
+ .compatible = "svideo-connector",
+ },
+};
+
+static enum v4l2_connector_type
+v4l2_fwnode_string_to_connector_type(const char *con_str)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(connectors); i++)
+ if (!strcmp(con_str, connectors[i].compatible))
+ return connectors[i].type;
+
+ return V4L2_CONN_UNKNOWN;
+}
+
+static void
+v4l2_fwnode_connector_parse_analog(struct fwnode_handle *fwnode,
+ struct v4l2_fwnode_connector *vc)
+{
+ u32 stds;
+ int ret;
+
+ ret = fwnode_property_read_u32(fwnode, "sdtv-standards", &stds);
+
+ /* The property is optional. */
+ vc->connector.analog.sdtv_stds = ret ? V4L2_STD_ALL : stds;
+}
+
+void v4l2_fwnode_connector_free(struct v4l2_fwnode_connector *connector)
+{
+ struct v4l2_connector_link *link, *tmp;
+
+ if (IS_ERR_OR_NULL(connector) || connector->type == V4L2_CONN_UNKNOWN)
+ return;
+
+ list_for_each_entry_safe(link, tmp, &connector->links, head) {
+ v4l2_fwnode_put_link(&link->fwnode_link);
+ list_del(&link->head);
+ kfree(link);
+ }
+
+ kfree(connector->label);
+ connector->label = NULL;
+ connector->type = V4L2_CONN_UNKNOWN;
+}
+EXPORT_SYMBOL_GPL(v4l2_fwnode_connector_free);
+
+static enum v4l2_connector_type
+v4l2_fwnode_get_connector_type(struct fwnode_handle *fwnode)
+{
+ const char *type_name;
+ int err;
+
+ if (!fwnode)
+ return V4L2_CONN_UNKNOWN;
+
+ /* The connector-type is stored within the compatible string. */
+ err = fwnode_property_read_string(fwnode, "compatible", &type_name);
+ if (err)
+ return V4L2_CONN_UNKNOWN;
+
+ return v4l2_fwnode_string_to_connector_type(type_name);
+}
+
+int v4l2_fwnode_connector_parse(struct fwnode_handle *fwnode,
+ struct v4l2_fwnode_connector *connector)
+{
+ struct fwnode_handle *connector_node;
+ enum v4l2_connector_type connector_type;
+ const char *label;
+ int err;
+
+ if (!fwnode)
+ return -EINVAL;
+
+ memset(connector, 0, sizeof(*connector));
+
+ INIT_LIST_HEAD(&connector->links);
+
+ connector_node = fwnode_graph_get_port_parent(fwnode);
+ connector_type = v4l2_fwnode_get_connector_type(connector_node);
+ if (connector_type == V4L2_CONN_UNKNOWN) {
+ fwnode_handle_put(connector_node);
+ connector_node = fwnode_graph_get_remote_port_parent(fwnode);
+ connector_type = v4l2_fwnode_get_connector_type(connector_node);
+ }
+
+ if (connector_type == V4L2_CONN_UNKNOWN) {
+ pr_err("Unknown connector type\n");
+ err = -ENOTCONN;
+ goto out;
+ }
+
+ connector->type = connector_type;
+ connector->name = fwnode_get_name(connector_node);
+	err = fwnode_property_read_string(connector_node, "label", &label);
+	connector->label = err ? NULL : kstrdup_const(label, GFP_KERNEL);
+	/* The label property is optional; its absence is not an error. */
+	err = 0;
+
+ /* Parse the connector specific properties. */
+ switch (connector->type) {
+ case V4L2_CONN_COMPOSITE:
+ case V4L2_CONN_SVIDEO:
+ v4l2_fwnode_connector_parse_analog(connector_node, connector);
+ break;
+ /* Avoid compiler warnings */
+ case V4L2_CONN_UNKNOWN:
+ break;
+ }
+
+out:
+ fwnode_handle_put(connector_node);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(v4l2_fwnode_connector_parse);
+
+int v4l2_fwnode_connector_add_link(struct fwnode_handle *fwnode,
+ struct v4l2_fwnode_connector *connector)
+{
+ struct fwnode_handle *connector_ep;
+ struct v4l2_connector_link *link;
+ int err;
+
+ if (!fwnode || !connector || connector->type == V4L2_CONN_UNKNOWN)
+ return -EINVAL;
+
+ connector_ep = fwnode_graph_get_remote_endpoint(fwnode);
+ if (!connector_ep)
+ return -ENOTCONN;
+
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ err = v4l2_fwnode_parse_link(connector_ep, &link->fwnode_link);
+ if (err)
+ goto err;
+
+ fwnode_handle_put(connector_ep);
+
+ list_add(&link->head, &connector->links);
+ connector->nr_of_links++;
+
+ return 0;
+
+err:
+ kfree(link);
+ fwnode_handle_put(connector_ep);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(v4l2_fwnode_connector_add_link);
+
+int v4l2_fwnode_device_parse(struct device *dev,
+ struct v4l2_fwnode_device_properties *props)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ u32 val;
+ int ret;
+
+ memset(props, 0, sizeof(*props));
+
+ props->orientation = V4L2_FWNODE_PROPERTY_UNSET;
+ ret = fwnode_property_read_u32(fwnode, "orientation", &val);
+ if (!ret) {
+ switch (val) {
+ case V4L2_FWNODE_ORIENTATION_FRONT:
+ case V4L2_FWNODE_ORIENTATION_BACK:
+ case V4L2_FWNODE_ORIENTATION_EXTERNAL:
+ break;
+ default:
+ dev_warn(dev, "Unsupported device orientation: %u\n", val);
+ return -EINVAL;
+ }
+
+ props->orientation = val;
+ dev_dbg(dev, "device orientation: %u\n", val);
+ }
+
+ props->rotation = V4L2_FWNODE_PROPERTY_UNSET;
+ ret = fwnode_property_read_u32(fwnode, "rotation", &val);
+ if (!ret) {
+ if (val >= 360) {
+ dev_warn(dev, "Unsupported device rotation: %u\n", val);
+ return -EINVAL;
+ }
+
+ props->rotation = val;
+ dev_dbg(dev, "device rotation: %u\n", val);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_fwnode_device_parse);
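+
+/*
+ * Illustrative sketch: a camera sensor driver typically parses these
+ * properties once at probe time and exposes them as read-only controls.
+ * 'sensor' and 'sensor_ctrl_ops' are hypothetical.
+ *
+ *	struct v4l2_fwnode_device_properties props;
+ *	int ret = v4l2_fwnode_device_parse(dev, &props);
+ *
+ *	if (!ret)
+ *		ret = v4l2_ctrl_new_fwnode_properties(&sensor->hdl,
+ *						      &sensor_ctrl_ops,
+ *						      &props);
+ */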
+
+/*
+ * v4l2_fwnode_reference_parse - parse references for async sub-devices
+ * @dev: the device node the properties of which are parsed for references
+ * @notifier: the async notifier where the async subdevs will be added
+ * @prop: the name of the property
+ *
+ * Return: 0 on success
+ * -ENOENT if no entries were found
+ * -ENOMEM if memory allocation failed
+ * -EINVAL if property parsing failed
+ */
+static int v4l2_fwnode_reference_parse(struct device *dev,
+ struct v4l2_async_notifier *notifier,
+ const char *prop)
+{
+ struct fwnode_reference_args args;
+ unsigned int index;
+ int ret;
+
+ for (index = 0;
+ !(ret = fwnode_property_get_reference_args(dev_fwnode(dev), prop,
+ NULL, 0, index, &args));
+ index++) {
+ struct v4l2_async_connection *asd;
+
+ asd = v4l2_async_nf_add_fwnode(notifier, args.fwnode,
+ struct v4l2_async_connection);
+ fwnode_handle_put(args.fwnode);
+ if (IS_ERR(asd)) {
+ /* not an error if asd already exists */
+ if (PTR_ERR(asd) == -EEXIST)
+ continue;
+
+ return PTR_ERR(asd);
+ }
+ }
+
+ /* -ENOENT here means successful parsing */
+ if (ret != -ENOENT)
+ return ret;
+
+ /* Return -ENOENT if no references were found */
+ return index ? 0 : -ENOENT;
+}
+
+/*
+ * v4l2_fwnode_reference_get_int_prop - parse a reference with integer
+ * arguments
+ * @fwnode: fwnode to read @prop from
+ * @prop: the name of the property
+ * @index: the index of the reference to get
+ * @props: the array of integer property names
+ * @nprops: the number of integer property names in @props
+ *
+ * First find an fwnode referred to by the reference at @index in @prop.
+ *
+ * Then under that fwnode, @nprops times, for each property in @props,
+ * iteratively follow child nodes starting from fwnode such that they have the
+ * property in the @props array at the index of the child node's distance from
+ * the root node, and the value of that property matching the integer argument
+ * of the reference at the same index.
+ *
+ * The child fwnode reached at the end of the iteration is then returned to the
+ * caller.
+ *
+ * The core reason for this is that you cannot refer to just any node in ACPI.
+ * So to refer to an endpoint (easy in DT) you need to refer to a device, then
+ * provide a list of (property name, property value) tuples where each tuple
+ * uniquely identifies a child node. The first tuple identifies a child directly
+ * underneath the device fwnode, the next tuple identifies a child node
+ * underneath the fwnode identified by the previous tuple, etc. until you
+ * reach the fwnode you need.
+ *
+ * THIS EXAMPLE EXISTS MERELY TO DOCUMENT THIS FUNCTION. DO NOT USE IT AS A
+ * REFERENCE FOR HOW ACPI TABLES SHOULD BE WRITTEN!! See documentation under
+ * Documentation/firmware-guide/acpi/dsd/ instead, especially graph.txt,
+ * data-node-references.txt and leds.txt.
+ *
+ * Scope (\_SB.PCI0.I2C2)
+ * {
+ * Device (CAM0)
+ * {
+ * Name (_DSD, Package () {
+ * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ * Package () {
+ * Package () {
+ * "compatible",
+ * Package () { "nokia,smia" }
+ * },
+ * },
+ * ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
+ * Package () {
+ * Package () { "port0", "PRT0" },
+ * }
+ * })
+ * Name (PRT0, Package() {
+ * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ * Package () {
+ * Package () { "port", 0 },
+ * },
+ * ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
+ * Package () {
+ * Package () { "endpoint0", "EP00" },
+ * }
+ * })
+ * Name (EP00, Package() {
+ * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ * Package () {
+ * Package () { "endpoint", 0 },
+ * Package () {
+ * "remote-endpoint",
+ * Package() {
+ * \_SB.PCI0.ISP, 4, 0
+ * }
+ * },
+ * }
+ * })
+ * }
+ * }
+ *
+ * Scope (\_SB.PCI0)
+ * {
+ * Device (ISP)
+ * {
+ * Name (_DSD, Package () {
+ * ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
+ * Package () {
+ * Package () { "port4", "PRT4" },
+ * }
+ * })
+ *
+ * Name (PRT4, Package() {
+ * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ * Package () {
+ * Package () { "port", 4 },
+ * },
+ * ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
+ * Package () {
+ * Package () { "endpoint0", "EP40" },
+ * }
+ * })
+ *
+ * Name (EP40, Package() {
+ * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ * Package () {
+ * Package () { "endpoint", 0 },
+ * Package () {
+ * "remote-endpoint",
+ * Package () {
+ * \_SB.PCI0.I2C2.CAM0,
+ * 0, 0
+ * }
+ * },
+ * }
+ * })
+ * }
+ * }
+ *
+ * From the EP40 node under ISP device, you could parse the graph remote
+ * endpoint using v4l2_fwnode_reference_get_int_prop with these arguments:
+ *
+ * @fwnode: fwnode referring to EP40 under ISP.
+ * @prop: "remote-endpoint"
+ * @index: 0
+ * @props: "port", "endpoint"
+ * @nprops: 2
+ *
+ * And you'd get back fwnode referring to EP00 under CAM0.
+ *
+ * The same works the other way around: if you use EP00 under CAM0 as the
+ * fwnode, you'll get fwnode referring to EP40 under ISP.
+ *
+ * The same example in DT syntax would look like this:
+ *
+ * cam: cam0 {
+ * compatible = "nokia,smia";
+ *
+ * port {
+ * port = <0>;
+ * endpoint {
+ * endpoint = <0>;
+ * remote-endpoint = <&isp 4 0>;
+ * };
+ * };
+ * };
+ *
+ * isp: isp {
+ * ports {
+ * port@4 {
+ * port = <4>;
+ * endpoint {
+ * endpoint = <0>;
+ * remote-endpoint = <&cam 0 0>;
+ * };
+ * };
+ * };
+ * };
+ *
+ * Return: the matching child fwnode on success, or an ERR_PTR() encoding
+ * -ENOENT if no entries (or the property itself) were found
+ * -EINVAL if property parsing otherwise failed
+ * -ENOMEM if memory allocation failed
+ */
+static struct fwnode_handle *
+v4l2_fwnode_reference_get_int_prop(struct fwnode_handle *fwnode,
+ const char *prop,
+ unsigned int index,
+ const char * const *props,
+ unsigned int nprops)
+{
+ struct fwnode_reference_args fwnode_args;
+ u64 *args = fwnode_args.args;
+ struct fwnode_handle *child;
+ int ret;
+
+ /*
+ * Obtain remote fwnode as well as the integer arguments.
+ *
+ * Note that right now both -ENODATA and -ENOENT may signal
+ * out-of-bounds access. Return -ENOENT in that case.
+ */
+ ret = fwnode_property_get_reference_args(fwnode, prop, NULL, nprops,
+ index, &fwnode_args);
+ if (ret)
+ return ERR_PTR(ret == -ENODATA ? -ENOENT : ret);
+
+ /*
+ * Find a node in the tree under the referred fwnode corresponding to
+ * the integer arguments.
+ */
+ fwnode = fwnode_args.fwnode;
+ while (nprops--) {
+ u32 val;
+
+ /* Loop over all child nodes under fwnode. */
+ fwnode_for_each_child_node(fwnode, child) {
+ if (fwnode_property_read_u32(child, *props, &val))
+ continue;
+
+ /* Found property, see if its value matches. */
+ if (val == *args)
+ break;
+ }
+
+ fwnode_handle_put(fwnode);
+
+		/* No matching child node was found; return -ENOENT. */
+ if (!child) {
+ fwnode = ERR_PTR(-ENOENT);
+ break;
+ }
+
+ props++;
+ args++;
+ fwnode = child;
+ }
+
+ return fwnode;
+}
+
+struct v4l2_fwnode_int_props {
+ const char *name;
+ const char * const *props;
+ unsigned int nprops;
+};
+
+/*
+ * v4l2_fwnode_reference_parse_int_props - parse references for async
+ * sub-devices
+ * @dev: struct device pointer
+ * @notifier: notifier for @dev
+ * @p: the name of the reference property, plus the names and number of the
+ *     integer properties to match on (see struct v4l2_fwnode_int_props)
+ *
+ * Use v4l2_fwnode_reference_get_int_prop() to find fwnodes referenced in
+ * property @p->name whose integer arguments match child nodes on the
+ * properties in @p->props. Then set up V4L2 async sub-devices for those
+ * fwnodes in the notifier accordingly.
+ *
+ * While it is technically possible to use this function on DT, it is only
+ * meaningful on ACPI. In DT you can refer to any node in the tree, but in
+ * ACPI the references are limited to device nodes.
+ *
+ * Return: 0 on success
+ *	   -ENOENT if no entries (or the property itself) were found
+ *	   -EINVAL if property parsing otherwise failed
+ * -ENOMEM if memory allocation failed
+ */
+static int
+v4l2_fwnode_reference_parse_int_props(struct device *dev,
+ struct v4l2_async_notifier *notifier,
+ const struct v4l2_fwnode_int_props *p)
+{
+ struct fwnode_handle *fwnode;
+ unsigned int index;
+ int ret;
+ const char *prop = p->name;
+ const char * const *props = p->props;
+ unsigned int nprops = p->nprops;
+
+ index = 0;
+ do {
+ fwnode = v4l2_fwnode_reference_get_int_prop(dev_fwnode(dev),
+ prop, index,
+ props, nprops);
+ if (IS_ERR(fwnode)) {
+ /*
+ * Note that right now both -ENODATA and -ENOENT may
+ * signal out-of-bounds access. Return the error in
+ * cases other than that.
+ */
+ if (PTR_ERR(fwnode) != -ENOENT &&
+ PTR_ERR(fwnode) != -ENODATA)
+ return PTR_ERR(fwnode);
+ break;
+ }
+ fwnode_handle_put(fwnode);
+ index++;
+ } while (1);
+
+ for (index = 0;
+ !IS_ERR((fwnode = v4l2_fwnode_reference_get_int_prop(dev_fwnode(dev),
+ prop, index,
+ props,
+ nprops)));
+ index++) {
+ struct v4l2_async_connection *asd;
+
+ asd = v4l2_async_nf_add_fwnode(notifier, fwnode,
+ struct v4l2_async_connection);
+ fwnode_handle_put(fwnode);
+ if (IS_ERR(asd)) {
+ ret = PTR_ERR(asd);
+ /* not an error if asd already exists */
+ if (ret == -EEXIST)
+ continue;
+
+			return ret;
+ }
+ }
+
+ return !fwnode || PTR_ERR(fwnode) == -ENOENT ? 0 : PTR_ERR(fwnode);
+}
+
+/**
+ * v4l2_async_nf_parse_fwnode_sensor - parse common references on
+ * sensors for async sub-devices
+ * @dev: the device node the properties of which are parsed for references
+ * @notifier: the async notifier where the async subdevs will be added
+ *
+ * Parse common sensor properties for remote devices related to the
+ * sensor and set up async sub-devices for them.
+ *
+ * Any notifier populated using this function must be released with a call to
+ * v4l2_async_nf_release() after it has been unregistered and the async
+ * sub-devices are no longer in use, even in the case the function returned an
+ * error.
+ *
+ * Return: 0 on success
+ * -ENOMEM if memory allocation failed
+ * -EINVAL if property parsing failed
+ */
+static int
+v4l2_async_nf_parse_fwnode_sensor(struct device *dev,
+ struct v4l2_async_notifier *notifier)
+{
+ static const char * const led_props[] = { "led" };
+ static const struct v4l2_fwnode_int_props props[] = {
+ { "flash-leds", led_props, ARRAY_SIZE(led_props) },
+ { "lens-focus", NULL, 0 },
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(props); i++) {
+ int ret;
+
+ if (props[i].props && is_acpi_node(dev_fwnode(dev)))
+ ret = v4l2_fwnode_reference_parse_int_props(dev,
+ notifier,
+ &props[i]);
+ else
+ ret = v4l2_fwnode_reference_parse(dev, notifier,
+ props[i].name);
+ if (ret && ret != -ENOENT) {
+ dev_warn(dev, "parsing property \"%s\" failed (%d)\n",
+ props[i].name, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
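+
+/*
+ * For illustration only (not part of this file): a hypothetical sensor
+ * node in DT carrying the two properties parsed above could look like
+ * this; each referenced flash and lens-focus node would end up as an
+ * async connection in the notifier:
+ *
+ *	&i2c2 {
+ *		camera-sensor@10 {
+ *			compatible = "nokia,smia";
+ *			reg = <0x10>;
+ *			flash-leds = <&flash0>, <&flash1>;
+ *			lens-focus = <&ad5820>;
+ *		};
+ *	};
+ */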
+
+int v4l2_async_register_subdev_sensor(struct v4l2_subdev *sd)
+{
+ struct v4l2_async_notifier *notifier;
+ int ret;
+
+ if (WARN_ON(!sd->dev))
+ return -ENODEV;
+
+ notifier = kzalloc(sizeof(*notifier), GFP_KERNEL);
+ if (!notifier)
+ return -ENOMEM;
+
+ v4l2_async_subdev_nf_init(notifier, sd);
+
+ ret = v4l2_subdev_get_privacy_led(sd);
+ if (ret < 0)
+ goto out_cleanup;
+
+ ret = v4l2_async_nf_parse_fwnode_sensor(sd->dev, notifier);
+ if (ret < 0)
+ goto out_cleanup;
+
+ ret = v4l2_async_nf_register(notifier);
+ if (ret < 0)
+ goto out_cleanup;
+
+ ret = v4l2_async_register_subdev(sd);
+ if (ret < 0)
+ goto out_unregister;
+
+ sd->subdev_notifier = notifier;
+
+ return 0;
+
+out_unregister:
+ v4l2_async_nf_unregister(notifier);
+
+out_cleanup:
+ v4l2_subdev_put_privacy_led(sd);
+ v4l2_async_nf_cleanup(notifier);
+ kfree(notifier);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_async_register_subdev_sensor);
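+
+/*
+ * A minimal usage sketch (not part of this file): a sensor driver calls
+ * this at the end of its probe function, once the subdev itself has been
+ * initialized. Names below are hypothetical driver locals:
+ *
+ *	static int sensor_probe(struct i2c_client *client)
+ *	{
+ *		struct sensor *priv;
+ *		int ret;
+ *
+ *		...
+ *		v4l2_i2c_subdev_init(&priv->sd, client, &sensor_subdev_ops);
+ *		...
+ *		ret = v4l2_async_register_subdev_sensor(&priv->sd);
+ *		if (ret)
+ *			goto err_cleanup;
+ *		...
+ *	}
+ */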
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>");
+MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
diff --git a/drivers/media/v4l2-core/v4l2-h264.c b/drivers/media/v4l2-core/v4l2-h264.c
new file mode 100644
index 0000000000..c00197d095
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-h264.c
@@ -0,0 +1,453 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * V4L2 H264 helpers.
+ *
+ * Copyright (C) 2019 Collabora, Ltd.
+ *
+ * Author: Boris Brezillon <boris.brezillon@collabora.com>
+ */
+
+#include <linux/module.h>
+#include <linux/sort.h>
+
+#include <media/v4l2-h264.h>
+
+/*
+ * Size of the temporary buffer allocated when printing reference lists. The
+ * output will be truncated if the size is too small.
+ */
+static const int tmp_str_size = 1024;
+
+/**
+ * v4l2_h264_init_reflist_builder() - Initialize a P/B0/B1 reference list
+ * builder
+ *
+ * @b: the builder context to initialize
+ * @dec_params: decode parameters control
+ * @sps: SPS control
+ * @dpb: DPB to use when creating the reference list
+ */
+void
+v4l2_h264_init_reflist_builder(struct v4l2_h264_reflist_builder *b,
+ const struct v4l2_ctrl_h264_decode_params *dec_params,
+ const struct v4l2_ctrl_h264_sps *sps,
+ const struct v4l2_h264_dpb_entry dpb[V4L2_H264_NUM_DPB_ENTRIES])
+{
+ int cur_frame_num, max_frame_num;
+ unsigned int i;
+
+ max_frame_num = 1 << (sps->log2_max_frame_num_minus4 + 4);
+ cur_frame_num = dec_params->frame_num;
+
+ memset(b, 0, sizeof(*b));
+ if (!(dec_params->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC)) {
+ b->cur_pic_order_count = min(dec_params->bottom_field_order_cnt,
+ dec_params->top_field_order_cnt);
+ b->cur_pic_fields = V4L2_H264_FRAME_REF;
+ } else if (dec_params->flags & V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD) {
+ b->cur_pic_order_count = dec_params->bottom_field_order_cnt;
+ b->cur_pic_fields = V4L2_H264_BOTTOM_FIELD_REF;
+ } else {
+ b->cur_pic_order_count = dec_params->top_field_order_cnt;
+ b->cur_pic_fields = V4L2_H264_TOP_FIELD_REF;
+ }
+
+ for (i = 0; i < V4L2_H264_NUM_DPB_ENTRIES; i++) {
+ if (!(dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE))
+ continue;
+
+ if (dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM)
+ b->refs[i].longterm = true;
+
+ /*
+ * Handle frame_num wraparound as described in section
+ * '8.2.4.1 Decoding process for picture numbers' of the spec.
+ * For long term references, frame_num is set to
+ * long_term_frame_idx which requires no wrapping.
+ */
+ if (!b->refs[i].longterm && dpb[i].frame_num > cur_frame_num)
+ b->refs[i].frame_num = (int)dpb[i].frame_num -
+ max_frame_num;
+ else
+ b->refs[i].frame_num = dpb[i].frame_num;
+
+ b->refs[i].top_field_order_cnt = dpb[i].top_field_order_cnt;
+ b->refs[i].bottom_field_order_cnt = dpb[i].bottom_field_order_cnt;
+
+ if (b->cur_pic_fields == V4L2_H264_FRAME_REF) {
+ u8 fields = V4L2_H264_FRAME_REF;
+
+ b->unordered_reflist[b->num_valid].index = i;
+ b->unordered_reflist[b->num_valid].fields = fields;
+ b->num_valid++;
+ continue;
+ }
+
+ if (dpb[i].fields & V4L2_H264_TOP_FIELD_REF) {
+ u8 fields = V4L2_H264_TOP_FIELD_REF;
+
+ b->unordered_reflist[b->num_valid].index = i;
+ b->unordered_reflist[b->num_valid].fields = fields;
+ b->num_valid++;
+ }
+
+ if (dpb[i].fields & V4L2_H264_BOTTOM_FIELD_REF) {
+ u8 fields = V4L2_H264_BOTTOM_FIELD_REF;
+
+ b->unordered_reflist[b->num_valid].index = i;
+ b->unordered_reflist[b->num_valid].fields = fields;
+ b->num_valid++;
+ }
+ }
+
+ for (i = b->num_valid; i < ARRAY_SIZE(b->unordered_reflist); i++)
+ b->unordered_reflist[i].index = i;
+}
+EXPORT_SYMBOL_GPL(v4l2_h264_init_reflist_builder);
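+
+/*
+ * Illustration (assumed values, not part of this file): with an active
+ * frame reference at DPB index 2, the builder adds one
+ * V4L2_H264_FRAME_REF entry to the unordered list. When the current
+ * picture is a field, a DPB entry with both field flags set instead
+ * yields two entries, one V4L2_H264_TOP_FIELD_REF and one
+ * V4L2_H264_BOTTOM_FIELD_REF, both with index 2, so that each parity
+ * can later be ordered independently.
+ */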
+
+static s32 v4l2_h264_get_poc(const struct v4l2_h264_reflist_builder *b,
+ const struct v4l2_h264_reference *ref)
+{
+ switch (ref->fields) {
+ case V4L2_H264_FRAME_REF:
+ return min(b->refs[ref->index].top_field_order_cnt,
+ b->refs[ref->index].bottom_field_order_cnt);
+ case V4L2_H264_TOP_FIELD_REF:
+ return b->refs[ref->index].top_field_order_cnt;
+ case V4L2_H264_BOTTOM_FIELD_REF:
+ return b->refs[ref->index].bottom_field_order_cnt;
+ }
+
+ /* not reached */
+ return 0;
+}
+
+static int v4l2_h264_p_ref_list_cmp(const void *ptra, const void *ptrb,
+ const void *data)
+{
+ const struct v4l2_h264_reflist_builder *builder = data;
+ u8 idxa, idxb;
+
+ idxa = ((struct v4l2_h264_reference *)ptra)->index;
+ idxb = ((struct v4l2_h264_reference *)ptrb)->index;
+
+ if (WARN_ON(idxa >= V4L2_H264_NUM_DPB_ENTRIES ||
+ idxb >= V4L2_H264_NUM_DPB_ENTRIES))
+ return 1;
+
+ if (builder->refs[idxa].longterm != builder->refs[idxb].longterm) {
+ /* Short term pics first. */
+ if (!builder->refs[idxa].longterm)
+ return -1;
+ else
+ return 1;
+ }
+
+ /*
+ * For frames, short term pics are in descending pic num order and long
+ * term ones in ascending order. For fields, the same direction is used
+ * but with frame_num (wrapped). For frames, the value of pic_num and
+ * frame_num are the same (see formula (8-28) and (8-29)). For this
+ * reason we can use frame_num only and share this function between
+ * frames and fields reflist.
+ */
+ if (!builder->refs[idxa].longterm)
+ return builder->refs[idxb].frame_num <
+ builder->refs[idxa].frame_num ?
+ -1 : 1;
+
+ return builder->refs[idxa].frame_num < builder->refs[idxb].frame_num ?
+ -1 : 1;
+}
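+
+/*
+ * Worked example (assumed values): given short term references with
+ * (wrapped) frame_num 5, 3 and 7 plus a long term reference with
+ * long_term_frame_idx 0, sorting a frame P list with this comparator
+ * yields, in the format used by print_ref_list_p() below:
+ *
+ *	|7sf|5sf|3sf|0lf|
+ *
+ * i.e. short term pics in descending frame_num order followed by long
+ * term pics in ascending long_term_frame_idx order.
+ */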
+
+static int v4l2_h264_b0_ref_list_cmp(const void *ptra, const void *ptrb,
+ const void *data)
+{
+ const struct v4l2_h264_reflist_builder *builder = data;
+ s32 poca, pocb;
+ u8 idxa, idxb;
+
+ idxa = ((struct v4l2_h264_reference *)ptra)->index;
+ idxb = ((struct v4l2_h264_reference *)ptrb)->index;
+
+ if (WARN_ON(idxa >= V4L2_H264_NUM_DPB_ENTRIES ||
+ idxb >= V4L2_H264_NUM_DPB_ENTRIES))
+ return 1;
+
+ if (builder->refs[idxa].longterm != builder->refs[idxb].longterm) {
+ /* Short term pics first. */
+ if (!builder->refs[idxa].longterm)
+ return -1;
+ else
+ return 1;
+ }
+
+ /* Long term pics in ascending frame num order. */
+ if (builder->refs[idxa].longterm)
+ return builder->refs[idxa].frame_num <
+ builder->refs[idxb].frame_num ?
+ -1 : 1;
+
+ poca = v4l2_h264_get_poc(builder, ptra);
+ pocb = v4l2_h264_get_poc(builder, ptrb);
+
+ /*
+ * Short term pics with POC < cur POC first in POC descending order
+ * followed by short term pics with POC > cur POC in POC ascending
+ * order.
+ */
+ if ((poca < builder->cur_pic_order_count) !=
+ (pocb < builder->cur_pic_order_count))
+ return poca < pocb ? -1 : 1;
+ else if (poca < builder->cur_pic_order_count)
+ return pocb < poca ? -1 : 1;
+
+ return poca < pocb ? -1 : 1;
+}
+
+static int v4l2_h264_b1_ref_list_cmp(const void *ptra, const void *ptrb,
+ const void *data)
+{
+ const struct v4l2_h264_reflist_builder *builder = data;
+ s32 poca, pocb;
+ u8 idxa, idxb;
+
+ idxa = ((struct v4l2_h264_reference *)ptra)->index;
+ idxb = ((struct v4l2_h264_reference *)ptrb)->index;
+
+ if (WARN_ON(idxa >= V4L2_H264_NUM_DPB_ENTRIES ||
+ idxb >= V4L2_H264_NUM_DPB_ENTRIES))
+ return 1;
+
+ if (builder->refs[idxa].longterm != builder->refs[idxb].longterm) {
+ /* Short term pics first. */
+ if (!builder->refs[idxa].longterm)
+ return -1;
+ else
+ return 1;
+ }
+
+ /* Long term pics in ascending frame num order. */
+ if (builder->refs[idxa].longterm)
+ return builder->refs[idxa].frame_num <
+ builder->refs[idxb].frame_num ?
+ -1 : 1;
+
+ poca = v4l2_h264_get_poc(builder, ptra);
+ pocb = v4l2_h264_get_poc(builder, ptrb);
+
+ /*
+ * Short term pics with POC > cur POC first in POC ascending order
+ * followed by short term pics with POC < cur POC in POC descending
+ * order.
+ */
+ if ((poca < builder->cur_pic_order_count) !=
+ (pocb < builder->cur_pic_order_count))
+ return pocb < poca ? -1 : 1;
+ else if (poca < builder->cur_pic_order_count)
+ return pocb < poca ? -1 : 1;
+
+ return poca < pocb ? -1 : 1;
+}
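+
+/*
+ * Worked example (assumed values): for a current picture with POC 6 and
+ * short term frame references with POC 0, 2, 4 and 8, the two
+ * comparators above produce:
+ *
+ *	B0: |4sf|2sf|0sf|8sf|  (POC < 6 descending, then POC > 6 ascending)
+ *	B1: |8sf|4sf|2sf|0sf|  (POC > 6 ascending, then POC < 6 descending)
+ */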
+
+/*
+ * The references need to be reordered so that references are alternating
+ * between top and bottom field references starting with the current picture
+ * parity. This has to be done for short term and long term references
+ * separately.
+ */
+static void reorder_field_reflist(const struct v4l2_h264_reflist_builder *b,
+ struct v4l2_h264_reference *reflist)
+{
+ struct v4l2_h264_reference tmplist[V4L2_H264_REF_LIST_LEN];
+ u8 lt, i = 0, j = 0, k = 0;
+
+ memcpy(tmplist, reflist, sizeof(tmplist[0]) * b->num_valid);
+
+ for (lt = 0; lt <= 1; lt++) {
+ do {
+ for (; i < b->num_valid && b->refs[tmplist[i].index].longterm == lt; i++) {
+ if (tmplist[i].fields == b->cur_pic_fields) {
+ reflist[k++] = tmplist[i++];
+ break;
+ }
+ }
+
+ for (; j < b->num_valid && b->refs[tmplist[j].index].longterm == lt; j++) {
+ if (tmplist[j].fields != b->cur_pic_fields) {
+ reflist[k++] = tmplist[j++];
+ break;
+ }
+ }
+ } while ((i < b->num_valid && b->refs[tmplist[i].index].longterm == lt) ||
+ (j < b->num_valid && b->refs[tmplist[j].index].longterm == lt));
+ }
+}
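+
+/*
+ * Worked example (assumed values): given the sorted short term
+ * references b0, t0, b1, t1 (letter = field parity, number = relative
+ * order within that parity), decoding a top field reorders the list to
+ * t0, b0, t1, b1, while decoding a bottom field yields b0, t0, b1, t1.
+ * When one parity runs out, the remaining references of the other
+ * parity are appended in their sorted order.
+ */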
+
+static char ref_type_to_char(u8 ref_type)
+{
+ switch (ref_type) {
+ case V4L2_H264_FRAME_REF:
+ return 'f';
+ case V4L2_H264_TOP_FIELD_REF:
+ return 't';
+ case V4L2_H264_BOTTOM_FIELD_REF:
+ return 'b';
+ }
+
+ return '?';
+}
+
+static const char *format_ref_list_p(const struct v4l2_h264_reflist_builder *builder,
+ struct v4l2_h264_reference *reflist,
+ char **out_str)
+{
+ int n = 0, i;
+
+ *out_str = kmalloc(tmp_str_size, GFP_KERNEL);
+ if (!(*out_str))
+ return NULL;
+
+	n += scnprintf(*out_str + n, tmp_str_size - n, "|");
+
+ for (i = 0; i < builder->num_valid; i++) {
+		/*
+		 * This is pic_num for frames and frame_num (wrapped) for
+		 * fields, but for frames pic_num is equal to frame_num
+		 * (wrapped).
+		 */
+ int frame_num = builder->refs[reflist[i].index].frame_num;
+ bool longterm = builder->refs[reflist[i].index].longterm;
+
+ n += scnprintf(*out_str + n, tmp_str_size - n, "%i%c%c|",
+ frame_num, longterm ? 'l' : 's',
+ ref_type_to_char(reflist[i].fields));
+ }
+
+ return *out_str;
+}
+
+static void print_ref_list_p(const struct v4l2_h264_reflist_builder *builder,
+ struct v4l2_h264_reference *reflist)
+{
+ char *buf = NULL;
+
+ pr_debug("ref_pic_list_p (cur_poc %u%c) %s\n",
+ builder->cur_pic_order_count,
+ ref_type_to_char(builder->cur_pic_fields),
+ format_ref_list_p(builder, reflist, &buf));
+
+ kfree(buf);
+}
+
+static const char *format_ref_list_b(const struct v4l2_h264_reflist_builder *builder,
+ struct v4l2_h264_reference *reflist,
+ char **out_str)
+{
+ int n = 0, i;
+
+ *out_str = kmalloc(tmp_str_size, GFP_KERNEL);
+ if (!(*out_str))
+ return NULL;
+
+	n += scnprintf(*out_str + n, tmp_str_size - n, "|");
+
+ for (i = 0; i < builder->num_valid; i++) {
+ int frame_num = builder->refs[reflist[i].index].frame_num;
+		s32 poc = v4l2_h264_get_poc(builder, reflist + i);
+ bool longterm = builder->refs[reflist[i].index].longterm;
+
+ n += scnprintf(*out_str + n, tmp_str_size - n, "%i%c%c|",
+ longterm ? frame_num : poc,
+ longterm ? 'l' : 's',
+ ref_type_to_char(reflist[i].fields));
+ }
+
+ return *out_str;
+}
+
+static void print_ref_list_b(const struct v4l2_h264_reflist_builder *builder,
+ struct v4l2_h264_reference *reflist, u8 list_num)
+{
+ char *buf = NULL;
+
+	pr_debug("ref_pic_list_b%u (cur_poc %u%c) %s\n",
+ list_num, builder->cur_pic_order_count,
+ ref_type_to_char(builder->cur_pic_fields),
+ format_ref_list_b(builder, reflist, &buf));
+
+ kfree(buf);
+}
+
+/**
+ * v4l2_h264_build_p_ref_list() - Build the P reference list
+ *
+ * @builder: reference list builder context
+ * @reflist: 32-entry array used to store the P reference list. Each entry
+ *	     is a v4l2_h264_reference structure
+ *
+ * This function builds the P reference list. This procedure is described in
+ * section '8.2.4 Decoding process for reference picture lists construction'
+ * of the H264 spec. This function can be used by H264 decoder drivers that
+ * need to pass a P reference list to the hardware.
+ */
+void
+v4l2_h264_build_p_ref_list(const struct v4l2_h264_reflist_builder *builder,
+ struct v4l2_h264_reference *reflist)
+{
+ memcpy(reflist, builder->unordered_reflist,
+ sizeof(builder->unordered_reflist[0]) * builder->num_valid);
+ sort_r(reflist, builder->num_valid, sizeof(*reflist),
+ v4l2_h264_p_ref_list_cmp, NULL, builder);
+
+ if (builder->cur_pic_fields != V4L2_H264_FRAME_REF)
+ reorder_field_reflist(builder, reflist);
+
+ print_ref_list_p(builder, reflist);
+}
+EXPORT_SYMBOL_GPL(v4l2_h264_build_p_ref_list);
+
+/**
+ * v4l2_h264_build_b_ref_lists() - Build the B0/B1 reference lists
+ *
+ * @builder: reference list builder context
+ * @b0_reflist: 32-entry array used to store the B0 reference list. Each
+ *		entry is a v4l2_h264_reference structure
+ * @b1_reflist: 32-entry array used to store the B1 reference list. Each
+ *		entry is a v4l2_h264_reference structure
+ *
+ * This function builds the B0/B1 reference lists. This procedure is described
+ * in section '8.2.4 Decoding process for reference picture lists construction'
+ * of the H264 spec. This function can be used by H264 decoder drivers that
+ * need to pass B0/B1 reference lists to the hardware.
+ */
+void
+v4l2_h264_build_b_ref_lists(const struct v4l2_h264_reflist_builder *builder,
+ struct v4l2_h264_reference *b0_reflist,
+ struct v4l2_h264_reference *b1_reflist)
+{
+ memcpy(b0_reflist, builder->unordered_reflist,
+ sizeof(builder->unordered_reflist[0]) * builder->num_valid);
+ sort_r(b0_reflist, builder->num_valid, sizeof(*b0_reflist),
+ v4l2_h264_b0_ref_list_cmp, NULL, builder);
+
+ memcpy(b1_reflist, builder->unordered_reflist,
+ sizeof(builder->unordered_reflist[0]) * builder->num_valid);
+ sort_r(b1_reflist, builder->num_valid, sizeof(*b1_reflist),
+ v4l2_h264_b1_ref_list_cmp, NULL, builder);
+
+ if (builder->cur_pic_fields != V4L2_H264_FRAME_REF) {
+ reorder_field_reflist(builder, b0_reflist);
+ reorder_field_reflist(builder, b1_reflist);
+ }
+
+	/*
+	 * If the B1 list is identical to the B0 list, the first two entries
+	 * of B1 must be swapped (H264 spec, clause 8.2.4.2.3).
+	 */
+	if (builder->num_valid > 1 &&
+	    !memcmp(b1_reflist, b0_reflist,
+		    builder->num_valid * sizeof(*b0_reflist)))
+ swap(b1_reflist[0], b1_reflist[1]);
+
+ print_ref_list_b(builder, b0_reflist, 0);
+ print_ref_list_b(builder, b1_reflist, 1);
+}
+EXPORT_SYMBOL_GPL(v4l2_h264_build_b_ref_lists);
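+
+/*
+ * A minimal usage sketch (not part of this file): a stateless decoder
+ * driver would typically run the builder once per frame, with dec_params
+ * and sps obtained from the control framework:
+ *
+ *	struct v4l2_h264_reflist_builder b;
+ *	struct v4l2_h264_reference p[V4L2_H264_REF_LIST_LEN];
+ *	struct v4l2_h264_reference b0[V4L2_H264_REF_LIST_LEN];
+ *	struct v4l2_h264_reference b1[V4L2_H264_REF_LIST_LEN];
+ *
+ *	v4l2_h264_init_reflist_builder(&b, dec_params, sps, dec_params->dpb);
+ *	v4l2_h264_build_p_ref_list(&b, p);
+ *	v4l2_h264_build_b_ref_lists(&b, b0, b1);
+ *
+ * The resulting lists are then translated into whatever layout the
+ * decoder hardware expects.
+ */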
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("V4L2 H264 Helpers");
+MODULE_AUTHOR("Boris Brezillon <boris.brezillon@collabora.com>");
diff --git a/drivers/media/v4l2-core/v4l2-i2c.c b/drivers/media/v4l2-core/v4l2-i2c.c
new file mode 100644
index 0000000000..b4acca7564
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-i2c.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * v4l2-i2c - I2C helpers for Video4Linux2
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+
+void v4l2_i2c_subdev_unregister(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ /*
+ * We need to unregister the i2c client
+ * explicitly. We cannot rely on
+ * i2c_del_adapter to always unregister
+ * clients for us, since if the i2c bus is a
+ * platform bus, then it is never deleted.
+ *
+ * Device tree or ACPI based devices must not
+ * be unregistered as they have not been
+ * registered by us, and would not be
+ * re-created by just probing the V4L2 driver.
+ */
+ if (client && !client->dev.of_node && !client->dev.fwnode)
+ i2c_unregister_device(client);
+}
+
+void v4l2_i2c_subdev_set_name(struct v4l2_subdev *sd,
+ struct i2c_client *client,
+ const char *devname, const char *postfix)
+{
+ if (!devname)
+ devname = client->dev.driver->name;
+ if (!postfix)
+ postfix = "";
+
+ snprintf(sd->name, sizeof(sd->name), "%s%s %d-%04x", devname, postfix,
+ i2c_adapter_id(client->adapter), client->addr);
+}
+EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_set_name);
+
+void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client,
+ const struct v4l2_subdev_ops *ops)
+{
+ v4l2_subdev_init(sd, ops);
+ sd->flags |= V4L2_SUBDEV_FL_IS_I2C;
+ /* the owner is the same as the i2c_client's driver owner */
+ sd->owner = client->dev.driver->owner;
+ sd->dev = &client->dev;
+ /* i2c_client and v4l2_subdev point to one another */
+ v4l2_set_subdevdata(sd, client);
+ i2c_set_clientdata(client, sd);
+ v4l2_i2c_subdev_set_name(sd, client, NULL, NULL);
+}
+EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_init);
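+
+/*
+ * A minimal usage sketch (not part of this file), with hypothetical
+ * driver types and ops: an I2C sub-device driver initializes its subdev
+ * in probe like this:
+ *
+ *	static int foo_probe(struct i2c_client *client)
+ *	{
+ *		struct foo *foo;
+ *
+ *		foo = devm_kzalloc(&client->dev, sizeof(*foo), GFP_KERNEL);
+ *		if (!foo)
+ *			return -ENOMEM;
+ *
+ *		v4l2_i2c_subdev_init(&foo->sd, client, &foo_subdev_ops);
+ *		...
+ *	}
+ *
+ * After this call i2c_get_clientdata(client) returns &foo->sd, and the
+ * subdev name encodes the adapter/address pair, e.g. "foo 1-0036".
+ */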
+
+/* Load an i2c sub-device. */
+struct v4l2_subdev
+*v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
+ struct i2c_adapter *adapter,
+ struct i2c_board_info *info,
+ const unsigned short *probe_addrs)
+{
+ struct v4l2_subdev *sd = NULL;
+ struct i2c_client *client;
+
+ if (!v4l2_dev)
+ return NULL;
+
+ request_module(I2C_MODULE_PREFIX "%s", info->type);
+
+ /* Create the i2c client */
+ if (info->addr == 0 && probe_addrs)
+ client = i2c_new_scanned_device(adapter, info, probe_addrs,
+ NULL);
+ else
+ client = i2c_new_client_device(adapter, info);
+
+ /*
+	 * Note: by loading the module first we are certain that the client's
+	 * driver will be bound if it was found. If the module was not loaded
+	 * first, then the i2c core tries to delay-load the module for us,
+	 * and the client's driver is still NULL until the module is finally
+ * loaded. This delay-load mechanism doesn't work if other drivers
+ * want to use the i2c device, so explicitly loading the module
+ * is the best alternative.
+ */
+ if (!i2c_client_has_driver(client))
+ goto error;
+
+ /* Lock the module so we can safely get the v4l2_subdev pointer */
+ if (!try_module_get(client->dev.driver->owner))
+ goto error;
+ sd = i2c_get_clientdata(client);
+
+ /*
+ * Register with the v4l2_device which increases the module's
+ * use count as well.
+ */
+ if (v4l2_device_register_subdev(v4l2_dev, sd))
+ sd = NULL;
+ /* Decrease the module use count to match the first try_module_get. */
+ module_put(client->dev.driver->owner);
+
+error:
+ /*
+ * If we have a client but no subdev, then something went wrong and
+ * we must unregister the client.
+ */
+ if (!IS_ERR(client) && !sd)
+ i2c_unregister_device(client);
+ return sd;
+}
+EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_board);
+
+struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev,
+ struct i2c_adapter *adapter,
+ const char *client_type,
+ u8 addr,
+ const unsigned short *probe_addrs)
+{
+ struct i2c_board_info info;
+
+ /*
+ * Setup the i2c board info with the device type and
+ * the device address.
+ */
+ memset(&info, 0, sizeof(info));
+ strscpy(info.type, client_type, sizeof(info.type));
+ info.addr = addr;
+
+ return v4l2_i2c_new_subdev_board(v4l2_dev, adapter, &info,
+ probe_addrs);
+}
+EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev);
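+
+/*
+ * Usage sketch (not part of this file): a bridge driver that knows its
+ * decoder sits at a fixed address loads the sub-device like this:
+ *
+ *	sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
+ *				 "saa7115", 0x21, NULL);
+ *	if (!sd)
+ *		dev_err(dev->dev, "cannot load saa7115 sub-device\n");
+ */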
+
+/* Return i2c client address of v4l2_subdev. */
+unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ return client ? client->addr : I2C_CLIENT_END;
+}
+EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_addr);
+
+/*
+ * Return a list of I2C tuner addresses to probe. Use only if the tuner
+ * addresses are unknown.
+ */
+const unsigned short *v4l2_i2c_tuner_addrs(enum v4l2_i2c_tuner_type type)
+{
+ static const unsigned short radio_addrs[] = {
+#if IS_ENABLED(CONFIG_MEDIA_TUNER_TEA5761)
+ 0x10,
+#endif
+ 0x60,
+ I2C_CLIENT_END
+ };
+ static const unsigned short demod_addrs[] = {
+ 0x42, 0x43, 0x4a, 0x4b,
+ I2C_CLIENT_END
+ };
+ static const unsigned short tv_addrs[] = {
+ 0x42, 0x43, 0x4a, 0x4b, /* tda8290 */
+ 0x60, 0x61, 0x62, 0x63, 0x64,
+ I2C_CLIENT_END
+ };
+
+ switch (type) {
+ case ADDRS_RADIO:
+ return radio_addrs;
+ case ADDRS_DEMOD:
+ return demod_addrs;
+ case ADDRS_TV:
+ return tv_addrs;
+ case ADDRS_TV_WITH_DEMOD:
+ return tv_addrs + 4;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(v4l2_i2c_tuner_addrs);
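+
+/*
+ * Usage sketch (not part of this file): when the tuner address is not
+ * known in advance, a bridge driver combines this helper with
+ * v4l2_i2c_new_subdev(); passing addr == 0 together with a probe list
+ * makes v4l2_i2c_new_subdev_board() scan the listed addresses instead
+ * of instantiating at a fixed one:
+ *
+ *	sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, "tuner",
+ *				 0, v4l2_i2c_tuner_addrs(ADDRS_TV));
+ */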
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
new file mode 100644
index 0000000000..f4d9d62790
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -0,0 +1,3445 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Video capture interface for Linux version 2
+ *
+ * A generic framework to process V4L2 ioctl commands.
+ *
+ * Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk> (version 1)
+ * Mauro Carvalho Chehab <mchehab@kernel.org> (version 2)
+ */
+
+#include <linux/compat.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+
+#include <linux/v4l2-subdev.h>
+#include <linux/videodev2.h>
+
+#include <media/media-device.h> /* for media_set_bus_info() */
+#include <media/v4l2-common.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-device.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-mem2mem.h>
+
+#include <trace/events/v4l2.h>
+
+#define is_valid_ioctl(vfd, cmd) test_bit(_IOC_NR(cmd), (vfd)->valid_ioctls)
+
+struct std_descr {
+ v4l2_std_id std;
+ const char *descr;
+};
+
+static const struct std_descr standards[] = {
+ { V4L2_STD_NTSC, "NTSC" },
+ { V4L2_STD_NTSC_M, "NTSC-M" },
+ { V4L2_STD_NTSC_M_JP, "NTSC-M-JP" },
+ { V4L2_STD_NTSC_M_KR, "NTSC-M-KR" },
+ { V4L2_STD_NTSC_443, "NTSC-443" },
+ { V4L2_STD_PAL, "PAL" },
+ { V4L2_STD_PAL_BG, "PAL-BG" },
+ { V4L2_STD_PAL_B, "PAL-B" },
+ { V4L2_STD_PAL_B1, "PAL-B1" },
+ { V4L2_STD_PAL_G, "PAL-G" },
+ { V4L2_STD_PAL_H, "PAL-H" },
+ { V4L2_STD_PAL_I, "PAL-I" },
+ { V4L2_STD_PAL_DK, "PAL-DK" },
+ { V4L2_STD_PAL_D, "PAL-D" },
+ { V4L2_STD_PAL_D1, "PAL-D1" },
+ { V4L2_STD_PAL_K, "PAL-K" },
+ { V4L2_STD_PAL_M, "PAL-M" },
+ { V4L2_STD_PAL_N, "PAL-N" },
+ { V4L2_STD_PAL_Nc, "PAL-Nc" },
+ { V4L2_STD_PAL_60, "PAL-60" },
+ { V4L2_STD_SECAM, "SECAM" },
+ { V4L2_STD_SECAM_B, "SECAM-B" },
+ { V4L2_STD_SECAM_G, "SECAM-G" },
+ { V4L2_STD_SECAM_H, "SECAM-H" },
+ { V4L2_STD_SECAM_DK, "SECAM-DK" },
+ { V4L2_STD_SECAM_D, "SECAM-D" },
+ { V4L2_STD_SECAM_K, "SECAM-K" },
+ { V4L2_STD_SECAM_K1, "SECAM-K1" },
+ { V4L2_STD_SECAM_L, "SECAM-L" },
+ { V4L2_STD_SECAM_LC, "SECAM-Lc" },
+ { 0, "Unknown" }
+};
+
+/* video4linux standard ID conversion to standard name */
+const char *v4l2_norm_to_name(v4l2_std_id id)
+{
+ u32 myid = id;
+ int i;
+
+ /* HACK: ppc32 architecture doesn't have __ucmpdi2 function to handle
+ 64 bit comparisons. So, on that architecture, with some gcc
+ variants, compilation fails. Currently, the max value is 30bit wide.
+ */
+ BUG_ON(myid != id);
+
+ for (i = 0; standards[i].std; i++)
+ if (myid == standards[i].std)
+ break;
+ return standards[i].descr;
+}
+EXPORT_SYMBOL(v4l2_norm_to_name);
+
+/* Returns frame period for the given standard */
+void v4l2_video_std_frame_period(int id, struct v4l2_fract *frameperiod)
+{
+ if (id & V4L2_STD_525_60) {
+ frameperiod->numerator = 1001;
+ frameperiod->denominator = 30000;
+ } else {
+ frameperiod->numerator = 1;
+ frameperiod->denominator = 25;
+ }
+}
+EXPORT_SYMBOL(v4l2_video_std_frame_period);
+
+/* Fill in the fields of a v4l2_standard structure according to the
+   'id' and 'name' parameters. Always returns 0. */
+int v4l2_video_std_construct(struct v4l2_standard *vs,
+ int id, const char *name)
+{
+ vs->id = id;
+ v4l2_video_std_frame_period(id, &vs->frameperiod);
+ vs->framelines = (id & V4L2_STD_525_60) ? 525 : 625;
+ strscpy(vs->name, name, sizeof(vs->name));
+ return 0;
+}
+EXPORT_SYMBOL(v4l2_video_std_construct);
+
+/* Fill in the fields of a v4l2_standard structure according to the
+ * 'id' and 'vs->index' parameters. Returns negative on error. */
+int v4l_video_std_enumstd(struct v4l2_standard *vs, v4l2_std_id id)
+{
+ v4l2_std_id curr_id = 0;
+ unsigned int index = vs->index, i, j = 0;
+ const char *descr = "";
+
+ /* Return -ENODATA if the id for the current input
+ or output is 0, meaning that it doesn't support this API. */
+ if (id == 0)
+ return -ENODATA;
+
+ /* Return norm array in a canonical way */
+ for (i = 0; i <= index && id; i++) {
+ /* last std value in the standards array is 0, so this
+ while always ends there since (id & 0) == 0. */
+ while ((id & standards[j].std) != standards[j].std)
+ j++;
+ curr_id = standards[j].std;
+ descr = standards[j].descr;
+ j++;
+ if (curr_id == 0)
+ break;
+ if (curr_id != V4L2_STD_PAL &&
+ curr_id != V4L2_STD_SECAM &&
+ curr_id != V4L2_STD_NTSC)
+ id &= ~curr_id;
+ }
+ if (i <= index)
+ return -EINVAL;
+
+ v4l2_video_std_construct(vs, curr_id, descr);
+ return 0;
+}
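+
+/*
+ * Worked example (assumed values): for an input whose standard mask is
+ * V4L2_STD_PAL_BG | V4L2_STD_NTSC_M, successive VIDIOC_ENUMSTD calls
+ * return "NTSC-M" for index 0 and "PAL-BG" for index 1. Index 2 and
+ * above return -EINVAL, since each match removes its bits from the mask
+ * and nothing is left after the second entry.
+ */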
+
+/* ----------------------------------------------------------------- */
+/* some arrays for pretty-printing debug messages of enum types */
+
+const char *v4l2_field_names[] = {
+ [V4L2_FIELD_ANY] = "any",
+ [V4L2_FIELD_NONE] = "none",
+ [V4L2_FIELD_TOP] = "top",
+ [V4L2_FIELD_BOTTOM] = "bottom",
+ [V4L2_FIELD_INTERLACED] = "interlaced",
+ [V4L2_FIELD_SEQ_TB] = "seq-tb",
+ [V4L2_FIELD_SEQ_BT] = "seq-bt",
+ [V4L2_FIELD_ALTERNATE] = "alternate",
+ [V4L2_FIELD_INTERLACED_TB] = "interlaced-tb",
+ [V4L2_FIELD_INTERLACED_BT] = "interlaced-bt",
+};
+EXPORT_SYMBOL(v4l2_field_names);
+
+const char *v4l2_type_names[] = {
+ [0] = "0",
+ [V4L2_BUF_TYPE_VIDEO_CAPTURE] = "vid-cap",
+ [V4L2_BUF_TYPE_VIDEO_OVERLAY] = "vid-overlay",
+ [V4L2_BUF_TYPE_VIDEO_OUTPUT] = "vid-out",
+ [V4L2_BUF_TYPE_VBI_CAPTURE] = "vbi-cap",
+ [V4L2_BUF_TYPE_VBI_OUTPUT] = "vbi-out",
+ [V4L2_BUF_TYPE_SLICED_VBI_CAPTURE] = "sliced-vbi-cap",
+ [V4L2_BUF_TYPE_SLICED_VBI_OUTPUT] = "sliced-vbi-out",
+ [V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY] = "vid-out-overlay",
+ [V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE] = "vid-cap-mplane",
+ [V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE] = "vid-out-mplane",
+ [V4L2_BUF_TYPE_SDR_CAPTURE] = "sdr-cap",
+ [V4L2_BUF_TYPE_SDR_OUTPUT] = "sdr-out",
+ [V4L2_BUF_TYPE_META_CAPTURE] = "meta-cap",
+ [V4L2_BUF_TYPE_META_OUTPUT] = "meta-out",
+};
+EXPORT_SYMBOL(v4l2_type_names);
+
+static const char *v4l2_memory_names[] = {
+ [V4L2_MEMORY_MMAP] = "mmap",
+ [V4L2_MEMORY_USERPTR] = "userptr",
+ [V4L2_MEMORY_OVERLAY] = "overlay",
+ [V4L2_MEMORY_DMABUF] = "dmabuf",
+};
+
+#define prt_names(a, arr) (((unsigned)(a)) < ARRAY_SIZE(arr) ? arr[a] : "unknown")
+
+/* ------------------------------------------------------------------ */
+/* debug help functions */
+
+static void v4l_print_querycap(const void *arg, bool write_only)
+{
+ const struct v4l2_capability *p = arg;
+
+ pr_cont("driver=%.*s, card=%.*s, bus=%.*s, version=0x%08x, capabilities=0x%08x, device_caps=0x%08x\n",
+ (int)sizeof(p->driver), p->driver,
+ (int)sizeof(p->card), p->card,
+ (int)sizeof(p->bus_info), p->bus_info,
+ p->version, p->capabilities, p->device_caps);
+}
+
+static void v4l_print_enuminput(const void *arg, bool write_only)
+{
+ const struct v4l2_input *p = arg;
+
+ pr_cont("index=%u, name=%.*s, type=%u, audioset=0x%x, tuner=%u, std=0x%08Lx, status=0x%x, capabilities=0x%x\n",
+ p->index, (int)sizeof(p->name), p->name, p->type, p->audioset,
+ p->tuner, (unsigned long long)p->std, p->status,
+ p->capabilities);
+}
+
+static void v4l_print_enumoutput(const void *arg, bool write_only)
+{
+ const struct v4l2_output *p = arg;
+
+ pr_cont("index=%u, name=%.*s, type=%u, audioset=0x%x, modulator=%u, std=0x%08Lx, capabilities=0x%x\n",
+ p->index, (int)sizeof(p->name), p->name, p->type, p->audioset,
+ p->modulator, (unsigned long long)p->std, p->capabilities);
+}
+
+static void v4l_print_audio(const void *arg, bool write_only)
+{
+ const struct v4l2_audio *p = arg;
+
+ if (write_only)
+ pr_cont("index=%u, mode=0x%x\n", p->index, p->mode);
+ else
+ pr_cont("index=%u, name=%.*s, capability=0x%x, mode=0x%x\n",
+ p->index, (int)sizeof(p->name), p->name,
+ p->capability, p->mode);
+}
+
+static void v4l_print_audioout(const void *arg, bool write_only)
+{
+ const struct v4l2_audioout *p = arg;
+
+ if (write_only)
+ pr_cont("index=%u\n", p->index);
+ else
+ pr_cont("index=%u, name=%.*s, capability=0x%x, mode=0x%x\n",
+ p->index, (int)sizeof(p->name), p->name,
+ p->capability, p->mode);
+}
+
+static void v4l_print_fmtdesc(const void *arg, bool write_only)
+{
+ const struct v4l2_fmtdesc *p = arg;
+
+ pr_cont("index=%u, type=%s, flags=0x%x, pixelformat=%p4cc, mbus_code=0x%04x, description='%.*s'\n",
+ p->index, prt_names(p->type, v4l2_type_names),
+ p->flags, &p->pixelformat, p->mbus_code,
+ (int)sizeof(p->description), p->description);
+}
+
+static void v4l_print_format(const void *arg, bool write_only)
+{
+ const struct v4l2_format *p = arg;
+ const struct v4l2_pix_format *pix;
+ const struct v4l2_pix_format_mplane *mp;
+ const struct v4l2_vbi_format *vbi;
+ const struct v4l2_sliced_vbi_format *sliced;
+ const struct v4l2_window *win;
+ const struct v4l2_meta_format *meta;
+ u32 pixelformat;
+ u32 planes;
+ unsigned i;
+
+ pr_cont("type=%s", prt_names(p->type, v4l2_type_names));
+ switch (p->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ pix = &p->fmt.pix;
+ pr_cont(", width=%u, height=%u, pixelformat=%p4cc, field=%s, bytesperline=%u, sizeimage=%u, colorspace=%d, flags=0x%x, ycbcr_enc=%u, quantization=%u, xfer_func=%u\n",
+ pix->width, pix->height, &pix->pixelformat,
+ prt_names(pix->field, v4l2_field_names),
+ pix->bytesperline, pix->sizeimage,
+ pix->colorspace, pix->flags, pix->ycbcr_enc,
+ pix->quantization, pix->xfer_func);
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ mp = &p->fmt.pix_mp;
+ pixelformat = mp->pixelformat;
+ pr_cont(", width=%u, height=%u, format=%p4cc, field=%s, colorspace=%d, num_planes=%u, flags=0x%x, ycbcr_enc=%u, quantization=%u, xfer_func=%u\n",
+ mp->width, mp->height, &pixelformat,
+ prt_names(mp->field, v4l2_field_names),
+ mp->colorspace, mp->num_planes, mp->flags,
+ mp->ycbcr_enc, mp->quantization, mp->xfer_func);
+ planes = min_t(u32, mp->num_planes, VIDEO_MAX_PLANES);
+ for (i = 0; i < planes; i++)
+ printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i,
+ mp->plane_fmt[i].bytesperline,
+ mp->plane_fmt[i].sizeimage);
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
+ win = &p->fmt.win;
+ pr_cont(", wxh=%dx%d, x,y=%d,%d, field=%s, chromakey=0x%08x, global_alpha=0x%02x\n",
+ win->w.width, win->w.height, win->w.left, win->w.top,
+ prt_names(win->field, v4l2_field_names),
+ win->chromakey, win->global_alpha);
+ break;
+ case V4L2_BUF_TYPE_VBI_CAPTURE:
+ case V4L2_BUF_TYPE_VBI_OUTPUT:
+ vbi = &p->fmt.vbi;
+ pr_cont(", sampling_rate=%u, offset=%u, samples_per_line=%u, sample_format=%p4cc, start=%u,%u, count=%u,%u\n",
+ vbi->sampling_rate, vbi->offset,
+ vbi->samples_per_line, &vbi->sample_format,
+ vbi->start[0], vbi->start[1],
+ vbi->count[0], vbi->count[1]);
+ break;
+ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
+ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
+ sliced = &p->fmt.sliced;
+ pr_cont(", service_set=0x%08x, io_size=%d\n",
+ sliced->service_set, sliced->io_size);
+ for (i = 0; i < 24; i++)
+ printk(KERN_DEBUG "line[%02u]=0x%04x, 0x%04x\n", i,
+ sliced->service_lines[0][i],
+ sliced->service_lines[1][i]);
+ break;
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ case V4L2_BUF_TYPE_SDR_OUTPUT:
+ pixelformat = p->fmt.sdr.pixelformat;
+ pr_cont(", pixelformat=%p4cc\n", &pixelformat);
+ break;
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ case V4L2_BUF_TYPE_META_OUTPUT:
+ meta = &p->fmt.meta;
+ pixelformat = meta->dataformat;
+ pr_cont(", dataformat=%p4cc, buffersize=%u\n",
+ &pixelformat, meta->buffersize);
+ break;
+ }
+}
+
+static void v4l_print_framebuffer(const void *arg, bool write_only)
+{
+ const struct v4l2_framebuffer *p = arg;
+
+ pr_cont("capability=0x%x, flags=0x%x, base=0x%p, width=%u, height=%u, pixelformat=%p4cc, bytesperline=%u, sizeimage=%u, colorspace=%d\n",
+ p->capability, p->flags, p->base, p->fmt.width, p->fmt.height,
+ &p->fmt.pixelformat, p->fmt.bytesperline, p->fmt.sizeimage,
+ p->fmt.colorspace);
+}
+
+static void v4l_print_buftype(const void *arg, bool write_only)
+{
+ pr_cont("type=%s\n", prt_names(*(u32 *)arg, v4l2_type_names));
+}
+
+static void v4l_print_modulator(const void *arg, bool write_only)
+{
+ const struct v4l2_modulator *p = arg;
+
+ if (write_only)
+ pr_cont("index=%u, txsubchans=0x%x\n", p->index, p->txsubchans);
+ else
+ pr_cont("index=%u, name=%.*s, capability=0x%x, rangelow=%u, rangehigh=%u, txsubchans=0x%x\n",
+ p->index, (int)sizeof(p->name), p->name, p->capability,
+ p->rangelow, p->rangehigh, p->txsubchans);
+}
+
+static void v4l_print_tuner(const void *arg, bool write_only)
+{
+ const struct v4l2_tuner *p = arg;
+
+ if (write_only)
+ pr_cont("index=%u, audmode=%u\n", p->index, p->audmode);
+ else
+ pr_cont("index=%u, name=%.*s, type=%u, capability=0x%x, rangelow=%u, rangehigh=%u, signal=%u, afc=%d, rxsubchans=0x%x, audmode=%u\n",
+ p->index, (int)sizeof(p->name), p->name, p->type,
+ p->capability, p->rangelow,
+ p->rangehigh, p->signal, p->afc,
+ p->rxsubchans, p->audmode);
+}
+
+static void v4l_print_frequency(const void *arg, bool write_only)
+{
+ const struct v4l2_frequency *p = arg;
+
+ pr_cont("tuner=%u, type=%u, frequency=%u\n",
+ p->tuner, p->type, p->frequency);
+}
+
+static void v4l_print_standard(const void *arg, bool write_only)
+{
+ const struct v4l2_standard *p = arg;
+
+ pr_cont("index=%u, id=0x%Lx, name=%.*s, fps=%u/%u, framelines=%u\n",
+ p->index,
+ (unsigned long long)p->id, (int)sizeof(p->name), p->name,
+ p->frameperiod.numerator,
+ p->frameperiod.denominator,
+ p->framelines);
+}
+
+static void v4l_print_std(const void *arg, bool write_only)
+{
+ pr_cont("std=0x%08Lx\n", *(const long long unsigned *)arg);
+}
+
+static void v4l_print_hw_freq_seek(const void *arg, bool write_only)
+{
+ const struct v4l2_hw_freq_seek *p = arg;
+
+ pr_cont("tuner=%u, type=%u, seek_upward=%u, wrap_around=%u, spacing=%u, rangelow=%u, rangehigh=%u\n",
+ p->tuner, p->type, p->seek_upward, p->wrap_around, p->spacing,
+ p->rangelow, p->rangehigh);
+}
+
+static void v4l_print_requestbuffers(const void *arg, bool write_only)
+{
+ const struct v4l2_requestbuffers *p = arg;
+
+ pr_cont("count=%d, type=%s, memory=%s\n",
+ p->count,
+ prt_names(p->type, v4l2_type_names),
+ prt_names(p->memory, v4l2_memory_names));
+}
+
+static void v4l_print_buffer(const void *arg, bool write_only)
+{
+ const struct v4l2_buffer *p = arg;
+ const struct v4l2_timecode *tc = &p->timecode;
+ const struct v4l2_plane *plane;
+ int i;
+
+ pr_cont("%02d:%02d:%02d.%06ld index=%d, type=%s, request_fd=%d, flags=0x%08x, field=%s, sequence=%d, memory=%s",
+ (int)p->timestamp.tv_sec / 3600,
+ ((int)p->timestamp.tv_sec / 60) % 60,
+ ((int)p->timestamp.tv_sec % 60),
+ (long)p->timestamp.tv_usec,
+ p->index,
+ prt_names(p->type, v4l2_type_names), p->request_fd,
+ p->flags, prt_names(p->field, v4l2_field_names),
+ p->sequence, prt_names(p->memory, v4l2_memory_names));
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(p->type) && p->m.planes) {
+ pr_cont("\n");
+ for (i = 0; i < p->length; ++i) {
+ plane = &p->m.planes[i];
+ printk(KERN_DEBUG
+ "plane %d: bytesused=%d, data_offset=0x%08x, offset/userptr=0x%lx, length=%d\n",
+ i, plane->bytesused, plane->data_offset,
+ plane->m.userptr, plane->length);
+ }
+ } else {
+ pr_cont(", bytesused=%d, offset/userptr=0x%lx, length=%d\n",
+ p->bytesused, p->m.userptr, p->length);
+ }
+
+ printk(KERN_DEBUG "timecode=%02d:%02d:%02d type=%d, flags=0x%08x, frames=%d, userbits=0x%08x\n",
+ tc->hours, tc->minutes, tc->seconds,
+ tc->type, tc->flags, tc->frames, *(__u32 *)tc->userbits);
+}
+
+static void v4l_print_exportbuffer(const void *arg, bool write_only)
+{
+ const struct v4l2_exportbuffer *p = arg;
+
+ pr_cont("fd=%d, type=%s, index=%u, plane=%u, flags=0x%08x\n",
+ p->fd, prt_names(p->type, v4l2_type_names),
+ p->index, p->plane, p->flags);
+}
+
+static void v4l_print_create_buffers(const void *arg, bool write_only)
+{
+ const struct v4l2_create_buffers *p = arg;
+
+ pr_cont("index=%d, count=%d, memory=%s, capabilities=0x%08x, ",
+ p->index, p->count, prt_names(p->memory, v4l2_memory_names),
+ p->capabilities);
+ v4l_print_format(&p->format, write_only);
+}
+
+static void v4l_print_streamparm(const void *arg, bool write_only)
+{
+ const struct v4l2_streamparm *p = arg;
+
+ pr_cont("type=%s", prt_names(p->type, v4l2_type_names));
+
+ if (p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ const struct v4l2_captureparm *c = &p->parm.capture;
+
+ pr_cont(", capability=0x%x, capturemode=0x%x, timeperframe=%d/%d, extendedmode=%d, readbuffers=%d\n",
+ c->capability, c->capturemode,
+ c->timeperframe.numerator, c->timeperframe.denominator,
+ c->extendedmode, c->readbuffers);
+ } else if (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT ||
+ p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ const struct v4l2_outputparm *c = &p->parm.output;
+
+ pr_cont(", capability=0x%x, outputmode=0x%x, timeperframe=%d/%d, extendedmode=%d, writebuffers=%d\n",
+ c->capability, c->outputmode,
+ c->timeperframe.numerator, c->timeperframe.denominator,
+ c->extendedmode, c->writebuffers);
+ } else {
+ pr_cont("\n");
+ }
+}
+
+static void v4l_print_queryctrl(const void *arg, bool write_only)
+{
+ const struct v4l2_queryctrl *p = arg;
+
+ pr_cont("id=0x%x, type=%d, name=%.*s, min/max=%d/%d, step=%d, default=%d, flags=0x%08x\n",
+ p->id, p->type, (int)sizeof(p->name), p->name,
+ p->minimum, p->maximum,
+ p->step, p->default_value, p->flags);
+}
+
+static void v4l_print_query_ext_ctrl(const void *arg, bool write_only)
+{
+ const struct v4l2_query_ext_ctrl *p = arg;
+
+ pr_cont("id=0x%x, type=%d, name=%.*s, min/max=%lld/%lld, step=%lld, default=%lld, flags=0x%08x, elem_size=%u, elems=%u, nr_of_dims=%u, dims=%u,%u,%u,%u\n",
+ p->id, p->type, (int)sizeof(p->name), p->name,
+ p->minimum, p->maximum,
+ p->step, p->default_value, p->flags,
+ p->elem_size, p->elems, p->nr_of_dims,
+ p->dims[0], p->dims[1], p->dims[2], p->dims[3]);
+}
+
+static void v4l_print_querymenu(const void *arg, bool write_only)
+{
+ const struct v4l2_querymenu *p = arg;
+
+ pr_cont("id=0x%x, index=%d\n", p->id, p->index);
+}
+
+static void v4l_print_control(const void *arg, bool write_only)
+{
+ const struct v4l2_control *p = arg;
+ const char *name = v4l2_ctrl_get_name(p->id);
+
+ if (name)
+ pr_cont("name=%s, ", name);
+ pr_cont("id=0x%x, value=%d\n", p->id, p->value);
+}
+
+static void v4l_print_ext_controls(const void *arg, bool write_only)
+{
+ const struct v4l2_ext_controls *p = arg;
+ int i;
+
+ pr_cont("which=0x%x, count=%d, error_idx=%d, request_fd=%d",
+ p->which, p->count, p->error_idx, p->request_fd);
+ for (i = 0; i < p->count; i++) {
+ unsigned int id = p->controls[i].id;
+ const char *name = v4l2_ctrl_get_name(id);
+
+ if (name)
+ pr_cont(", name=%s", name);
+ if (!p->controls[i].size)
+ pr_cont(", id/val=0x%x/0x%x", id, p->controls[i].value);
+ else
+ pr_cont(", id/size=0x%x/%u", id, p->controls[i].size);
+ }
+ pr_cont("\n");
+}
+
+static void v4l_print_cropcap(const void *arg, bool write_only)
+{
+ const struct v4l2_cropcap *p = arg;
+
+ pr_cont("type=%s, bounds wxh=%dx%d, x,y=%d,%d, defrect wxh=%dx%d, x,y=%d,%d, pixelaspect %d/%d\n",
+ prt_names(p->type, v4l2_type_names),
+ p->bounds.width, p->bounds.height,
+ p->bounds.left, p->bounds.top,
+ p->defrect.width, p->defrect.height,
+ p->defrect.left, p->defrect.top,
+ p->pixelaspect.numerator, p->pixelaspect.denominator);
+}
+
+static void v4l_print_crop(const void *arg, bool write_only)
+{
+ const struct v4l2_crop *p = arg;
+
+ pr_cont("type=%s, wxh=%dx%d, x,y=%d,%d\n",
+ prt_names(p->type, v4l2_type_names),
+ p->c.width, p->c.height,
+ p->c.left, p->c.top);
+}
+
+static void v4l_print_selection(const void *arg, bool write_only)
+{
+ const struct v4l2_selection *p = arg;
+
+ pr_cont("type=%s, target=%d, flags=0x%x, wxh=%dx%d, x,y=%d,%d\n",
+ prt_names(p->type, v4l2_type_names),
+ p->target, p->flags,
+ p->r.width, p->r.height, p->r.left, p->r.top);
+}
+
+static void v4l_print_jpegcompression(const void *arg, bool write_only)
+{
+ const struct v4l2_jpegcompression *p = arg;
+
+ pr_cont("quality=%d, APPn=%d, APP_len=%d, COM_len=%d, jpeg_markers=0x%x\n",
+ p->quality, p->APPn, p->APP_len,
+ p->COM_len, p->jpeg_markers);
+}
+
+static void v4l_print_enc_idx(const void *arg, bool write_only)
+{
+ const struct v4l2_enc_idx *p = arg;
+
+ pr_cont("entries=%d, entries_cap=%d\n",
+ p->entries, p->entries_cap);
+}
+
+static void v4l_print_encoder_cmd(const void *arg, bool write_only)
+{
+ const struct v4l2_encoder_cmd *p = arg;
+
+ pr_cont("cmd=%d, flags=0x%x\n",
+ p->cmd, p->flags);
+}
+
+static void v4l_print_decoder_cmd(const void *arg, bool write_only)
+{
+ const struct v4l2_decoder_cmd *p = arg;
+
+ pr_cont("cmd=%d, flags=0x%x\n", p->cmd, p->flags);
+
+ if (p->cmd == V4L2_DEC_CMD_START)
+ pr_info("speed=%d, format=%u\n",
+ p->start.speed, p->start.format);
+ else if (p->cmd == V4L2_DEC_CMD_STOP)
+ pr_info("pts=%llu\n", p->stop.pts);
+}
+
+static void v4l_print_dbg_chip_info(const void *arg, bool write_only)
+{
+ const struct v4l2_dbg_chip_info *p = arg;
+
+ pr_cont("type=%u, ", p->match.type);
+ if (p->match.type == V4L2_CHIP_MATCH_I2C_DRIVER)
+ pr_cont("name=%.*s, ",
+ (int)sizeof(p->match.name), p->match.name);
+ else
+ pr_cont("addr=%u, ", p->match.addr);
+ pr_cont("name=%.*s\n", (int)sizeof(p->name), p->name);
+}
+
+static void v4l_print_dbg_register(const void *arg, bool write_only)
+{
+ const struct v4l2_dbg_register *p = arg;
+
+ pr_cont("type=%u, ", p->match.type);
+ if (p->match.type == V4L2_CHIP_MATCH_I2C_DRIVER)
+ pr_cont("name=%.*s, ",
+ (int)sizeof(p->match.name), p->match.name);
+ else
+ pr_cont("addr=%u, ", p->match.addr);
+ pr_cont("reg=0x%llx, val=0x%llx\n",
+ p->reg, p->val);
+}
+
+static void v4l_print_dv_timings(const void *arg, bool write_only)
+{
+ const struct v4l2_dv_timings *p = arg;
+
+ switch (p->type) {
+ case V4L2_DV_BT_656_1120:
+ pr_cont("type=bt-656/1120, interlaced=%u, pixelclock=%llu, width=%u, height=%u, polarities=0x%x, hfrontporch=%u, hsync=%u, hbackporch=%u, vfrontporch=%u, vsync=%u, vbackporch=%u, il_vfrontporch=%u, il_vsync=%u, il_vbackporch=%u, standards=0x%x, flags=0x%x\n",
+ p->bt.interlaced, p->bt.pixelclock,
+ p->bt.width, p->bt.height,
+ p->bt.polarities, p->bt.hfrontporch,
+ p->bt.hsync, p->bt.hbackporch,
+ p->bt.vfrontporch, p->bt.vsync,
+ p->bt.vbackporch, p->bt.il_vfrontporch,
+ p->bt.il_vsync, p->bt.il_vbackporch,
+ p->bt.standards, p->bt.flags);
+ break;
+ default:
+ pr_cont("type=%d\n", p->type);
+ break;
+ }
+}
+
+static void v4l_print_enum_dv_timings(const void *arg, bool write_only)
+{
+ const struct v4l2_enum_dv_timings *p = arg;
+
+ pr_cont("index=%u, ", p->index);
+ v4l_print_dv_timings(&p->timings, write_only);
+}
+
+static void v4l_print_dv_timings_cap(const void *arg, bool write_only)
+{
+ const struct v4l2_dv_timings_cap *p = arg;
+
+ switch (p->type) {
+ case V4L2_DV_BT_656_1120:
+ pr_cont("type=bt-656/1120, width=%u-%u, height=%u-%u, pixelclock=%llu-%llu, standards=0x%x, capabilities=0x%x\n",
+ p->bt.min_width, p->bt.max_width,
+ p->bt.min_height, p->bt.max_height,
+ p->bt.min_pixelclock, p->bt.max_pixelclock,
+ p->bt.standards, p->bt.capabilities);
+ break;
+ default:
+ pr_cont("type=%u\n", p->type);
+ break;
+ }
+}
+
+static void v4l_print_frmsizeenum(const void *arg, bool write_only)
+{
+ const struct v4l2_frmsizeenum *p = arg;
+
+ pr_cont("index=%u, pixelformat=%p4cc, type=%u",
+ p->index, &p->pixel_format, p->type);
+ switch (p->type) {
+ case V4L2_FRMSIZE_TYPE_DISCRETE:
+ pr_cont(", wxh=%ux%u\n",
+ p->discrete.width, p->discrete.height);
+ break;
+ case V4L2_FRMSIZE_TYPE_STEPWISE:
+ pr_cont(", min=%ux%u, max=%ux%u, step=%ux%u\n",
+ p->stepwise.min_width,
+ p->stepwise.min_height,
+ p->stepwise.max_width,
+ p->stepwise.max_height,
+ p->stepwise.step_width,
+ p->stepwise.step_height);
+ break;
+ case V4L2_FRMSIZE_TYPE_CONTINUOUS:
+ default:
+ pr_cont("\n");
+ break;
+ }
+}
+
+static void v4l_print_frmivalenum(const void *arg, bool write_only)
+{
+ const struct v4l2_frmivalenum *p = arg;
+
+ pr_cont("index=%u, pixelformat=%p4cc, wxh=%ux%u, type=%u",
+ p->index, &p->pixel_format, p->width, p->height, p->type);
+ switch (p->type) {
+ case V4L2_FRMIVAL_TYPE_DISCRETE:
+ pr_cont(", fps=%d/%d\n",
+ p->discrete.numerator,
+ p->discrete.denominator);
+ break;
+ case V4L2_FRMIVAL_TYPE_STEPWISE:
+ pr_cont(", min=%d/%d, max=%d/%d, step=%d/%d\n",
+ p->stepwise.min.numerator,
+ p->stepwise.min.denominator,
+ p->stepwise.max.numerator,
+ p->stepwise.max.denominator,
+ p->stepwise.step.numerator,
+ p->stepwise.step.denominator);
+ break;
+ case V4L2_FRMIVAL_TYPE_CONTINUOUS:
+ default:
+ pr_cont("\n");
+ break;
+ }
+}
+
+static void v4l_print_event(const void *arg, bool write_only)
+{
+ const struct v4l2_event *p = arg;
+ const struct v4l2_event_ctrl *c;
+
+ pr_cont("type=0x%x, pending=%u, sequence=%u, id=%u, timestamp=%llu.%9.9llu\n",
+ p->type, p->pending, p->sequence, p->id,
+ p->timestamp.tv_sec, p->timestamp.tv_nsec);
+ switch (p->type) {
+ case V4L2_EVENT_VSYNC:
+ printk(KERN_DEBUG "field=%s\n",
+ prt_names(p->u.vsync.field, v4l2_field_names));
+ break;
+ case V4L2_EVENT_CTRL:
+ c = &p->u.ctrl;
+ printk(KERN_DEBUG "changes=0x%x, type=%u, ",
+ c->changes, c->type);
+ if (c->type == V4L2_CTRL_TYPE_INTEGER64)
+ pr_cont("value64=%lld, ", c->value64);
+ else
+ pr_cont("value=%d, ", c->value);
+ pr_cont("flags=0x%x, minimum=%d, maximum=%d, step=%d, default_value=%d\n",
+ c->flags, c->minimum, c->maximum,
+ c->step, c->default_value);
+ break;
+ case V4L2_EVENT_FRAME_SYNC:
+ pr_cont("frame_sequence=%u\n",
+ p->u.frame_sync.frame_sequence);
+ break;
+ }
+}
+
+static void v4l_print_event_subscription(const void *arg, bool write_only)
+{
+ const struct v4l2_event_subscription *p = arg;
+
+ pr_cont("type=0x%x, id=0x%x, flags=0x%x\n",
+ p->type, p->id, p->flags);
+}
+
+static void v4l_print_sliced_vbi_cap(const void *arg, bool write_only)
+{
+ const struct v4l2_sliced_vbi_cap *p = arg;
+ int i;
+
+ pr_cont("type=%s, service_set=0x%08x\n",
+ prt_names(p->type, v4l2_type_names), p->service_set);
+ for (i = 0; i < 24; i++)
+ printk(KERN_DEBUG "line[%02u]=0x%04x, 0x%04x\n", i,
+ p->service_lines[0][i],
+ p->service_lines[1][i]);
+}
+
+static void v4l_print_freq_band(const void *arg, bool write_only)
+{
+ const struct v4l2_frequency_band *p = arg;
+
+ pr_cont("tuner=%u, type=%u, index=%u, capability=0x%x, rangelow=%u, rangehigh=%u, modulation=0x%x\n",
+ p->tuner, p->type, p->index,
+ p->capability, p->rangelow,
+ p->rangehigh, p->modulation);
+}
+
+static void v4l_print_edid(const void *arg, bool write_only)
+{
+ const struct v4l2_edid *p = arg;
+
+ pr_cont("pad=%u, start_block=%u, blocks=%u\n",
+ p->pad, p->start_block, p->blocks);
+}
+
+static void v4l_print_u32(const void *arg, bool write_only)
+{
+ pr_cont("value=%u\n", *(const u32 *)arg);
+}
+
+static void v4l_print_newline(const void *arg, bool write_only)
+{
+ pr_cont("\n");
+}
+
+static void v4l_print_default(const void *arg, bool write_only)
+{
+ pr_cont("driver-specific ioctl\n");
+}
+
+static bool check_ext_ctrls(struct v4l2_ext_controls *c, unsigned long ioctl)
+{
+ __u32 i;
+
+ /* zero the reserved fields */
+ c->reserved[0] = 0;
+ for (i = 0; i < c->count; i++)
+ c->controls[i].reserved2[0] = 0;
+
+ switch (c->which) {
+ case V4L2_CID_PRIVATE_BASE:
+ /*
+ * V4L2_CID_PRIVATE_BASE cannot be used as control class
+ * when using extended controls.
+ * Only when passed in through VIDIOC_G_CTRL and VIDIOC_S_CTRL
+ * is it allowed for backwards compatibility.
+ */
+ if (ioctl == VIDIOC_G_CTRL || ioctl == VIDIOC_S_CTRL)
+ return false;
+ break;
+ case V4L2_CTRL_WHICH_DEF_VAL:
+ /* Default value cannot be changed */
+ if (ioctl == VIDIOC_S_EXT_CTRLS ||
+ ioctl == VIDIOC_TRY_EXT_CTRLS) {
+ c->error_idx = c->count;
+ return false;
+ }
+ return true;
+ case V4L2_CTRL_WHICH_CUR_VAL:
+ return true;
+ case V4L2_CTRL_WHICH_REQUEST_VAL:
+ c->error_idx = c->count;
+ return false;
+ }
+
+ /* Check that all controls are from the same control class. */
+ for (i = 0; i < c->count; i++) {
+ if (V4L2_CTRL_ID2WHICH(c->controls[i].id) != c->which) {
+ c->error_idx = ioctl == VIDIOC_TRY_EXT_CTRLS ? i :
+ c->count;
+ return false;
+ }
+ }
+ return true;
+}
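+
+/*
+ * Example (sketch): a VIDIOC_S_EXT_CTRLS call with
+ * which == V4L2_CTRL_WHICH_DEF_VAL is rejected above since default
+ * values are read-only; error_idx is set to count to tell the
+ * application that validation failed before any control was processed.
+ */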
+
+static int check_fmt(struct file *file, enum v4l2_buf_type type)
+{
+ const u32 vid_caps = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+ V4L2_CAP_VIDEO_OUTPUT |
+ V4L2_CAP_VIDEO_OUTPUT_MPLANE |
+ V4L2_CAP_VIDEO_M2M | V4L2_CAP_VIDEO_M2M_MPLANE;
+ const u32 meta_caps = V4L2_CAP_META_CAPTURE |
+ V4L2_CAP_META_OUTPUT;
+ struct video_device *vfd = video_devdata(file);
+ const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
+ bool is_vid = vfd->vfl_type == VFL_TYPE_VIDEO &&
+ (vfd->device_caps & vid_caps);
+ bool is_vbi = vfd->vfl_type == VFL_TYPE_VBI;
+ bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
+ bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH;
+ bool is_meta = vfd->vfl_type == VFL_TYPE_VIDEO &&
+ (vfd->device_caps & meta_caps);
+ bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
+ bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
+
+ if (ops == NULL)
+ return -EINVAL;
+
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ if ((is_vid || is_tch) && is_rx &&
+ (ops->vidioc_g_fmt_vid_cap || ops->vidioc_g_fmt_vid_cap_mplane))
+ return 0;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ if ((is_vid || is_tch) && is_rx && ops->vidioc_g_fmt_vid_cap_mplane)
+ return 0;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
+ if (is_vid && is_rx && ops->vidioc_g_fmt_vid_overlay)
+ return 0;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ if (is_vid && is_tx &&
+ (ops->vidioc_g_fmt_vid_out || ops->vidioc_g_fmt_vid_out_mplane))
+ return 0;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ if (is_vid && is_tx && ops->vidioc_g_fmt_vid_out_mplane)
+ return 0;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
+ if (is_vid && is_tx && ops->vidioc_g_fmt_vid_out_overlay)
+ return 0;
+ break;
+ case V4L2_BUF_TYPE_VBI_CAPTURE:
+ if (is_vbi && is_rx && ops->vidioc_g_fmt_vbi_cap)
+ return 0;
+ break;
+ case V4L2_BUF_TYPE_VBI_OUTPUT:
+ if (is_vbi && is_tx && ops->vidioc_g_fmt_vbi_out)
+ return 0;
+ break;
+ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
+ if (is_vbi && is_rx && ops->vidioc_g_fmt_sliced_vbi_cap)
+ return 0;
+ break;
+ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
+ if (is_vbi && is_tx && ops->vidioc_g_fmt_sliced_vbi_out)
+ return 0;
+ break;
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ if (is_sdr && is_rx && ops->vidioc_g_fmt_sdr_cap)
+ return 0;
+ break;
+ case V4L2_BUF_TYPE_SDR_OUTPUT:
+ if (is_sdr && is_tx && ops->vidioc_g_fmt_sdr_out)
+ return 0;
+ break;
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ if (is_meta && is_rx && ops->vidioc_g_fmt_meta_cap)
+ return 0;
+ break;
+ case V4L2_BUF_TYPE_META_OUTPUT:
+ if (is_meta && is_tx && ops->vidioc_g_fmt_meta_out)
+ return 0;
+ break;
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static void v4l_sanitize_colorspace(u32 pixelformat, u32 *colorspace,
+ u32 *encoding, u32 *quantization,
+ u32 *xfer_func)
+{
+ bool is_hsv = pixelformat == V4L2_PIX_FMT_HSV24 ||
+ pixelformat == V4L2_PIX_FMT_HSV32;
+
+ if (!v4l2_is_colorspace_valid(*colorspace)) {
+ *colorspace = V4L2_COLORSPACE_DEFAULT;
+ *encoding = V4L2_YCBCR_ENC_DEFAULT;
+ *quantization = V4L2_QUANTIZATION_DEFAULT;
+ *xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ }
+
+ if ((!is_hsv && !v4l2_is_ycbcr_enc_valid(*encoding)) ||
+ (is_hsv && !v4l2_is_hsv_enc_valid(*encoding)))
+ *encoding = V4L2_YCBCR_ENC_DEFAULT;
+
+ if (!v4l2_is_quant_valid(*quantization))
+ *quantization = V4L2_QUANTIZATION_DEFAULT;
+
+ if (!v4l2_is_xfer_func_valid(*xfer_func))
+ *xfer_func = V4L2_XFER_FUNC_DEFAULT;
+}
+
+static void v4l_sanitize_format(struct v4l2_format *fmt)
+{
+ unsigned int offset;
+
+ /* Make sure num_planes is not bogus */
+ if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ||
+ fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ fmt->fmt.pix_mp.num_planes = min_t(u32, fmt->fmt.pix_mp.num_planes,
+ VIDEO_MAX_PLANES);
+
+	/*
+	 * The v4l2_pix_format structure has been extended with fields that were
+	 * not previously required to be set to zero by applications. The priv
+	 * field, when set to a magic value, indicates that the extended fields
+	 * are valid. Otherwise they will contain undefined values. To simplify
+	 * the API towards drivers, zero the extended fields and set the priv
+	 * field to the magic value when the extended pixel format structure
+	 * isn't used by applications.
+	 */
+ if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ if (fmt->fmt.pix.priv != V4L2_PIX_FMT_PRIV_MAGIC) {
+ fmt->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+
+ offset = offsetof(struct v4l2_pix_format, priv)
+ + sizeof(fmt->fmt.pix.priv);
+ memset(((void *)&fmt->fmt.pix) + offset, 0,
+ sizeof(fmt->fmt.pix) - offset);
+ }
+ }
+
+ /* Replace invalid colorspace values with defaults. */
+ if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ v4l_sanitize_colorspace(fmt->fmt.pix.pixelformat,
+ &fmt->fmt.pix.colorspace,
+ &fmt->fmt.pix.ycbcr_enc,
+ &fmt->fmt.pix.quantization,
+ &fmt->fmt.pix.xfer_func);
+ } else if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ||
+ fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ u32 ycbcr_enc = fmt->fmt.pix_mp.ycbcr_enc;
+ u32 quantization = fmt->fmt.pix_mp.quantization;
+ u32 xfer_func = fmt->fmt.pix_mp.xfer_func;
+
+ v4l_sanitize_colorspace(fmt->fmt.pix_mp.pixelformat,
+ &fmt->fmt.pix_mp.colorspace, &ycbcr_enc,
+ &quantization, &xfer_func);
+
+ fmt->fmt.pix_mp.ycbcr_enc = ycbcr_enc;
+ fmt->fmt.pix_mp.quantization = quantization;
+ fmt->fmt.pix_mp.xfer_func = xfer_func;
+ }
+}
+
+static int v4l_querycap(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_capability *cap = (struct v4l2_capability *)arg;
+ struct video_device *vfd = video_devdata(file);
+ int ret;
+
+ cap->version = LINUX_VERSION_CODE;
+ cap->device_caps = vfd->device_caps;
+ cap->capabilities = vfd->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+ media_set_bus_info(cap->bus_info, sizeof(cap->bus_info),
+ vfd->dev_parent);
+
+ ret = ops->vidioc_querycap(file, fh, cap);
+
+ /*
+ * Drivers must not change device_caps, so check for this and
+ * warn if this happened.
+ */
+ WARN_ON(cap->device_caps != vfd->device_caps);
+ /*
+ * Check that capabilities is a superset of
+ * vfd->device_caps | V4L2_CAP_DEVICE_CAPS
+ */
+ WARN_ON((cap->capabilities &
+ (vfd->device_caps | V4L2_CAP_DEVICE_CAPS)) !=
+ (vfd->device_caps | V4L2_CAP_DEVICE_CAPS));
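+	/*
+	 * The core handles the extended pixel format fields on behalf of
+	 * all drivers (see v4l_sanitize_format()), so always set the cap.
+	 */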
+ cap->capabilities |= V4L2_CAP_EXT_PIX_FORMAT;
+ cap->device_caps |= V4L2_CAP_EXT_PIX_FORMAT;
+
+ return ret;
+}
+
+static int v4l_g_input(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+
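+	/*
+	 * Devices that use the media controller for routing (V4L2_CAP_IO_MC)
+	 * expose a single input 0; routing is configured via the media API.
+	 */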
+ if (vfd->device_caps & V4L2_CAP_IO_MC) {
+ *(int *)arg = 0;
+ return 0;
+ }
+
+ return ops->vidioc_g_input(file, fh, arg);
+}
+
+static int v4l_g_output(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+
+ if (vfd->device_caps & V4L2_CAP_IO_MC) {
+ *(int *)arg = 0;
+ return 0;
+ }
+
+ return ops->vidioc_g_output(file, fh, arg);
+}
+
+static int v4l_s_input(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ int ret;
+
+ ret = v4l_enable_media_source(vfd);
+ if (ret)
+ return ret;
+
+ if (vfd->device_caps & V4L2_CAP_IO_MC)
+ return *(int *)arg ? -EINVAL : 0;
+
+ return ops->vidioc_s_input(file, fh, *(unsigned int *)arg);
+}
+
+static int v4l_s_output(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+
+ if (vfd->device_caps & V4L2_CAP_IO_MC)
+ return *(int *)arg ? -EINVAL : 0;
+
+ return ops->vidioc_s_output(file, fh, *(unsigned int *)arg);
+}
+
+static int v4l_g_priority(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd;
+ u32 *p = arg;
+
+ vfd = video_devdata(file);
+ *p = v4l2_prio_max(vfd->prio);
+ return 0;
+}
+
+static int v4l_s_priority(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd;
+ struct v4l2_fh *vfh;
+ u32 *p = arg;
+
+ vfd = video_devdata(file);
+ if (!test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags))
+ return -ENOTTY;
+ vfh = file->private_data;
+ return v4l2_prio_change(vfd->prio, &vfh->prio, *p);
+}
+
+static int v4l_enuminput(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_input *p = arg;
+
+	/*
+	 * We set the V4L2_IN_CAP_STD flag here based on the S_STD ioctl
+	 * handler provided by the driver. If the driver doesn't support
+	 * it for a specific input, it must override the flag.
+	 */
+ if (is_valid_ioctl(vfd, VIDIOC_S_STD))
+ p->capabilities |= V4L2_IN_CAP_STD;
+
+ if (vfd->device_caps & V4L2_CAP_IO_MC) {
+ if (p->index)
+ return -EINVAL;
+ strscpy(p->name, vfd->name, sizeof(p->name));
+ p->type = V4L2_INPUT_TYPE_CAMERA;
+ return 0;
+ }
+
+ return ops->vidioc_enum_input(file, fh, p);
+}
+
+static int v4l_enumoutput(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_output *p = arg;
+
+	/*
+	 * We set the V4L2_OUT_CAP_STD flag here based on the S_STD ioctl
+	 * handler provided by the driver. If the driver doesn't support
+	 * it for a specific output, it must override the flag.
+	 */
+ if (is_valid_ioctl(vfd, VIDIOC_S_STD))
+ p->capabilities |= V4L2_OUT_CAP_STD;
+
+ if (vfd->device_caps & V4L2_CAP_IO_MC) {
+ if (p->index)
+ return -EINVAL;
+ strscpy(p->name, vfd->name, sizeof(p->name));
+ p->type = V4L2_OUTPUT_TYPE_ANALOG;
+ return 0;
+ }
+
+ return ops->vidioc_enum_output(file, fh, p);
+}
+
+static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
+{
+	const unsigned int sz = sizeof(fmt->description);
+ const char *descr = NULL;
+ u32 flags = 0;
+
+ /*
+ * We depart from the normal coding style here since the descriptions
+ * should be aligned so it is easy to see which descriptions will be
+ * longer than 31 characters (the max length for a description).
+ * And frankly, this is easier to read anyway.
+ *
+ * Note that gcc will use O(log N) comparisons to find the right case.
+ */
+ switch (fmt->pixelformat) {
+ /* Max description length mask: descr = "0123456789012345678901234567890" */
+ case V4L2_PIX_FMT_RGB332: descr = "8-bit RGB 3-3-2"; break;
+ case V4L2_PIX_FMT_RGB444: descr = "16-bit A/XRGB 4-4-4-4"; break;
+ case V4L2_PIX_FMT_ARGB444: descr = "16-bit ARGB 4-4-4-4"; break;
+ case V4L2_PIX_FMT_XRGB444: descr = "16-bit XRGB 4-4-4-4"; break;
+ case V4L2_PIX_FMT_RGBA444: descr = "16-bit RGBA 4-4-4-4"; break;
+ case V4L2_PIX_FMT_RGBX444: descr = "16-bit RGBX 4-4-4-4"; break;
+ case V4L2_PIX_FMT_ABGR444: descr = "16-bit ABGR 4-4-4-4"; break;
+ case V4L2_PIX_FMT_XBGR444: descr = "16-bit XBGR 4-4-4-4"; break;
+ case V4L2_PIX_FMT_BGRA444: descr = "16-bit BGRA 4-4-4-4"; break;
+ case V4L2_PIX_FMT_BGRX444: descr = "16-bit BGRX 4-4-4-4"; break;
+ case V4L2_PIX_FMT_RGB555: descr = "16-bit A/XRGB 1-5-5-5"; break;
+ case V4L2_PIX_FMT_ARGB555: descr = "16-bit ARGB 1-5-5-5"; break;
+ case V4L2_PIX_FMT_XRGB555: descr = "16-bit XRGB 1-5-5-5"; break;
+ case V4L2_PIX_FMT_ABGR555: descr = "16-bit ABGR 1-5-5-5"; break;
+ case V4L2_PIX_FMT_XBGR555: descr = "16-bit XBGR 1-5-5-5"; break;
+ case V4L2_PIX_FMT_RGBA555: descr = "16-bit RGBA 5-5-5-1"; break;
+ case V4L2_PIX_FMT_RGBX555: descr = "16-bit RGBX 5-5-5-1"; break;
+ case V4L2_PIX_FMT_BGRA555: descr = "16-bit BGRA 5-5-5-1"; break;
+ case V4L2_PIX_FMT_BGRX555: descr = "16-bit BGRX 5-5-5-1"; break;
+ case V4L2_PIX_FMT_RGB565: descr = "16-bit RGB 5-6-5"; break;
+ case V4L2_PIX_FMT_RGB555X: descr = "16-bit A/XRGB 1-5-5-5 BE"; break;
+ case V4L2_PIX_FMT_ARGB555X: descr = "16-bit ARGB 1-5-5-5 BE"; break;
+ case V4L2_PIX_FMT_XRGB555X: descr = "16-bit XRGB 1-5-5-5 BE"; break;
+ case V4L2_PIX_FMT_RGB565X: descr = "16-bit RGB 5-6-5 BE"; break;
+ case V4L2_PIX_FMT_BGR666: descr = "18-bit BGRX 6-6-6-14"; break;
+ case V4L2_PIX_FMT_BGR24: descr = "24-bit BGR 8-8-8"; break;
+ case V4L2_PIX_FMT_RGB24: descr = "24-bit RGB 8-8-8"; break;
+ case V4L2_PIX_FMT_BGR32: descr = "32-bit BGRA/X 8-8-8-8"; break;
+ case V4L2_PIX_FMT_ABGR32: descr = "32-bit BGRA 8-8-8-8"; break;
+ case V4L2_PIX_FMT_XBGR32: descr = "32-bit BGRX 8-8-8-8"; break;
+ case V4L2_PIX_FMT_RGB32: descr = "32-bit A/XRGB 8-8-8-8"; break;
+ case V4L2_PIX_FMT_ARGB32: descr = "32-bit ARGB 8-8-8-8"; break;
+ case V4L2_PIX_FMT_XRGB32: descr = "32-bit XRGB 8-8-8-8"; break;
+ case V4L2_PIX_FMT_BGRA32: descr = "32-bit ABGR 8-8-8-8"; break;
+ case V4L2_PIX_FMT_BGRX32: descr = "32-bit XBGR 8-8-8-8"; break;
+ case V4L2_PIX_FMT_RGBA32: descr = "32-bit RGBA 8-8-8-8"; break;
+ case V4L2_PIX_FMT_RGBX32: descr = "32-bit RGBX 8-8-8-8"; break;
+ case V4L2_PIX_FMT_RGBX1010102: descr = "32-bit RGBX 10-10-10-2"; break;
+ case V4L2_PIX_FMT_RGBA1010102: descr = "32-bit RGBA 10-10-10-2"; break;
+ case V4L2_PIX_FMT_ARGB2101010: descr = "32-bit ARGB 2-10-10-10"; break;
+ case V4L2_PIX_FMT_BGR48_12: descr = "12-bit Depth BGR"; break;
+ case V4L2_PIX_FMT_ABGR64_12: descr = "12-bit Depth BGRA"; break;
+ case V4L2_PIX_FMT_GREY: descr = "8-bit Greyscale"; break;
+ case V4L2_PIX_FMT_Y4: descr = "4-bit Greyscale"; break;
+ case V4L2_PIX_FMT_Y6: descr = "6-bit Greyscale"; break;
+ case V4L2_PIX_FMT_Y10: descr = "10-bit Greyscale"; break;
+ case V4L2_PIX_FMT_Y12: descr = "12-bit Greyscale"; break;
+ case V4L2_PIX_FMT_Y012: descr = "12-bit Greyscale (bits 15-4)"; break;
+ case V4L2_PIX_FMT_Y14: descr = "14-bit Greyscale"; break;
+ case V4L2_PIX_FMT_Y16: descr = "16-bit Greyscale"; break;
+ case V4L2_PIX_FMT_Y16_BE: descr = "16-bit Greyscale BE"; break;
+ case V4L2_PIX_FMT_Y10BPACK: descr = "10-bit Greyscale (Packed)"; break;
+ case V4L2_PIX_FMT_Y10P: descr = "10-bit Greyscale (MIPI Packed)"; break;
+	case V4L2_PIX_FMT_IPU3_Y10:	descr = "10-bit Greyscale (IPU3 Packed)"; break;
+ case V4L2_PIX_FMT_Y8I: descr = "Interleaved 8-bit Greyscale"; break;
+ case V4L2_PIX_FMT_Y12I: descr = "Interleaved 12-bit Greyscale"; break;
+ case V4L2_PIX_FMT_Z16: descr = "16-bit Depth"; break;
+ case V4L2_PIX_FMT_INZI: descr = "Planar 10:16 Greyscale Depth"; break;
+ case V4L2_PIX_FMT_CNF4: descr = "4-bit Depth Confidence (Packed)"; break;
+ case V4L2_PIX_FMT_PAL8: descr = "8-bit Palette"; break;
+ case V4L2_PIX_FMT_UV8: descr = "8-bit Chrominance UV 4-4"; break;
+ case V4L2_PIX_FMT_YVU410: descr = "Planar YVU 4:1:0"; break;
+ case V4L2_PIX_FMT_YVU420: descr = "Planar YVU 4:2:0"; break;
+ case V4L2_PIX_FMT_YUYV: descr = "YUYV 4:2:2"; break;
+ case V4L2_PIX_FMT_YYUV: descr = "YYUV 4:2:2"; break;
+ case V4L2_PIX_FMT_YVYU: descr = "YVYU 4:2:2"; break;
+ case V4L2_PIX_FMT_UYVY: descr = "UYVY 4:2:2"; break;
+ case V4L2_PIX_FMT_VYUY: descr = "VYUY 4:2:2"; break;
+ case V4L2_PIX_FMT_YUV422P: descr = "Planar YUV 4:2:2"; break;
+ case V4L2_PIX_FMT_YUV411P: descr = "Planar YUV 4:1:1"; break;
+ case V4L2_PIX_FMT_Y41P: descr = "YUV 4:1:1 (Packed)"; break;
+ case V4L2_PIX_FMT_YUV444: descr = "16-bit A/XYUV 4-4-4-4"; break;
+ case V4L2_PIX_FMT_YUV555: descr = "16-bit A/XYUV 1-5-5-5"; break;
+ case V4L2_PIX_FMT_YUV565: descr = "16-bit YUV 5-6-5"; break;
+ case V4L2_PIX_FMT_YUV24: descr = "24-bit YUV 4:4:4 8-8-8"; break;
+ case V4L2_PIX_FMT_YUV32: descr = "32-bit A/XYUV 8-8-8-8"; break;
+ case V4L2_PIX_FMT_AYUV32: descr = "32-bit AYUV 8-8-8-8"; break;
+ case V4L2_PIX_FMT_XYUV32: descr = "32-bit XYUV 8-8-8-8"; break;
+ case V4L2_PIX_FMT_VUYA32: descr = "32-bit VUYA 8-8-8-8"; break;
+ case V4L2_PIX_FMT_VUYX32: descr = "32-bit VUYX 8-8-8-8"; break;
+ case V4L2_PIX_FMT_YUVA32: descr = "32-bit YUVA 8-8-8-8"; break;
+ case V4L2_PIX_FMT_YUVX32: descr = "32-bit YUVX 8-8-8-8"; break;
+ case V4L2_PIX_FMT_YUV410: descr = "Planar YUV 4:1:0"; break;
+ case V4L2_PIX_FMT_YUV420: descr = "Planar YUV 4:2:0"; break;
+ case V4L2_PIX_FMT_HI240: descr = "8-bit Dithered RGB (BTTV)"; break;
+ case V4L2_PIX_FMT_M420: descr = "YUV 4:2:0 (M420)"; break;
+ case V4L2_PIX_FMT_YUV48_12: descr = "12-bit YUV 4:4:4 Packed"; break;
+ case V4L2_PIX_FMT_NV12: descr = "Y/UV 4:2:0"; break;
+ case V4L2_PIX_FMT_NV21: descr = "Y/VU 4:2:0"; break;
+ case V4L2_PIX_FMT_NV16: descr = "Y/UV 4:2:2"; break;
+ case V4L2_PIX_FMT_NV61: descr = "Y/VU 4:2:2"; break;
+ case V4L2_PIX_FMT_NV24: descr = "Y/UV 4:4:4"; break;
+ case V4L2_PIX_FMT_NV42: descr = "Y/VU 4:4:4"; break;
+ case V4L2_PIX_FMT_P010: descr = "10-bit Y/UV 4:2:0"; break;
+ case V4L2_PIX_FMT_P012: descr = "12-bit Y/UV 4:2:0"; break;
+ case V4L2_PIX_FMT_NV12_4L4: descr = "Y/UV 4:2:0 (4x4 Linear)"; break;
+ case V4L2_PIX_FMT_NV12_16L16: descr = "Y/UV 4:2:0 (16x16 Linear)"; break;
+ case V4L2_PIX_FMT_NV12_32L32: descr = "Y/UV 4:2:0 (32x32 Linear)"; break;
+ case V4L2_PIX_FMT_NV15_4L4: descr = "10-bit Y/UV 4:2:0 (4x4 Linear)"; break;
+ case V4L2_PIX_FMT_P010_4L4: descr = "10-bit Y/UV 4:2:0 (4x4 Linear)"; break;
+ case V4L2_PIX_FMT_NV12M: descr = "Y/UV 4:2:0 (N-C)"; break;
+ case V4L2_PIX_FMT_NV21M: descr = "Y/VU 4:2:0 (N-C)"; break;
+ case V4L2_PIX_FMT_NV16M: descr = "Y/UV 4:2:2 (N-C)"; break;
+ case V4L2_PIX_FMT_NV61M: descr = "Y/VU 4:2:2 (N-C)"; break;
+ case V4L2_PIX_FMT_NV12MT: descr = "Y/UV 4:2:0 (64x32 MB, N-C)"; break;
+ case V4L2_PIX_FMT_NV12MT_16X16: descr = "Y/UV 4:2:0 (16x16 MB, N-C)"; break;
+ case V4L2_PIX_FMT_P012M: descr = "12-bit Y/UV 4:2:0 (N-C)"; break;
+ case V4L2_PIX_FMT_YUV420M: descr = "Planar YUV 4:2:0 (N-C)"; break;
+ case V4L2_PIX_FMT_YVU420M: descr = "Planar YVU 4:2:0 (N-C)"; break;
+ case V4L2_PIX_FMT_YUV422M: descr = "Planar YUV 4:2:2 (N-C)"; break;
+ case V4L2_PIX_FMT_YVU422M: descr = "Planar YVU 4:2:2 (N-C)"; break;
+ case V4L2_PIX_FMT_YUV444M: descr = "Planar YUV 4:4:4 (N-C)"; break;
+ case V4L2_PIX_FMT_YVU444M: descr = "Planar YVU 4:4:4 (N-C)"; break;
+ case V4L2_PIX_FMT_SBGGR8: descr = "8-bit Bayer BGBG/GRGR"; break;
+ case V4L2_PIX_FMT_SGBRG8: descr = "8-bit Bayer GBGB/RGRG"; break;
+ case V4L2_PIX_FMT_SGRBG8: descr = "8-bit Bayer GRGR/BGBG"; break;
+ case V4L2_PIX_FMT_SRGGB8: descr = "8-bit Bayer RGRG/GBGB"; break;
+ case V4L2_PIX_FMT_SBGGR10: descr = "10-bit Bayer BGBG/GRGR"; break;
+ case V4L2_PIX_FMT_SGBRG10: descr = "10-bit Bayer GBGB/RGRG"; break;
+ case V4L2_PIX_FMT_SGRBG10: descr = "10-bit Bayer GRGR/BGBG"; break;
+ case V4L2_PIX_FMT_SRGGB10: descr = "10-bit Bayer RGRG/GBGB"; break;
+ case V4L2_PIX_FMT_SBGGR10P: descr = "10-bit Bayer BGBG/GRGR Packed"; break;
+ case V4L2_PIX_FMT_SGBRG10P: descr = "10-bit Bayer GBGB/RGRG Packed"; break;
+ case V4L2_PIX_FMT_SGRBG10P: descr = "10-bit Bayer GRGR/BGBG Packed"; break;
+ case V4L2_PIX_FMT_SRGGB10P: descr = "10-bit Bayer RGRG/GBGB Packed"; break;
+	case V4L2_PIX_FMT_IPU3_SBGGR10:	descr = "10-bit Bayer BGGR IPU3 Packed"; break;
+	case V4L2_PIX_FMT_IPU3_SGBRG10:	descr = "10-bit Bayer GBRG IPU3 Packed"; break;
+	case V4L2_PIX_FMT_IPU3_SGRBG10:	descr = "10-bit Bayer GRBG IPU3 Packed"; break;
+	case V4L2_PIX_FMT_IPU3_SRGGB10:	descr = "10-bit Bayer RGGB IPU3 Packed"; break;
+ case V4L2_PIX_FMT_SBGGR10ALAW8: descr = "8-bit Bayer BGBG/GRGR (A-law)"; break;
+ case V4L2_PIX_FMT_SGBRG10ALAW8: descr = "8-bit Bayer GBGB/RGRG (A-law)"; break;
+ case V4L2_PIX_FMT_SGRBG10ALAW8: descr = "8-bit Bayer GRGR/BGBG (A-law)"; break;
+ case V4L2_PIX_FMT_SRGGB10ALAW8: descr = "8-bit Bayer RGRG/GBGB (A-law)"; break;
+ case V4L2_PIX_FMT_SBGGR10DPCM8: descr = "8-bit Bayer BGBG/GRGR (DPCM)"; break;
+ case V4L2_PIX_FMT_SGBRG10DPCM8: descr = "8-bit Bayer GBGB/RGRG (DPCM)"; break;
+ case V4L2_PIX_FMT_SGRBG10DPCM8: descr = "8-bit Bayer GRGR/BGBG (DPCM)"; break;
+ case V4L2_PIX_FMT_SRGGB10DPCM8: descr = "8-bit Bayer RGRG/GBGB (DPCM)"; break;
+ case V4L2_PIX_FMT_SBGGR12: descr = "12-bit Bayer BGBG/GRGR"; break;
+ case V4L2_PIX_FMT_SGBRG12: descr = "12-bit Bayer GBGB/RGRG"; break;
+ case V4L2_PIX_FMT_SGRBG12: descr = "12-bit Bayer GRGR/BGBG"; break;
+ case V4L2_PIX_FMT_SRGGB12: descr = "12-bit Bayer RGRG/GBGB"; break;
+ case V4L2_PIX_FMT_SBGGR12P: descr = "12-bit Bayer BGBG/GRGR Packed"; break;
+ case V4L2_PIX_FMT_SGBRG12P: descr = "12-bit Bayer GBGB/RGRG Packed"; break;
+ case V4L2_PIX_FMT_SGRBG12P: descr = "12-bit Bayer GRGR/BGBG Packed"; break;
+ case V4L2_PIX_FMT_SRGGB12P: descr = "12-bit Bayer RGRG/GBGB Packed"; break;
+ case V4L2_PIX_FMT_SBGGR14: descr = "14-bit Bayer BGBG/GRGR"; break;
+ case V4L2_PIX_FMT_SGBRG14: descr = "14-bit Bayer GBGB/RGRG"; break;
+ case V4L2_PIX_FMT_SGRBG14: descr = "14-bit Bayer GRGR/BGBG"; break;
+ case V4L2_PIX_FMT_SRGGB14: descr = "14-bit Bayer RGRG/GBGB"; break;
+ case V4L2_PIX_FMT_SBGGR14P: descr = "14-bit Bayer BGBG/GRGR Packed"; break;
+ case V4L2_PIX_FMT_SGBRG14P: descr = "14-bit Bayer GBGB/RGRG Packed"; break;
+ case V4L2_PIX_FMT_SGRBG14P: descr = "14-bit Bayer GRGR/BGBG Packed"; break;
+ case V4L2_PIX_FMT_SRGGB14P: descr = "14-bit Bayer RGRG/GBGB Packed"; break;
+ case V4L2_PIX_FMT_SBGGR16: descr = "16-bit Bayer BGBG/GRGR"; break;
+ case V4L2_PIX_FMT_SGBRG16: descr = "16-bit Bayer GBGB/RGRG"; break;
+ case V4L2_PIX_FMT_SGRBG16: descr = "16-bit Bayer GRGR/BGBG"; break;
+ case V4L2_PIX_FMT_SRGGB16: descr = "16-bit Bayer RGRG/GBGB"; break;
+ case V4L2_PIX_FMT_SN9C20X_I420: descr = "GSPCA SN9C20X I420"; break;
+ case V4L2_PIX_FMT_SPCA501: descr = "GSPCA SPCA501"; break;
+ case V4L2_PIX_FMT_SPCA505: descr = "GSPCA SPCA505"; break;
+ case V4L2_PIX_FMT_SPCA508: descr = "GSPCA SPCA508"; break;
+ case V4L2_PIX_FMT_STV0680: descr = "GSPCA STV0680"; break;
+ case V4L2_PIX_FMT_TM6000: descr = "A/V + VBI Mux Packet"; break;
+ case V4L2_PIX_FMT_CIT_YYVYUY: descr = "GSPCA CIT YYVYUY"; break;
+ case V4L2_PIX_FMT_KONICA420: descr = "GSPCA KONICA420"; break;
+ case V4L2_PIX_FMT_MM21: descr = "Mediatek 8-bit Block Format"; break;
+ case V4L2_PIX_FMT_HSV24: descr = "24-bit HSV 8-8-8"; break;
+ case V4L2_PIX_FMT_HSV32: descr = "32-bit XHSV 8-8-8-8"; break;
+ case V4L2_SDR_FMT_CU8: descr = "Complex U8"; break;
+ case V4L2_SDR_FMT_CU16LE: descr = "Complex U16LE"; break;
+ case V4L2_SDR_FMT_CS8: descr = "Complex S8"; break;
+ case V4L2_SDR_FMT_CS14LE: descr = "Complex S14LE"; break;
+ case V4L2_SDR_FMT_RU12LE: descr = "Real U12LE"; break;
+ case V4L2_SDR_FMT_PCU16BE: descr = "Planar Complex U16BE"; break;
+ case V4L2_SDR_FMT_PCU18BE: descr = "Planar Complex U18BE"; break;
+ case V4L2_SDR_FMT_PCU20BE: descr = "Planar Complex U20BE"; break;
+ case V4L2_TCH_FMT_DELTA_TD16: descr = "16-bit Signed Deltas"; break;
+ case V4L2_TCH_FMT_DELTA_TD08: descr = "8-bit Signed Deltas"; break;
+ case V4L2_TCH_FMT_TU16: descr = "16-bit Unsigned Touch Data"; break;
+ case V4L2_TCH_FMT_TU08: descr = "8-bit Unsigned Touch Data"; break;
+ case V4L2_META_FMT_VSP1_HGO: descr = "R-Car VSP1 1-D Histogram"; break;
+ case V4L2_META_FMT_VSP1_HGT: descr = "R-Car VSP1 2-D Histogram"; break;
+ case V4L2_META_FMT_UVC: descr = "UVC Payload Header Metadata"; break;
+ case V4L2_META_FMT_D4XX: descr = "Intel D4xx UVC Metadata"; break;
+ case V4L2_META_FMT_VIVID: descr = "Vivid Metadata"; break;
+ case V4L2_META_FMT_RK_ISP1_PARAMS: descr = "Rockchip ISP1 3A Parameters"; break;
+ case V4L2_META_FMT_RK_ISP1_STAT_3A: descr = "Rockchip ISP1 3A Statistics"; break;
+ case V4L2_PIX_FMT_NV12_8L128: descr = "NV12 (8x128 Linear)"; break;
+ case V4L2_PIX_FMT_NV12M_8L128: descr = "NV12M (8x128 Linear)"; break;
+ case V4L2_PIX_FMT_NV12_10BE_8L128: descr = "10-bit NV12 (8x128 Linear, BE)"; break;
+ case V4L2_PIX_FMT_NV12M_10BE_8L128: descr = "10-bit NV12M (8x128 Linear, BE)"; break;
+ case V4L2_PIX_FMT_Y210: descr = "10-bit YUYV Packed"; break;
+ case V4L2_PIX_FMT_Y212: descr = "12-bit YUYV Packed"; break;
+ case V4L2_PIX_FMT_Y216: descr = "16-bit YUYV Packed"; break;
+
+ default:
+ /* Compressed formats */
+ flags = V4L2_FMT_FLAG_COMPRESSED;
+ switch (fmt->pixelformat) {
+ /* Max description length mask: descr = "0123456789012345678901234567890" */
+ case V4L2_PIX_FMT_MJPEG: descr = "Motion-JPEG"; break;
+ case V4L2_PIX_FMT_JPEG: descr = "JFIF JPEG"; break;
+ case V4L2_PIX_FMT_DV: descr = "1394"; break;
+ case V4L2_PIX_FMT_MPEG: descr = "MPEG-1/2/4"; break;
+ case V4L2_PIX_FMT_H264: descr = "H.264"; break;
+ case V4L2_PIX_FMT_H264_NO_SC: descr = "H.264 (No Start Codes)"; break;
+ case V4L2_PIX_FMT_H264_MVC: descr = "H.264 MVC"; break;
+ case V4L2_PIX_FMT_H264_SLICE: descr = "H.264 Parsed Slice Data"; break;
+ case V4L2_PIX_FMT_H263: descr = "H.263"; break;
+ case V4L2_PIX_FMT_MPEG1: descr = "MPEG-1 ES"; break;
+ case V4L2_PIX_FMT_MPEG2: descr = "MPEG-2 ES"; break;
+ case V4L2_PIX_FMT_MPEG2_SLICE: descr = "MPEG-2 Parsed Slice Data"; break;
+ case V4L2_PIX_FMT_MPEG4: descr = "MPEG-4 Part 2 ES"; break;
+ case V4L2_PIX_FMT_XVID: descr = "Xvid"; break;
+		case V4L2_PIX_FMT_VC1_ANNEX_G:	descr = "VC-1 (SMPTE 421M Annex G)"; break;
+		case V4L2_PIX_FMT_VC1_ANNEX_L:	descr = "VC-1 (SMPTE 421M Annex L)"; break;
+ case V4L2_PIX_FMT_VP8: descr = "VP8"; break;
+ case V4L2_PIX_FMT_VP8_FRAME: descr = "VP8 Frame"; break;
+ case V4L2_PIX_FMT_VP9: descr = "VP9"; break;
+ case V4L2_PIX_FMT_VP9_FRAME: descr = "VP9 Frame"; break;
+ case V4L2_PIX_FMT_HEVC: descr = "HEVC"; break; /* aka H.265 */
+ case V4L2_PIX_FMT_HEVC_SLICE: descr = "HEVC Parsed Slice Data"; break;
+ case V4L2_PIX_FMT_FWHT: descr = "FWHT"; break; /* used in vicodec */
+ case V4L2_PIX_FMT_FWHT_STATELESS: descr = "FWHT Stateless"; break; /* used in vicodec */
+ case V4L2_PIX_FMT_SPK: descr = "Sorenson Spark"; break;
+ case V4L2_PIX_FMT_RV30: descr = "RealVideo 8"; break;
+ case V4L2_PIX_FMT_RV40: descr = "RealVideo 9 & 10"; break;
+ case V4L2_PIX_FMT_CPIA1: descr = "GSPCA CPiA YUV"; break;
+ case V4L2_PIX_FMT_WNVA: descr = "WNVA"; break;
+ case V4L2_PIX_FMT_SN9C10X: descr = "GSPCA SN9C10X"; break;
+ case V4L2_PIX_FMT_PWC1: descr = "Raw Philips Webcam Type (Old)"; break;
+ case V4L2_PIX_FMT_PWC2: descr = "Raw Philips Webcam Type (New)"; break;
+ case V4L2_PIX_FMT_ET61X251: descr = "GSPCA ET61X251"; break;
+ case V4L2_PIX_FMT_SPCA561: descr = "GSPCA SPCA561"; break;
+ case V4L2_PIX_FMT_PAC207: descr = "GSPCA PAC207"; break;
+ case V4L2_PIX_FMT_MR97310A: descr = "GSPCA MR97310A"; break;
+ case V4L2_PIX_FMT_JL2005BCD: descr = "GSPCA JL2005BCD"; break;
+ case V4L2_PIX_FMT_SN9C2028: descr = "GSPCA SN9C2028"; break;
+ case V4L2_PIX_FMT_SQ905C: descr = "GSPCA SQ905C"; break;
+ case V4L2_PIX_FMT_PJPG: descr = "GSPCA PJPG"; break;
+ case V4L2_PIX_FMT_OV511: descr = "GSPCA OV511"; break;
+ case V4L2_PIX_FMT_OV518: descr = "GSPCA OV518"; break;
+ case V4L2_PIX_FMT_JPGL: descr = "JPEG Lite"; break;
+ case V4L2_PIX_FMT_SE401: descr = "GSPCA SE401"; break;
+		case V4L2_PIX_FMT_S5C_UYVY_JPG:	descr = "S5C73MX Interleaved UYVY/JPEG"; break;
+ case V4L2_PIX_FMT_MT21C: descr = "Mediatek Compressed Format"; break;
+ case V4L2_PIX_FMT_QC08C: descr = "QCOM Compressed 8-bit Format"; break;
+ case V4L2_PIX_FMT_QC10C: descr = "QCOM Compressed 10-bit Format"; break;
+ case V4L2_PIX_FMT_AJPG: descr = "Aspeed JPEG"; break;
+ case V4L2_PIX_FMT_AV1_FRAME: descr = "AV1 Frame"; break;
+		case V4L2_PIX_FMT_MT2110T:	descr = "Mediatek 10-bit Tile Mode"; break;
+		case V4L2_PIX_FMT_MT2110R:	descr = "Mediatek 10-bit Raster Mode"; break;
+ default:
+ if (fmt->description[0])
+ return;
+ WARN(1, "Unknown pixelformat 0x%08x\n", fmt->pixelformat);
+ flags = 0;
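+			/* Fall back to a description built from the fourcc. */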
+ snprintf(fmt->description, sz, "%p4cc",
+ &fmt->pixelformat);
+ break;
+ }
+ }
+
+ if (descr)
+ WARN_ON(strscpy(fmt->description, descr, sz) < 0);
+ fmt->flags |= flags;
+}
+
+static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_fmtdesc *p = arg;
+ int ret = check_fmt(file, p->type);
+ u32 mbus_code;
+ u32 cap_mask;
+
+ if (ret)
+ return ret;
+ ret = -EINVAL;
+
+ if (!(vdev->device_caps & V4L2_CAP_IO_MC))
+ p->mbus_code = 0;
+
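+	/* memset_after() below would also clear mbus_code, so preserve it. */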
+ mbus_code = p->mbus_code;
+ memset_after(p, 0, type);
+ p->mbus_code = mbus_code;
+
+ switch (p->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ cap_mask = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+ V4L2_CAP_VIDEO_M2M_MPLANE;
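+		/*
+		 * The _MPLANE buffer type must be used if and only if the
+		 * device advertises a multi-planar capability; both types
+		 * map to the same enum_fmt handler.
+		 */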
+ if (!!(vdev->device_caps & cap_mask) !=
+ (p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE))
+ break;
+
+ if (unlikely(!ops->vidioc_enum_fmt_vid_cap))
+ break;
+ ret = ops->vidioc_enum_fmt_vid_cap(file, fh, arg);
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
+ if (unlikely(!ops->vidioc_enum_fmt_vid_overlay))
+ break;
+ ret = ops->vidioc_enum_fmt_vid_overlay(file, fh, arg);
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ cap_mask = V4L2_CAP_VIDEO_OUTPUT_MPLANE |
+ V4L2_CAP_VIDEO_M2M_MPLANE;
+ if (!!(vdev->device_caps & cap_mask) !=
+ (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE))
+ break;
+
+ if (unlikely(!ops->vidioc_enum_fmt_vid_out))
+ break;
+ ret = ops->vidioc_enum_fmt_vid_out(file, fh, arg);
+ break;
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ if (unlikely(!ops->vidioc_enum_fmt_sdr_cap))
+ break;
+ ret = ops->vidioc_enum_fmt_sdr_cap(file, fh, arg);
+ break;
+ case V4L2_BUF_TYPE_SDR_OUTPUT:
+ if (unlikely(!ops->vidioc_enum_fmt_sdr_out))
+ break;
+ ret = ops->vidioc_enum_fmt_sdr_out(file, fh, arg);
+ break;
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ if (unlikely(!ops->vidioc_enum_fmt_meta_cap))
+ break;
+ ret = ops->vidioc_enum_fmt_meta_cap(file, fh, arg);
+ break;
+ case V4L2_BUF_TYPE_META_OUTPUT:
+ if (unlikely(!ops->vidioc_enum_fmt_meta_out))
+ break;
+ ret = ops->vidioc_enum_fmt_meta_out(file, fh, arg);
+ break;
+ }
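+	/* On success, fill in a canonical description string and flags. */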
+ if (ret == 0)
+ v4l_fill_fmtdesc(p);
+ return ret;
+}
+
+static void v4l_pix_format_touch(struct v4l2_pix_format *p)
+{
+ /*
+ * The v4l2_pix_format structure contains fields that make no sense for
+ * touch. Set them to default values in this case.
+ */
+
+ p->field = V4L2_FIELD_NONE;
+ p->colorspace = V4L2_COLORSPACE_RAW;
+ p->flags = 0;
+ p->ycbcr_enc = 0;
+ p->quantization = 0;
+ p->xfer_func = 0;
+}
+
+static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_format *p = arg;
+ struct video_device *vfd = video_devdata(file);
+ int ret = check_fmt(file, p->type);
+
+ if (ret)
+ return ret;
+
+ memset(&p->fmt, 0, sizeof(p->fmt));
+
+ switch (p->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ if (unlikely(!ops->vidioc_g_fmt_vid_cap))
+ break;
+ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+ ret = ops->vidioc_g_fmt_vid_cap(file, fh, arg);
+ /* just in case the driver zeroed it again */
+ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+ if (vfd->vfl_type == VFL_TYPE_TOUCH)
+ v4l_pix_format_touch(&p->fmt.pix);
+ return ret;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ return ops->vidioc_g_fmt_vid_cap_mplane(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
+ return ops->vidioc_g_fmt_vid_overlay(file, fh, arg);
+ case V4L2_BUF_TYPE_VBI_CAPTURE:
+ return ops->vidioc_g_fmt_vbi_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
+ return ops->vidioc_g_fmt_sliced_vbi_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ if (unlikely(!ops->vidioc_g_fmt_vid_out))
+ break;
+ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+ ret = ops->vidioc_g_fmt_vid_out(file, fh, arg);
+ /* just in case the driver zeroed it again */
+ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+ return ret;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ return ops->vidioc_g_fmt_vid_out_mplane(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
+ return ops->vidioc_g_fmt_vid_out_overlay(file, fh, arg);
+ case V4L2_BUF_TYPE_VBI_OUTPUT:
+ return ops->vidioc_g_fmt_vbi_out(file, fh, arg);
+ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
+ return ops->vidioc_g_fmt_sliced_vbi_out(file, fh, arg);
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ return ops->vidioc_g_fmt_sdr_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_SDR_OUTPUT:
+ return ops->vidioc_g_fmt_sdr_out(file, fh, arg);
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ return ops->vidioc_g_fmt_meta_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_META_OUTPUT:
+ return ops->vidioc_g_fmt_meta_out(file, fh, arg);
+ }
+ return -EINVAL;
+}
+
+static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_format *p = arg;
+ struct video_device *vfd = video_devdata(file);
+ int ret = check_fmt(file, p->type);
+ unsigned int i;
+
+ if (ret)
+ return ret;
+
+ ret = v4l_enable_media_source(vfd);
+ if (ret)
+ return ret;
+ v4l_sanitize_format(p);
+
+ switch (p->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ if (unlikely(!ops->vidioc_s_fmt_vid_cap))
+ break;
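+		/*
+		 * Zero the tail of the format union beyond the active member
+		 * so the driver never sees stale userspace data.
+		 */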
+ memset_after(p, 0, fmt.pix);
+ ret = ops->vidioc_s_fmt_vid_cap(file, fh, arg);
+ /* just in case the driver zeroed it again */
+ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+ if (vfd->vfl_type == VFL_TYPE_TOUCH)
+ v4l_pix_format_touch(&p->fmt.pix);
+ return ret;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ if (unlikely(!ops->vidioc_s_fmt_vid_cap_mplane))
+ break;
+ memset_after(p, 0, fmt.pix_mp.xfer_func);
+ for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
+ memset_after(&p->fmt.pix_mp.plane_fmt[i],
+ 0, bytesperline);
+ return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
+ if (unlikely(!ops->vidioc_s_fmt_vid_overlay))
+ break;
+ memset_after(p, 0, fmt.win);
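+		/* Userspace clip lists and bitmaps are not passed through. */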
+ p->fmt.win.clips = NULL;
+ p->fmt.win.clipcount = 0;
+ p->fmt.win.bitmap = NULL;
+ return ops->vidioc_s_fmt_vid_overlay(file, fh, arg);
+ case V4L2_BUF_TYPE_VBI_CAPTURE:
+ if (unlikely(!ops->vidioc_s_fmt_vbi_cap))
+ break;
+ memset_after(p, 0, fmt.vbi.flags);
+ return ops->vidioc_s_fmt_vbi_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
+ if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_cap))
+ break;
+ memset_after(p, 0, fmt.sliced.io_size);
+ return ops->vidioc_s_fmt_sliced_vbi_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ if (unlikely(!ops->vidioc_s_fmt_vid_out))
+ break;
+ memset_after(p, 0, fmt.pix);
+ ret = ops->vidioc_s_fmt_vid_out(file, fh, arg);
+ /* just in case the driver zeroed it again */
+ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+ return ret;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ if (unlikely(!ops->vidioc_s_fmt_vid_out_mplane))
+ break;
+ memset_after(p, 0, fmt.pix_mp.xfer_func);
+ for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
+ memset_after(&p->fmt.pix_mp.plane_fmt[i],
+ 0, bytesperline);
+ return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
+ if (unlikely(!ops->vidioc_s_fmt_vid_out_overlay))
+ break;
+ memset_after(p, 0, fmt.win);
+ p->fmt.win.clips = NULL;
+ p->fmt.win.clipcount = 0;
+ p->fmt.win.bitmap = NULL;
+ return ops->vidioc_s_fmt_vid_out_overlay(file, fh, arg);
+ case V4L2_BUF_TYPE_VBI_OUTPUT:
+ if (unlikely(!ops->vidioc_s_fmt_vbi_out))
+ break;
+ memset_after(p, 0, fmt.vbi.flags);
+ return ops->vidioc_s_fmt_vbi_out(file, fh, arg);
+ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
+ if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_out))
+ break;
+ memset_after(p, 0, fmt.sliced.io_size);
+ return ops->vidioc_s_fmt_sliced_vbi_out(file, fh, arg);
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ if (unlikely(!ops->vidioc_s_fmt_sdr_cap))
+ break;
+ memset_after(p, 0, fmt.sdr.buffersize);
+ return ops->vidioc_s_fmt_sdr_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_SDR_OUTPUT:
+ if (unlikely(!ops->vidioc_s_fmt_sdr_out))
+ break;
+ memset_after(p, 0, fmt.sdr.buffersize);
+ return ops->vidioc_s_fmt_sdr_out(file, fh, arg);
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ if (unlikely(!ops->vidioc_s_fmt_meta_cap))
+ break;
+ memset_after(p, 0, fmt.meta);
+ return ops->vidioc_s_fmt_meta_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_META_OUTPUT:
+ if (unlikely(!ops->vidioc_s_fmt_meta_out))
+ break;
+ memset_after(p, 0, fmt.meta);
+ return ops->vidioc_s_fmt_meta_out(file, fh, arg);
+ }
+ return -EINVAL;
+}
+
+static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_format *p = arg;
+ struct video_device *vfd = video_devdata(file);
+ int ret = check_fmt(file, p->type);
+ unsigned int i;
+
+ if (ret)
+ return ret;
+
+ v4l_sanitize_format(p);
+
+ switch (p->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ if (unlikely(!ops->vidioc_try_fmt_vid_cap))
+ break;
+ memset_after(p, 0, fmt.pix);
+ ret = ops->vidioc_try_fmt_vid_cap(file, fh, arg);
+ /* just in case the driver zeroed it again */
+ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+ if (vfd->vfl_type == VFL_TYPE_TOUCH)
+ v4l_pix_format_touch(&p->fmt.pix);
+ return ret;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ if (unlikely(!ops->vidioc_try_fmt_vid_cap_mplane))
+ break;
+ memset_after(p, 0, fmt.pix_mp.xfer_func);
+ for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
+ memset_after(&p->fmt.pix_mp.plane_fmt[i],
+ 0, bytesperline);
+ return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
+ if (unlikely(!ops->vidioc_try_fmt_vid_overlay))
+ break;
+ memset_after(p, 0, fmt.win);
+ p->fmt.win.clips = NULL;
+ p->fmt.win.clipcount = 0;
+ p->fmt.win.bitmap = NULL;
+ return ops->vidioc_try_fmt_vid_overlay(file, fh, arg);
+ case V4L2_BUF_TYPE_VBI_CAPTURE:
+ if (unlikely(!ops->vidioc_try_fmt_vbi_cap))
+ break;
+ memset_after(p, 0, fmt.vbi.flags);
+ return ops->vidioc_try_fmt_vbi_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
+ if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_cap))
+ break;
+ memset_after(p, 0, fmt.sliced.io_size);
+ return ops->vidioc_try_fmt_sliced_vbi_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ if (unlikely(!ops->vidioc_try_fmt_vid_out))
+ break;
+ memset_after(p, 0, fmt.pix);
+ ret = ops->vidioc_try_fmt_vid_out(file, fh, arg);
+ /* just in case the driver zeroed it again */
+ p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+ return ret;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ if (unlikely(!ops->vidioc_try_fmt_vid_out_mplane))
+ break;
+ memset_after(p, 0, fmt.pix_mp.xfer_func);
+ for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
+ memset_after(&p->fmt.pix_mp.plane_fmt[i],
+ 0, bytesperline);
+ return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
+ if (unlikely(!ops->vidioc_try_fmt_vid_out_overlay))
+ break;
+ memset_after(p, 0, fmt.win);
+ p->fmt.win.clips = NULL;
+ p->fmt.win.clipcount = 0;
+ p->fmt.win.bitmap = NULL;
+ return ops->vidioc_try_fmt_vid_out_overlay(file, fh, arg);
+ case V4L2_BUF_TYPE_VBI_OUTPUT:
+ if (unlikely(!ops->vidioc_try_fmt_vbi_out))
+ break;
+ memset_after(p, 0, fmt.vbi.flags);
+ return ops->vidioc_try_fmt_vbi_out(file, fh, arg);
+ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
+ if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_out))
+ break;
+ memset_after(p, 0, fmt.sliced.io_size);
+ return ops->vidioc_try_fmt_sliced_vbi_out(file, fh, arg);
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ if (unlikely(!ops->vidioc_try_fmt_sdr_cap))
+ break;
+ memset_after(p, 0, fmt.sdr.buffersize);
+ return ops->vidioc_try_fmt_sdr_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_SDR_OUTPUT:
+ if (unlikely(!ops->vidioc_try_fmt_sdr_out))
+ break;
+ memset_after(p, 0, fmt.sdr.buffersize);
+ return ops->vidioc_try_fmt_sdr_out(file, fh, arg);
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ if (unlikely(!ops->vidioc_try_fmt_meta_cap))
+ break;
+ memset_after(p, 0, fmt.meta);
+ return ops->vidioc_try_fmt_meta_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_META_OUTPUT:
+ if (unlikely(!ops->vidioc_try_fmt_meta_out))
+ break;
+ memset_after(p, 0, fmt.meta);
+ return ops->vidioc_try_fmt_meta_out(file, fh, arg);
+ }
+ return -EINVAL;
+}
+
+static int v4l_streamon(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ return ops->vidioc_streamon(file, fh, *(unsigned int *)arg);
+}
+
+static int v4l_streamoff(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ return ops->vidioc_streamoff(file, fh, *(unsigned int *)arg);
+}
+
+static int v4l_g_tuner(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_tuner *p = arg;
+ int err;
+
+ p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+ err = ops->vidioc_g_tuner(file, fh, p);
+ if (!err)
+ p->capability |= V4L2_TUNER_CAP_FREQ_BANDS;
+ return err;
+}
+
+static int v4l_s_tuner(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_tuner *p = arg;
+ int ret;
+
+ ret = v4l_enable_media_source(vfd);
+ if (ret)
+ return ret;
+ p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+ return ops->vidioc_s_tuner(file, fh, p);
+}
+
+static int v4l_g_modulator(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_modulator *p = arg;
+ int err;
+
+ if (vfd->vfl_type == VFL_TYPE_RADIO)
+ p->type = V4L2_TUNER_RADIO;
+
+ err = ops->vidioc_g_modulator(file, fh, p);
+ if (!err)
+ p->capability |= V4L2_TUNER_CAP_FREQ_BANDS;
+ return err;
+}
+
+static int v4l_s_modulator(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_modulator *p = arg;
+
+ if (vfd->vfl_type == VFL_TYPE_RADIO)
+ p->type = V4L2_TUNER_RADIO;
+
+ return ops->vidioc_s_modulator(file, fh, p);
+}
+
+static int v4l_g_frequency(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_frequency *p = arg;
+
+ if (vfd->vfl_type == VFL_TYPE_SDR)
+ p->type = V4L2_TUNER_SDR;
+ else
+ p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+ return ops->vidioc_g_frequency(file, fh, p);
+}
+
+static int v4l_s_frequency(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ const struct v4l2_frequency *p = arg;
+ enum v4l2_tuner_type type;
+ int ret;
+
+ ret = v4l_enable_media_source(vfd);
+ if (ret)
+ return ret;
+ if (vfd->vfl_type == VFL_TYPE_SDR) {
+ if (p->type != V4L2_TUNER_SDR && p->type != V4L2_TUNER_RF)
+ return -EINVAL;
+ } else {
+ type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+ if (type != p->type)
+ return -EINVAL;
+ }
+ return ops->vidioc_s_frequency(file, fh, p);
+}
+
+static int v4l_enumstd(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_standard *p = arg;
+
+ return v4l_video_std_enumstd(p, vfd->tvnorms);
+}
+
+static int v4l_s_std(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ v4l2_std_id id = *(v4l2_std_id *)arg, norm;
+ int ret;
+
+ ret = v4l_enable_media_source(vfd);
+ if (ret)
+ return ret;
+ norm = id & vfd->tvnorms;
+ if (vfd->tvnorms && !norm) /* Check if std is supported */
+ return -EINVAL;
+
+ /* Calls the specific handler */
+ return ops->vidioc_s_std(file, fh, norm);
+}
+
+static int v4l_querystd(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ v4l2_std_id *p = arg;
+ int ret;
+
+ ret = v4l_enable_media_source(vfd);
+ if (ret)
+ return ret;
+	/*
+	 * If no signal is detected, then the driver should return
+	 * V4L2_STD_UNKNOWN. Otherwise it should return tvnorms with
+	 * any standards that do not apply removed.
+	 *
+	 * This means that tuners and audio/video decoders can combine
+	 * their efforts to improve standard detection.
+	 */
+ *p = vfd->tvnorms;
+ return ops->vidioc_querystd(file, fh, arg);
+}
+
+static int v4l_s_hw_freq_seek(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_hw_freq_seek *p = arg;
+ enum v4l2_tuner_type type;
+ int ret;
+
+ ret = v4l_enable_media_source(vfd);
+ if (ret)
+ return ret;
+ /* s_hw_freq_seek is not supported for SDR for now */
+ if (vfd->vfl_type == VFL_TYPE_SDR)
+ return -EINVAL;
+
+ type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+ if (p->type != type)
+ return -EINVAL;
+ return ops->vidioc_s_hw_freq_seek(file, fh, p);
+}
+
+static int v4l_s_fbuf(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_framebuffer *p = arg;
+
+ p->base = NULL;
+ return ops->vidioc_s_fbuf(file, fh, p);
+}
+
+static int v4l_overlay(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ return ops->vidioc_overlay(file, fh, *(unsigned int *)arg);
+}
+
+static int v4l_reqbufs(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_requestbuffers *p = arg;
+ int ret = check_fmt(file, p->type);
+
+ if (ret)
+ return ret;
+
+ memset_after(p, 0, flags);
+
+ return ops->vidioc_reqbufs(file, fh, p);
+}
+
+static int v4l_querybuf(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_buffer *p = arg;
+ int ret = check_fmt(file, p->type);
+
+ return ret ? ret : ops->vidioc_querybuf(file, fh, p);
+}
+
+static int v4l_qbuf(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_buffer *p = arg;
+ int ret = check_fmt(file, p->type);
+
+ return ret ? ret : ops->vidioc_qbuf(file, fh, p);
+}
+
+static int v4l_dqbuf(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_buffer *p = arg;
+ int ret = check_fmt(file, p->type);
+
+ return ret ? ret : ops->vidioc_dqbuf(file, fh, p);
+}
+
+static int v4l_create_bufs(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_create_buffers *create = arg;
+ int ret = check_fmt(file, create->format.type);
+
+ if (ret)
+ return ret;
+
+ memset_after(create, 0, flags);
+
+ v4l_sanitize_format(&create->format);
+
+ ret = ops->vidioc_create_bufs(file, fh, create);
+
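+	/* Restore the priv magic in case the driver cleared it. */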
+ if (create->format.type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ create->format.type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ create->format.fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+
+ return ret;
+}
+
+static int v4l_prepare_buf(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_buffer *b = arg;
+ int ret = check_fmt(file, b->type);
+
+ return ret ? ret : ops->vidioc_prepare_buf(file, fh, b);
+}
+
+static int v4l_g_parm(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_streamparm *p = arg;
+ v4l2_std_id std;
+ int ret = check_fmt(file, p->type);
+
+ if (ret)
+ return ret;
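+	/*
+	 * Without a g_parm handler, emulate it for capture devices by
+	 * deriving the frame period from the current video standard.
+	 */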
+ if (ops->vidioc_g_parm)
+ return ops->vidioc_g_parm(file, fh, p);
+ if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return -EINVAL;
+ if (vfd->device_caps & V4L2_CAP_READWRITE)
+ p->parm.capture.readbuffers = 2;
+ ret = ops->vidioc_g_std(file, fh, &std);
+ if (ret == 0)
+ v4l2_video_std_frame_period(std, &p->parm.capture.timeperframe);
+ return ret;
+}
+
+static int v4l_s_parm(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_streamparm *p = arg;
+ int ret = check_fmt(file, p->type);
+
+ if (ret)
+ return ret;
+
+ /* Note: extendedmode is never used in drivers */
+ if (V4L2_TYPE_IS_OUTPUT(p->type)) {
+ memset(p->parm.output.reserved, 0,
+ sizeof(p->parm.output.reserved));
+ p->parm.output.extendedmode = 0;
+ p->parm.output.outputmode &= V4L2_MODE_HIGHQUALITY;
+ } else {
+ memset(p->parm.capture.reserved, 0,
+ sizeof(p->parm.capture.reserved));
+ p->parm.capture.extendedmode = 0;
+ p->parm.capture.capturemode &= V4L2_MODE_HIGHQUALITY;
+ }
+ return ops->vidioc_s_parm(file, fh, p);
+}
+
+static int v4l_queryctrl(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_queryctrl *p = arg;
+ struct v4l2_fh *vfh =
+ test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+
+ if (vfh && vfh->ctrl_handler)
+ return v4l2_queryctrl(vfh->ctrl_handler, p);
+ if (vfd->ctrl_handler)
+ return v4l2_queryctrl(vfd->ctrl_handler, p);
+ if (ops->vidioc_queryctrl)
+ return ops->vidioc_queryctrl(file, fh, p);
+ return -ENOTTY;
+}
+
+static int v4l_query_ext_ctrl(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_query_ext_ctrl *p = arg;
+ struct v4l2_fh *vfh =
+ test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+
+ if (vfh && vfh->ctrl_handler)
+ return v4l2_query_ext_ctrl(vfh->ctrl_handler, p);
+ if (vfd->ctrl_handler)
+ return v4l2_query_ext_ctrl(vfd->ctrl_handler, p);
+ if (ops->vidioc_query_ext_ctrl)
+ return ops->vidioc_query_ext_ctrl(file, fh, p);
+ return -ENOTTY;
+}
+
+static int v4l_querymenu(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_querymenu *p = arg;
+ struct v4l2_fh *vfh =
+ test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+
+ if (vfh && vfh->ctrl_handler)
+ return v4l2_querymenu(vfh->ctrl_handler, p);
+ if (vfd->ctrl_handler)
+ return v4l2_querymenu(vfd->ctrl_handler, p);
+ if (ops->vidioc_querymenu)
+ return ops->vidioc_querymenu(file, fh, p);
+ return -ENOTTY;
+}
+
+static int v4l_g_ctrl(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_control *p = arg;
+ struct v4l2_fh *vfh =
+ test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+ struct v4l2_ext_controls ctrls;
+ struct v4l2_ext_control ctrl;
+
+ if (vfh && vfh->ctrl_handler)
+ return v4l2_g_ctrl(vfh->ctrl_handler, p);
+ if (vfd->ctrl_handler)
+ return v4l2_g_ctrl(vfd->ctrl_handler, p);
+ if (ops->vidioc_g_ctrl)
+ return ops->vidioc_g_ctrl(file, fh, p);
+ if (ops->vidioc_g_ext_ctrls == NULL)
+ return -ENOTTY;
+
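+	/* Emulate VIDIOC_G_CTRL via the driver's ext_ctrls handler. */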
+ ctrls.which = V4L2_CTRL_ID2WHICH(p->id);
+ ctrls.count = 1;
+ ctrls.controls = &ctrl;
+ ctrl.id = p->id;
+ ctrl.value = p->value;
+ if (check_ext_ctrls(&ctrls, VIDIOC_G_CTRL)) {
+ int ret = ops->vidioc_g_ext_ctrls(file, fh, &ctrls);
+
+ if (ret == 0)
+ p->value = ctrl.value;
+ return ret;
+ }
+ return -EINVAL;
+}
+
+static int v4l_s_ctrl(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_control *p = arg;
+ struct v4l2_fh *vfh =
+ test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+ struct v4l2_ext_controls ctrls;
+ struct v4l2_ext_control ctrl;
+ int ret;
+
+ if (vfh && vfh->ctrl_handler)
+ return v4l2_s_ctrl(vfh, vfh->ctrl_handler, p);
+ if (vfd->ctrl_handler)
+ return v4l2_s_ctrl(NULL, vfd->ctrl_handler, p);
+ if (ops->vidioc_s_ctrl)
+ return ops->vidioc_s_ctrl(file, fh, p);
+ if (ops->vidioc_s_ext_ctrls == NULL)
+ return -ENOTTY;
+
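+	/* Emulate VIDIOC_S_CTRL via the driver's ext_ctrls handler. */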
+ ctrls.which = V4L2_CTRL_ID2WHICH(p->id);
+ ctrls.count = 1;
+ ctrls.controls = &ctrl;
+ ctrl.id = p->id;
+ ctrl.value = p->value;
+ if (!check_ext_ctrls(&ctrls, VIDIOC_S_CTRL))
+ return -EINVAL;
+ ret = ops->vidioc_s_ext_ctrls(file, fh, &ctrls);
+ p->value = ctrl.value;
+ return ret;
+}
+
+static int v4l_g_ext_ctrls(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_ext_controls *p = arg;
+ struct v4l2_fh *vfh =
+ test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+
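+	/* error_idx == count means the error is not control specific. */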
+ p->error_idx = p->count;
+ if (vfh && vfh->ctrl_handler)
+ return v4l2_g_ext_ctrls(vfh->ctrl_handler,
+ vfd, vfd->v4l2_dev->mdev, p);
+ if (vfd->ctrl_handler)
+ return v4l2_g_ext_ctrls(vfd->ctrl_handler,
+ vfd, vfd->v4l2_dev->mdev, p);
+ if (ops->vidioc_g_ext_ctrls == NULL)
+ return -ENOTTY;
+ return check_ext_ctrls(p, VIDIOC_G_EXT_CTRLS) ?
+ ops->vidioc_g_ext_ctrls(file, fh, p) : -EINVAL;
+}
+
+static int v4l_s_ext_ctrls(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_ext_controls *p = arg;
+ struct v4l2_fh *vfh =
+ test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+
+ p->error_idx = p->count;
+ if (vfh && vfh->ctrl_handler)
+ return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler,
+ vfd, vfd->v4l2_dev->mdev, p);
+ if (vfd->ctrl_handler)
+ return v4l2_s_ext_ctrls(NULL, vfd->ctrl_handler,
+ vfd, vfd->v4l2_dev->mdev, p);
+ if (ops->vidioc_s_ext_ctrls == NULL)
+ return -ENOTTY;
+ return check_ext_ctrls(p, VIDIOC_S_EXT_CTRLS) ?
+ ops->vidioc_s_ext_ctrls(file, fh, p) : -EINVAL;
+}
+
+static int v4l_try_ext_ctrls(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_ext_controls *p = arg;
+ struct v4l2_fh *vfh =
+ test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+
+ p->error_idx = p->count;
+ if (vfh && vfh->ctrl_handler)
+ return v4l2_try_ext_ctrls(vfh->ctrl_handler,
+ vfd, vfd->v4l2_dev->mdev, p);
+ if (vfd->ctrl_handler)
+ return v4l2_try_ext_ctrls(vfd->ctrl_handler,
+ vfd, vfd->v4l2_dev->mdev, p);
+ if (ops->vidioc_try_ext_ctrls == NULL)
+ return -ENOTTY;
+ return check_ext_ctrls(p, VIDIOC_TRY_EXT_CTRLS) ?
+ ops->vidioc_try_ext_ctrls(file, fh, p) : -EINVAL;
+}
+
+/*
+ * The selection API specified originally that the _MPLANE buffer types
+ * shouldn't be used. The reasons for this are lost in the mists of time
+ * (or just really crappy memories). Regardless, this is really annoying
+ * for userspace. So to keep things simple we map _MPLANE buffer types
+ * to their 'regular' counterparts before calling the driver. And we
+ * restore it afterwards. This way applications can use either buffer
+ * type and drivers don't need to check for both.
+ */
+static int v4l_g_selection(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_selection *p = arg;
+ u32 old_type = p->type;
+ int ret;
+
+ if (p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ p->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ else if (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ p->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ ret = ops->vidioc_g_selection(file, fh, p);
+ p->type = old_type;
+ return ret;
+}
+
+static int v4l_s_selection(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_selection *p = arg;
+ u32 old_type = p->type;
+ int ret;
+
+ if (p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ p->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ else if (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ p->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ ret = ops->vidioc_s_selection(file, fh, p);
+ p->type = old_type;
+ return ret;
+}
+
+static int v4l_g_crop(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_crop *p = arg;
+ struct v4l2_selection s = {
+ .type = p->type,
+ };
+ int ret;
+
+	/* Simulate the crop ioctl using the selection API. */
+
+ /* crop means compose for output devices */
+ if (V4L2_TYPE_IS_OUTPUT(p->type))
+ s.target = V4L2_SEL_TGT_COMPOSE;
+ else
+ s.target = V4L2_SEL_TGT_CROP;
+
+ if (test_bit(V4L2_FL_QUIRK_INVERTED_CROP, &vfd->flags))
+ s.target = s.target == V4L2_SEL_TGT_COMPOSE ?
+ V4L2_SEL_TGT_CROP : V4L2_SEL_TGT_COMPOSE;
+
+ ret = v4l_g_selection(ops, file, fh, &s);
+
+ /* copying results to old structure on success */
+ if (!ret)
+ p->c = s.r;
+ return ret;
+}
+
+static int v4l_s_crop(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_crop *p = arg;
+ struct v4l2_selection s = {
+ .type = p->type,
+ .r = p->c,
+ };
+
+	/* Simulate the crop ioctl using the selection API. */
+
+ /* crop means compose for output devices */
+ if (V4L2_TYPE_IS_OUTPUT(p->type))
+ s.target = V4L2_SEL_TGT_COMPOSE;
+ else
+ s.target = V4L2_SEL_TGT_CROP;
+
+ if (test_bit(V4L2_FL_QUIRK_INVERTED_CROP, &vfd->flags))
+ s.target = s.target == V4L2_SEL_TGT_COMPOSE ?
+ V4L2_SEL_TGT_CROP : V4L2_SEL_TGT_COMPOSE;
+
+ return v4l_s_selection(ops, file, fh, &s);
+}
+
+static int v4l_cropcap(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_cropcap *p = arg;
+ struct v4l2_selection s = { .type = p->type };
+ int ret = 0;
+
+ /* setting trivial pixelaspect */
+ p->pixelaspect.numerator = 1;
+ p->pixelaspect.denominator = 1;
+
+ if (s.type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ s.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ else if (s.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ s.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+
+	/*
+	 * The determine_valid_ioctls() call should already ensure
+	 * that this can never happen, but just in case...
+	 */
+ if (WARN_ON(!ops->vidioc_g_selection))
+ return -ENOTTY;
+
+ if (ops->vidioc_g_pixelaspect)
+ ret = ops->vidioc_g_pixelaspect(file, fh, s.type,
+ &p->pixelaspect);
+
+ /*
+ * Ignore ENOTTY or ENOIOCTLCMD error returns, just use the
+ * square pixel aspect ratio in that case.
+ */
+ if (ret && ret != -ENOTTY && ret != -ENOIOCTLCMD)
+ return ret;
+
+ /* Use g_selection() to fill in the bounds and defrect rectangles */
+
+ /* obtaining bounds */
+ if (V4L2_TYPE_IS_OUTPUT(p->type))
+ s.target = V4L2_SEL_TGT_COMPOSE_BOUNDS;
+ else
+ s.target = V4L2_SEL_TGT_CROP_BOUNDS;
+
+ if (test_bit(V4L2_FL_QUIRK_INVERTED_CROP, &vfd->flags))
+ s.target = s.target == V4L2_SEL_TGT_COMPOSE_BOUNDS ?
+ V4L2_SEL_TGT_CROP_BOUNDS : V4L2_SEL_TGT_COMPOSE_BOUNDS;
+
+ ret = v4l_g_selection(ops, file, fh, &s);
+ if (ret)
+ return ret;
+ p->bounds = s.r;
+
+ /* obtaining defrect */
+ if (s.target == V4L2_SEL_TGT_COMPOSE_BOUNDS)
+ s.target = V4L2_SEL_TGT_COMPOSE_DEFAULT;
+ else
+ s.target = V4L2_SEL_TGT_CROP_DEFAULT;
+
+ ret = v4l_g_selection(ops, file, fh, &s);
+ if (ret)
+ return ret;
+ p->defrect = s.r;
+
+ return 0;
+}
+
+static int v4l_log_status(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ int ret;
+
+ if (vfd->v4l2_dev)
+ pr_info("%s: ================= START STATUS =================\n",
+ vfd->v4l2_dev->name);
+ ret = ops->vidioc_log_status(file, fh);
+ if (vfd->v4l2_dev)
+ pr_info("%s: ================== END STATUS ==================\n",
+ vfd->v4l2_dev->name);
+ return ret;
+}
+
+static int v4l_dbg_g_register(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ struct v4l2_dbg_register *p = arg;
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_subdev *sd;
+ int idx = 0;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
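+	/* Sub-devices are addressed by their index in the v4l2_dev list. */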
+ if (p->match.type == V4L2_CHIP_MATCH_SUBDEV) {
+ if (vfd->v4l2_dev == NULL)
+ return -EINVAL;
+ v4l2_device_for_each_subdev(sd, vfd->v4l2_dev)
+ if (p->match.addr == idx++)
+ return v4l2_subdev_call(sd, core, g_register, p);
+ return -EINVAL;
+ }
+ if (ops->vidioc_g_register && p->match.type == V4L2_CHIP_MATCH_BRIDGE &&
+ (ops->vidioc_g_chip_info || p->match.addr == 0))
+ return ops->vidioc_g_register(file, fh, p);
+ return -EINVAL;
+#else
+ return -ENOTTY;
+#endif
+}
+
+static int v4l_dbg_s_register(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ const struct v4l2_dbg_register *p = arg;
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_subdev *sd;
+ int idx = 0;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (p->match.type == V4L2_CHIP_MATCH_SUBDEV) {
+ if (vfd->v4l2_dev == NULL)
+ return -EINVAL;
+ v4l2_device_for_each_subdev(sd, vfd->v4l2_dev)
+ if (p->match.addr == idx++)
+ return v4l2_subdev_call(sd, core, s_register, p);
+ return -EINVAL;
+ }
+ if (ops->vidioc_s_register && p->match.type == V4L2_CHIP_MATCH_BRIDGE &&
+ (ops->vidioc_g_chip_info || p->match.addr == 0))
+ return ops->vidioc_s_register(file, fh, p);
+ return -EINVAL;
+#else
+ return -ENOTTY;
+#endif
+}
+
+static int v4l_dbg_g_chip_info(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_dbg_chip_info *p = arg;
+ struct v4l2_subdev *sd;
+ int idx = 0;
+
+ switch (p->match.type) {
+ case V4L2_CHIP_MATCH_BRIDGE:
+ if (ops->vidioc_s_register)
+ p->flags |= V4L2_CHIP_FL_WRITABLE;
+ if (ops->vidioc_g_register)
+ p->flags |= V4L2_CHIP_FL_READABLE;
+ strscpy(p->name, vfd->v4l2_dev->name, sizeof(p->name));
+ if (ops->vidioc_g_chip_info)
+ return ops->vidioc_g_chip_info(file, fh, arg);
+ if (p->match.addr)
+ return -EINVAL;
+ return 0;
+
+ case V4L2_CHIP_MATCH_SUBDEV:
+ if (vfd->v4l2_dev == NULL)
+ break;
+ v4l2_device_for_each_subdev(sd, vfd->v4l2_dev) {
+ if (p->match.addr != idx++)
+ continue;
+ if (sd->ops->core && sd->ops->core->s_register)
+ p->flags |= V4L2_CHIP_FL_WRITABLE;
+ if (sd->ops->core && sd->ops->core->g_register)
+ p->flags |= V4L2_CHIP_FL_READABLE;
+ strscpy(p->name, sd->name, sizeof(p->name));
+ return 0;
+ }
+ break;
+ }
+ return -EINVAL;
+#else
+ return -ENOTTY;
+#endif
+}
+
+static int v4l_dqevent(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ return v4l2_event_dequeue(fh, arg, file->f_flags & O_NONBLOCK);
+}
+
+static int v4l_subscribe_event(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ return ops->vidioc_subscribe_event(fh, arg);
+}
+
+static int v4l_unsubscribe_event(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ return ops->vidioc_unsubscribe_event(fh, arg);
+}
+
+static int v4l_g_sliced_vbi_cap(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_sliced_vbi_cap *p = arg;
+ int ret = check_fmt(file, p->type);
+
+ if (ret)
+ return ret;
+
+ /* Clear up to type, everything after type is zeroed already */
+ memset(p, 0, offsetof(struct v4l2_sliced_vbi_cap, type));
+
+ return ops->vidioc_g_sliced_vbi_cap(file, fh, p);
+}
+
+static int v4l_enum_freq_bands(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_frequency_band *p = arg;
+ enum v4l2_tuner_type type;
+ int err;
+
+ if (vfd->vfl_type == VFL_TYPE_SDR) {
+ if (p->type != V4L2_TUNER_SDR && p->type != V4L2_TUNER_RF)
+ return -EINVAL;
+ type = p->type;
+ } else {
+ type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+ if (type != p->type)
+ return -EINVAL;
+ }
+ if (ops->vidioc_enum_freq_bands) {
+ err = ops->vidioc_enum_freq_bands(file, fh, p);
+ if (err != -ENOTTY)
+ return err;
+ }
+ if (is_valid_ioctl(vfd, VIDIOC_G_TUNER)) {
+ struct v4l2_tuner t = {
+ .index = p->tuner,
+ .type = type,
+ };
+
+ if (p->index)
+ return -EINVAL;
+ err = ops->vidioc_g_tuner(file, fh, &t);
+ if (err)
+ return err;
+ p->capability = t.capability | V4L2_TUNER_CAP_FREQ_BANDS;
+ p->rangelow = t.rangelow;
+ p->rangehigh = t.rangehigh;
+ p->modulation = (type == V4L2_TUNER_RADIO) ?
+ V4L2_BAND_MODULATION_FM : V4L2_BAND_MODULATION_VSB;
+ return 0;
+ }
+ if (is_valid_ioctl(vfd, VIDIOC_G_MODULATOR)) {
+ struct v4l2_modulator m = {
+ .index = p->tuner,
+ };
+
+ if (type != V4L2_TUNER_RADIO)
+ return -EINVAL;
+ if (p->index)
+ return -EINVAL;
+ err = ops->vidioc_g_modulator(file, fh, &m);
+ if (err)
+ return err;
+ p->capability = m.capability | V4L2_TUNER_CAP_FREQ_BANDS;
+ p->rangelow = m.rangelow;
+ p->rangehigh = m.rangehigh;
+ p->modulation = (type == V4L2_TUNER_RADIO) ?
+ V4L2_BAND_MODULATION_FM : V4L2_BAND_MODULATION_VSB;
+ return 0;
+ }
+ return -ENOTTY;
+}
+
+struct v4l2_ioctl_info {
+ unsigned int ioctl;
+ u32 flags;
+ const char * const name;
+ int (*func)(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *fh, void *p);
+ void (*debug)(const void *arg, bool write_only);
+};
+
+/* This ioctl needs a priority check */
+#define INFO_FL_PRIO (1 << 0)
+/* This ioctl is valid if the filehandle provides a control handler. */
+#define INFO_FL_CTRL (1 << 1)
+/* Queuing ioctl */
+#define INFO_FL_QUEUE (1 << 2)
+/* Always copy back result, even on error */
+#define INFO_FL_ALWAYS_COPY (1 << 3)
+/* Copy from userspace up to and including this field, zero the rest */
+#define INFO_FL_CLEAR(v4l2_struct, field) \
+ ((offsetof(struct v4l2_struct, field) + \
+ sizeof_field(struct v4l2_struct, field)) << 16)
+#define INFO_FL_CLEAR_MASK (_IOC_SIZEMASK << 16)
+
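+/*
+ * Example: INFO_FL_CLEAR(v4l2_standard, index) stores
+ * offsetof(struct v4l2_standard, index) + sizeof_field(struct v4l2_standard,
+ * index) == 4 in bits 16..31 of the flags word; video_get_user() then copies
+ * only those first 4 bytes (the index field) from userspace and zeroes the
+ * rest of the structure before the handler runs.
+ */
+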
+#define DEFINE_V4L_STUB_FUNC(_vidioc) \
+ static int v4l_stub_ ## _vidioc( \
+ const struct v4l2_ioctl_ops *ops, \
+ struct file *file, void *fh, void *p) \
+ { \
+ return ops->vidioc_ ## _vidioc(file, fh, p); \
+ }
+
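+/*
+ * For instance, DEFINE_V4L_STUB_FUNC(g_std) expands to:
+ *
+ *     static int v4l_stub_g_std(const struct v4l2_ioctl_ops *ops,
+ *                               struct file *file, void *fh, void *p)
+ *     {
+ *             return ops->vidioc_g_std(file, fh, p);
+ *     }
+ */
+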
+#define IOCTL_INFO(_ioctl, _func, _debug, _flags) \
+ [_IOC_NR(_ioctl)] = { \
+ .ioctl = _ioctl, \
+ .flags = _flags, \
+ .name = #_ioctl, \
+ .func = _func, \
+ .debug = _debug, \
+ }
+
+DEFINE_V4L_STUB_FUNC(g_fbuf)
+DEFINE_V4L_STUB_FUNC(expbuf)
+DEFINE_V4L_STUB_FUNC(g_std)
+DEFINE_V4L_STUB_FUNC(g_audio)
+DEFINE_V4L_STUB_FUNC(s_audio)
+DEFINE_V4L_STUB_FUNC(g_edid)
+DEFINE_V4L_STUB_FUNC(s_edid)
+DEFINE_V4L_STUB_FUNC(g_audout)
+DEFINE_V4L_STUB_FUNC(s_audout)
+DEFINE_V4L_STUB_FUNC(g_jpegcomp)
+DEFINE_V4L_STUB_FUNC(s_jpegcomp)
+DEFINE_V4L_STUB_FUNC(enumaudio)
+DEFINE_V4L_STUB_FUNC(enumaudout)
+DEFINE_V4L_STUB_FUNC(enum_framesizes)
+DEFINE_V4L_STUB_FUNC(enum_frameintervals)
+DEFINE_V4L_STUB_FUNC(g_enc_index)
+DEFINE_V4L_STUB_FUNC(encoder_cmd)
+DEFINE_V4L_STUB_FUNC(try_encoder_cmd)
+DEFINE_V4L_STUB_FUNC(decoder_cmd)
+DEFINE_V4L_STUB_FUNC(try_decoder_cmd)
+DEFINE_V4L_STUB_FUNC(s_dv_timings)
+DEFINE_V4L_STUB_FUNC(g_dv_timings)
+DEFINE_V4L_STUB_FUNC(enum_dv_timings)
+DEFINE_V4L_STUB_FUNC(query_dv_timings)
+DEFINE_V4L_STUB_FUNC(dv_timings_cap)
+
+static const struct v4l2_ioctl_info v4l2_ioctls[] = {
+ IOCTL_INFO(VIDIOC_QUERYCAP, v4l_querycap, v4l_print_querycap, 0),
+ IOCTL_INFO(VIDIOC_ENUM_FMT, v4l_enum_fmt, v4l_print_fmtdesc, 0),
+ IOCTL_INFO(VIDIOC_G_FMT, v4l_g_fmt, v4l_print_format, 0),
+ IOCTL_INFO(VIDIOC_S_FMT, v4l_s_fmt, v4l_print_format, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_REQBUFS, v4l_reqbufs, v4l_print_requestbuffers, INFO_FL_PRIO | INFO_FL_QUEUE),
+ IOCTL_INFO(VIDIOC_QUERYBUF, v4l_querybuf, v4l_print_buffer, INFO_FL_QUEUE | INFO_FL_CLEAR(v4l2_buffer, length)),
+ IOCTL_INFO(VIDIOC_G_FBUF, v4l_stub_g_fbuf, v4l_print_framebuffer, 0),
+ IOCTL_INFO(VIDIOC_S_FBUF, v4l_s_fbuf, v4l_print_framebuffer, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_OVERLAY, v4l_overlay, v4l_print_u32, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_QBUF, v4l_qbuf, v4l_print_buffer, INFO_FL_QUEUE),
+ IOCTL_INFO(VIDIOC_EXPBUF, v4l_stub_expbuf, v4l_print_exportbuffer, INFO_FL_QUEUE | INFO_FL_CLEAR(v4l2_exportbuffer, flags)),
+ IOCTL_INFO(VIDIOC_DQBUF, v4l_dqbuf, v4l_print_buffer, INFO_FL_QUEUE),
+ IOCTL_INFO(VIDIOC_STREAMON, v4l_streamon, v4l_print_buftype, INFO_FL_PRIO | INFO_FL_QUEUE),
+ IOCTL_INFO(VIDIOC_STREAMOFF, v4l_streamoff, v4l_print_buftype, INFO_FL_PRIO | INFO_FL_QUEUE),
+ IOCTL_INFO(VIDIOC_G_PARM, v4l_g_parm, v4l_print_streamparm, INFO_FL_CLEAR(v4l2_streamparm, type)),
+ IOCTL_INFO(VIDIOC_S_PARM, v4l_s_parm, v4l_print_streamparm, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_G_STD, v4l_stub_g_std, v4l_print_std, 0),
+ IOCTL_INFO(VIDIOC_S_STD, v4l_s_std, v4l_print_std, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_ENUMSTD, v4l_enumstd, v4l_print_standard, INFO_FL_CLEAR(v4l2_standard, index)),
+ IOCTL_INFO(VIDIOC_ENUMINPUT, v4l_enuminput, v4l_print_enuminput, INFO_FL_CLEAR(v4l2_input, index)),
+ IOCTL_INFO(VIDIOC_G_CTRL, v4l_g_ctrl, v4l_print_control, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_control, id)),
+ IOCTL_INFO(VIDIOC_S_CTRL, v4l_s_ctrl, v4l_print_control, INFO_FL_PRIO | INFO_FL_CTRL),
+ IOCTL_INFO(VIDIOC_G_TUNER, v4l_g_tuner, v4l_print_tuner, INFO_FL_CLEAR(v4l2_tuner, index)),
+ IOCTL_INFO(VIDIOC_S_TUNER, v4l_s_tuner, v4l_print_tuner, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_G_AUDIO, v4l_stub_g_audio, v4l_print_audio, 0),
+ IOCTL_INFO(VIDIOC_S_AUDIO, v4l_stub_s_audio, v4l_print_audio, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_QUERYCTRL, v4l_queryctrl, v4l_print_queryctrl, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_queryctrl, id)),
+ IOCTL_INFO(VIDIOC_QUERYMENU, v4l_querymenu, v4l_print_querymenu, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_querymenu, index)),
+ IOCTL_INFO(VIDIOC_G_INPUT, v4l_g_input, v4l_print_u32, 0),
+ IOCTL_INFO(VIDIOC_S_INPUT, v4l_s_input, v4l_print_u32, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_G_EDID, v4l_stub_g_edid, v4l_print_edid, INFO_FL_ALWAYS_COPY),
+ IOCTL_INFO(VIDIOC_S_EDID, v4l_stub_s_edid, v4l_print_edid, INFO_FL_PRIO | INFO_FL_ALWAYS_COPY),
+ IOCTL_INFO(VIDIOC_G_OUTPUT, v4l_g_output, v4l_print_u32, 0),
+ IOCTL_INFO(VIDIOC_S_OUTPUT, v4l_s_output, v4l_print_u32, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_ENUMOUTPUT, v4l_enumoutput, v4l_print_enumoutput, INFO_FL_CLEAR(v4l2_output, index)),
+ IOCTL_INFO(VIDIOC_G_AUDOUT, v4l_stub_g_audout, v4l_print_audioout, 0),
+ IOCTL_INFO(VIDIOC_S_AUDOUT, v4l_stub_s_audout, v4l_print_audioout, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_G_MODULATOR, v4l_g_modulator, v4l_print_modulator, INFO_FL_CLEAR(v4l2_modulator, index)),
+ IOCTL_INFO(VIDIOC_S_MODULATOR, v4l_s_modulator, v4l_print_modulator, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_G_FREQUENCY, v4l_g_frequency, v4l_print_frequency, INFO_FL_CLEAR(v4l2_frequency, tuner)),
+ IOCTL_INFO(VIDIOC_S_FREQUENCY, v4l_s_frequency, v4l_print_frequency, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_CROPCAP, v4l_cropcap, v4l_print_cropcap, INFO_FL_CLEAR(v4l2_cropcap, type)),
+ IOCTL_INFO(VIDIOC_G_CROP, v4l_g_crop, v4l_print_crop, INFO_FL_CLEAR(v4l2_crop, type)),
+ IOCTL_INFO(VIDIOC_S_CROP, v4l_s_crop, v4l_print_crop, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_G_SELECTION, v4l_g_selection, v4l_print_selection, INFO_FL_CLEAR(v4l2_selection, r)),
+ IOCTL_INFO(VIDIOC_S_SELECTION, v4l_s_selection, v4l_print_selection, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_selection, r)),
+ IOCTL_INFO(VIDIOC_G_JPEGCOMP, v4l_stub_g_jpegcomp, v4l_print_jpegcompression, 0),
+ IOCTL_INFO(VIDIOC_S_JPEGCOMP, v4l_stub_s_jpegcomp, v4l_print_jpegcompression, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_QUERYSTD, v4l_querystd, v4l_print_std, 0),
+ IOCTL_INFO(VIDIOC_TRY_FMT, v4l_try_fmt, v4l_print_format, 0),
+ IOCTL_INFO(VIDIOC_ENUMAUDIO, v4l_stub_enumaudio, v4l_print_audio, INFO_FL_CLEAR(v4l2_audio, index)),
+ IOCTL_INFO(VIDIOC_ENUMAUDOUT, v4l_stub_enumaudout, v4l_print_audioout, INFO_FL_CLEAR(v4l2_audioout, index)),
+ IOCTL_INFO(VIDIOC_G_PRIORITY, v4l_g_priority, v4l_print_u32, 0),
+ IOCTL_INFO(VIDIOC_S_PRIORITY, v4l_s_priority, v4l_print_u32, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_G_SLICED_VBI_CAP, v4l_g_sliced_vbi_cap, v4l_print_sliced_vbi_cap, INFO_FL_CLEAR(v4l2_sliced_vbi_cap, type)),
+ IOCTL_INFO(VIDIOC_LOG_STATUS, v4l_log_status, v4l_print_newline, 0),
+ IOCTL_INFO(VIDIOC_G_EXT_CTRLS, v4l_g_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL | INFO_FL_ALWAYS_COPY),
+ IOCTL_INFO(VIDIOC_S_EXT_CTRLS, v4l_s_ext_ctrls, v4l_print_ext_controls, INFO_FL_PRIO | INFO_FL_CTRL | INFO_FL_ALWAYS_COPY),
+ IOCTL_INFO(VIDIOC_TRY_EXT_CTRLS, v4l_try_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL | INFO_FL_ALWAYS_COPY),
+ IOCTL_INFO(VIDIOC_ENUM_FRAMESIZES, v4l_stub_enum_framesizes, v4l_print_frmsizeenum, INFO_FL_CLEAR(v4l2_frmsizeenum, pixel_format)),
+ IOCTL_INFO(VIDIOC_ENUM_FRAMEINTERVALS, v4l_stub_enum_frameintervals, v4l_print_frmivalenum, INFO_FL_CLEAR(v4l2_frmivalenum, height)),
+ IOCTL_INFO(VIDIOC_G_ENC_INDEX, v4l_stub_g_enc_index, v4l_print_enc_idx, 0),
+ IOCTL_INFO(VIDIOC_ENCODER_CMD, v4l_stub_encoder_cmd, v4l_print_encoder_cmd, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_encoder_cmd, flags)),
+ IOCTL_INFO(VIDIOC_TRY_ENCODER_CMD, v4l_stub_try_encoder_cmd, v4l_print_encoder_cmd, INFO_FL_CLEAR(v4l2_encoder_cmd, flags)),
+ IOCTL_INFO(VIDIOC_DECODER_CMD, v4l_stub_decoder_cmd, v4l_print_decoder_cmd, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_TRY_DECODER_CMD, v4l_stub_try_decoder_cmd, v4l_print_decoder_cmd, 0),
+ IOCTL_INFO(VIDIOC_DBG_S_REGISTER, v4l_dbg_s_register, v4l_print_dbg_register, 0),
+ IOCTL_INFO(VIDIOC_DBG_G_REGISTER, v4l_dbg_g_register, v4l_print_dbg_register, 0),
+ IOCTL_INFO(VIDIOC_S_HW_FREQ_SEEK, v4l_s_hw_freq_seek, v4l_print_hw_freq_seek, INFO_FL_PRIO),
+ IOCTL_INFO(VIDIOC_S_DV_TIMINGS, v4l_stub_s_dv_timings, v4l_print_dv_timings, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_dv_timings, bt.flags)),
+ IOCTL_INFO(VIDIOC_G_DV_TIMINGS, v4l_stub_g_dv_timings, v4l_print_dv_timings, 0),
+ IOCTL_INFO(VIDIOC_DQEVENT, v4l_dqevent, v4l_print_event, 0),
+ IOCTL_INFO(VIDIOC_SUBSCRIBE_EVENT, v4l_subscribe_event, v4l_print_event_subscription, 0),
+ IOCTL_INFO(VIDIOC_UNSUBSCRIBE_EVENT, v4l_unsubscribe_event, v4l_print_event_subscription, 0),
+ IOCTL_INFO(VIDIOC_CREATE_BUFS, v4l_create_bufs, v4l_print_create_buffers, INFO_FL_PRIO | INFO_FL_QUEUE),
+ IOCTL_INFO(VIDIOC_PREPARE_BUF, v4l_prepare_buf, v4l_print_buffer, INFO_FL_QUEUE),
+ IOCTL_INFO(VIDIOC_ENUM_DV_TIMINGS, v4l_stub_enum_dv_timings, v4l_print_enum_dv_timings, INFO_FL_CLEAR(v4l2_enum_dv_timings, pad)),
+ IOCTL_INFO(VIDIOC_QUERY_DV_TIMINGS, v4l_stub_query_dv_timings, v4l_print_dv_timings, INFO_FL_ALWAYS_COPY),
+ IOCTL_INFO(VIDIOC_DV_TIMINGS_CAP, v4l_stub_dv_timings_cap, v4l_print_dv_timings_cap, INFO_FL_CLEAR(v4l2_dv_timings_cap, pad)),
+ IOCTL_INFO(VIDIOC_ENUM_FREQ_BANDS, v4l_enum_freq_bands, v4l_print_freq_band, 0),
+ IOCTL_INFO(VIDIOC_DBG_G_CHIP_INFO, v4l_dbg_g_chip_info, v4l_print_dbg_chip_info, INFO_FL_CLEAR(v4l2_dbg_chip_info, match)),
+ IOCTL_INFO(VIDIOC_QUERY_EXT_CTRL, v4l_query_ext_ctrl, v4l_print_query_ext_ctrl, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_query_ext_ctrl, id)),
+};
+#define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls)
+
+static bool v4l2_is_known_ioctl(unsigned int cmd)
+{
+ if (_IOC_NR(cmd) >= V4L2_IOCTLS)
+ return false;
+ return v4l2_ioctls[_IOC_NR(cmd)].ioctl == cmd;
+}
+
+static struct mutex *v4l2_ioctl_get_lock(struct video_device *vdev,
+ struct v4l2_fh *vfh, unsigned int cmd,
+ void *arg)
+{
+ if (_IOC_NR(cmd) >= V4L2_IOCTLS)
+ return vdev->lock;
+ if (vfh && vfh->m2m_ctx &&
+ (v4l2_ioctls[_IOC_NR(cmd)].flags & INFO_FL_QUEUE)) {
+ if (vfh->m2m_ctx->q_lock)
+ return vfh->m2m_ctx->q_lock;
+ }
+ if (vdev->queue && vdev->queue->lock &&
+ (v4l2_ioctls[_IOC_NR(cmd)].flags & INFO_FL_QUEUE))
+ return vdev->queue->lock;
+ return vdev->lock;
+}
+
+/* Common ioctl debug function. This function can be used by
+   external ioctl messages as well as internal V4L ioctls */
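+/*
+ * Example output for a known ioctl: "video0: VIDIOC_G_FMT". Anything else is
+ * decoded from the command word instead, e.g.
+ * "video0: unknown ioctl 'X', dir=rw, #0 (0xc0045800)" (illustrative values).
+ */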
+void v4l_printk_ioctl(const char *prefix, unsigned int cmd)
+{
+ const char *dir, *type;
+
+ if (prefix)
+ printk(KERN_DEBUG "%s: ", prefix);
+
+ switch (_IOC_TYPE(cmd)) {
+ case 'd':
+ type = "v4l2_int";
+ break;
+ case 'V':
+ if (_IOC_NR(cmd) >= V4L2_IOCTLS) {
+ type = "v4l2";
+ break;
+ }
+ pr_cont("%s", v4l2_ioctls[_IOC_NR(cmd)].name);
+ return;
+ default:
+ type = "unknown";
+ break;
+ }
+
+ switch (_IOC_DIR(cmd)) {
+ case _IOC_NONE: dir = "--"; break;
+ case _IOC_READ: dir = "r-"; break;
+ case _IOC_WRITE: dir = "-w"; break;
+ case _IOC_READ | _IOC_WRITE: dir = "rw"; break;
+ default: dir = "*ERR*"; break;
+ }
+ pr_cont("%s ioctl '%c', dir=%s, #%d (0x%08x)",
+ type, _IOC_TYPE(cmd), dir, _IOC_NR(cmd), cmd);
+}
+EXPORT_SYMBOL(v4l_printk_ioctl);
+
+static long __video_do_ioctl(struct file *file,
+ unsigned int cmd, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct mutex *req_queue_lock = NULL;
+ struct mutex *lock; /* ioctl serialization mutex */
+ const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
+ bool write_only = false;
+ struct v4l2_ioctl_info default_info;
+ const struct v4l2_ioctl_info *info;
+ void *fh = file->private_data;
+ struct v4l2_fh *vfh = NULL;
+ int dev_debug = vfd->dev_debug;
+ long ret = -ENOTTY;
+
+ if (ops == NULL) {
+ pr_warn("%s: has no ioctl_ops.\n",
+ video_device_node_name(vfd));
+ return ret;
+ }
+
+ if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags))
+ vfh = file->private_data;
+
+ /*
+ * We need to serialize streamon/off with queueing new requests.
+ * These ioctls may trigger the cancellation of a streaming
+ * operation, and that should not be mixed with queueing a new
+ * request at the same time.
+ */
+ if (v4l2_device_supports_requests(vfd->v4l2_dev) &&
+ (cmd == VIDIOC_STREAMON || cmd == VIDIOC_STREAMOFF)) {
+ req_queue_lock = &vfd->v4l2_dev->mdev->req_queue_mutex;
+
+ if (mutex_lock_interruptible(req_queue_lock))
+ return -ERESTARTSYS;
+ }
+
+ lock = v4l2_ioctl_get_lock(vfd, vfh, cmd, arg);
+
+ if (lock && mutex_lock_interruptible(lock)) {
+ if (req_queue_lock)
+ mutex_unlock(req_queue_lock);
+ return -ERESTARTSYS;
+ }
+
+ if (!video_is_registered(vfd)) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ if (v4l2_is_known_ioctl(cmd)) {
+ info = &v4l2_ioctls[_IOC_NR(cmd)];
+
+ if (!test_bit(_IOC_NR(cmd), vfd->valid_ioctls) &&
+ !((info->flags & INFO_FL_CTRL) && vfh && vfh->ctrl_handler))
+ goto done;
+
+ if (vfh && (info->flags & INFO_FL_PRIO)) {
+ ret = v4l2_prio_check(vfd->prio, vfh->prio);
+ if (ret)
+ goto done;
+ }
+ } else {
+ default_info.ioctl = cmd;
+ default_info.flags = 0;
+ default_info.debug = v4l_print_default;
+ info = &default_info;
+ }
+
+ write_only = _IOC_DIR(cmd) == _IOC_WRITE;
+ if (info != &default_info) {
+ ret = info->func(ops, file, fh, arg);
+ } else if (!ops->vidioc_default) {
+ ret = -ENOTTY;
+ } else {
+ ret = ops->vidioc_default(file, fh,
+ vfh ? v4l2_prio_check(vfd->prio, vfh->prio) >= 0 : 0,
+ cmd, arg);
+ }
+
+done:
+ if (dev_debug & (V4L2_DEV_DEBUG_IOCTL | V4L2_DEV_DEBUG_IOCTL_ARG)) {
+ if (!(dev_debug & V4L2_DEV_DEBUG_STREAMING) &&
+ (cmd == VIDIOC_QBUF || cmd == VIDIOC_DQBUF))
+ goto unlock;
+
+ v4l_printk_ioctl(video_device_node_name(vfd), cmd);
+ if (ret < 0)
+ pr_cont(": error %ld", ret);
+ if (!(dev_debug & V4L2_DEV_DEBUG_IOCTL_ARG))
+ pr_cont("\n");
+ else if (_IOC_DIR(cmd) == _IOC_NONE)
+ info->debug(arg, write_only);
+ else {
+ pr_cont(": ");
+ info->debug(arg, write_only);
+ }
+ }
+
+unlock:
+ if (lock)
+ mutex_unlock(lock);
+ if (req_queue_lock)
+ mutex_unlock(req_queue_lock);
+ return ret;
+}
+
+static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
+ void __user **user_ptr, void ***kernel_ptr)
+{
+ int ret = 0;
+
+ switch (cmd) {
+ case VIDIOC_PREPARE_BUF:
+ case VIDIOC_QUERYBUF:
+ case VIDIOC_QBUF:
+ case VIDIOC_DQBUF: {
+ struct v4l2_buffer *buf = parg;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(buf->type) && buf->length > 0) {
+ if (buf->length > VIDEO_MAX_PLANES) {
+ ret = -EINVAL;
+ break;
+ }
+ *user_ptr = (void __user *)buf->m.planes;
+ *kernel_ptr = (void **)&buf->m.planes;
+ *array_size = sizeof(struct v4l2_plane) * buf->length;
+ ret = 1;
+ }
+ break;
+ }
+
+ case VIDIOC_G_EDID:
+ case VIDIOC_S_EDID: {
+ struct v4l2_edid *edid = parg;
+
+ if (edid->blocks) {
+ if (edid->blocks > 256) {
+ ret = -EINVAL;
+ break;
+ }
+ *user_ptr = (void __user *)edid->edid;
+ *kernel_ptr = (void **)&edid->edid;
+ *array_size = edid->blocks * 128;
+ ret = 1;
+ }
+ break;
+ }
+
+ case VIDIOC_S_EXT_CTRLS:
+ case VIDIOC_G_EXT_CTRLS:
+ case VIDIOC_TRY_EXT_CTRLS: {
+ struct v4l2_ext_controls *ctrls = parg;
+
+ if (ctrls->count != 0) {
+ if (ctrls->count > V4L2_CID_MAX_CTRLS) {
+ ret = -EINVAL;
+ break;
+ }
+ *user_ptr = (void __user *)ctrls->controls;
+ *kernel_ptr = (void **)&ctrls->controls;
+ *array_size = sizeof(struct v4l2_ext_control)
+ * ctrls->count;
+ ret = 1;
+ }
+ break;
+ }
+
+ case VIDIOC_SUBDEV_G_ROUTING:
+ case VIDIOC_SUBDEV_S_ROUTING: {
+ struct v4l2_subdev_routing *routing = parg;
+
+ if (routing->num_routes > 256)
+ return -E2BIG;
+
+ *user_ptr = u64_to_user_ptr(routing->routes);
+ *kernel_ptr = (void **)&routing->routes;
+ *array_size = sizeof(struct v4l2_subdev_route)
+ * routing->num_routes;
+ ret = 1;
+ break;
+ }
+ }
+
+ return ret;
+}
+
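+/*
+ * Example: for VIDIOC_G_EXT_CTRLS with count == 2, check_array_args() returns
+ * 1 and points user_ptr at the userspace ctrls->controls array;
+ * video_usercopy() below then bounces 2 * sizeof(struct v4l2_ext_control)
+ * bytes through a kernel buffer and temporarily repoints ctrls->controls at
+ * it for the duration of the call.
+ */
+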
+static unsigned int video_translate_cmd(unsigned int cmd)
+{
+#if !defined(CONFIG_64BIT) && defined(CONFIG_COMPAT_32BIT_TIME)
+ switch (cmd) {
+ case VIDIOC_DQEVENT_TIME32:
+ return VIDIOC_DQEVENT;
+ case VIDIOC_QUERYBUF_TIME32:
+ return VIDIOC_QUERYBUF;
+ case VIDIOC_QBUF_TIME32:
+ return VIDIOC_QBUF;
+ case VIDIOC_DQBUF_TIME32:
+ return VIDIOC_DQBUF;
+ case VIDIOC_PREPARE_BUF_TIME32:
+ return VIDIOC_PREPARE_BUF;
+ }
+#endif
+ if (in_compat_syscall())
+ return v4l2_compat_translate_cmd(cmd);
+
+ return cmd;
+}
+
+static int video_get_user(void __user *arg, void *parg,
+ unsigned int real_cmd, unsigned int cmd,
+ bool *always_copy)
+{
+ unsigned int n = _IOC_SIZE(real_cmd);
+ int err = 0;
+
+ if (!(_IOC_DIR(cmd) & _IOC_WRITE)) {
+ /* read-only ioctl */
+ memset(parg, 0, n);
+ return 0;
+ }
+
+ /*
+ * In some cases, only a few fields are used as input,
+ * i.e. when the app sets "index" and then the driver
+ * fills in the rest of the structure for the thing
+ * with that index. We only need to copy up to the
+ * first non-input field.
+ */
+ if (v4l2_is_known_ioctl(real_cmd)) {
+ u32 flags = v4l2_ioctls[_IOC_NR(real_cmd)].flags;
+
+ if (flags & INFO_FL_CLEAR_MASK)
+ n = (flags & INFO_FL_CLEAR_MASK) >> 16;
+ *always_copy = flags & INFO_FL_ALWAYS_COPY;
+ }
+
+ if (cmd == real_cmd) {
+ if (copy_from_user(parg, (void __user *)arg, n))
+ err = -EFAULT;
+ } else if (in_compat_syscall()) {
+ memset(parg, 0, n);
+ err = v4l2_compat_get_user(arg, parg, cmd);
+ } else {
+ memset(parg, 0, n);
+#if !defined(CONFIG_64BIT) && defined(CONFIG_COMPAT_32BIT_TIME)
+ switch (cmd) {
+ case VIDIOC_QUERYBUF_TIME32:
+ case VIDIOC_QBUF_TIME32:
+ case VIDIOC_DQBUF_TIME32:
+ case VIDIOC_PREPARE_BUF_TIME32: {
+ struct v4l2_buffer_time32 vb32;
+ struct v4l2_buffer *vb = parg;
+
+ if (copy_from_user(&vb32, arg, sizeof(vb32)))
+ return -EFAULT;
+
+ *vb = (struct v4l2_buffer) {
+ .index = vb32.index,
+ .type = vb32.type,
+ .bytesused = vb32.bytesused,
+ .flags = vb32.flags,
+ .field = vb32.field,
+ .timestamp.tv_sec = vb32.timestamp.tv_sec,
+ .timestamp.tv_usec = vb32.timestamp.tv_usec,
+ .timecode = vb32.timecode,
+ .sequence = vb32.sequence,
+ .memory = vb32.memory,
+ .m.userptr = vb32.m.userptr,
+ .length = vb32.length,
+ .request_fd = vb32.request_fd,
+ };
+ break;
+ }
+ }
+#endif
+ }
+
+ /* zero out anything we don't copy from userspace */
+ if (!err && n < _IOC_SIZE(real_cmd))
+ memset((u8 *)parg + n, 0, _IOC_SIZE(real_cmd) - n);
+ return err;
+}
+
+static int video_put_user(void __user *arg, void *parg,
+ unsigned int real_cmd, unsigned int cmd)
+{
+ if (!(_IOC_DIR(cmd) & _IOC_READ))
+ return 0;
+
+ if (cmd == real_cmd) {
+ /* Copy results into user buffer */
+ if (copy_to_user(arg, parg, _IOC_SIZE(cmd)))
+ return -EFAULT;
+ return 0;
+ }
+
+ if (in_compat_syscall())
+ return v4l2_compat_put_user(arg, parg, cmd);
+
+#if !defined(CONFIG_64BIT) && defined(CONFIG_COMPAT_32BIT_TIME)
+ switch (cmd) {
+ case VIDIOC_DQEVENT_TIME32: {
+ struct v4l2_event *ev = parg;
+ struct v4l2_event_time32 ev32;
+
+ memset(&ev32, 0, sizeof(ev32));
+
+ ev32.type = ev->type;
+ ev32.pending = ev->pending;
+ ev32.sequence = ev->sequence;
+ ev32.timestamp.tv_sec = ev->timestamp.tv_sec;
+ ev32.timestamp.tv_nsec = ev->timestamp.tv_nsec;
+ ev32.id = ev->id;
+
+ memcpy(&ev32.u, &ev->u, sizeof(ev->u));
+ memcpy(&ev32.reserved, &ev->reserved, sizeof(ev->reserved));
+
+ if (copy_to_user(arg, &ev32, sizeof(ev32)))
+ return -EFAULT;
+ break;
+ }
+ case VIDIOC_QUERYBUF_TIME32:
+ case VIDIOC_QBUF_TIME32:
+ case VIDIOC_DQBUF_TIME32:
+ case VIDIOC_PREPARE_BUF_TIME32: {
+ struct v4l2_buffer *vb = parg;
+ struct v4l2_buffer_time32 vb32;
+
+ memset(&vb32, 0, sizeof(vb32));
+
+ vb32.index = vb->index;
+ vb32.type = vb->type;
+ vb32.bytesused = vb->bytesused;
+ vb32.flags = vb->flags;
+ vb32.field = vb->field;
+ vb32.timestamp.tv_sec = vb->timestamp.tv_sec;
+ vb32.timestamp.tv_usec = vb->timestamp.tv_usec;
+ vb32.timecode = vb->timecode;
+ vb32.sequence = vb->sequence;
+ vb32.memory = vb->memory;
+ vb32.m.userptr = vb->m.userptr;
+ vb32.length = vb->length;
+ vb32.request_fd = vb->request_fd;
+
+ if (copy_to_user(arg, &vb32, sizeof(vb32)))
+ return -EFAULT;
+ break;
+ }
+ }
+#endif
+
+ return 0;
+}
+
+long
+video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg,
+ v4l2_kioctl func)
+{
+ char sbuf[128];
+ void *mbuf = NULL, *array_buf = NULL;
+ void *parg = (void *)arg;
+ long err = -EINVAL;
+ bool has_array_args;
+ bool always_copy = false;
+ size_t array_size = 0;
+ void __user *user_ptr = NULL;
+ void **kernel_ptr = NULL;
+ unsigned int cmd = video_translate_cmd(orig_cmd);
+ const size_t ioc_size = _IOC_SIZE(cmd);
+
+ /* Copy arguments into temp kernel buffer */
+ if (_IOC_DIR(cmd) != _IOC_NONE) {
+ if (ioc_size <= sizeof(sbuf)) {
+ parg = sbuf;
+ } else {
+ /* too big to allocate from stack */
+ mbuf = kmalloc(ioc_size, GFP_KERNEL);
+ if (!mbuf)
+ return -ENOMEM;
+ parg = mbuf;
+ }
+
+ err = video_get_user((void __user *)arg, parg, cmd,
+ orig_cmd, &always_copy);
+ if (err)
+ goto out;
+ }
+
+ err = check_array_args(cmd, parg, &array_size, &user_ptr, &kernel_ptr);
+ if (err < 0)
+ goto out;
+ has_array_args = err;
+
+ if (has_array_args) {
+ array_buf = kvmalloc(array_size, GFP_KERNEL);
+ err = -ENOMEM;
+ if (array_buf == NULL)
+ goto out;
+ if (in_compat_syscall())
+ err = v4l2_compat_get_array_args(file, array_buf,
+ user_ptr, array_size,
+ orig_cmd, parg);
+ else
+ err = copy_from_user(array_buf, user_ptr, array_size) ?
+ -EFAULT : 0;
+ if (err)
+ goto out;
+ *kernel_ptr = array_buf;
+ }
+
+ /* Handle the ioctl */
+ err = func(file, cmd, parg);
+ if (err == -ENOTTY || err == -ENOIOCTLCMD) {
+ err = -ENOTTY;
+ goto out;
+ }
+
+ if (err == 0) {
+ if (cmd == VIDIOC_DQBUF)
+ trace_v4l2_dqbuf(video_devdata(file)->minor, parg);
+ else if (cmd == VIDIOC_QBUF)
+ trace_v4l2_qbuf(video_devdata(file)->minor, parg);
+ }
+
+ /*
+ * Some ioctls can return an error, but still have valid
+ * results that must be returned.
+ *
+ * FIXME: subdev ioctls are partially handled here and partially in
+ * v4l2-subdev.c, and the 'always_copy' flag can only be set for ioctls
+ * defined here as part of the 'v4l2_ioctls' array. Since
+ * VIDIOC_SUBDEV_G_ROUTING needs to return results to applications even
+ * in case of failure, but is not defined in the 'v4l2_ioctls' array,
+ * an ad-hoc check addresses that below.
+ */
+ if (err < 0 && !always_copy && cmd != VIDIOC_SUBDEV_G_ROUTING)
+ goto out;
+
+ if (has_array_args) {
+ *kernel_ptr = (void __force *)user_ptr;
+ if (in_compat_syscall()) {
+ int put_err;
+
+ put_err = v4l2_compat_put_array_args(file, user_ptr,
+ array_buf,
+ array_size,
+ orig_cmd, parg);
+ if (put_err)
+ err = put_err;
+ } else if (copy_to_user(user_ptr, array_buf, array_size)) {
+ err = -EFAULT;
+ }
+ }
+
+ if (video_put_user((void __user *)arg, parg, cmd, orig_cmd))
+ err = -EFAULT;
+out:
+ kvfree(array_buf);
+ kfree(mbuf);
+ return err;
+}
+
+long video_ioctl2(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, __video_do_ioctl);
+}
+EXPORT_SYMBOL(video_ioctl2);
diff --git a/drivers/media/v4l2-core/v4l2-jpeg.c b/drivers/media/v4l2-core/v4l2-jpeg.c
new file mode 100644
index 0000000000..94435a7b68
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-jpeg.c
@@ -0,0 +1,677 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * V4L2 JPEG header parser helpers.
+ *
+ * Copyright (C) 2019 Pengutronix, Philipp Zabel <kernel@pengutronix.de>
+ *
+ * For reference, see JPEG ITU-T.81 (ISO/IEC 10918-1) [1]
+ *
+ * [1] https://www.w3.org/Graphics/JPEG/itu-t81.pdf
+ */
+
+#include <asm/unaligned.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <media/v4l2-jpeg.h>
+
+MODULE_DESCRIPTION("V4L2 JPEG header parser helpers");
+MODULE_AUTHOR("Philipp Zabel <kernel@pengutronix.de>");
+MODULE_LICENSE("GPL");
+
+/* Table B.1 - Marker code assignments */
+#define SOF0 0xffc0 /* start of frame */
+#define SOF1 0xffc1
+#define SOF2 0xffc2
+#define SOF3 0xffc3
+#define SOF5 0xffc5
+#define SOF7 0xffc7
+#define JPG 0xffc8 /* extensions */
+#define SOF9 0xffc9
+#define SOF11 0xffcb
+#define SOF13 0xffcd
+#define SOF15 0xffcf
+#define DHT 0xffc4 /* huffman table */
+#define DAC 0xffcc /* arithmetic coding conditioning */
+#define RST0 0xffd0 /* restart */
+#define RST7 0xffd7
+#define SOI 0xffd8 /* start of image */
+#define EOI 0xffd9 /* end of image */
+#define SOS 0xffda /* start of stream */
+#define DQT 0xffdb /* quantization table */
+#define DNL 0xffdc /* number of lines */
+#define DRI 0xffdd /* restart interval */
+#define DHP 0xffde /* hierarchical progression */
+#define EXP 0xffdf /* expand reference */
+#define APP0 0xffe0 /* application data */
+#define APP14 0xffee /* application data for colour encoding */
+#define APP15 0xffef
+#define JPG0 0xfff0 /* extensions */
+#define JPG13 0xfffd
+#define COM 0xfffe /* comment */
+#define TEM 0xff01 /* temporary */
+
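+/*
+ * A typical baseline JPEG stream is laid out as
+ *
+ *   SOI [APPn|COM ...] DQT SOF0 DHT [DRI] SOS <entropy-coded data> EOI
+ *
+ * v4l2_jpeg_parse_header() walks these marker segments in order and stops at
+ * SOS, where the entropy-coded segment begins.
+ */
+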
+/**
+ * struct jpeg_stream - JPEG byte stream
+ * @curr: current position in stream
+ * @end: end position, after last byte
+ */
+struct jpeg_stream {
+ u8 *curr;
+ u8 *end;
+};
+
+/* returns a value that fits into u8, or negative error */
+static int jpeg_get_byte(struct jpeg_stream *stream)
+{
+ if (stream->curr >= stream->end)
+ return -EINVAL;
+
+ return *stream->curr++;
+}
+
+/* returns a value that fits into u16, or negative error */
+static int jpeg_get_word_be(struct jpeg_stream *stream)
+{
+ u16 word;
+
+ if (stream->curr + sizeof(__be16) > stream->end)
+ return -EINVAL;
+
+ word = get_unaligned_be16(stream->curr);
+ stream->curr += sizeof(__be16);
+
+ return word;
+}
+
+static int jpeg_skip(struct jpeg_stream *stream, size_t len)
+{
+ if (stream->curr + len > stream->end)
+ return -EINVAL;
+
+ stream->curr += len;
+
+ return 0;
+}
+
+static int jpeg_next_marker(struct jpeg_stream *stream)
+{
+ int byte;
+ u16 marker = 0;
+
+ while ((byte = jpeg_get_byte(stream)) >= 0) {
+ marker = (marker << 8) | byte;
+ /* skip stuffing bytes and reserved (RES) markers */
+ if (marker == TEM || (marker > 0xffbf && marker < 0xffff))
+ return marker;
+ }
+
+ return byte;
+}
+
+/* this does not advance the current position in the stream */
+static int jpeg_reference_segment(struct jpeg_stream *stream,
+ struct v4l2_jpeg_reference *segment)
+{
+ u16 len;
+
+ if (stream->curr + sizeof(__be16) > stream->end)
+ return -EINVAL;
+
+ len = get_unaligned_be16(stream->curr);
+ if (stream->curr + len > stream->end)
+ return -EINVAL;
+
+ segment->start = stream->curr;
+ segment->length = len;
+
+ return 0;
+}
+
+static int v4l2_jpeg_decode_subsampling(u8 nf, u8 h_v)
+{
+ if (nf == 1)
+ return V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY;
+
+ /* no chroma subsampling for 4-component images */
+ if (nf == 4 && h_v != 0x11)
+ return -EINVAL;
+
+ switch (h_v) {
+ case 0x11:
+ return V4L2_JPEG_CHROMA_SUBSAMPLING_444;
+ case 0x21:
+ return V4L2_JPEG_CHROMA_SUBSAMPLING_422;
+ case 0x22:
+ return V4L2_JPEG_CHROMA_SUBSAMPLING_420;
+ case 0x41:
+ return V4L2_JPEG_CHROMA_SUBSAMPLING_411;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int jpeg_parse_frame_header(struct jpeg_stream *stream, u16 sof_marker,
+ struct v4l2_jpeg_frame_header *frame_header)
+{
+ int len = jpeg_get_word_be(stream);
+
+ if (len < 0)
+ return len;
+ /* Lf = 8 + 3 * Nf, Nf >= 1 */
+ if (len < 8 + 3)
+ return -EINVAL;
+
+ if (frame_header) {
+ /* Table B.2 - Frame header parameter sizes and values */
+ int p, y, x, nf;
+ int i;
+
+ p = jpeg_get_byte(stream);
+ if (p < 0)
+ return p;
+ /*
+ * Baseline DCT only supports 8-bit precision.
+ * Extended sequential DCT also supports 12-bit precision.
+ */
+ if (p != 8 && (p != 12 || sof_marker != SOF1))
+ return -EINVAL;
+
+ y = jpeg_get_word_be(stream);
+ if (y < 0)
+ return y;
+ if (y == 0)
+ return -EINVAL;
+
+ x = jpeg_get_word_be(stream);
+ if (x < 0)
+ return x;
+ if (x == 0)
+ return -EINVAL;
+
+ nf = jpeg_get_byte(stream);
+ if (nf < 0)
+ return nf;
+ /*
+ * The spec allows 1 <= Nf <= 255, but we only support up to 4
+ * components.
+ */
+ if (nf < 1 || nf > V4L2_JPEG_MAX_COMPONENTS)
+ return -EINVAL;
+ if (len != 8 + 3 * nf)
+ return -EINVAL;
+
+ frame_header->precision = p;
+ frame_header->height = y;
+ frame_header->width = x;
+ frame_header->num_components = nf;
+
+ for (i = 0; i < nf; i++) {
+ struct v4l2_jpeg_frame_component_spec *component;
+ int c, h_v, tq;
+
+ c = jpeg_get_byte(stream);
+ if (c < 0)
+ return c;
+
+ h_v = jpeg_get_byte(stream);
+ if (h_v < 0)
+ return h_v;
+ if (i == 0) {
+ int subs;
+
+ subs = v4l2_jpeg_decode_subsampling(nf, h_v);
+ if (subs < 0)
+ return subs;
+ frame_header->subsampling = subs;
+ } else if (h_v != 0x11) {
+ /* all chroma sampling factors must be 1 */
+ return -EINVAL;
+ }
+
+ tq = jpeg_get_byte(stream);
+ if (tq < 0)
+ return tq;
+
+ component = &frame_header->component[i];
+ component->component_identifier = c;
+ component->horizontal_sampling_factor =
+ (h_v >> 4) & 0xf;
+ component->vertical_sampling_factor = h_v & 0xf;
+ component->quantization_table_selector = tq;
+ }
+ } else {
+ return jpeg_skip(stream, len - 2);
+ }
+
+ return 0;
+}
+
+static int jpeg_parse_scan_header(struct jpeg_stream *stream,
+ struct v4l2_jpeg_scan_header *scan_header)
+{
+ size_t skip;
+ int len = jpeg_get_word_be(stream);
+
+ if (len < 0)
+ return len;
+ /* Ls = 6 + 2 * Ns, Ns >= 1 */
+ if (len < 6 + 2)
+ return -EINVAL;
+
+ if (scan_header) {
+ int ns;
+ int i;
+
+ ns = jpeg_get_byte(stream);
+ if (ns < 0)
+ return ns;
+ if (ns < 1 || ns > 4 || len != 6 + 2 * ns)
+ return -EINVAL;
+
+ scan_header->num_components = ns;
+
+ for (i = 0; i < ns; i++) {
+ struct v4l2_jpeg_scan_component_spec *component;
+ int cs, td_ta;
+
+ cs = jpeg_get_byte(stream);
+ if (cs < 0)
+ return cs;
+
+ td_ta = jpeg_get_byte(stream);
+ if (td_ta < 0)
+ return td_ta;
+
+ component = &scan_header->component[i];
+ component->component_selector = cs;
+ component->dc_entropy_coding_table_selector =
+ (td_ta >> 4) & 0xf;
+ component->ac_entropy_coding_table_selector =
+ td_ta & 0xf;
+ }
+
+ skip = 3; /* skip Ss, Se, Ah, and Al */
+ } else {
+ skip = len - 2;
+ }
+
+ return jpeg_skip(stream, skip);
+}
+
+/* B.2.4.1 Quantization table-specification syntax */
+static int jpeg_parse_quantization_tables(struct jpeg_stream *stream,
+ u8 precision,
+ struct v4l2_jpeg_reference *tables)
+{
+ int len = jpeg_get_word_be(stream);
+
+ if (len < 0)
+ return len;
+ /* Lq = 2 + n * 65 (for baseline DCT), n >= 1 */
+ if (len < 2 + 65)
+ return -EINVAL;
+
+ len -= 2;
+ while (len >= 65) {
+ u8 pq, tq, *qk;
+ int ret;
+ int pq_tq = jpeg_get_byte(stream);
+
+ if (pq_tq < 0)
+ return pq_tq;
+
+ /* quantization table element precision */
+ pq = (pq_tq >> 4) & 0xf;
+ /*
+ * Only 8-bit Qk values for 8-bit sample precision. Extended
+ * sequential DCT with 12-bit sample precision also supports
+ * 16-bit Qk values.
+ */
+ if (pq != 0 && (pq != 1 || precision != 12))
+ return -EINVAL;
+
+ /* quantization table destination identifier */
+ tq = pq_tq & 0xf;
+ if (tq > 3)
+ return -EINVAL;
+
+ /* quantization table element */
+ qk = stream->curr;
+ ret = jpeg_skip(stream, pq ? 128 : 64);
+ if (ret < 0)
+ return -EINVAL;
+
+ if (tables) {
+ tables[tq].start = qk;
+ tables[tq].length = pq ? 128 : 64;
+ }
+
+ len -= pq ? 129 : 65;
+ }
+
+ return 0;
+}
+
+/* B.2.4.2 Huffman table-specification syntax */
+static int jpeg_parse_huffman_tables(struct jpeg_stream *stream,
+ struct v4l2_jpeg_reference *tables)
+{
+ int mt;
+ int len = jpeg_get_word_be(stream);
+
+ if (len < 0)
+ return len;
+ /* Table B.5 - Huffman table specification parameter sizes and values */
+ if (len < 2 + 17)
+ return -EINVAL;
+
+ for (len -= 2; len >= 17; len -= 17 + mt) {
+ u8 tc, th, *table;
+ int tc_th = jpeg_get_byte(stream);
+ int i, ret;
+
+ if (tc_th < 0)
+ return tc_th;
+
+ /* table class - 0 = DC, 1 = AC */
+ tc = (tc_th >> 4) & 0xf;
+ if (tc > 1)
+ return -EINVAL;
+
+ /* huffman table destination identifier */
+ th = tc_th & 0xf;
+ /* only two Huffman tables for baseline DCT */
+ if (th > 1)
+ return -EINVAL;
+
+ /* BITS - number of Huffman codes with length i */
+ table = stream->curr;
+ mt = 0;
+ for (i = 0; i < 16; i++) {
+ int li;
+
+ li = jpeg_get_byte(stream);
+ if (li < 0)
+ return li;
+
+ mt += li;
+ }
+ /* HUFFVAL - values associated with each Huffman code */
+ ret = jpeg_skip(stream, mt);
+ if (ret < 0)
+ return ret;
+
+ if (tables) {
+ tables[(tc << 1) | th].start = table;
+ tables[(tc << 1) | th].length = stream->curr - table;
+ }
+ }
+
+ return jpeg_skip(stream, len - 2);
+}
+
+/* B.2.4.4 Restart interval definition syntax */
+static int jpeg_parse_restart_interval(struct jpeg_stream *stream,
+ u16 *restart_interval)
+{
+ int len = jpeg_get_word_be(stream);
+ int ri;
+
+ if (len < 0)
+ return len;
+ if (len != 4)
+ return -EINVAL;
+
+ ri = jpeg_get_word_be(stream);
+ if (ri < 0)
+ return ri;
+
+ *restart_interval = ri;
+
+ return 0;
+}
+
+static int jpeg_skip_segment(struct jpeg_stream *stream)
+{
+ int len = jpeg_get_word_be(stream);
+
+ if (len < 0)
+ return len;
+ if (len < 2)
+ return -EINVAL;
+
+ return jpeg_skip(stream, len - 2);
+}
+
+/* Rec. ITU-T T.872 (06/2012) 6.5.3 */
+static int jpeg_parse_app14_data(struct jpeg_stream *stream,
+ enum v4l2_jpeg_app14_tf *tf)
+{
+ int ret;
+ int lp;
+ int skip;
+
+ lp = jpeg_get_word_be(stream);
+ if (lp < 0)
+ return lp;
+
+ /* Check for "Adobe\0" in Ap1..6 */
+ if (stream->curr + 6 > stream->end ||
+ strncmp(stream->curr, "Adobe\0", 6))
+ return jpeg_skip(stream, lp - 2);
+
+ /* get to Ap12 */
+ ret = jpeg_skip(stream, 11);
+ if (ret < 0)
+ return ret;
+
+ ret = jpeg_get_byte(stream);
+ if (ret < 0)
+ return ret;
+
+ *tf = ret;
+
+ /* skip the rest of the segment; this at least ensures it is complete */
+ skip = lp - 2 - 11 - 1;
+ return jpeg_skip(stream, skip);
+}
+
+/**
+ * v4l2_jpeg_parse_header - locate marker segments and optionally parse headers
+ * @buf: address of the JPEG buffer, should start with a SOI marker
+ * @len: length of the JPEG buffer
+ * @out: returns marker segment positions and optionally parsed headers
+ *
+ * The out->scan pointer must be initialized to NULL or point to a valid
+ * v4l2_jpeg_scan_header structure. The out->huffman_tables and
+ * out->quantization_tables pointers must be initialized to NULL or point to a
+ * valid array of 4 v4l2_jpeg_reference structures each.
+ *
+ * Returns 0 or negative error if parsing failed.
+ */
+int v4l2_jpeg_parse_header(void *buf, size_t len, struct v4l2_jpeg_header *out)
+{
+ struct jpeg_stream stream;
+ int marker;
+ int ret = 0;
+
+ stream.curr = buf;
+ stream.end = stream.curr + len;
+
+ out->num_dht = 0;
+ out->num_dqt = 0;
+
+ /* the first bytes must be SOI, B.2.1 High-level syntax */
+ if (jpeg_get_word_be(&stream) != SOI)
+ return -EINVAL;
+
+ /* init value to signal if this marker is not present */
+ out->app14_tf = V4L2_JPEG_APP14_TF_UNKNOWN;
+
+ /* loop through marker segments */
+ while ((marker = jpeg_next_marker(&stream)) >= 0) {
+ switch (marker) {
+ /* baseline DCT, extended sequential DCT */
+ case SOF0 ... SOF1:
+ ret = jpeg_reference_segment(&stream, &out->sof);
+ if (ret < 0)
+ return ret;
+ ret = jpeg_parse_frame_header(&stream, marker,
+ &out->frame);
+ break;
+ /* progressive, lossless */
+ case SOF2 ... SOF3:
+ /* differential coding */
+ case SOF5 ... SOF7:
+ /* arithmetic coding */
+ case SOF9 ... SOF11:
+ case SOF13 ... SOF15:
+ case DAC:
+ case TEM:
+ return -EINVAL;
+
+ case DHT:
+ ret = jpeg_reference_segment(&stream,
+ &out->dht[out->num_dht++ % 4]);
+ if (ret < 0)
+ return ret;
+ if (!out->huffman_tables) {
+ ret = jpeg_skip_segment(&stream);
+ break;
+ }
+ ret = jpeg_parse_huffman_tables(&stream,
+ out->huffman_tables);
+ break;
+ case DQT:
+ ret = jpeg_reference_segment(&stream,
+ &out->dqt[out->num_dqt++ % 4]);
+ if (ret < 0)
+ return ret;
+ if (!out->quantization_tables) {
+ ret = jpeg_skip_segment(&stream);
+ break;
+ }
+ ret = jpeg_parse_quantization_tables(&stream,
+ out->frame.precision,
+ out->quantization_tables);
+ break;
+ case DRI:
+ ret = jpeg_parse_restart_interval(&stream,
+ &out->restart_interval);
+ break;
+ case APP14:
+ ret = jpeg_parse_app14_data(&stream,
+ &out->app14_tf);
+ break;
+ case SOS:
+ ret = jpeg_reference_segment(&stream, &out->sos);
+ if (ret < 0)
+ return ret;
+ ret = jpeg_parse_scan_header(&stream, out->scan);
+ /*
+ * stop parsing, the scan header marks the beginning of
+ * the entropy coded segment
+ */
+ out->ecs_offset = stream.curr - (u8 *)buf;
+ return ret;
+
+ /* markers without parameters */
+ case RST0 ... RST7: /* restart */
+ case SOI: /* start of image */
+ case EOI: /* end of image */
+ break;
+
+ /* skip unknown or unsupported marker segments */
+ default:
+ ret = jpeg_skip_segment(&stream);
+ break;
+ }
+ if (ret < 0)
+ return ret;
+ }
+
+ return marker;
+}
+EXPORT_SYMBOL_GPL(v4l2_jpeg_parse_header);
+
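+/*
+ * Example (hypothetical driver code; names outside the v4l2_jpeg_* API are
+ * illustrative): parse a bitstream buffer before programming a decoder.
+ *
+ *     struct v4l2_jpeg_scan_header scan_header;
+ *     struct v4l2_jpeg_header header = { .scan = &scan_header };
+ *
+ *     if (!v4l2_jpeg_parse_header(vaddr, bytesused, &header)) {
+ *             width = header.frame.width;
+ *             height = header.frame.height;
+ *             ecs = (u8 *)vaddr + header.ecs_offset;
+ *     }
+ */
+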
+/**
+ * v4l2_jpeg_parse_frame_header - parse frame header
+ * @buf: address of the frame header, after the SOF0 marker
+ * @len: length of the frame header
+ * @frame_header: returns the parsed frame header
+ *
+ * Returns 0 or negative error if parsing failed.
+ */
+int v4l2_jpeg_parse_frame_header(void *buf, size_t len,
+ struct v4l2_jpeg_frame_header *frame_header)
+{
+ struct jpeg_stream stream;
+
+ stream.curr = buf;
+ stream.end = stream.curr + len;
+ return jpeg_parse_frame_header(&stream, SOF0, frame_header);
+}
+EXPORT_SYMBOL_GPL(v4l2_jpeg_parse_frame_header);
+
+/**
+ * v4l2_jpeg_parse_scan_header - parse scan header
+ * @buf: address of the scan header, after the SOS marker
+ * @len: length of the scan header
+ * @scan_header: returns the parsed scan header
+ *
+ * Returns 0 or negative error if parsing failed.
+ */
+int v4l2_jpeg_parse_scan_header(void *buf, size_t len,
+ struct v4l2_jpeg_scan_header *scan_header)
+{
+ struct jpeg_stream stream;
+
+ stream.curr = buf;
+ stream.end = stream.curr + len;
+ return jpeg_parse_scan_header(&stream, scan_header);
+}
+EXPORT_SYMBOL_GPL(v4l2_jpeg_parse_scan_header);
+
+/**
+ * v4l2_jpeg_parse_quantization_tables - parse quantization tables segment
+ * @buf: address of the quantization table segment, after the DQT marker
+ * @len: length of the quantization table segment
+ * @precision: sample precision (P) in bits per component
+ * @q_tables: returns four references into the buffer for the
+ * four possible quantization table destinations
+ *
+ * Returns 0 or negative error if parsing failed.
+ */
+int v4l2_jpeg_parse_quantization_tables(void *buf, size_t len, u8 precision,
+ struct v4l2_jpeg_reference *q_tables)
+{
+ struct jpeg_stream stream;
+
+ stream.curr = buf;
+ stream.end = stream.curr + len;
+ return jpeg_parse_quantization_tables(&stream, precision, q_tables);
+}
+EXPORT_SYMBOL_GPL(v4l2_jpeg_parse_quantization_tables);
+
+/**
+ * v4l2_jpeg_parse_huffman_tables - parse huffman tables segment
+ * @buf: address of the Huffman table segment, after the DHT marker
+ * @len: length of the Huffman table segment
+ * @huffman_tables: returns four references into the buffer for the
+ * four possible Huffman table destinations, in
+ * the order DC0, DC1, AC0, AC1
+ *
+ * Returns 0 or negative error if parsing failed.
+ */
+int v4l2_jpeg_parse_huffman_tables(void *buf, size_t len,
+ struct v4l2_jpeg_reference *huffman_tables)
+{
+ struct jpeg_stream stream;
+
+ stream.curr = buf;
+ stream.end = stream.curr + len;
+ return jpeg_parse_huffman_tables(&stream, huffman_tables);
+}
+EXPORT_SYMBOL_GPL(v4l2_jpeg_parse_huffman_tables);
diff --git a/drivers/media/v4l2-core/v4l2-mc.c b/drivers/media/v4l2-core/v4l2-mc.c
new file mode 100644
index 0000000000..52d349e72b
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-mc.c
@@ -0,0 +1,599 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+/*
+ * Media Controller ancillary functions
+ *
+ * Copyright (c) 2016 Mauro Carvalho Chehab <mchehab@kernel.org>
+ * Copyright (C) 2016 Shuah Khan <shuahkh@osg.samsung.com>
+ * Copyright (C) 2006-2010 Nokia Corporation
+ * Copyright (c) 2016 Intel Corporation.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/usb.h>
+#include <media/media-device.h>
+#include <media/media-entity.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-core.h>
+
+int v4l2_mc_create_media_graph(struct media_device *mdev)
+{
+ struct media_entity *entity;
+ struct media_entity *if_vid = NULL, *if_aud = NULL;
+ struct media_entity *tuner = NULL, *decoder = NULL;
+ struct media_entity *io_v4l = NULL, *io_vbi = NULL, *io_swradio = NULL;
+ bool is_webcam = false;
+ u32 flags;
+ int ret, pad_sink, pad_source;
+
+ if (!mdev)
+ return 0;
+
+ media_device_for_each_entity(entity, mdev) {
+ switch (entity->function) {
+ case MEDIA_ENT_F_IF_VID_DECODER:
+ if_vid = entity;
+ break;
+ case MEDIA_ENT_F_IF_AUD_DECODER:
+ if_aud = entity;
+ break;
+ case MEDIA_ENT_F_TUNER:
+ tuner = entity;
+ break;
+ case MEDIA_ENT_F_ATV_DECODER:
+ decoder = entity;
+ break;
+ case MEDIA_ENT_F_IO_V4L:
+ io_v4l = entity;
+ break;
+ case MEDIA_ENT_F_IO_VBI:
+ io_vbi = entity;
+ break;
+ case MEDIA_ENT_F_IO_SWRADIO:
+ io_swradio = entity;
+ break;
+ case MEDIA_ENT_F_CAM_SENSOR:
+ is_webcam = true;
+ break;
+ }
+ }
+
+ /* It should have at least one I/O entity */
+ if (!io_v4l && !io_vbi && !io_swradio) {
+ dev_warn(mdev->dev, "Didn't find any I/O entity\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Here, webcams are modelled in a very simple way: the sensor is
+ * connected directly to the I/O entity. All dirty details, like
+ * scaler and crop hardware, are hidden. While such a mapping is not
+ * enough for MC-centric hardware, it is enough for V4L2-interface-
+ * centric consumer hardware.
+ */
+ if (is_webcam) {
+ if (!io_v4l) {
+ dev_warn(mdev->dev, "Didn't find a MEDIA_ENT_F_IO_V4L\n");
+ return -EINVAL;
+ }
+
+ media_device_for_each_entity(entity, mdev) {
+ if (entity->function != MEDIA_ENT_F_CAM_SENSOR)
+ continue;
+ ret = media_create_pad_link(entity, 0,
+ io_v4l, 0,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ dev_warn(mdev->dev, "Failed to create a sensor link\n");
+ return ret;
+ }
+ }
+ if (!decoder)
+ return 0;
+ }
+
+ /* The device isn't a webcam, so it should have a decoder */
+ if (!decoder) {
+ dev_warn(mdev->dev, "Decoder not found\n");
+ return -EINVAL;
+ }
+
+ /* Link the tuner and IF video output pads */
+ if (tuner) {
+ if (if_vid) {
+ pad_source = media_get_pad_index(tuner,
+ MEDIA_PAD_FL_SOURCE,
+ PAD_SIGNAL_ANALOG);
+ pad_sink = media_get_pad_index(if_vid,
+ MEDIA_PAD_FL_SINK,
+ PAD_SIGNAL_ANALOG);
+ if (pad_source < 0 || pad_sink < 0) {
+ dev_warn(mdev->dev, "couldn't get tuner and/or PLL pad(s): (%d, %d)\n",
+ pad_source, pad_sink);
+ return -EINVAL;
+ }
+ ret = media_create_pad_link(tuner, pad_source,
+ if_vid, pad_sink,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ dev_warn(mdev->dev, "couldn't create tuner->PLL link\n");
+ return ret;
+ }
+
+ pad_source = media_get_pad_index(if_vid,
+ MEDIA_PAD_FL_SOURCE,
+ PAD_SIGNAL_ANALOG);
+ pad_sink = media_get_pad_index(decoder,
+ MEDIA_PAD_FL_SINK,
+ PAD_SIGNAL_ANALOG);
+ if (pad_source < 0 || pad_sink < 0) {
+ dev_warn(mdev->dev, "couldn't get PLL and/or decoder pad(s): (%d, %d)\n",
+ pad_source, pad_sink);
+ return -EINVAL;
+ }
+ ret = media_create_pad_link(if_vid, pad_source,
+ decoder, pad_sink,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ dev_warn(mdev->dev, "couldn't link PLL to decoder\n");
+ return ret;
+ }
+ } else {
+ pad_source = media_get_pad_index(tuner,
+ MEDIA_PAD_FL_SOURCE,
+ PAD_SIGNAL_ANALOG);
+ pad_sink = media_get_pad_index(decoder,
+ MEDIA_PAD_FL_SINK,
+ PAD_SIGNAL_ANALOG);
+ if (pad_source < 0 || pad_sink < 0) {
+ dev_warn(mdev->dev, "couldn't get tuner and/or decoder pad(s): (%d, %d)\n",
+ pad_source, pad_sink);
+ return -EINVAL;
+ }
+ ret = media_create_pad_link(tuner, pad_source,
+ decoder, pad_sink,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ return ret;
+ }
+
+ if (if_aud) {
+ pad_source = media_get_pad_index(tuner,
+ MEDIA_PAD_FL_SOURCE,
+ PAD_SIGNAL_AUDIO);
+ pad_sink = media_get_pad_index(if_aud,
+ MEDIA_PAD_FL_SINK,
+ PAD_SIGNAL_AUDIO);
+ if (pad_source < 0 || pad_sink < 0) {
+ dev_warn(mdev->dev, "couldn't get tuner and/or decoder pad(s) for audio: (%d, %d)\n",
+ pad_source, pad_sink);
+ return -EINVAL;
+ }
+ ret = media_create_pad_link(tuner, pad_source,
+ if_aud, pad_sink,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ dev_warn(mdev->dev, "couldn't link tuner->audio PLL\n");
+ return ret;
+ }
+ } else {
+ if_aud = tuner;
+ }
+ }
+
+ /* Create demod to V4L, VBI and SDR radio links */
+ if (io_v4l) {
+ pad_source = media_get_pad_index(decoder, MEDIA_PAD_FL_SOURCE,
+ PAD_SIGNAL_DV);
+ if (pad_source < 0) {
+ dev_warn(mdev->dev, "couldn't get decoder output pad for V4L I/O\n");
+ return -EINVAL;
+ }
+ ret = media_create_pad_link(decoder, pad_source,
+ io_v4l, 0,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ dev_warn(mdev->dev, "couldn't link decoder output to V4L I/O\n");
+ return ret;
+ }
+ }
+
+ if (io_swradio) {
+ pad_source = media_get_pad_index(decoder, MEDIA_PAD_FL_SOURCE,
+ PAD_SIGNAL_DV);
+ if (pad_source < 0) {
+ dev_warn(mdev->dev, "couldn't get decoder output pad for SDR\n");
+ return -EINVAL;
+ }
+ ret = media_create_pad_link(decoder, pad_source,
+ io_swradio, 0,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ dev_warn(mdev->dev, "couldn't link decoder output to SDR\n");
+ return ret;
+ }
+ }
+
+ if (io_vbi) {
+ pad_source = media_get_pad_index(decoder, MEDIA_PAD_FL_SOURCE,
+ PAD_SIGNAL_DV);
+ if (pad_source < 0) {
+ dev_warn(mdev->dev, "couldn't get decoder output pad for VBI\n");
+ return -EINVAL;
+ }
+ ret = media_create_pad_link(decoder, pad_source,
+ io_vbi, 0,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ dev_warn(mdev->dev, "couldn't link decoder output to VBI\n");
+ return ret;
+ }
+ }
+
+ /* Create links for the media connectors */
+ flags = MEDIA_LNK_FL_ENABLED;
+ media_device_for_each_entity(entity, mdev) {
+ switch (entity->function) {
+ case MEDIA_ENT_F_CONN_RF:
+ if (!tuner)
+ continue;
+ pad_sink = media_get_pad_index(tuner, MEDIA_PAD_FL_SINK,
+ PAD_SIGNAL_ANALOG);
+ if (pad_sink < 0) {
+ dev_warn(mdev->dev, "couldn't get tuner analog pad sink\n");
+ return -EINVAL;
+ }
+ ret = media_create_pad_link(entity, 0, tuner,
+ pad_sink,
+ flags);
+ break;
+ case MEDIA_ENT_F_CONN_SVIDEO:
+ case MEDIA_ENT_F_CONN_COMPOSITE:
+ pad_sink = media_get_pad_index(decoder,
+ MEDIA_PAD_FL_SINK,
+ PAD_SIGNAL_ANALOG);
+ if (pad_sink < 0) {
+ dev_warn(mdev->dev, "couldn't get decoder analog pad sink\n");
+ return -EINVAL;
+ }
+ ret = media_create_pad_link(entity, 0, decoder,
+ pad_sink,
+ flags);
+ break;
+ default:
+ continue;
+ }
+ if (ret)
+ return ret;
+
+ flags = 0;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_mc_create_media_graph);
+
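+/*
+ * Example (hypothetical driver code): a bridge driver would typically call
+ * this once all entities have been registered, near the end of probe:
+ *
+ *     ret = v4l2_mc_create_media_graph(dev->media_dev);
+ *     if (ret)
+ *             goto err_unregister_entities;
+ */
+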
+int v4l_enable_media_source(struct video_device *vdev)
+{
+ struct media_device *mdev = vdev->entity.graph_obj.mdev;
+ int ret = 0, err;
+
+ if (!mdev)
+ return 0;
+
+ mutex_lock(&mdev->graph_mutex);
+ if (!mdev->enable_source)
+ goto end;
+ err = mdev->enable_source(&vdev->entity, &vdev->pipe);
+ if (err)
+ ret = -EBUSY;
+end:
+ mutex_unlock(&mdev->graph_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l_enable_media_source);
+
+void v4l_disable_media_source(struct video_device *vdev)
+{
+ struct media_device *mdev = vdev->entity.graph_obj.mdev;
+
+ if (mdev) {
+ mutex_lock(&mdev->graph_mutex);
+ if (mdev->disable_source)
+ mdev->disable_source(&vdev->entity);
+ mutex_unlock(&mdev->graph_mutex);
+ }
+}
+EXPORT_SYMBOL_GPL(v4l_disable_media_source);
+
+int v4l_vb2q_enable_media_source(struct vb2_queue *q)
+{
+ struct v4l2_fh *fh = q->owner;
+
+ if (fh && fh->vdev)
+ return v4l_enable_media_source(fh->vdev);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l_vb2q_enable_media_source);
+
+int v4l2_create_fwnode_links_to_pad(struct v4l2_subdev *src_sd,
+ struct media_pad *sink, u32 flags)
+{
+ struct fwnode_handle *endpoint;
+
+ if (!(sink->flags & MEDIA_PAD_FL_SINK))
+ return -EINVAL;
+
+ fwnode_graph_for_each_endpoint(dev_fwnode(src_sd->dev), endpoint) {
+ struct fwnode_handle *remote_ep;
+ int src_idx, sink_idx, ret;
+ struct media_pad *src;
+
+ src_idx = media_entity_get_fwnode_pad(&src_sd->entity,
+ endpoint,
+ MEDIA_PAD_FL_SOURCE);
+ if (src_idx < 0)
+ continue;
+
+ remote_ep = fwnode_graph_get_remote_endpoint(endpoint);
+ if (!remote_ep)
+ continue;
+
+ /*
+ * ask the sink to verify it owns the remote endpoint,
+ * and translate to a sink pad.
+ */
+ sink_idx = media_entity_get_fwnode_pad(sink->entity,
+ remote_ep,
+ MEDIA_PAD_FL_SINK);
+ fwnode_handle_put(remote_ep);
+
+ if (sink_idx < 0 || sink_idx != sink->index)
+ continue;
+
+ /*
+ * The source endpoint corresponds to one of its source pads,
+ * the source endpoint connects to an endpoint at the sink
+ * entity, and the sink endpoint corresponds to the requested
+ * sink pad, so we have found an endpoint connection that
+ * works; create the media link for it.
+ */
+
+ src = &src_sd->entity.pads[src_idx];
+
+ /* skip if link already exists */
+ if (media_entity_find_link(src, sink))
+ continue;
+
+ dev_dbg(src_sd->dev, "creating link %s:%d -> %s:%d\n",
+ src_sd->entity.name, src_idx,
+ sink->entity->name, sink_idx);
+
+ ret = media_create_pad_link(&src_sd->entity, src_idx,
+ sink->entity, sink_idx, flags);
+ if (ret) {
+ dev_err(src_sd->dev,
+ "link %s:%d -> %s:%d failed with %d\n",
+ src_sd->entity.name, src_idx,
+ sink->entity->name, sink_idx, ret);
+
+ fwnode_handle_put(endpoint);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_create_fwnode_links_to_pad);
+
+int v4l2_create_fwnode_links(struct v4l2_subdev *src_sd,
+ struct v4l2_subdev *sink_sd)
+{
+ unsigned int i;
+
+ for (i = 0; i < sink_sd->entity.num_pads; i++) {
+ struct media_pad *pad = &sink_sd->entity.pads[i];
+ int ret;
+
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ continue;
+
+ ret = v4l2_create_fwnode_links_to_pad(src_sd, pad, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_create_fwnode_links);
+
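+/*
+ * Example (hypothetical driver code): a CSI receiver that has just bound a
+ * sensor subdev could create all fwnode-described links towards itself with:
+ *
+ *     ret = v4l2_create_fwnode_links(sensor_sd, &csi->sd);
+ */
+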
+/* -----------------------------------------------------------------------------
+ * Pipeline power management
+ *
+ * Entities must be powered up when part of a pipeline that contains at least
+ * one open video device node.
+ *
+ * To achieve this, the entity use_count field is used to track the number of
+ * users. For entities corresponding to video device nodes, the use_count field
+ * stores the use count of the node. For entities corresponding to subdevs, the
+ * use_count field stores the total number of users of all video device nodes
+ * in the pipeline.
+ *
+ * The v4l2_pipeline_pm_{get, put}() functions must be called in the open() and
+ * close() handlers of video device nodes. They increment or decrement the use
+ * count of all subdev entities in the pipeline.
+ *
+ * To react to link management on powered pipelines, the link setup notification
+ * callback updates the use count of all entities on the source and sink sides
+ * of the link.
+ */
+
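+/*
+ * Example (hypothetical driver code): pairing the calls in a video device
+ * node's file operations.
+ *
+ *     static int my_open(struct file *file)
+ *     {
+ *             struct video_device *vdev = video_devdata(file);
+ *
+ *             return v4l2_pipeline_pm_get(&vdev->entity);
+ *     }
+ *
+ *     static int my_release(struct file *file)
+ *     {
+ *             struct video_device *vdev = video_devdata(file);
+ *
+ *             v4l2_pipeline_pm_put(&vdev->entity);
+ *             return 0;
+ *     }
+ */
+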
+/*
+ * pipeline_pm_use_count - Count the number of users of a pipeline
+ * @entity: The entity
+ * @graph: Media graph walk object
+ *
+ * Return the total number of users of all video device nodes in the pipeline.
+ */
+static int pipeline_pm_use_count(struct media_entity *entity,
+ struct media_graph *graph)
+{
+ int use = 0;
+
+ media_graph_walk_start(graph, entity);
+
+ while ((entity = media_graph_walk_next(graph))) {
+ if (is_media_entity_v4l2_video_device(entity))
+ use += entity->use_count;
+ }
+
+ return use;
+}
+
+/*
+ * pipeline_pm_power_one - Apply power change to an entity
+ * @entity: The entity
+ * @change: Use count change
+ *
+ * Change the entity use count by @change. If the entity is a subdev, update
+ * its power state by calling the core::s_power operation when the use count
+ * goes from 0 to != 0 or from != 0 to 0.
+ *
+ * Return 0 on success or a negative error code on failure.
+ */
+static int pipeline_pm_power_one(struct media_entity *entity, int change)
+{
+ struct v4l2_subdev *subdev;
+ int ret;
+
+ subdev = is_media_entity_v4l2_subdev(entity)
+ ? media_entity_to_v4l2_subdev(entity) : NULL;
+
+ if (entity->use_count == 0 && change > 0 && subdev != NULL) {
+ ret = v4l2_subdev_call(subdev, core, s_power, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+ }
+
+ entity->use_count += change;
+ WARN_ON(entity->use_count < 0);
+
+ if (entity->use_count == 0 && change < 0 && subdev != NULL)
+ v4l2_subdev_call(subdev, core, s_power, 0);
+
+ return 0;
+}
+
+/*
+ * pipeline_pm_power - Apply power change to all entities in a pipeline
+ * @entity: The entity
+ * @change: Use count change
+ * @graph: Media graph walk object
+ *
+ * Walk the pipeline to update the use count and the power state of all non-node
+ * entities.
+ *
+ * Return 0 on success or a negative error code on failure.
+ */
+static int pipeline_pm_power(struct media_entity *entity, int change,
+ struct media_graph *graph)
+{
+ struct media_entity *first = entity;
+ int ret = 0;
+
+ if (!change)
+ return 0;
+
+ media_graph_walk_start(graph, entity);
+
+ while (!ret && (entity = media_graph_walk_next(graph)))
+ if (is_media_entity_v4l2_subdev(entity))
+ ret = pipeline_pm_power_one(entity, change);
+
+ if (!ret)
+ return ret;
+
+ media_graph_walk_start(graph, first);
+
+ while ((first = media_graph_walk_next(graph))
+ && first != entity)
+ if (is_media_entity_v4l2_subdev(first))
+ pipeline_pm_power_one(first, -change);
+
+ return ret;
+}
+
+static int v4l2_pipeline_pm_use(struct media_entity *entity, unsigned int use)
+{
+ struct media_device *mdev = entity->graph_obj.mdev;
+ int change = use ? 1 : -1;
+ int ret;
+
+ mutex_lock(&mdev->graph_mutex);
+
+ /* Apply use count to node. */
+ entity->use_count += change;
+ WARN_ON(entity->use_count < 0);
+
+ /* Apply power change to connected non-nodes. */
+ ret = pipeline_pm_power(entity, change, &mdev->pm_count_walk);
+ if (ret < 0)
+ entity->use_count -= change;
+
+ mutex_unlock(&mdev->graph_mutex);
+
+ return ret;
+}
+
+int v4l2_pipeline_pm_get(struct media_entity *entity)
+{
+ return v4l2_pipeline_pm_use(entity, 1);
+}
+EXPORT_SYMBOL_GPL(v4l2_pipeline_pm_get);
+
+void v4l2_pipeline_pm_put(struct media_entity *entity)
+{
+ /* Powering off entities shouldn't fail. */
+ WARN_ON(v4l2_pipeline_pm_use(entity, 0));
+}
+EXPORT_SYMBOL_GPL(v4l2_pipeline_pm_put);
+
+int v4l2_pipeline_link_notify(struct media_link *link, u32 flags,
+ unsigned int notification)
+{
+ struct media_graph *graph = &link->graph_obj.mdev->pm_count_walk;
+ struct media_entity *source = link->source->entity;
+ struct media_entity *sink = link->sink->entity;
+ int source_use;
+ int sink_use;
+ int ret = 0;
+
+ source_use = pipeline_pm_use_count(source, graph);
+ sink_use = pipeline_pm_use_count(sink, graph);
+
+ if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
+ !(flags & MEDIA_LNK_FL_ENABLED)) {
+ /* Powering off entities is assumed to never fail. */
+ pipeline_pm_power(source, -sink_use, graph);
+ pipeline_pm_power(sink, -source_use, graph);
+ return 0;
+ }
+
+ if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH &&
+ (flags & MEDIA_LNK_FL_ENABLED)) {
+ ret = pipeline_pm_power(source, sink_use, graph);
+ if (ret < 0)
+ return ret;
+
+ ret = pipeline_pm_power(sink, source_use, graph);
+ if (ret < 0)
+ pipeline_pm_power(source, -sink_use, graph);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_pipeline_link_notify);
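+
+/*
+ * Usage sketch (illustrative; "xxx" is a hypothetical driver): this
+ * helper is meant to be wired into the media device operations so that
+ * enabling or disabling a link propagates the use counts computed above:
+ *
+ *	static const struct media_device_ops xxx_media_ops = {
+ *		.link_notify = v4l2_pipeline_link_notify,
+ *	};
+ *
+ *	mdev->ops = &xxx_media_ops;
+ */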
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
new file mode 100644
index 0000000000..0cc30397fb
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -0,0 +1,1619 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Memory-to-memory device framework for Video for Linux 2 and vb2.
+ *
+ * Helper functions for devices that use vb2 buffers for both their
+ * source and destination.
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <pawel@osciak.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ */
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <media/media-device.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+
+MODULE_DESCRIPTION("Mem to mem device framework for vb2");
+MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
+MODULE_LICENSE("GPL");
+
+static bool debug;
+module_param(debug, bool, 0644);
+
+#define dprintk(fmt, arg...) \
+ do { \
+ if (debug) \
+ printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
+ } while (0)
+
+
+/* Instance is already queued on the job_queue */
+#define TRANS_QUEUED (1 << 0)
+/* Instance is currently running in hardware */
+#define TRANS_RUNNING (1 << 1)
+/* Instance is currently aborting */
+#define TRANS_ABORT (1 << 2)
+
+
+/* The job queue is not running new jobs */
+#define QUEUE_PAUSED (1 << 0)
+
+
+/*
+ * Offset base for buffers on the destination queue - used to distinguish
+ * between source and destination buffers when mmapping - they receive the
+ * same offsets but for different queues.
+ */
+#define DST_QUEUE_OFF_BASE (1 << 30)
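+
+/*
+ * Worked example (illustrative): with DST_QUEUE_OFF_BASE = 1 GiB
+ * (0x40000000), a CAPTURE buffer whose vb2 mem_offset is 0x1000 is
+ * reported to userspace as 0x40001000. v4l2_m2m_mmap() later subtracts
+ * the base again to pick the right queue, so both queues can use the
+ * same low offset range internally.
+ */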
+
+enum v4l2_m2m_entity_type {
+ MEM2MEM_ENT_TYPE_SOURCE,
+ MEM2MEM_ENT_TYPE_SINK,
+ MEM2MEM_ENT_TYPE_PROC
+};
+
+static const char * const m2m_entity_name[] = {
+ "source",
+ "sink",
+ "proc"
+};
+
+/**
+ * struct v4l2_m2m_dev - per-device context
+ * @source: &struct media_entity pointer with the source entity
+ * Used only when the M2M device is registered via
+ * v4l2_m2m_register_media_controller().
+ * @source_pad: &struct media_pad with the source pad.
+ * Used only when the M2M device is registered via
+ * v4l2_m2m_register_media_controller().
+ * @sink: &struct media_entity pointer with the sink entity
+ * Used only when the M2M device is registered via
+ * v4l2_m2m_register_media_controller().
+ * @sink_pad: &struct media_pad with the sink pad.
+ * Used only when the M2M device is registered via
+ * v4l2_m2m_register_media_controller().
+ * @proc: &struct media_entity pointer with the M2M device itself.
+ * @proc_pads: &struct media_pad with the @proc pads.
+ * Used only when the M2M device is registered via
+ *		v4l2_m2m_register_media_controller().
+ * @intf_devnode: &struct media_intf devnode pointer with the interface
+ *		which controls the M2M device.
+ * @curr_ctx: currently running instance
+ * @job_queue: instances queued to run
+ * @job_spinlock: protects job_queue
+ * @job_work: worker to run queued jobs.
+ * @job_queue_flags: flags of the queue status, %QUEUE_PAUSED.
+ * @m2m_ops: driver callbacks
+ */
+struct v4l2_m2m_dev {
+ struct v4l2_m2m_ctx *curr_ctx;
+#ifdef CONFIG_MEDIA_CONTROLLER
+ struct media_entity *source;
+ struct media_pad source_pad;
+ struct media_entity sink;
+ struct media_pad sink_pad;
+ struct media_entity proc;
+ struct media_pad proc_pads[2];
+ struct media_intf_devnode *intf_devnode;
+#endif
+
+ struct list_head job_queue;
+ spinlock_t job_spinlock;
+ struct work_struct job_work;
+ unsigned long job_queue_flags;
+
+ const struct v4l2_m2m_ops *m2m_ops;
+};
+
+static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
+ enum v4l2_buf_type type)
+{
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return &m2m_ctx->out_q_ctx;
+ else
+ return &m2m_ctx->cap_q_ctx;
+}
+
+struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
+ enum v4l2_buf_type type)
+{
+ struct v4l2_m2m_queue_ctx *q_ctx;
+
+ q_ctx = get_queue_ctx(m2m_ctx, type);
+ if (!q_ctx)
+ return NULL;
+
+ return &q_ctx->q;
+}
+EXPORT_SYMBOL(v4l2_m2m_get_vq);
+
+struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
+{
+ struct v4l2_m2m_buffer *b;
+ unsigned long flags;
+
+ spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
+
+ if (list_empty(&q_ctx->rdy_queue)) {
+ spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
+ return NULL;
+ }
+
+ b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
+ spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
+ return &b->vb;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);
+
+struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx)
+{
+ struct v4l2_m2m_buffer *b;
+ unsigned long flags;
+
+ spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
+
+ if (list_empty(&q_ctx->rdy_queue)) {
+ spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
+ return NULL;
+ }
+
+ b = list_last_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
+ spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
+ return &b->vb;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_last_buf);
+
+struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
+{
+ struct v4l2_m2m_buffer *b;
+ unsigned long flags;
+
+ spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
+ if (list_empty(&q_ctx->rdy_queue)) {
+ spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
+ return NULL;
+ }
+ b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
+ list_del(&b->list);
+ q_ctx->num_rdy--;
+ spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
+
+ return &b->vb;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);
+
+void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
+ struct vb2_v4l2_buffer *vbuf)
+{
+ struct v4l2_m2m_buffer *b;
+ unsigned long flags;
+
+ spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
+ b = container_of(vbuf, struct v4l2_m2m_buffer, vb);
+ list_del(&b->list);
+ q_ctx->num_rdy--;
+ spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_buf);
+
+struct vb2_v4l2_buffer *
+v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx)
+{
+ struct v4l2_m2m_buffer *b, *tmp;
+ struct vb2_v4l2_buffer *ret = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
+ list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) {
+ if (b->vb.vb2_buf.index == idx) {
+ list_del(&b->list);
+ q_ctx->num_rdy--;
+ ret = &b->vb;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_idx);
+
+/*
+ * Scheduling handlers
+ */
+
+void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
+{
+ unsigned long flags;
+ void *ret = NULL;
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ if (m2m_dev->curr_ctx)
+ ret = m2m_dev->curr_ctx->priv;
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);
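+
+/*
+ * Usage sketch (illustrative; "xxx" is a hypothetical driver): an
+ * interrupt handler typically recovers the driver context of the
+ * currently running job with this helper before completing it:
+ *
+ *	static irqreturn_t xxx_irq(int irq, void *priv)
+ *	{
+ *		struct xxx_dev *dev = priv;
+ *		struct xxx_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
+ *
+ *		if (!ctx)
+ *			return IRQ_NONE;
+ *		// ... read hardware status, then finish the job ...
+ *		return IRQ_HANDLED;
+ *	}
+ */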
+
+/**
+ * v4l2_m2m_try_run() - select next job to perform and run it if possible
+ * @m2m_dev: per-device context
+ *
+ * Get next transaction (if present) from the waiting jobs list and run it.
+ *
+ * Note that this function can run on a given v4l2_m2m_ctx context,
+ * but call .device_run for another context.
+ */
+static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+	if (m2m_dev->curr_ctx) {
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ dprintk("Another instance is running, won't run now\n");
+ return;
+ }
+
+ if (list_empty(&m2m_dev->job_queue)) {
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ dprintk("No job pending\n");
+ return;
+ }
+
+ if (m2m_dev->job_queue_flags & QUEUE_PAUSED) {
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ dprintk("Running new jobs is paused\n");
+ return;
+ }
+
+ m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
+ struct v4l2_m2m_ctx, queue);
+ m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ dprintk("Running job on m2m_ctx: %p\n", m2m_dev->curr_ctx);
+ m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
+}
+
+/*
+ * __v4l2_m2m_try_queue() - queue a job
+ * @m2m_dev: m2m device
+ * @m2m_ctx: m2m context
+ *
+ * Check if this context is ready to queue a job.
+ *
+ * This function can run in interrupt context.
+ */
+static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx)
+{
+ unsigned long flags_job;
+ struct vb2_v4l2_buffer *dst, *src;
+
+ dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);
+
+	if (!m2m_ctx->out_q_ctx.q.streaming ||
+	    !m2m_ctx->cap_q_ctx.q.streaming) {
+ dprintk("Streaming needs to be on for both queues\n");
+ return;
+ }
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
+
+ /* If the context is aborted then don't schedule it */
+ if (m2m_ctx->job_flags & TRANS_ABORT) {
+ dprintk("Aborted context\n");
+ goto job_unlock;
+ }
+
+ if (m2m_ctx->job_flags & TRANS_QUEUED) {
+ dprintk("On job queue already\n");
+ goto job_unlock;
+ }
+
+ src = v4l2_m2m_next_src_buf(m2m_ctx);
+ dst = v4l2_m2m_next_dst_buf(m2m_ctx);
+ if (!src && !m2m_ctx->out_q_ctx.buffered) {
+ dprintk("No input buffers available\n");
+ goto job_unlock;
+ }
+ if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
+ dprintk("No output buffers available\n");
+ goto job_unlock;
+ }
+
+ m2m_ctx->new_frame = true;
+
+ if (src && dst && dst->is_held &&
+ dst->vb2_buf.copied_timestamp &&
+ dst->vb2_buf.timestamp != src->vb2_buf.timestamp) {
+ dprintk("Timestamp mismatch, returning held capture buffer\n");
+ dst->is_held = false;
+ v4l2_m2m_dst_buf_remove(m2m_ctx);
+ v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
+ dst = v4l2_m2m_next_dst_buf(m2m_ctx);
+
+ if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
+ dprintk("No output buffers available after returning held buffer\n");
+ goto job_unlock;
+ }
+ }
+
+ if (src && dst && (m2m_ctx->out_q_ctx.q.subsystem_flags &
+ VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF))
+ m2m_ctx->new_frame = !dst->vb2_buf.copied_timestamp ||
+ dst->vb2_buf.timestamp != src->vb2_buf.timestamp;
+
+ if (m2m_ctx->has_stopped) {
+ dprintk("Device has stopped\n");
+ goto job_unlock;
+ }
+
+	if (m2m_dev->m2m_ops->job_ready &&
+	    !m2m_dev->m2m_ops->job_ready(m2m_ctx->priv)) {
+ dprintk("Driver not ready\n");
+ goto job_unlock;
+ }
+
+ list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
+ m2m_ctx->job_flags |= TRANS_QUEUED;
+
+job_unlock:
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
+}
+
+/**
+ * v4l2_m2m_try_schedule() - schedule and possibly run a job for any context
+ * @m2m_ctx: m2m context
+ *
+ * Check if this context is ready to queue a job. If suitable,
+ * run the next queued job on the mem2mem device.
+ *
+ * This function shouldn't run in interrupt context.
+ *
+ * Note that v4l2_m2m_try_schedule() can schedule one job for this context,
+ * and then run another job for another context.
+ */
+void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ struct v4l2_m2m_dev *m2m_dev = m2m_ctx->m2m_dev;
+
+ __v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
+ v4l2_m2m_try_run(m2m_dev);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);
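+
+/*
+ * Usage sketch (illustrative; "xxx" is a hypothetical driver): the
+ * scheduling entry points above are driven by the driver's
+ * v4l2_m2m_ops. A minimal set of callbacks looks like:
+ *
+ *	static void xxx_device_run(void *priv)
+ *	{
+ *		struct xxx_ctx *ctx = priv;
+ *
+ *		// program the hardware with the next src/dst buffers
+ *	}
+ *
+ *	static const struct v4l2_m2m_ops xxx_m2m_ops = {
+ *		.device_run	= xxx_device_run,
+ *		// .job_ready and .job_abort are optional
+ *	};
+ */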
+
+/**
+ * v4l2_m2m_device_run_work() - run pending jobs for the device
+ * @work: Work structure used for scheduling the execution of this function.
+ */
+static void v4l2_m2m_device_run_work(struct work_struct *work)
+{
+ struct v4l2_m2m_dev *m2m_dev =
+ container_of(work, struct v4l2_m2m_dev, job_work);
+
+ v4l2_m2m_try_run(m2m_dev);
+}
+
+/**
+ * v4l2_m2m_cancel_job() - cancel pending jobs for the context
+ * @m2m_ctx: m2m context with jobs to be canceled
+ *
+ * In case of streamoff or release called on any context,
+ * 1] If the context is currently running, then the driver's job_abort
+ *    callback is invoked and we wait for the job to complete
+ * 2] If the context is queued, then the context will be removed from
+ *    the job_queue
+ */
+static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ struct v4l2_m2m_dev *m2m_dev;
+ unsigned long flags;
+
+ m2m_dev = m2m_ctx->m2m_dev;
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+
+ m2m_ctx->job_flags |= TRANS_ABORT;
+ if (m2m_ctx->job_flags & TRANS_RUNNING) {
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ if (m2m_dev->m2m_ops->job_abort)
+ m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
+ dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
+ wait_event(m2m_ctx->finished,
+ !(m2m_ctx->job_flags & TRANS_RUNNING));
+ } else if (m2m_ctx->job_flags & TRANS_QUEUED) {
+ list_del(&m2m_ctx->queue);
+ m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ dprintk("m2m_ctx: %p had been on queue and was removed\n",
+ m2m_ctx);
+ } else {
+ /* Do nothing, was not on queue/running */
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ }
+}
+
+/*
+ * Schedule the next job, called from v4l2_m2m_job_finish() or
+ * v4l2_m2m_buf_done_and_job_finish().
+ */
+static void v4l2_m2m_schedule_next_job(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx)
+{
+ /*
+ * This instance might have more buffers ready, but since we do not
+ * allow more than one job on the job_queue per instance, each has
+ * to be scheduled separately after the previous one finishes.
+ */
+ __v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
+
+ /*
+ * We might be running in atomic context,
+ * but the job must be run in non-atomic context.
+ */
+ schedule_work(&m2m_dev->job_work);
+}
+
+/*
+ * Assumes job_spinlock is held, called from v4l2_m2m_job_finish() or
+ * v4l2_m2m_buf_done_and_job_finish().
+ */
+static bool _v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx)
+{
+ if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
+ dprintk("Called by an instance not currently running\n");
+ return false;
+ }
+
+ list_del(&m2m_dev->curr_ctx->queue);
+ m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
+ wake_up(&m2m_dev->curr_ctx->finished);
+ m2m_dev->curr_ctx = NULL;
+ return true;
+}
+
+void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx)
+{
+ unsigned long flags;
+ bool schedule_next;
+
+ /*
+ * This function should not be used for drivers that support
+ * holding capture buffers. Those should use
+ * v4l2_m2m_buf_done_and_job_finish() instead.
+ */
+ WARN_ON(m2m_ctx->out_q_ctx.q.subsystem_flags &
+ VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF);
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ if (schedule_next)
+ v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
+}
+EXPORT_SYMBOL(v4l2_m2m_job_finish);
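+
+/*
+ * Usage sketch (illustrative; "xxx" and the ctx fields are hypothetical):
+ * a typical completion path, e.g. at the end of an interrupt handler,
+ * dequeues both buffers, marks them done and lets the framework schedule
+ * the next job:
+ *
+ *	src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ *	dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ *	v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
+ *	v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
+ *	v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
+ */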
+
+void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx,
+ enum vb2_buffer_state state)
+{
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ bool schedule_next = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ src_buf = v4l2_m2m_src_buf_remove(m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(m2m_ctx);
+
+ if (WARN_ON(!src_buf || !dst_buf))
+ goto unlock;
+ dst_buf->is_held = src_buf->flags & V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
+ if (!dst_buf->is_held) {
+ v4l2_m2m_dst_buf_remove(m2m_ctx);
+ v4l2_m2m_buf_done(dst_buf, state);
+ }
+ /*
+ * If the request API is being used, returning the OUTPUT
+	 * (src) buffer will wake up any process waiting on the
+ * request file descriptor.
+ *
+ * Therefore, return the CAPTURE (dst) buffer first,
+ * to avoid signalling the request file descriptor
+ * before the CAPTURE buffer is done.
+ */
+ v4l2_m2m_buf_done(src_buf, state);
+ schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
+unlock:
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ if (schedule_next)
+ v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
+}
+EXPORT_SYMBOL(v4l2_m2m_buf_done_and_job_finish);
+
+void v4l2_m2m_suspend(struct v4l2_m2m_dev *m2m_dev)
+{
+ unsigned long flags;
+ struct v4l2_m2m_ctx *curr_ctx;
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ m2m_dev->job_queue_flags |= QUEUE_PAUSED;
+ curr_ctx = m2m_dev->curr_ctx;
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ if (curr_ctx)
+ wait_event(curr_ctx->finished,
+ !(curr_ctx->job_flags & TRANS_RUNNING));
+}
+EXPORT_SYMBOL(v4l2_m2m_suspend);
+
+void v4l2_m2m_resume(struct v4l2_m2m_dev *m2m_dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ m2m_dev->job_queue_flags &= ~QUEUE_PAUSED;
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ v4l2_m2m_try_run(m2m_dev);
+}
+EXPORT_SYMBOL(v4l2_m2m_resume);
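+
+/*
+ * Usage sketch (illustrative; "xxx" is a hypothetical driver): system
+ * sleep handlers can pause the job queue and wait for the hardware to
+ * go idle before the device is powered down:
+ *
+ *	static int xxx_suspend(struct device *dev)
+ *	{
+ *		struct xxx_dev *xdev = dev_get_drvdata(dev);
+ *
+ *		v4l2_m2m_suspend(xdev->m2m_dev);
+ *		return 0;
+ *	}
+ *
+ *	static int xxx_resume(struct device *dev)
+ *	{
+ *		struct xxx_dev *xdev = dev_get_drvdata(dev);
+ *
+ *		v4l2_m2m_resume(xdev->m2m_dev);
+ *		return 0;
+ *	}
+ */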
+
+int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct vb2_queue *vq;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
+ ret = vb2_reqbufs(vq, reqbufs);
+	/*
+	 * If count == 0, then the owner has released all buffers and
+	 * is no longer the owner of the queue. Otherwise we have an owner.
+	 */
+ if (ret == 0)
+ vq->owner = reqbufs->count ? file->private_data : NULL;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
+
+static void v4l2_m2m_adjust_mem_offset(struct vb2_queue *vq,
+ struct v4l2_buffer *buf)
+{
+ /* Adjust MMAP memory offsets for the CAPTURE queue */
+ if (buf->memory == V4L2_MEMORY_MMAP && V4L2_TYPE_IS_CAPTURE(vq->type)) {
+ if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
+ unsigned int i;
+
+ for (i = 0; i < buf->length; ++i)
+ buf->m.planes[i].m.mem_offset
+ += DST_QUEUE_OFF_BASE;
+ } else {
+ buf->m.offset += DST_QUEUE_OFF_BASE;
+ }
+ }
+}
+
+int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_buffer *buf)
+{
+ struct vb2_queue *vq;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
+ ret = vb2_querybuf(vq, buf);
+ if (ret)
+ return ret;
+
+ /* Adjust MMAP memory offsets for the CAPTURE queue */
+ v4l2_m2m_adjust_mem_offset(vq, buf);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
+
+/*
+ * This will add the LAST flag and mark the buffer management
+ * state as stopped.
+ * This is called when the last capture buffer must be flagged as LAST
+ * in draining mode from the encoder/decoder driver buf_queue() callback
+ * or from v4l2_update_last_buf_state() when a capture buffer is available.
+ */
+void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx,
+ struct vb2_v4l2_buffer *vbuf)
+{
+ vbuf->flags |= V4L2_BUF_FLAG_LAST;
+ vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
+
+ v4l2_m2m_mark_stopped(m2m_ctx);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_last_buffer_done);
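+
+/*
+ * Usage sketch (illustrative; "xxx" is a hypothetical driver): a
+ * decoder's buf_queue() callback can combine this helper with
+ * v4l2_m2m_dst_buf_is_last() to flag the final capture buffer while
+ * draining:
+ *
+ *	static void xxx_buf_queue(struct vb2_buffer *vb)
+ *	{
+ *		struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ *		struct xxx_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ *
+ *		if (V4L2_TYPE_IS_CAPTURE(vb->vb2_queue->type) &&
+ *		    v4l2_m2m_dst_buf_is_last(ctx->fh.m2m_ctx)) {
+ *			v4l2_m2m_last_buffer_done(ctx->fh.m2m_ctx, vbuf);
+ *			return;
+ *		}
+ *		v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+ *	}
+ */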
+
+/* When stop command is issued, update buffer management state */
+static int v4l2_update_last_buf_state(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ struct vb2_v4l2_buffer *next_dst_buf;
+
+ if (m2m_ctx->is_draining)
+ return -EBUSY;
+
+ if (m2m_ctx->has_stopped)
+ return 0;
+
+ m2m_ctx->last_src_buf = v4l2_m2m_last_src_buf(m2m_ctx);
+ m2m_ctx->is_draining = true;
+
+ /*
+ * The processing of the last output buffer queued before
+ * the STOP command is expected to mark the buffer management
+ * state as stopped with v4l2_m2m_mark_stopped().
+ */
+ if (m2m_ctx->last_src_buf)
+ return 0;
+
+ /*
+ * In case the output queue is empty, try to mark the last capture
+ * buffer as LAST.
+ */
+ next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx);
+ if (!next_dst_buf) {
+ /*
+ * Wait for the next queued one in encoder/decoder driver
+ * buf_queue() callback using the v4l2_m2m_dst_buf_is_last()
+ * helper or in v4l2_m2m_qbuf() if encoder/decoder is not yet
+ * streaming.
+ */
+ m2m_ctx->next_buf_last = true;
+ return 0;
+ }
+
+ v4l2_m2m_last_buffer_done(m2m_ctx, next_dst_buf);
+
+ return 0;
+}
+
+/*
+ * Updates the encoding/decoding buffer management state; should
+ * be called from the encoder/decoder driver's start_streaming()
+ */
+void v4l2_m2m_update_start_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
+ struct vb2_queue *q)
+{
+ /* If start streaming again, untag the last output buffer */
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ m2m_ctx->last_src_buf = NULL;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_update_start_streaming_state);
+
+/*
+ * Updates the encoding/decoding buffer management state; should
+ * be called from the encoder/decoder driver's stop_streaming()
+ */
+void v4l2_m2m_update_stop_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
+ struct vb2_queue *q)
+{
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+		/*
+		 * If in draining state, either mark the next dst buffer as
+		 * done, or flag the next one to be marked as done, either
+		 * in the encoder/decoder driver buf_queue() callback using
+		 * the v4l2_m2m_dst_buf_is_last() helper or in
+		 * v4l2_m2m_qbuf() if the encoder/decoder is not yet
+		 * streaming.
+		 */
+ if (m2m_ctx->is_draining) {
+ struct vb2_v4l2_buffer *next_dst_buf;
+
+ m2m_ctx->last_src_buf = NULL;
+ next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx);
+ if (!next_dst_buf)
+ m2m_ctx->next_buf_last = true;
+ else
+ v4l2_m2m_last_buffer_done(m2m_ctx,
+ next_dst_buf);
+ }
+ } else {
+ v4l2_m2m_clear_state(m2m_ctx);
+ }
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_update_stop_streaming_state);
+
+static void v4l2_m2m_force_last_buf_done(struct v4l2_m2m_ctx *m2m_ctx,
+ struct vb2_queue *q)
+{
+ struct vb2_buffer *vb;
+ struct vb2_v4l2_buffer *vbuf;
+ unsigned int i;
+
+ if (WARN_ON(q->is_output))
+ return;
+ if (list_empty(&q->queued_list))
+ return;
+
+ vb = list_first_entry(&q->queued_list, struct vb2_buffer, queued_entry);
+ for (i = 0; i < vb->num_planes; i++)
+ vb2_set_plane_payload(vb, i, 0);
+
+ /*
+ * Since the buffer hasn't been queued to the ready queue,
+	 * mark it active and owned before marking it LAST and DONE.
+ */
+ vb->state = VB2_BUF_STATE_ACTIVE;
+ atomic_inc(&q->owned_by_drv_count);
+
+ vbuf = to_vb2_v4l2_buffer(vb);
+ vbuf->field = V4L2_FIELD_NONE;
+
+ v4l2_m2m_last_buffer_done(m2m_ctx, vbuf);
+}
+
+int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_buffer *buf)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct vb2_queue *vq;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
+ if (V4L2_TYPE_IS_CAPTURE(vq->type) &&
+ (buf->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
+		dprintk("requests cannot be used with capture buffers\n");
+ return -EPERM;
+ }
+
+ ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf);
+ if (ret)
+ return ret;
+
+ /* Adjust MMAP memory offsets for the CAPTURE queue */
+ v4l2_m2m_adjust_mem_offset(vq, buf);
+
+	/*
+	 * If the capture queue is streaming, but streaming hasn't started
+	 * on the device and the device was asked to stop, mark the
+	 * previously queued buffer as DONE with the LAST flag since it
+	 * won't be queued on the device.
+	 */
+ if (V4L2_TYPE_IS_CAPTURE(vq->type) &&
+ vb2_is_streaming(vq) && !vb2_start_streaming_called(vq) &&
+ (v4l2_m2m_has_stopped(m2m_ctx) || v4l2_m2m_dst_buf_is_last(m2m_ctx)))
+ v4l2_m2m_force_last_buf_done(m2m_ctx, vq);
+ else if (!(buf->flags & V4L2_BUF_FLAG_IN_REQUEST))
+ v4l2_m2m_try_schedule(m2m_ctx);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);
+
+int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_buffer *buf)
+{
+ struct vb2_queue *vq;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
+ ret = vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
+ if (ret)
+ return ret;
+
+ /* Adjust MMAP memory offsets for the CAPTURE queue */
+ v4l2_m2m_adjust_mem_offset(vq, buf);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
+
+int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_buffer *buf)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct vb2_queue *vq;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
+ ret = vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
+ if (ret)
+ return ret;
+
+ /* Adjust MMAP memory offsets for the CAPTURE queue */
+ v4l2_m2m_adjust_mem_offset(vq, buf);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);
+
+int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_create_buffers *create)
+{
+ struct vb2_queue *vq;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
+ return vb2_create_bufs(vq, create);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);
+
+int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_exportbuffer *eb)
+{
+ struct vb2_queue *vq;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
+ return vb2_expbuf(vq, eb);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);
+
+int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ enum v4l2_buf_type type)
+{
+ struct vb2_queue *vq;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(m2m_ctx, type);
+ ret = vb2_streamon(vq, type);
+ if (!ret)
+ v4l2_m2m_try_schedule(m2m_ctx);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);
+
+int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ enum v4l2_buf_type type)
+{
+ struct v4l2_m2m_dev *m2m_dev;
+ struct v4l2_m2m_queue_ctx *q_ctx;
+ unsigned long flags_job, flags;
+ int ret;
+
+ /* wait until the current context is dequeued from job_queue */
+ v4l2_m2m_cancel_job(m2m_ctx);
+
+ q_ctx = get_queue_ctx(m2m_ctx, type);
+ ret = vb2_streamoff(&q_ctx->q, type);
+ if (ret)
+ return ret;
+
+ m2m_dev = m2m_ctx->m2m_dev;
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
+ /* We should not be scheduled anymore, since we're dropping a queue. */
+ if (m2m_ctx->job_flags & TRANS_QUEUED)
+ list_del(&m2m_ctx->queue);
+ m2m_ctx->job_flags = 0;
+
+ spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
+ /* Drop queue, since streamoff returns device to the same state as after
+ * calling reqbufs. */
+ INIT_LIST_HEAD(&q_ctx->rdy_queue);
+ q_ctx->num_rdy = 0;
+ spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
+
+ if (m2m_dev->curr_ctx == m2m_ctx) {
+ m2m_dev->curr_ctx = NULL;
+ wake_up(&m2m_ctx->finished);
+ }
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
+
+static __poll_t v4l2_m2m_poll_for_data(struct file *file,
+ struct v4l2_m2m_ctx *m2m_ctx,
+ struct poll_table_struct *wait)
+{
+ struct vb2_queue *src_q, *dst_q;
+ __poll_t rc = 0;
+ unsigned long flags;
+
+ src_q = v4l2_m2m_get_src_vq(m2m_ctx);
+ dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);
+
+ /*
+	 * There has to be at least one buffer queued on each queued_list,
+	 * which means the buffer is either in the driver already or waiting
+	 * for the driver to claim it and start processing.
+ */
+ if ((!vb2_is_streaming(src_q) || src_q->error ||
+ list_empty(&src_q->queued_list)) &&
+ (!vb2_is_streaming(dst_q) || dst_q->error ||
+ (list_empty(&dst_q->queued_list) && !dst_q->last_buffer_dequeued)))
+ return EPOLLERR;
+
+ spin_lock_irqsave(&src_q->done_lock, flags);
+ if (!list_empty(&src_q->done_list))
+ rc |= EPOLLOUT | EPOLLWRNORM;
+ spin_unlock_irqrestore(&src_q->done_lock, flags);
+
+ spin_lock_irqsave(&dst_q->done_lock, flags);
+ /*
+ * If the last buffer was dequeued from the capture queue, signal
+ * userspace. DQBUF(CAPTURE) will return -EPIPE.
+ */
+ if (!list_empty(&dst_q->done_list) || dst_q->last_buffer_dequeued)
+ rc |= EPOLLIN | EPOLLRDNORM;
+ spin_unlock_irqrestore(&dst_q->done_lock, flags);
+
+ return rc;
+}
+
+__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct poll_table_struct *wait)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct vb2_queue *src_q = v4l2_m2m_get_src_vq(m2m_ctx);
+ struct vb2_queue *dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);
+ __poll_t req_events = poll_requested_events(wait);
+ __poll_t rc = 0;
+
+ /*
+ * poll_wait() MUST be called on the first invocation on all the
+ * potential queues of interest, even if we are not interested in their
+	 * events during this first call. Failure to do so will result in a
+	 * queue's events being ignored because the poll_table won't be
+	 * capable of adding new wait queues thereafter.
+ */
+ poll_wait(file, &src_q->done_wq, wait);
+ poll_wait(file, &dst_q->done_wq, wait);
+
+ if (req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM))
+ rc = v4l2_m2m_poll_for_data(file, m2m_ctx, wait);
+
+ if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
+ struct v4l2_fh *fh = file->private_data;
+
+ poll_wait(file, &fh->wait, wait);
+ if (v4l2_event_pending(fh))
+ rc |= EPOLLPRI;
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_poll);
+
+int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct vm_area_struct *vma)
+{
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ struct vb2_queue *vq;
+
+ if (offset < DST_QUEUE_OFF_BASE) {
+ vq = v4l2_m2m_get_src_vq(m2m_ctx);
+ } else {
+ vq = v4l2_m2m_get_dst_vq(m2m_ctx);
+ vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
+ }
+
+ return vb2_mmap(vq, vma);
+}
+EXPORT_SYMBOL(v4l2_m2m_mmap);
+
+#ifndef CONFIG_MMU
+unsigned long v4l2_m2m_get_unmapped_area(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ struct v4l2_fh *fh = file->private_data;
+ unsigned long offset = pgoff << PAGE_SHIFT;
+ struct vb2_queue *vq;
+
+ if (offset < DST_QUEUE_OFF_BASE) {
+ vq = v4l2_m2m_get_src_vq(fh->m2m_ctx);
+ } else {
+ vq = v4l2_m2m_get_dst_vq(fh->m2m_ctx);
+ pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
+ }
+
+ return vb2_get_unmapped_area(vq, addr, len, pgoff, flags);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_get_unmapped_area);
+#endif
+
+#if defined(CONFIG_MEDIA_CONTROLLER)
+void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
+{
+ media_remove_intf_links(&m2m_dev->intf_devnode->intf);
+ media_devnode_remove(m2m_dev->intf_devnode);
+
+ media_entity_remove_links(m2m_dev->source);
+ media_entity_remove_links(&m2m_dev->sink);
+ media_entity_remove_links(&m2m_dev->proc);
+ media_device_unregister_entity(m2m_dev->source);
+ media_device_unregister_entity(&m2m_dev->sink);
+ media_device_unregister_entity(&m2m_dev->proc);
+ kfree(m2m_dev->source->name);
+ kfree(m2m_dev->sink.name);
+ kfree(m2m_dev->proc.name);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_unregister_media_controller);
+
+static int v4l2_m2m_register_entity(struct media_device *mdev,
+ struct v4l2_m2m_dev *m2m_dev, enum v4l2_m2m_entity_type type,
+ struct video_device *vdev, int function)
+{
+ struct media_entity *entity;
+ struct media_pad *pads;
+ char *name;
+ unsigned int len;
+ int num_pads;
+ int ret;
+
+ switch (type) {
+ case MEM2MEM_ENT_TYPE_SOURCE:
+ entity = m2m_dev->source;
+ pads = &m2m_dev->source_pad;
+ pads[0].flags = MEDIA_PAD_FL_SOURCE;
+ num_pads = 1;
+ break;
+ case MEM2MEM_ENT_TYPE_SINK:
+ entity = &m2m_dev->sink;
+ pads = &m2m_dev->sink_pad;
+ pads[0].flags = MEDIA_PAD_FL_SINK;
+ num_pads = 1;
+ break;
+ case MEM2MEM_ENT_TYPE_PROC:
+ entity = &m2m_dev->proc;
+ pads = m2m_dev->proc_pads;
+ pads[0].flags = MEDIA_PAD_FL_SINK;
+ pads[1].flags = MEDIA_PAD_FL_SOURCE;
+ num_pads = 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
+ if (type != MEM2MEM_ENT_TYPE_PROC) {
+ entity->info.dev.major = VIDEO_MAJOR;
+ entity->info.dev.minor = vdev->minor;
+ }
+ len = strlen(vdev->name) + 2 + strlen(m2m_entity_name[type]);
+ name = kmalloc(len, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+ snprintf(name, len, "%s-%s", vdev->name, m2m_entity_name[type]);
+ entity->name = name;
+ entity->function = function;
+
+ ret = media_entity_pads_init(entity, num_pads, pads);
+ if (ret)
+ return ret;
+ ret = media_device_register_entity(mdev, entity);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
+ struct video_device *vdev, int function)
+{
+ struct media_device *mdev = vdev->v4l2_dev->mdev;
+ struct media_link *link;
+ int ret;
+
+ if (!mdev)
+ return 0;
+
+	/*
+	 * A memory-to-memory device consists of two DMA engine entities
+	 * and one video processing entity. The DMA engine entities are
+	 * linked to a V4L interface.
+	 */
+
+ /* Create the three entities with their pads */
+ m2m_dev->source = &vdev->entity;
+ ret = v4l2_m2m_register_entity(mdev, m2m_dev,
+ MEM2MEM_ENT_TYPE_SOURCE, vdev, MEDIA_ENT_F_IO_V4L);
+ if (ret)
+ return ret;
+ ret = v4l2_m2m_register_entity(mdev, m2m_dev,
+ MEM2MEM_ENT_TYPE_PROC, vdev, function);
+ if (ret)
+ goto err_rel_entity0;
+ ret = v4l2_m2m_register_entity(mdev, m2m_dev,
+ MEM2MEM_ENT_TYPE_SINK, vdev, MEDIA_ENT_F_IO_V4L);
+ if (ret)
+ goto err_rel_entity1;
+
+ /* Connect the three entities */
+ ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 0,
+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ goto err_rel_entity2;
+
+ ret = media_create_pad_link(&m2m_dev->proc, 1, &m2m_dev->sink, 0,
+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ goto err_rm_links0;
+
+ /* Create video interface */
+ m2m_dev->intf_devnode = media_devnode_create(mdev,
+ MEDIA_INTF_T_V4L_VIDEO, 0,
+ VIDEO_MAJOR, vdev->minor);
+ if (!m2m_dev->intf_devnode) {
+ ret = -ENOMEM;
+ goto err_rm_links1;
+ }
+
+ /* Connect the two DMA engines to the interface */
+ link = media_create_intf_link(m2m_dev->source,
+ &m2m_dev->intf_devnode->intf,
+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
+ if (!link) {
+ ret = -ENOMEM;
+ goto err_rm_devnode;
+ }
+
+ link = media_create_intf_link(&m2m_dev->sink,
+ &m2m_dev->intf_devnode->intf,
+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
+ if (!link) {
+ ret = -ENOMEM;
+ goto err_rm_intf_link;
+ }
+ return 0;
+
+err_rm_intf_link:
+ media_remove_intf_links(&m2m_dev->intf_devnode->intf);
+err_rm_devnode:
+ media_devnode_remove(m2m_dev->intf_devnode);
+err_rm_links1:
+ media_entity_remove_links(&m2m_dev->sink);
+err_rm_links0:
+ media_entity_remove_links(&m2m_dev->proc);
+ media_entity_remove_links(m2m_dev->source);
+err_rel_entity2:
+ media_device_unregister_entity(&m2m_dev->proc);
+ kfree(m2m_dev->proc.name);
+err_rel_entity1:
+ media_device_unregister_entity(&m2m_dev->sink);
+ kfree(m2m_dev->sink.name);
+err_rel_entity0:
+ media_device_unregister_entity(m2m_dev->source);
+ kfree(m2m_dev->source->name);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_register_media_controller);
+#endif
+
+struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
+{
+ struct v4l2_m2m_dev *m2m_dev;
+
+ if (!m2m_ops || WARN_ON(!m2m_ops->device_run))
+ return ERR_PTR(-EINVAL);
+
+ m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
+ if (!m2m_dev)
+ return ERR_PTR(-ENOMEM);
+
+ m2m_dev->curr_ctx = NULL;
+ m2m_dev->m2m_ops = m2m_ops;
+ INIT_LIST_HEAD(&m2m_dev->job_queue);
+ spin_lock_init(&m2m_dev->job_spinlock);
+ INIT_WORK(&m2m_dev->job_work, v4l2_m2m_device_run_work);
+
+ return m2m_dev;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_init);
+
+void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
+{
+ kfree(m2m_dev);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_release);
+
+struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
+ void *drv_priv,
+ int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
+{
+ struct v4l2_m2m_ctx *m2m_ctx;
+ struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
+ int ret;
+
+ m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
+ if (!m2m_ctx)
+ return ERR_PTR(-ENOMEM);
+
+ m2m_ctx->priv = drv_priv;
+ m2m_ctx->m2m_dev = m2m_dev;
+ init_waitqueue_head(&m2m_ctx->finished);
+
+ out_q_ctx = &m2m_ctx->out_q_ctx;
+ cap_q_ctx = &m2m_ctx->cap_q_ctx;
+
+ INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
+ INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
+ spin_lock_init(&out_q_ctx->rdy_spinlock);
+ spin_lock_init(&cap_q_ctx->rdy_spinlock);
+
+ INIT_LIST_HEAD(&m2m_ctx->queue);
+
+ ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);
+
+ if (ret)
+ goto err;
+ /*
+	 * Both queues should use the same mutex to lock the m2m context.
+ * This lock is used in some v4l2_m2m_* helpers.
+ */
+ if (WARN_ON(out_q_ctx->q.lock != cap_q_ctx->q.lock)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ m2m_ctx->q_lock = out_q_ctx->q.lock;
+
+ return m2m_ctx;
+err:
+ kfree(m2m_ctx);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
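+
+/*
+ * Usage sketch (illustrative; "xxx" is a hypothetical driver): a
+ * driver's open() creates the per-file context and lets queue_init()
+ * configure both vb2 queues, which must share one lock:
+ *
+ *	static int xxx_queue_init(void *priv, struct vb2_queue *src_vq,
+ *				  struct vb2_queue *dst_vq)
+ *	{
+ *		struct xxx_ctx *ctx = priv;
+ *		int ret;
+ *
+ *		src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ *		src_vq->lock = &ctx->dev->mutex;
+ *		// ... io_modes, ops, mem_ops, drv_priv ...
+ *		ret = vb2_queue_init(src_vq);
+ *		if (ret)
+ *			return ret;
+ *
+ *		dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ *		dst_vq->lock = &ctx->dev->mutex;	// same lock!
+ *		// ... same setup for the capture queue ...
+ *		return vb2_queue_init(dst_vq);
+ *	}
+ *
+ *	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
+ *					    xxx_queue_init);
+ */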
+
+void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ /* wait until the current context is dequeued from job_queue */
+ v4l2_m2m_cancel_job(m2m_ctx);
+
+ vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
+ vb2_queue_release(&m2m_ctx->out_q_ctx.q);
+
+ kfree(m2m_ctx);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);
+
+void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
+ struct vb2_v4l2_buffer *vbuf)
+{
+ struct v4l2_m2m_buffer *b = container_of(vbuf,
+ struct v4l2_m2m_buffer, vb);
+ struct v4l2_m2m_queue_ctx *q_ctx;
+ unsigned long flags;
+
+ q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
+ if (!q_ctx)
+ return;
+
+ spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
+ list_add_tail(&b->list, &q_ctx->rdy_queue);
+ q_ctx->num_rdy++;
+ spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
+
+void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb,
+ struct vb2_v4l2_buffer *cap_vb,
+ bool copy_frame_flags)
+{
+ u32 mask = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+
+ if (copy_frame_flags)
+ mask |= V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME |
+ V4L2_BUF_FLAG_BFRAME;
+
+ cap_vb->vb2_buf.timestamp = out_vb->vb2_buf.timestamp;
+
+ if (out_vb->flags & V4L2_BUF_FLAG_TIMECODE)
+ cap_vb->timecode = out_vb->timecode;
+ cap_vb->field = out_vb->field;
+ cap_vb->flags &= ~mask;
+ cap_vb->flags |= out_vb->flags & mask;
+ cap_vb->vb2_buf.copied_timestamp = 1;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_buf_copy_metadata);
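+
+/*
+ * Usage sketch (illustrative): stateful codec drivers call this from
+ * their device_run() so the capture buffer inherits the output buffer's
+ * timestamp and, optionally, its KEY/P/B frame flags:
+ *
+ *	src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ *	dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ *	v4l2_m2m_buf_copy_metadata(src, dst, true);
+ */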
+
+void v4l2_m2m_request_queue(struct media_request *req)
+{
+ struct media_request_object *obj, *obj_safe;
+ struct v4l2_m2m_ctx *m2m_ctx = NULL;
+
+ /*
+ * Queue all objects. Note that buffer objects are at the end of the
+ * objects list, after all other object types. Once buffer objects
+ * are queued, the driver might delete them immediately (if the driver
+ * processes the buffer at once), so we have to use
+ * list_for_each_entry_safe() to handle the case where the object we
+ * queue is deleted.
+ */
+ list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
+ struct v4l2_m2m_ctx *m2m_ctx_obj;
+ struct vb2_buffer *vb;
+
+ if (!obj->ops->queue)
+ continue;
+
+ if (vb2_request_object_is_buffer(obj)) {
+ /* Sanity checks */
+ vb = container_of(obj, struct vb2_buffer, req_obj);
+ WARN_ON(!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type));
+ m2m_ctx_obj = container_of(vb->vb2_queue,
+ struct v4l2_m2m_ctx,
+ out_q_ctx.q);
+ WARN_ON(m2m_ctx && m2m_ctx_obj != m2m_ctx);
+ m2m_ctx = m2m_ctx_obj;
+ }
+
+ /*
+ * The buffer we queue here can in theory be immediately
+ * unbound, hence the use of list_for_each_entry_safe()
+ * above and why we call the queue op last.
+ */
+ obj->ops->queue(obj);
+ }
+
+ WARN_ON(!m2m_ctx);
+
+ if (m2m_ctx)
+ v4l2_m2m_try_schedule(m2m_ctx);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_request_queue);
+
+/* Videobuf2 ioctl helpers */
+
+int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *rb)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);
+
+int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
+ struct v4l2_create_buffers *create)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);
+
+int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);
+
+int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);
+
+int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);
+
+int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);
+
+int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
+ struct v4l2_exportbuffer *eb)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);
+
+int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);
+
+int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);
+
+int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh,
+ struct v4l2_encoder_cmd *ec)
+{
+ if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START)
+ return -EINVAL;
+
+ ec->flags = 0;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_encoder_cmd);
+
+int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh,
+ struct v4l2_decoder_cmd *dc)
+{
+ if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START)
+ return -EINVAL;
+
+ dc->flags = 0;
+
+ if (dc->cmd == V4L2_DEC_CMD_STOP) {
+ dc->stop.pts = 0;
+ } else if (dc->cmd == V4L2_DEC_CMD_START) {
+ dc->start.speed = 0;
+ dc->start.format = V4L2_DEC_START_FMT_NONE;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_decoder_cmd);
+
+/*
+ * Updates the encoding state on ENC_CMD_STOP/ENC_CMD_START.
+ * Should be called from the encoder driver's encoder_cmd() callback.
+ */
+int v4l2_m2m_encoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_encoder_cmd *ec)
+{
+ if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START)
+ return -EINVAL;
+
+ if (ec->cmd == V4L2_ENC_CMD_STOP)
+ return v4l2_update_last_buf_state(m2m_ctx);
+
+ if (m2m_ctx->is_draining)
+ return -EBUSY;
+
+ if (m2m_ctx->has_stopped)
+ m2m_ctx->has_stopped = false;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_encoder_cmd);
+
+/*
+ * Updates the decoding state on DEC_CMD_STOP/DEC_CMD_START.
+ * Should be called from the decoder driver's decoder_cmd() callback.
+ */
+int v4l2_m2m_decoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_decoder_cmd *dc)
+{
+ if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START)
+ return -EINVAL;
+
+ if (dc->cmd == V4L2_DEC_CMD_STOP)
+ return v4l2_update_last_buf_state(m2m_ctx);
+
+ if (m2m_ctx->is_draining)
+ return -EBUSY;
+
+ if (m2m_ctx->has_stopped)
+ m2m_ctx->has_stopped = false;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_decoder_cmd);
+
+int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *priv,
+ struct v4l2_encoder_cmd *ec)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_encoder_cmd(file, fh->m2m_ctx, ec);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_encoder_cmd);
+
+int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *priv,
+ struct v4l2_decoder_cmd *dc)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_decoder_cmd(file, fh->m2m_ctx, dc);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_decoder_cmd);
+
+int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh,
+ struct v4l2_decoder_cmd *dc)
+{
+ if (dc->cmd != V4L2_DEC_CMD_FLUSH)
+ return -EINVAL;
+
+ dc->flags = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_try_decoder_cmd);
+
+int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv,
+ struct v4l2_decoder_cmd *dc)
+{
+ struct v4l2_fh *fh = file->private_data;
+ struct vb2_v4l2_buffer *out_vb, *cap_vb;
+ struct v4l2_m2m_dev *m2m_dev = fh->m2m_ctx->m2m_dev;
+ unsigned long flags;
+ int ret;
+
+ ret = v4l2_m2m_ioctl_stateless_try_decoder_cmd(file, priv, dc);
+ if (ret < 0)
+ return ret;
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ out_vb = v4l2_m2m_last_src_buf(fh->m2m_ctx);
+ cap_vb = v4l2_m2m_last_dst_buf(fh->m2m_ctx);
+
+ /*
+ * If there is an out buffer pending, then clear any HOLD flag.
+ *
+ * By clearing this flag we ensure that when this output
+ * buffer is processed any held capture buffer will be released.
+ */
+ if (out_vb) {
+ out_vb->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
+ } else if (cap_vb && cap_vb->is_held) {
+ /*
+ * If there were no output buffers, but there is a
+ * capture buffer that is held, then release that
+ * buffer.
+ */
+ cap_vb->is_held = false;
+ v4l2_m2m_dst_buf_remove(fh->m2m_ctx);
+ v4l2_m2m_buf_done(cap_vb, VB2_BUF_STATE_DONE);
+ }
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_decoder_cmd);
+
+/*
+ * v4l2_file_operations helpers. It is assumed here that the same lock
+ * is used for the output and the capture buffer queues.
+ */
+
+int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct v4l2_fh *fh = file->private_data;
+
+ return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);
+
+__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
+{
+ struct v4l2_fh *fh = file->private_data;
+ struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
+ __poll_t ret;
+
+ if (m2m_ctx->q_lock)
+ mutex_lock(m2m_ctx->q_lock);
+
+ ret = v4l2_m2m_poll(file, m2m_ctx, wait);
+
+ if (m2m_ctx->q_lock)
+ mutex_unlock(m2m_ctx->q_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);
+
diff --git a/drivers/media/v4l2-core/v4l2-spi.c b/drivers/media/v4l2-core/v4l2-spi.c
new file mode 100644
index 0000000000..eadecdff73
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-spi.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * v4l2-spi - SPI helpers for Video4Linux2
+ */
+
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+
+void v4l2_spi_subdev_unregister(struct v4l2_subdev *sd)
+{
+ struct spi_device *spi = v4l2_get_subdevdata(sd);
+
+ if (spi && !spi->dev.of_node && !spi->dev.fwnode)
+ spi_unregister_device(spi);
+}
+
+void v4l2_spi_subdev_init(struct v4l2_subdev *sd, struct spi_device *spi,
+ const struct v4l2_subdev_ops *ops)
+{
+ v4l2_subdev_init(sd, ops);
+ sd->flags |= V4L2_SUBDEV_FL_IS_SPI;
+ /* the owner is the same as the spi_device's driver owner */
+ sd->owner = spi->dev.driver->owner;
+ sd->dev = &spi->dev;
+ /* spi_device and v4l2_subdev point to one another */
+ v4l2_set_subdevdata(sd, spi);
+ spi_set_drvdata(spi, sd);
+ /* initialize name */
+ snprintf(sd->name, sizeof(sd->name), "%s %s",
+ spi->dev.driver->name, dev_name(&spi->dev));
+}
+EXPORT_SYMBOL_GPL(v4l2_spi_subdev_init);
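+
+/*
+ * Usage sketch (illustrative; "xxx" is a hypothetical driver): an SPI
+ * sub-device driver calls this from its probe() to tie the subdev and
+ * the SPI device together:
+ *
+ *	static int xxx_probe(struct spi_device *spi)
+ *	{
+ *		struct xxx_state *state;
+ *
+ *		state = devm_kzalloc(&spi->dev, sizeof(*state), GFP_KERNEL);
+ *		if (!state)
+ *			return -ENOMEM;
+ *		v4l2_spi_subdev_init(&state->sd, spi, &xxx_subdev_ops);
+ *		return 0;
+ *	}
+ */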
+
+struct v4l2_subdev *v4l2_spi_new_subdev(struct v4l2_device *v4l2_dev,
+ struct spi_master *master,
+ struct spi_board_info *info)
+{
+ struct v4l2_subdev *sd = NULL;
+ struct spi_device *spi = NULL;
+
+ if (!v4l2_dev)
+ return NULL;
+ if (info->modalias[0])
+ request_module(info->modalias);
+
+ spi = spi_new_device(master, info);
+
+ if (!spi || !spi->dev.driver)
+ goto error;
+
+ if (!try_module_get(spi->dev.driver->owner))
+ goto error;
+
+ sd = spi_get_drvdata(spi);
+
+ /*
+ * Register with the v4l2_device which increases the module's
+ * use count as well.
+ */
+ if (v4l2_device_register_subdev(v4l2_dev, sd))
+ sd = NULL;
+
+ /* Decrease the module use count to match the first try_module_get. */
+ module_put(spi->dev.driver->owner);
+
+error:
+	/*
+	 * If we have an SPI device but no subdev, then something went wrong
+	 * and we must unregister the device.
+	 */
+ if (!sd)
+ spi_unregister_device(spi);
+
+ return sd;
+}
+EXPORT_SYMBOL_GPL(v4l2_spi_new_subdev);
diff --git a/drivers/media/v4l2-core/v4l2-subdev-priv.h b/drivers/media/v4l2-core/v4l2-subdev-priv.h
new file mode 100644
index 0000000000..52391d6d8a
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-subdev-priv.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * V4L2 sub-device private header.
+ *
+ * Copyright (C) 2023 Hans de Goede <hdegoede@redhat.com>
+ */
+
+#ifndef _V4L2_SUBDEV_PRIV_H_
+#define _V4L2_SUBDEV_PRIV_H_
+
+int v4l2_subdev_get_privacy_led(struct v4l2_subdev *sd);
+void v4l2_subdev_put_privacy_led(struct v4l2_subdev *sd);
+
+#endif
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
new file mode 100644
index 0000000000..31752c06d1
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -0,0 +1,2247 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * V4L2 sub-device
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ */
+
+#include <linux/export.h>
+#include <linux/ioctl.h>
+#include <linux/leds.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/overflow.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ioctl.h>
+
+#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
+/*
+ * The Streams API is an experimental feature. To use the Streams API, set
+ * 'v4l2_subdev_enable_streams_api' to 1 below.
+ */
+
+static bool v4l2_subdev_enable_streams_api;
+#endif
+
+/*
+ * Maximum stream ID is 63 for now, as we use u64 bitmask to represent a set
+ * of streams.
+ *
+ * Note that V4L2_FRAME_DESC_ENTRY_MAX is related: V4L2_FRAME_DESC_ENTRY_MAX
+ * restricts the total number of streams in a pad, although the stream ID is
+ * not restricted.
+ */
+#define V4L2_SUBDEV_MAX_STREAM_ID 63
+
+#include "v4l2-subdev-priv.h"
+
+#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
+static int subdev_fh_init(struct v4l2_subdev_fh *fh, struct v4l2_subdev *sd)
+{
+ struct v4l2_subdev_state *state;
+ static struct lock_class_key key;
+
+ state = __v4l2_subdev_state_alloc(sd, "fh->state->lock", &key);
+ if (IS_ERR(state))
+ return PTR_ERR(state);
+
+ fh->state = state;
+
+ return 0;
+}
+
+static void subdev_fh_free(struct v4l2_subdev_fh *fh)
+{
+ __v4l2_subdev_state_free(fh->state);
+ fh->state = NULL;
+}
+
+static int subdev_open(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+ struct v4l2_subdev_fh *subdev_fh;
+ int ret;
+
+ subdev_fh = kzalloc(sizeof(*subdev_fh), GFP_KERNEL);
+ if (subdev_fh == NULL)
+ return -ENOMEM;
+
+ ret = subdev_fh_init(subdev_fh, sd);
+ if (ret) {
+ kfree(subdev_fh);
+ return ret;
+ }
+
+ v4l2_fh_init(&subdev_fh->vfh, vdev);
+ v4l2_fh_add(&subdev_fh->vfh);
+ file->private_data = &subdev_fh->vfh;
+
+ if (sd->v4l2_dev->mdev && sd->entity.graph_obj.mdev->dev) {
+ struct module *owner;
+
+ owner = sd->entity.graph_obj.mdev->dev->driver->owner;
+ if (!try_module_get(owner)) {
+ ret = -EBUSY;
+ goto err;
+ }
+ subdev_fh->owner = owner;
+ }
+
+ if (sd->internal_ops && sd->internal_ops->open) {
+ ret = sd->internal_ops->open(sd, subdev_fh);
+ if (ret < 0)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ module_put(subdev_fh->owner);
+ v4l2_fh_del(&subdev_fh->vfh);
+ v4l2_fh_exit(&subdev_fh->vfh);
+ subdev_fh_free(subdev_fh);
+ kfree(subdev_fh);
+
+ return ret;
+}
+
+static int subdev_close(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+ struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
+
+ if (sd->internal_ops && sd->internal_ops->close)
+ sd->internal_ops->close(sd, subdev_fh);
+ module_put(subdev_fh->owner);
+ v4l2_fh_del(vfh);
+ v4l2_fh_exit(vfh);
+ subdev_fh_free(subdev_fh);
+ kfree(subdev_fh);
+ file->private_data = NULL;
+
+ return 0;
+}
+#else /* CONFIG_VIDEO_V4L2_SUBDEV_API */
+static int subdev_open(struct file *file)
+{
+ return -ENODEV;
+}
+
+static int subdev_close(struct file *file)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */
+
+static inline int check_which(u32 which)
+{
+ if (which != V4L2_SUBDEV_FORMAT_TRY &&
+ which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return -EINVAL;
+
+ return 0;
+}
+
+static inline int check_pad(struct v4l2_subdev *sd, u32 pad)
+{
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ if (sd->entity.num_pads) {
+ if (pad >= sd->entity.num_pads)
+ return -EINVAL;
+ return 0;
+ }
+#endif
+ /* allow pad 0 on subdevices not registered as media entities */
+ if (pad > 0)
+ return -EINVAL;
+ return 0;
+}
+
+static int check_state(struct v4l2_subdev *sd, struct v4l2_subdev_state *state,
+ u32 which, u32 pad, u32 stream)
+{
+ if (sd->flags & V4L2_SUBDEV_FL_STREAMS) {
+#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
+ if (!v4l2_subdev_state_get_stream_format(state, pad, stream))
+ return -EINVAL;
+ return 0;
+#else
+ return -EINVAL;
+#endif
+ }
+
+ if (stream != 0)
+ return -EINVAL;
+
+ if (which == V4L2_SUBDEV_FORMAT_TRY && (!state || !state->pads))
+ return -EINVAL;
+
+ return 0;
+}
+
+static inline int check_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ if (!format)
+ return -EINVAL;
+
+ return check_which(format->which) ? : check_pad(sd, format->pad) ? :
+ check_state(sd, state, format->which, format->pad, format->stream);
+}
+
+static int call_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ return check_format(sd, state, format) ? :
+ sd->ops->pad->get_fmt(sd, state, format);
+}
+
+static int call_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ return check_format(sd, state, format) ? :
+ sd->ops->pad->set_fmt(sd, state, format);
+}
+
+static int call_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (!code)
+ return -EINVAL;
+
+ return check_which(code->which) ? : check_pad(sd, code->pad) ? :
+ check_state(sd, state, code->which, code->pad, code->stream) ? :
+ sd->ops->pad->enum_mbus_code(sd, state, code);
+}
+
+static int call_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (!fse)
+ return -EINVAL;
+
+ return check_which(fse->which) ? : check_pad(sd, fse->pad) ? :
+ check_state(sd, state, fse->which, fse->pad, fse->stream) ? :
+ sd->ops->pad->enum_frame_size(sd, state, fse);
+}
+
+static inline int check_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ if (!fi)
+ return -EINVAL;
+
+ return check_pad(sd, fi->pad);
+}
+
+static int call_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ return check_frame_interval(sd, fi) ? :
+ sd->ops->video->g_frame_interval(sd, fi);
+}
+
+static int call_s_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ return check_frame_interval(sd, fi) ? :
+ sd->ops->video->s_frame_interval(sd, fi);
+}
+
+static int call_enum_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_frame_interval_enum *fie)
+{
+ if (!fie)
+ return -EINVAL;
+
+ return check_which(fie->which) ? : check_pad(sd, fie->pad) ? :
+ check_state(sd, state, fie->which, fie->pad, fie->stream) ? :
+ sd->ops->pad->enum_frame_interval(sd, state, fie);
+}
+
+static inline int check_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_selection *sel)
+{
+ if (!sel)
+ return -EINVAL;
+
+ return check_which(sel->which) ? : check_pad(sd, sel->pad) ? :
+ check_state(sd, state, sel->which, sel->pad, sel->stream);
+}
+
+static int call_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_selection *sel)
+{
+ return check_selection(sd, state, sel) ? :
+ sd->ops->pad->get_selection(sd, state, sel);
+}
+
+static int call_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_selection *sel)
+{
+ return check_selection(sd, state, sel) ? :
+ sd->ops->pad->set_selection(sd, state, sel);
+}
+
+static inline int check_edid(struct v4l2_subdev *sd,
+ struct v4l2_subdev_edid *edid)
+{
+ if (!edid)
+ return -EINVAL;
+
+ if (edid->blocks && edid->edid == NULL)
+ return -EINVAL;
+
+ return check_pad(sd, edid->pad);
+}
+
+static int call_get_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
+{
+ return check_edid(sd, edid) ? : sd->ops->pad->get_edid(sd, edid);
+}
+
+static int call_set_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
+{
+ return check_edid(sd, edid) ? : sd->ops->pad->set_edid(sd, edid);
+}
+
+static int call_dv_timings_cap(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings_cap *cap)
+{
+ if (!cap)
+ return -EINVAL;
+
+ return check_pad(sd, cap->pad) ? :
+ sd->ops->pad->dv_timings_cap(sd, cap);
+}
+
+static int call_enum_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_enum_dv_timings *dvt)
+{
+ if (!dvt)
+ return -EINVAL;
+
+ return check_pad(sd, dvt->pad) ? :
+ sd->ops->pad->enum_dv_timings(sd, dvt);
+}
+
+static int call_get_mbus_config(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_mbus_config *config)
+{
+ return check_pad(sd, pad) ? :
+ sd->ops->pad->get_mbus_config(sd, pad, config);
+}
+
+static int call_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ int ret;
+
+#if IS_REACHABLE(CONFIG_LEDS_CLASS)
+ if (!IS_ERR_OR_NULL(sd->privacy_led)) {
+ if (enable)
+ led_set_brightness(sd->privacy_led,
+ sd->privacy_led->max_brightness);
+ else
+ led_set_brightness(sd->privacy_led, 0);
+ }
+#endif
+ ret = sd->ops->video->s_stream(sd, enable);
+
+ if (!enable && ret < 0) {
+ dev_warn(sd->dev, "disabling streaming failed (%d)\n", ret);
+ return 0;
+ }
+
+ return ret;
+}
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+/*
+ * Create state-management wrapper for pad ops dealing with subdev state. The
+ * wrapper handles the case where the caller does not provide the called
+ * subdev's state. This should be removed when all the callers are fixed.
+ */
+#define DEFINE_STATE_WRAPPER(f, arg_type) \
+ static int call_##f##_state(struct v4l2_subdev *sd, \
+ struct v4l2_subdev_state *_state, \
+ arg_type *arg) \
+ { \
+ struct v4l2_subdev_state *state = _state; \
+ int ret; \
+ if (!_state) \
+ state = v4l2_subdev_lock_and_get_active_state(sd); \
+ ret = call_##f(sd, state, arg); \
+ if (!_state && state) \
+ v4l2_subdev_unlock_state(state); \
+ return ret; \
+ }
+
+#else /* CONFIG_MEDIA_CONTROLLER */
+
+#define DEFINE_STATE_WRAPPER(f, arg_type) \
+ static int call_##f##_state(struct v4l2_subdev *sd, \
+ struct v4l2_subdev_state *state, \
+ arg_type *arg) \
+ { \
+ return call_##f(sd, state, arg); \
+ }
+
+#endif /* CONFIG_MEDIA_CONTROLLER */
+
+DEFINE_STATE_WRAPPER(get_fmt, struct v4l2_subdev_format);
+DEFINE_STATE_WRAPPER(set_fmt, struct v4l2_subdev_format);
+DEFINE_STATE_WRAPPER(enum_mbus_code, struct v4l2_subdev_mbus_code_enum);
+DEFINE_STATE_WRAPPER(enum_frame_size, struct v4l2_subdev_frame_size_enum);
+DEFINE_STATE_WRAPPER(enum_frame_interval, struct v4l2_subdev_frame_interval_enum);
+DEFINE_STATE_WRAPPER(get_selection, struct v4l2_subdev_selection);
+DEFINE_STATE_WRAPPER(set_selection, struct v4l2_subdev_selection);
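+
+/*
+ * For reference, the macro above expands (in the CONFIG_MEDIA_CONTROLLER
+ * case) to a wrapper of the following shape, shown here hand-expanded for
+ * get_fmt purely as an illustration:
+ *
+ *	static int call_get_fmt_state(struct v4l2_subdev *sd,
+ *				      struct v4l2_subdev_state *_state,
+ *				      struct v4l2_subdev_format *arg)
+ *	{
+ *		struct v4l2_subdev_state *state = _state;
+ *		int ret;
+ *
+ *		if (!_state)
+ *			state = v4l2_subdev_lock_and_get_active_state(sd);
+ *		ret = call_get_fmt(sd, state, arg);
+ *		if (!_state && state)
+ *			v4l2_subdev_unlock_state(state);
+ *		return ret;
+ *	}
+ */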
+
+static const struct v4l2_subdev_pad_ops v4l2_subdev_call_pad_wrappers = {
+ .get_fmt = call_get_fmt_state,
+ .set_fmt = call_set_fmt_state,
+ .enum_mbus_code = call_enum_mbus_code_state,
+ .enum_frame_size = call_enum_frame_size_state,
+ .enum_frame_interval = call_enum_frame_interval_state,
+ .get_selection = call_get_selection_state,
+ .set_selection = call_set_selection_state,
+ .get_edid = call_get_edid,
+ .set_edid = call_set_edid,
+ .dv_timings_cap = call_dv_timings_cap,
+ .enum_dv_timings = call_enum_dv_timings,
+ .get_mbus_config = call_get_mbus_config,
+};
+
+static const struct v4l2_subdev_video_ops v4l2_subdev_call_video_wrappers = {
+ .g_frame_interval = call_g_frame_interval,
+ .s_frame_interval = call_s_frame_interval,
+ .s_stream = call_s_stream,
+};
+
+const struct v4l2_subdev_ops v4l2_subdev_call_wrappers = {
+ .pad = &v4l2_subdev_call_pad_wrappers,
+ .video = &v4l2_subdev_call_video_wrappers,
+};
+EXPORT_SYMBOL(v4l2_subdev_call_wrappers);
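+
+/*
+ * Note: the v4l2_subdev_call() macro in <media/v4l2-subdev.h> consults this
+ * wrapper table before invoking a driver op, so the argument and state
+ * checks above run whenever a wrapped op is invoked through the macro. A
+ * simplified sketch of the dispatch for a pad op (illustration only, not
+ * the literal macro body):
+ *
+ *	if (!sd->ops->pad || !sd->ops->pad->get_fmt)
+ *		return -ENOIOCTLCMD;
+ *	if (v4l2_subdev_call_wrappers.pad &&
+ *	    v4l2_subdev_call_wrappers.pad->get_fmt)
+ *		return v4l2_subdev_call_wrappers.pad->get_fmt(sd, state, fmt);
+ *	return sd->ops->pad->get_fmt(sd, state, fmt);
+ */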
+
+#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
+
+static struct v4l2_subdev_state *
+subdev_ioctl_get_state(struct v4l2_subdev *sd, struct v4l2_subdev_fh *subdev_fh,
+ unsigned int cmd, void *arg)
+{
+ u32 which;
+
+ switch (cmd) {
+ default:
+ return NULL;
+ case VIDIOC_SUBDEV_G_FMT:
+ case VIDIOC_SUBDEV_S_FMT:
+ which = ((struct v4l2_subdev_format *)arg)->which;
+ break;
+ case VIDIOC_SUBDEV_G_CROP:
+ case VIDIOC_SUBDEV_S_CROP:
+ which = ((struct v4l2_subdev_crop *)arg)->which;
+ break;
+ case VIDIOC_SUBDEV_ENUM_MBUS_CODE:
+ which = ((struct v4l2_subdev_mbus_code_enum *)arg)->which;
+ break;
+ case VIDIOC_SUBDEV_ENUM_FRAME_SIZE:
+ which = ((struct v4l2_subdev_frame_size_enum *)arg)->which;
+ break;
+ case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL:
+ which = ((struct v4l2_subdev_frame_interval_enum *)arg)->which;
+ break;
+ case VIDIOC_SUBDEV_G_SELECTION:
+ case VIDIOC_SUBDEV_S_SELECTION:
+ which = ((struct v4l2_subdev_selection *)arg)->which;
+ break;
+ case VIDIOC_SUBDEV_G_ROUTING:
+ case VIDIOC_SUBDEV_S_ROUTING:
+ which = ((struct v4l2_subdev_routing *)arg)->which;
+ break;
+ }
+
+ return which == V4L2_SUBDEV_FORMAT_TRY ?
+ subdev_fh->state :
+ v4l2_subdev_get_unlocked_active_state(sd);
+}
+
+static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg,
+ struct v4l2_subdev_state *state)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+ struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
+ bool ro_subdev = test_bit(V4L2_FL_SUBDEV_RO_DEVNODE, &vdev->flags);
+ bool streams_subdev = sd->flags & V4L2_SUBDEV_FL_STREAMS;
+ bool client_supports_streams = subdev_fh->client_caps &
+ V4L2_SUBDEV_CLIENT_CAP_STREAMS;
+ int rval;
+
+ /*
+ * If the streams API is not enabled, remove V4L2_SUBDEV_CAP_STREAMS.
+ * Remove this when the API is no longer experimental.
+ */
+ if (!v4l2_subdev_enable_streams_api)
+ streams_subdev = false;
+
+ switch (cmd) {
+ case VIDIOC_SUBDEV_QUERYCAP: {
+ struct v4l2_subdev_capability *cap = arg;
+
+ memset(cap->reserved, 0, sizeof(cap->reserved));
+ cap->version = LINUX_VERSION_CODE;
+ cap->capabilities =
+ (ro_subdev ? V4L2_SUBDEV_CAP_RO_SUBDEV : 0) |
+ (streams_subdev ? V4L2_SUBDEV_CAP_STREAMS : 0);
+
+ return 0;
+ }
+
+ case VIDIOC_QUERYCTRL:
+ /*
+ * TODO: this really should be folded into v4l2_queryctrl (this
+ * currently returns -EINVAL for NULL control handlers).
+ * However, v4l2_queryctrl() is still called directly by
+ * drivers as well and until that has been addressed I believe
+ * it is safer to do the check here. The same is true for the
+ * other control ioctls below.
+ */
+ if (!vfh->ctrl_handler)
+ return -ENOTTY;
+ return v4l2_queryctrl(vfh->ctrl_handler, arg);
+
+ case VIDIOC_QUERY_EXT_CTRL:
+ if (!vfh->ctrl_handler)
+ return -ENOTTY;
+ return v4l2_query_ext_ctrl(vfh->ctrl_handler, arg);
+
+ case VIDIOC_QUERYMENU:
+ if (!vfh->ctrl_handler)
+ return -ENOTTY;
+ return v4l2_querymenu(vfh->ctrl_handler, arg);
+
+ case VIDIOC_G_CTRL:
+ if (!vfh->ctrl_handler)
+ return -ENOTTY;
+ return v4l2_g_ctrl(vfh->ctrl_handler, arg);
+
+ case VIDIOC_S_CTRL:
+ if (!vfh->ctrl_handler)
+ return -ENOTTY;
+ return v4l2_s_ctrl(vfh, vfh->ctrl_handler, arg);
+
+ case VIDIOC_G_EXT_CTRLS:
+ if (!vfh->ctrl_handler)
+ return -ENOTTY;
+ return v4l2_g_ext_ctrls(vfh->ctrl_handler,
+ vdev, sd->v4l2_dev->mdev, arg);
+
+ case VIDIOC_S_EXT_CTRLS:
+ if (!vfh->ctrl_handler)
+ return -ENOTTY;
+ return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler,
+ vdev, sd->v4l2_dev->mdev, arg);
+
+ case VIDIOC_TRY_EXT_CTRLS:
+ if (!vfh->ctrl_handler)
+ return -ENOTTY;
+ return v4l2_try_ext_ctrls(vfh->ctrl_handler,
+ vdev, sd->v4l2_dev->mdev, arg);
+
+ case VIDIOC_DQEVENT:
+ if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
+ return -ENOIOCTLCMD;
+
+ return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);
+
+ case VIDIOC_SUBSCRIBE_EVENT:
+ return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg);
+
+ case VIDIOC_UNSUBSCRIBE_EVENT:
+ return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg);
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ case VIDIOC_DBG_G_REGISTER:
+ {
+ struct v4l2_dbg_register *p = arg;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ return v4l2_subdev_call(sd, core, g_register, p);
+ }
+ case VIDIOC_DBG_S_REGISTER:
+ {
+ struct v4l2_dbg_register *p = arg;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ return v4l2_subdev_call(sd, core, s_register, p);
+ }
+ case VIDIOC_DBG_G_CHIP_INFO:
+ {
+ struct v4l2_dbg_chip_info *p = arg;
+
+ if (p->match.type != V4L2_CHIP_MATCH_SUBDEV || p->match.addr)
+ return -EINVAL;
+ if (sd->ops->core && sd->ops->core->s_register)
+ p->flags |= V4L2_CHIP_FL_WRITABLE;
+ if (sd->ops->core && sd->ops->core->g_register)
+ p->flags |= V4L2_CHIP_FL_READABLE;
+ strscpy(p->name, sd->name, sizeof(p->name));
+ return 0;
+ }
+#endif
+
+ case VIDIOC_LOG_STATUS: {
+ int ret;
+
+ pr_info("%s: ================= START STATUS =================\n",
+ sd->name);
+ ret = v4l2_subdev_call(sd, core, log_status);
+ pr_info("%s: ================== END STATUS ==================\n",
+ sd->name);
+ return ret;
+ }
+
+ case VIDIOC_SUBDEV_G_FMT: {
+ struct v4l2_subdev_format *format = arg;
+
+ if (!client_supports_streams)
+ format->stream = 0;
+
+ memset(format->reserved, 0, sizeof(format->reserved));
+ memset(format->format.reserved, 0, sizeof(format->format.reserved));
+ return v4l2_subdev_call(sd, pad, get_fmt, state, format);
+ }
+
+ case VIDIOC_SUBDEV_S_FMT: {
+ struct v4l2_subdev_format *format = arg;
+
+ if (format->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
+ return -EPERM;
+
+ if (!client_supports_streams)
+ format->stream = 0;
+
+ memset(format->reserved, 0, sizeof(format->reserved));
+ memset(format->format.reserved, 0, sizeof(format->format.reserved));
+ return v4l2_subdev_call(sd, pad, set_fmt, state, format);
+ }
+
+ case VIDIOC_SUBDEV_G_CROP: {
+ struct v4l2_subdev_crop *crop = arg;
+ struct v4l2_subdev_selection sel;
+
+ if (!client_supports_streams)
+ crop->stream = 0;
+
+ memset(crop->reserved, 0, sizeof(crop->reserved));
+ memset(&sel, 0, sizeof(sel));
+ sel.which = crop->which;
+ sel.pad = crop->pad;
+ sel.target = V4L2_SEL_TGT_CROP;
+
+ rval = v4l2_subdev_call(
+ sd, pad, get_selection, state, &sel);
+
+ crop->rect = sel.r;
+
+ return rval;
+ }
+
+ case VIDIOC_SUBDEV_S_CROP: {
+ struct v4l2_subdev_crop *crop = arg;
+ struct v4l2_subdev_selection sel;
+
+ if (crop->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
+ return -EPERM;
+
+ if (!client_supports_streams)
+ crop->stream = 0;
+
+ memset(crop->reserved, 0, sizeof(crop->reserved));
+ memset(&sel, 0, sizeof(sel));
+ sel.which = crop->which;
+ sel.pad = crop->pad;
+ sel.target = V4L2_SEL_TGT_CROP;
+ sel.r = crop->rect;
+
+ rval = v4l2_subdev_call(
+ sd, pad, set_selection, state, &sel);
+
+ crop->rect = sel.r;
+
+ return rval;
+ }
+
+ case VIDIOC_SUBDEV_ENUM_MBUS_CODE: {
+ struct v4l2_subdev_mbus_code_enum *code = arg;
+
+ if (!client_supports_streams)
+ code->stream = 0;
+
+ memset(code->reserved, 0, sizeof(code->reserved));
+ return v4l2_subdev_call(sd, pad, enum_mbus_code, state,
+ code);
+ }
+
+ case VIDIOC_SUBDEV_ENUM_FRAME_SIZE: {
+ struct v4l2_subdev_frame_size_enum *fse = arg;
+
+ if (!client_supports_streams)
+ fse->stream = 0;
+
+ memset(fse->reserved, 0, sizeof(fse->reserved));
+ return v4l2_subdev_call(sd, pad, enum_frame_size, state,
+ fse);
+ }
+
+ case VIDIOC_SUBDEV_G_FRAME_INTERVAL: {
+ struct v4l2_subdev_frame_interval *fi = arg;
+
+ if (!client_supports_streams)
+ fi->stream = 0;
+
+ memset(fi->reserved, 0, sizeof(fi->reserved));
+ return v4l2_subdev_call(sd, video, g_frame_interval, arg);
+ }
+
+ case VIDIOC_SUBDEV_S_FRAME_INTERVAL: {
+ struct v4l2_subdev_frame_interval *fi = arg;
+
+ if (ro_subdev)
+ return -EPERM;
+
+ if (!client_supports_streams)
+ fi->stream = 0;
+
+ memset(fi->reserved, 0, sizeof(fi->reserved));
+ return v4l2_subdev_call(sd, video, s_frame_interval, arg);
+ }
+
+ case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL: {
+ struct v4l2_subdev_frame_interval_enum *fie = arg;
+
+ if (!client_supports_streams)
+ fie->stream = 0;
+
+ memset(fie->reserved, 0, sizeof(fie->reserved));
+ return v4l2_subdev_call(sd, pad, enum_frame_interval, state,
+ fie);
+ }
+
+ case VIDIOC_SUBDEV_G_SELECTION: {
+ struct v4l2_subdev_selection *sel = arg;
+
+ if (!client_supports_streams)
+ sel->stream = 0;
+
+ memset(sel->reserved, 0, sizeof(sel->reserved));
+ return v4l2_subdev_call(
+ sd, pad, get_selection, state, sel);
+ }
+
+ case VIDIOC_SUBDEV_S_SELECTION: {
+ struct v4l2_subdev_selection *sel = arg;
+
+ if (sel->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
+ return -EPERM;
+
+ if (!client_supports_streams)
+ sel->stream = 0;
+
+ memset(sel->reserved, 0, sizeof(sel->reserved));
+ return v4l2_subdev_call(
+ sd, pad, set_selection, state, sel);
+ }
+
+ case VIDIOC_G_EDID: {
+ struct v4l2_subdev_edid *edid = arg;
+
+ return v4l2_subdev_call(sd, pad, get_edid, edid);
+ }
+
+ case VIDIOC_S_EDID: {
+ struct v4l2_subdev_edid *edid = arg;
+
+ return v4l2_subdev_call(sd, pad, set_edid, edid);
+ }
+
+ case VIDIOC_SUBDEV_DV_TIMINGS_CAP: {
+ struct v4l2_dv_timings_cap *cap = arg;
+
+ return v4l2_subdev_call(sd, pad, dv_timings_cap, cap);
+ }
+
+ case VIDIOC_SUBDEV_ENUM_DV_TIMINGS: {
+ struct v4l2_enum_dv_timings *dvt = arg;
+
+ return v4l2_subdev_call(sd, pad, enum_dv_timings, dvt);
+ }
+
+ case VIDIOC_SUBDEV_QUERY_DV_TIMINGS:
+ return v4l2_subdev_call(sd, video, query_dv_timings, arg);
+
+ case VIDIOC_SUBDEV_G_DV_TIMINGS:
+ return v4l2_subdev_call(sd, video, g_dv_timings, arg);
+
+ case VIDIOC_SUBDEV_S_DV_TIMINGS:
+ if (ro_subdev)
+ return -EPERM;
+
+ return v4l2_subdev_call(sd, video, s_dv_timings, arg);
+
+ case VIDIOC_SUBDEV_G_STD:
+ return v4l2_subdev_call(sd, video, g_std, arg);
+
+ case VIDIOC_SUBDEV_S_STD: {
+ v4l2_std_id *std = arg;
+
+ if (ro_subdev)
+ return -EPERM;
+
+ return v4l2_subdev_call(sd, video, s_std, *std);
+ }
+
+ case VIDIOC_SUBDEV_ENUMSTD: {
+ struct v4l2_standard *p = arg;
+ v4l2_std_id id;
+
+ if (v4l2_subdev_call(sd, video, g_tvnorms, &id))
+ return -EINVAL;
+
+ return v4l_video_std_enumstd(p, id);
+ }
+
+ case VIDIOC_SUBDEV_QUERYSTD:
+ return v4l2_subdev_call(sd, video, querystd, arg);
+
+ case VIDIOC_SUBDEV_G_ROUTING: {
+ struct v4l2_subdev_routing *routing = arg;
+ struct v4l2_subdev_krouting *krouting;
+
+ if (!v4l2_subdev_enable_streams_api)
+ return -ENOIOCTLCMD;
+
+ if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
+ return -ENOIOCTLCMD;
+
+ memset(routing->reserved, 0, sizeof(routing->reserved));
+
+ krouting = &state->routing;
+
+ if (routing->num_routes < krouting->num_routes) {
+ routing->num_routes = krouting->num_routes;
+ return -ENOSPC;
+ }
+
+ memcpy((struct v4l2_subdev_route *)(uintptr_t)routing->routes,
+ krouting->routes,
+ krouting->num_routes * sizeof(*krouting->routes));
+ routing->num_routes = krouting->num_routes;
+
+ return 0;
+ }
+
+ case VIDIOC_SUBDEV_S_ROUTING: {
+ struct v4l2_subdev_routing *routing = arg;
+ struct v4l2_subdev_route *routes =
+ (struct v4l2_subdev_route *)(uintptr_t)routing->routes;
+ struct v4l2_subdev_krouting krouting = {};
+ unsigned int i;
+
+ if (!v4l2_subdev_enable_streams_api)
+ return -ENOIOCTLCMD;
+
+ if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
+ return -ENOIOCTLCMD;
+
+ if (routing->which != V4L2_SUBDEV_FORMAT_TRY && ro_subdev)
+ return -EPERM;
+
+ memset(routing->reserved, 0, sizeof(routing->reserved));
+
+ for (i = 0; i < routing->num_routes; ++i) {
+ const struct v4l2_subdev_route *route = &routes[i];
+ const struct media_pad *pads = sd->entity.pads;
+
+ if (route->sink_stream > V4L2_SUBDEV_MAX_STREAM_ID ||
+ route->source_stream > V4L2_SUBDEV_MAX_STREAM_ID)
+ return -EINVAL;
+
+ if (route->sink_pad >= sd->entity.num_pads)
+ return -EINVAL;
+
+ if (!(pads[route->sink_pad].flags &
+ MEDIA_PAD_FL_SINK))
+ return -EINVAL;
+
+ if (route->source_pad >= sd->entity.num_pads)
+ return -EINVAL;
+
+ if (!(pads[route->source_pad].flags &
+ MEDIA_PAD_FL_SOURCE))
+ return -EINVAL;
+ }
+
+ krouting.num_routes = routing->num_routes;
+ krouting.routes = routes;
+
+ return v4l2_subdev_call(sd, pad, set_routing, state,
+ routing->which, &krouting);
+ }
+
+ case VIDIOC_SUBDEV_G_CLIENT_CAP: {
+ struct v4l2_subdev_client_capability *client_cap = arg;
+
+ client_cap->capabilities = subdev_fh->client_caps;
+
+ return 0;
+ }
+
+ case VIDIOC_SUBDEV_S_CLIENT_CAP: {
+ struct v4l2_subdev_client_capability *client_cap = arg;
+
+ /*
+		 * Clear V4L2_SUBDEV_CLIENT_CAP_STREAMS if the streams API is
+		 * not enabled. Remove this when the streams API is no longer
+		 * experimental.
+ */
+ if (!v4l2_subdev_enable_streams_api)
+ client_cap->capabilities &= ~V4L2_SUBDEV_CLIENT_CAP_STREAMS;
+
+ /* Filter out unsupported capabilities */
+ client_cap->capabilities &= V4L2_SUBDEV_CLIENT_CAP_STREAMS;
+
+ subdev_fh->client_caps = client_cap->capabilities;
+
+ return 0;
+ }
+
+ default:
+ return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
+ }
+
+ return 0;
+}
+
+static long subdev_do_ioctl_lock(struct file *file, unsigned int cmd, void *arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct mutex *lock = vdev->lock;
+ long ret = -ENODEV;
+
+ if (lock && mutex_lock_interruptible(lock))
+ return -ERESTARTSYS;
+
+ if (video_is_registered(vdev)) {
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+ struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
+ struct v4l2_subdev_state *state;
+
+ state = subdev_ioctl_get_state(sd, subdev_fh, cmd, arg);
+
+ if (state)
+ v4l2_subdev_lock_state(state);
+
+ ret = subdev_do_ioctl(file, cmd, arg, state);
+
+ if (state)
+ v4l2_subdev_unlock_state(state);
+ }
+
+ if (lock)
+ mutex_unlock(lock);
+ return ret;
+}
+
+static long subdev_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, subdev_do_ioctl_lock);
+}
+
+#ifdef CONFIG_COMPAT
+static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+
+ return v4l2_subdev_call(sd, core, compat_ioctl32, cmd, arg);
+}
+#endif
+
+#else /* CONFIG_VIDEO_V4L2_SUBDEV_API */
+static long subdev_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return -ENODEV;
+}
+
+#ifdef CONFIG_COMPAT
+static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return -ENODEV;
+}
+#endif
+#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */
+
+static __poll_t subdev_poll(struct file *file, poll_table *wait)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+ struct v4l2_fh *fh = file->private_data;
+
+ if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
+ return EPOLLERR;
+
+ poll_wait(file, &fh->wait, wait);
+
+ if (v4l2_event_pending(fh))
+ return EPOLLPRI;
+
+ return 0;
+}
+
+const struct v4l2_file_operations v4l2_subdev_fops = {
+ .owner = THIS_MODULE,
+ .open = subdev_open,
+ .unlocked_ioctl = subdev_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl32 = subdev_compat_ioctl32,
+#endif
+ .release = subdev_close,
+ .poll = subdev_poll,
+};
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+
+int v4l2_subdev_get_fwnode_pad_1_to_1(struct media_entity *entity,
+ struct fwnode_endpoint *endpoint)
+{
+ struct fwnode_handle *fwnode;
+ struct v4l2_subdev *sd;
+
+ if (!is_media_entity_v4l2_subdev(entity))
+ return -EINVAL;
+
+ sd = media_entity_to_v4l2_subdev(entity);
+
+ fwnode = fwnode_graph_get_port_parent(endpoint->local_fwnode);
+ fwnode_handle_put(fwnode);
+
+ if (device_match_fwnode(sd->dev, fwnode))
+ return endpoint->port;
+
+ return -ENXIO;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_get_fwnode_pad_1_to_1);
+
+int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd,
+ struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt)
+{
+ bool pass = true;
+
+ /* The width, height and code must match. */
+ if (source_fmt->format.width != sink_fmt->format.width) {
+ dev_dbg(sd->entity.graph_obj.mdev->dev,
+ "%s: width does not match (source %u, sink %u)\n",
+ __func__,
+ source_fmt->format.width, sink_fmt->format.width);
+ pass = false;
+ }
+
+ if (source_fmt->format.height != sink_fmt->format.height) {
+ dev_dbg(sd->entity.graph_obj.mdev->dev,
+ "%s: height does not match (source %u, sink %u)\n",
+ __func__,
+ source_fmt->format.height, sink_fmt->format.height);
+ pass = false;
+ }
+
+ if (source_fmt->format.code != sink_fmt->format.code) {
+ dev_dbg(sd->entity.graph_obj.mdev->dev,
+ "%s: media bus code does not match (source 0x%8.8x, sink 0x%8.8x)\n",
+ __func__,
+ source_fmt->format.code, sink_fmt->format.code);
+ pass = false;
+ }
+
+	/*
+	 * The field order must match, or the sink field order must be NONE
+ * to support interlaced hardware connected to bridges that support
+ * progressive formats only.
+ */
+ if (source_fmt->format.field != sink_fmt->format.field &&
+ sink_fmt->format.field != V4L2_FIELD_NONE) {
+ dev_dbg(sd->entity.graph_obj.mdev->dev,
+ "%s: field does not match (source %u, sink %u)\n",
+ __func__,
+ source_fmt->format.field, sink_fmt->format.field);
+ pass = false;
+ }
+
+ if (pass)
+ return 0;
+
+ dev_dbg(sd->entity.graph_obj.mdev->dev,
+ "%s: link was \"%s\":%u -> \"%s\":%u\n", __func__,
+ link->source->entity->name, link->source->index,
+ link->sink->entity->name, link->sink->index);
+
+ return -EPIPE;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate_default);
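+
+/*
+ * Illustrative outcomes of the default validation (hypothetical values): a
+ * 1920x1080 MEDIA_BUS_FMT_SRGGB10_1X10 source linked to an identically
+ * configured sink passes; the same source with the sink set to 1280x720
+ * fails with -EPIPE. An interlaced source feeding a sink whose field is
+ * V4L2_FIELD_NONE still passes, per the field rule above.
+ */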
+
+static int
+v4l2_subdev_link_validate_get_format(struct media_pad *pad, u32 stream,
+ struct v4l2_subdev_format *fmt,
+ bool states_locked)
+{
+ struct v4l2_subdev_state *state;
+ struct v4l2_subdev *sd;
+ int ret;
+
+ if (!is_media_entity_v4l2_subdev(pad->entity)) {
+ WARN(pad->entity->function != MEDIA_ENT_F_IO_V4L,
+ "Driver bug! Wrong media entity type 0x%08x, entity %s\n",
+ pad->entity->function, pad->entity->name);
+
+ return -EINVAL;
+ }
+
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+
+ fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt->pad = pad->index;
+ fmt->stream = stream;
+
+ if (states_locked)
+ state = v4l2_subdev_get_locked_active_state(sd);
+ else
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+
+ ret = v4l2_subdev_call(sd, pad, get_fmt, state, fmt);
+
+ if (!states_locked && state)
+ v4l2_subdev_unlock_state(state);
+
+ return ret;
+}
+
+#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
+
+static void __v4l2_link_validate_get_streams(struct media_pad *pad,
+ u64 *streams_mask,
+ bool states_locked)
+{
+ struct v4l2_subdev_route *route;
+ struct v4l2_subdev_state *state;
+ struct v4l2_subdev *subdev;
+
+ subdev = media_entity_to_v4l2_subdev(pad->entity);
+
+ *streams_mask = 0;
+
+ if (states_locked)
+ state = v4l2_subdev_get_locked_active_state(subdev);
+ else
+ state = v4l2_subdev_lock_and_get_active_state(subdev);
+
+ if (WARN_ON(!state))
+ return;
+
+ for_each_active_route(&state->routing, route) {
+ u32 route_pad;
+ u32 route_stream;
+
+ if (pad->flags & MEDIA_PAD_FL_SOURCE) {
+ route_pad = route->source_pad;
+ route_stream = route->source_stream;
+ } else {
+ route_pad = route->sink_pad;
+ route_stream = route->sink_stream;
+ }
+
+ if (route_pad != pad->index)
+ continue;
+
+ *streams_mask |= BIT_ULL(route_stream);
+ }
+
+ if (!states_locked)
+ v4l2_subdev_unlock_state(state);
+}
+
+#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */
+
+static void v4l2_link_validate_get_streams(struct media_pad *pad,
+ u64 *streams_mask,
+ bool states_locked)
+{
+ struct v4l2_subdev *subdev = media_entity_to_v4l2_subdev(pad->entity);
+
+ if (!(subdev->flags & V4L2_SUBDEV_FL_STREAMS)) {
+ /* Non-streams subdevs have an implicit stream 0 */
+ *streams_mask = BIT_ULL(0);
+ return;
+ }
+
+#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
+ __v4l2_link_validate_get_streams(pad, streams_mask, states_locked);
+#else
+ /* This shouldn't happen */
+ *streams_mask = 0;
+#endif
+}
+
+static int v4l2_subdev_link_validate_locked(struct media_link *link, bool states_locked)
+{
+ struct v4l2_subdev *sink_subdev =
+ media_entity_to_v4l2_subdev(link->sink->entity);
+ struct device *dev = sink_subdev->entity.graph_obj.mdev->dev;
+ u64 source_streams_mask;
+ u64 sink_streams_mask;
+ u64 dangling_sink_streams;
+ u32 stream;
+ int ret;
+
+ dev_dbg(dev, "validating link \"%s\":%u -> \"%s\":%u\n",
+ link->source->entity->name, link->source->index,
+ link->sink->entity->name, link->sink->index);
+
+ v4l2_link_validate_get_streams(link->source, &source_streams_mask, states_locked);
+ v4l2_link_validate_get_streams(link->sink, &sink_streams_mask, states_locked);
+
+ /*
+ * It is ok to have more source streams than sink streams as extra
+ * source streams can just be ignored by the receiver, but having extra
+ * sink streams is an error as streams must have a source.
+ */
+ dangling_sink_streams = (source_streams_mask ^ sink_streams_mask) &
+ sink_streams_mask;
+ if (dangling_sink_streams) {
+ dev_err(dev, "Dangling sink streams: mask %#llx\n",
+ dangling_sink_streams);
+ return -EINVAL;
+ }
+
+ /* Validate source and sink stream formats */
+
+ for (stream = 0; stream < sizeof(sink_streams_mask) * 8; ++stream) {
+ struct v4l2_subdev_format sink_fmt, source_fmt;
+
+ if (!(sink_streams_mask & BIT_ULL(stream)))
+ continue;
+
+ dev_dbg(dev, "validating stream \"%s\":%u:%u -> \"%s\":%u:%u\n",
+ link->source->entity->name, link->source->index, stream,
+ link->sink->entity->name, link->sink->index, stream);
+
+ ret = v4l2_subdev_link_validate_get_format(link->source, stream,
+ &source_fmt, states_locked);
+ if (ret < 0) {
+ dev_dbg(dev,
+ "Failed to get format for \"%s\":%u:%u (but that's ok)\n",
+ link->source->entity->name, link->source->index,
+ stream);
+ continue;
+ }
+
+ ret = v4l2_subdev_link_validate_get_format(link->sink, stream,
+ &sink_fmt, states_locked);
+ if (ret < 0) {
+ dev_dbg(dev,
+ "Failed to get format for \"%s\":%u:%u (but that's ok)\n",
+ link->sink->entity->name, link->sink->index,
+ stream);
+ continue;
+ }
+
+ /* TODO: add stream number to link_validate() */
+ ret = v4l2_subdev_call(sink_subdev, pad, link_validate, link,
+ &source_fmt, &sink_fmt);
+ if (!ret)
+ continue;
+
+ if (ret != -ENOIOCTLCMD)
+ return ret;
+
+ ret = v4l2_subdev_link_validate_default(sink_subdev, link,
+ &source_fmt, &sink_fmt);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int v4l2_subdev_link_validate(struct media_link *link)
+{
+ struct v4l2_subdev *source_sd, *sink_sd;
+ struct v4l2_subdev_state *source_state, *sink_state;
+ bool states_locked;
+ int ret;
+
+ if (!is_media_entity_v4l2_subdev(link->sink->entity) ||
+ !is_media_entity_v4l2_subdev(link->source->entity)) {
+ pr_warn_once("%s of link '%s':%u->'%s':%u is not a V4L2 sub-device, driver bug!\n",
+ !is_media_entity_v4l2_subdev(link->sink->entity) ?
+ "sink" : "source",
+ link->source->entity->name, link->source->index,
+ link->sink->entity->name, link->sink->index);
+ return 0;
+ }
+
+ sink_sd = media_entity_to_v4l2_subdev(link->sink->entity);
+ source_sd = media_entity_to_v4l2_subdev(link->source->entity);
+
+ sink_state = v4l2_subdev_get_unlocked_active_state(sink_sd);
+ source_state = v4l2_subdev_get_unlocked_active_state(source_sd);
+
+ states_locked = sink_state && source_state;
+
+ if (states_locked) {
+ v4l2_subdev_lock_state(sink_state);
+ v4l2_subdev_lock_state(source_state);
+ }
+
+ ret = v4l2_subdev_link_validate_locked(link, states_locked);
+
+ if (states_locked) {
+ v4l2_subdev_unlock_state(sink_state);
+ v4l2_subdev_unlock_state(source_state);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate);
+
+bool v4l2_subdev_has_pad_interdep(struct media_entity *entity,
+ unsigned int pad0, unsigned int pad1)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct v4l2_subdev_krouting *routing;
+ struct v4l2_subdev_state *state;
+ unsigned int i;
+
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+
+ routing = &state->routing;
+
+ for (i = 0; i < routing->num_routes; ++i) {
+ struct v4l2_subdev_route *route = &routing->routes[i];
+
+ if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
+ continue;
+
+ if ((route->sink_pad == pad0 && route->source_pad == pad1) ||
+ (route->source_pad == pad0 && route->sink_pad == pad1)) {
+ v4l2_subdev_unlock_state(state);
+ return true;
+ }
+ }
+
+ v4l2_subdev_unlock_state(state);
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_has_pad_interdep);
+
+struct v4l2_subdev_state *
+__v4l2_subdev_state_alloc(struct v4l2_subdev *sd, const char *lock_name,
+ struct lock_class_key *lock_key)
+{
+ struct v4l2_subdev_state *state;
+ int ret;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+ __mutex_init(&state->_lock, lock_name, lock_key);
+ if (sd->state_lock)
+ state->lock = sd->state_lock;
+ else
+ state->lock = &state->_lock;
+
+ /* Drivers that support streams do not need the legacy pad config */
+ if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS) && sd->entity.num_pads) {
+ state->pads = kvcalloc(sd->entity.num_pads,
+ sizeof(*state->pads), GFP_KERNEL);
+ if (!state->pads) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ }
+
+ /*
+ * There can be no race at this point, but we lock the state anyway to
+ * satisfy lockdep checks.
+ */
+ v4l2_subdev_lock_state(state);
+ ret = v4l2_subdev_call(sd, pad, init_cfg, state);
+ v4l2_subdev_unlock_state(state);
+
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ goto err;
+
+ return state;
+
+err:
+ if (state && state->pads)
+ kvfree(state->pads);
+
+ kfree(state);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(__v4l2_subdev_state_alloc);
+
+void __v4l2_subdev_state_free(struct v4l2_subdev_state *state)
+{
+ if (!state)
+ return;
+
+ mutex_destroy(&state->_lock);
+
+ kfree(state->routing.routes);
+ kvfree(state->stream_configs.configs);
+ kvfree(state->pads);
+ kfree(state);
+}
+EXPORT_SYMBOL_GPL(__v4l2_subdev_state_free);
+
+int __v4l2_subdev_init_finalize(struct v4l2_subdev *sd, const char *name,
+ struct lock_class_key *key)
+{
+ struct v4l2_subdev_state *state;
+
+ state = __v4l2_subdev_state_alloc(sd, name, key);
+ if (IS_ERR(state))
+ return PTR_ERR(state);
+
+ sd->active_state = state;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__v4l2_subdev_init_finalize);
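+
+/*
+ * Drivers are expected to use the v4l2_subdev_init_finalize() macro from
+ * <media/v4l2-subdev.h>, which supplies the lock name and a static lock
+ * class key. A typical probe-time sequence might look as follows (a sketch;
+ * entity and pad setup details depend on the driver):
+ *
+ *	ret = media_entity_pads_init(&sd->entity, ARRAY_SIZE(pads), pads);
+ *	if (ret)
+ *		return ret;
+ *
+ *	ret = v4l2_subdev_init_finalize(sd);
+ *	if (ret)
+ *		return ret;
+ *
+ * The allocated active state is released with v4l2_subdev_cleanup().
+ */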
+
+void v4l2_subdev_cleanup(struct v4l2_subdev *sd)
+{
+ struct v4l2_async_subdev_endpoint *ase, *ase_tmp;
+
+ __v4l2_subdev_state_free(sd->active_state);
+ sd->active_state = NULL;
+
+ if (list_empty(&sd->async_subdev_endpoint_list))
+ return;
+
+ list_for_each_entry_safe(ase, ase_tmp, &sd->async_subdev_endpoint_list,
+ async_subdev_endpoint_entry) {
+ list_del(&ase->async_subdev_endpoint_entry);
+
+ kfree(ase);
+ }
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_cleanup);
+
+#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
+
+static int
+v4l2_subdev_init_stream_configs(struct v4l2_subdev_stream_configs *stream_configs,
+ const struct v4l2_subdev_krouting *routing)
+{
+ struct v4l2_subdev_stream_configs new_configs = { 0 };
+ struct v4l2_subdev_route *route;
+ u32 idx;
+
+ /* Count number of formats needed */
+ for_each_active_route(routing, route) {
+ /*
+ * Each route needs a format on both ends of the route.
+ */
+ new_configs.num_configs += 2;
+ }
+
+ if (new_configs.num_configs) {
+ new_configs.configs = kvcalloc(new_configs.num_configs,
+ sizeof(*new_configs.configs),
+ GFP_KERNEL);
+
+ if (!new_configs.configs)
+ return -ENOMEM;
+ }
+
+ /*
+	 * Fill in the 'pad' and 'stream' values for each item in the array
+	 * from the routing table.
+ */
+ idx = 0;
+
+ for_each_active_route(routing, route) {
+ new_configs.configs[idx].pad = route->sink_pad;
+ new_configs.configs[idx].stream = route->sink_stream;
+
+ idx++;
+
+ new_configs.configs[idx].pad = route->source_pad;
+ new_configs.configs[idx].stream = route->source_stream;
+
+ idx++;
+ }
+
+ kvfree(stream_configs->configs);
+ *stream_configs = new_configs;
+
+ return 0;
+}
+
+int v4l2_subdev_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt;
+
+ if (sd->flags & V4L2_SUBDEV_FL_STREAMS)
+ fmt = v4l2_subdev_state_get_stream_format(state, format->pad,
+ format->stream);
+ else if (format->pad < sd->entity.num_pads && format->stream == 0)
+ fmt = v4l2_subdev_get_pad_format(sd, state, format->pad);
+ else
+ fmt = NULL;
+
+ if (!fmt)
+ return -EINVAL;
+
+ format->format = *fmt;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_get_fmt);
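+
+/*
+ * State-aware drivers commonly plug this helper straight into their pad ops
+ * instead of writing their own get_fmt (a common pattern, not a
+ * requirement; the mydrv_* names are hypothetical):
+ *
+ *	static const struct v4l2_subdev_pad_ops mydrv_pad_ops = {
+ *		.get_fmt = v4l2_subdev_get_fmt,
+ *		.set_fmt = mydrv_set_fmt,
+ *	};
+ */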
+
+int v4l2_subdev_set_routing(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ const struct v4l2_subdev_krouting *routing)
+{
+ struct v4l2_subdev_krouting *dst = &state->routing;
+ const struct v4l2_subdev_krouting *src = routing;
+ struct v4l2_subdev_krouting new_routing = { 0 };
+ size_t bytes;
+ int r;
+
+ if (unlikely(check_mul_overflow((size_t)src->num_routes,
+ sizeof(*src->routes), &bytes)))
+ return -EOVERFLOW;
+
+ lockdep_assert_held(state->lock);
+
+ if (src->num_routes > 0) {
+ new_routing.routes = kmemdup(src->routes, bytes, GFP_KERNEL);
+ if (!new_routing.routes)
+ return -ENOMEM;
+ }
+
+ new_routing.num_routes = src->num_routes;
+
+ r = v4l2_subdev_init_stream_configs(&state->stream_configs,
+ &new_routing);
+ if (r) {
+ kfree(new_routing.routes);
+ return r;
+ }
+
+ kfree(dst->routes);
+ *dst = new_routing;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing);
+
+struct v4l2_subdev_route *
+__v4l2_subdev_next_active_route(const struct v4l2_subdev_krouting *routing,
+ struct v4l2_subdev_route *route)
+{
+ if (route)
+ ++route;
+ else
+ route = &routing->routes[0];
+
+ for (; route < routing->routes + routing->num_routes; ++route) {
+ if (!(route->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
+ continue;
+
+ return route;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(__v4l2_subdev_next_active_route);
+
+int v4l2_subdev_set_routing_with_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ const struct v4l2_subdev_krouting *routing,
+ const struct v4l2_mbus_framefmt *fmt)
+{
+ struct v4l2_subdev_stream_configs *stream_configs;
+ unsigned int i;
+ int ret;
+
+ ret = v4l2_subdev_set_routing(sd, state, routing);
+ if (ret)
+ return ret;
+
+ stream_configs = &state->stream_configs;
+
+ for (i = 0; i < stream_configs->num_configs; ++i)
+ stream_configs->configs[i].fmt = *fmt;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_set_routing_with_fmt);
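+
+/*
+ * A set_routing pad op would typically validate the proposed routing first
+ * and then apply it together with a default format (a hedged sketch;
+ * mydrv_default_fmt and the chosen disallow mask are driver-specific
+ * assumptions):
+ *
+ *	static int mydrv_set_routing(struct v4l2_subdev *sd,
+ *				     struct v4l2_subdev_state *state,
+ *				     enum v4l2_subdev_format_whence which,
+ *				     struct v4l2_subdev_krouting *routing)
+ *	{
+ *		int ret;
+ *
+ *		ret = v4l2_subdev_routing_validate(sd, routing,
+ *					V4L2_SUBDEV_ROUTING_ONLY_1_TO_1);
+ *		if (ret)
+ *			return ret;
+ *
+ *		return v4l2_subdev_set_routing_with_fmt(sd, state, routing,
+ *							&mydrv_default_fmt);
+ *	}
+ */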
+
+struct v4l2_mbus_framefmt *
+v4l2_subdev_state_get_stream_format(struct v4l2_subdev_state *state,
+ unsigned int pad, u32 stream)
+{
+ struct v4l2_subdev_stream_configs *stream_configs;
+ unsigned int i;
+
+ lockdep_assert_held(state->lock);
+
+ stream_configs = &state->stream_configs;
+
+ for (i = 0; i < stream_configs->num_configs; ++i) {
+ if (stream_configs->configs[i].pad == pad &&
+ stream_configs->configs[i].stream == stream)
+ return &stream_configs->configs[i].fmt;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_stream_format);
+
+struct v4l2_rect *
+v4l2_subdev_state_get_stream_crop(struct v4l2_subdev_state *state,
+ unsigned int pad, u32 stream)
+{
+ struct v4l2_subdev_stream_configs *stream_configs;
+ unsigned int i;
+
+ lockdep_assert_held(state->lock);
+
+ stream_configs = &state->stream_configs;
+
+ for (i = 0; i < stream_configs->num_configs; ++i) {
+ if (stream_configs->configs[i].pad == pad &&
+ stream_configs->configs[i].stream == stream)
+ return &stream_configs->configs[i].crop;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_stream_crop);
+
+struct v4l2_rect *
+v4l2_subdev_state_get_stream_compose(struct v4l2_subdev_state *state,
+ unsigned int pad, u32 stream)
+{
+ struct v4l2_subdev_stream_configs *stream_configs;
+ unsigned int i;
+
+ lockdep_assert_held(state->lock);
+
+ stream_configs = &state->stream_configs;
+
+ for (i = 0; i < stream_configs->num_configs; ++i) {
+ if (stream_configs->configs[i].pad == pad &&
+ stream_configs->configs[i].stream == stream)
+ return &stream_configs->configs[i].compose;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_stream_compose);
+
+int v4l2_subdev_routing_find_opposite_end(const struct v4l2_subdev_krouting *routing,
+ u32 pad, u32 stream, u32 *other_pad,
+ u32 *other_stream)
+{
+ unsigned int i;
+
+ for (i = 0; i < routing->num_routes; ++i) {
+ struct v4l2_subdev_route *route = &routing->routes[i];
+
+ if (route->source_pad == pad &&
+ route->source_stream == stream) {
+ if (other_pad)
+ *other_pad = route->sink_pad;
+ if (other_stream)
+ *other_stream = route->sink_stream;
+ return 0;
+ }
+
+ if (route->sink_pad == pad && route->sink_stream == stream) {
+ if (other_pad)
+ *other_pad = route->source_pad;
+ if (other_stream)
+ *other_stream = route->source_stream;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_routing_find_opposite_end);
+
+struct v4l2_mbus_framefmt *
+v4l2_subdev_state_get_opposite_stream_format(struct v4l2_subdev_state *state,
+ u32 pad, u32 stream)
+{
+ u32 other_pad, other_stream;
+ int ret;
+
+ ret = v4l2_subdev_routing_find_opposite_end(&state->routing,
+ pad, stream,
+ &other_pad, &other_stream);
+ if (ret)
+ return NULL;
+
+ return v4l2_subdev_state_get_stream_format(state, other_pad,
+ other_stream);
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_state_get_opposite_stream_format);
+
+u64 v4l2_subdev_state_xlate_streams(const struct v4l2_subdev_state *state,
+ u32 pad0, u32 pad1, u64 *streams)
+{
+ const struct v4l2_subdev_krouting *routing = &state->routing;
+ struct v4l2_subdev_route *route;
+ u64 streams0 = 0;
+ u64 streams1 = 0;
+
+ for_each_active_route(routing, route) {
+ if (route->sink_pad == pad0 && route->source_pad == pad1 &&
+ (*streams & BIT_ULL(route->sink_stream))) {
+ streams0 |= BIT_ULL(route->sink_stream);
+ streams1 |= BIT_ULL(route->source_stream);
+ }
+ if (route->source_pad == pad0 && route->sink_pad == pad1 &&
+ (*streams & BIT_ULL(route->source_stream))) {
+ streams0 |= BIT_ULL(route->source_stream);
+ streams1 |= BIT_ULL(route->sink_stream);
+ }
+ }
+
+ *streams = streams0;
+ return streams1;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_state_xlate_streams);
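+
+/*
+ * Worked example with a hypothetical single active route
+ * { .sink_pad = 0, .sink_stream = 2, .source_pad = 1, .source_stream = 0 }:
+ *
+ *	u64 streams = BIT_ULL(2);
+ *	u64 translated;
+ *
+ *	translated = v4l2_subdev_state_xlate_streams(state, 0, 1, &streams);
+ *
+ * leaves streams == BIT_ULL(2) (the matched streams on pad 0) and returns
+ * translated == BIT_ULL(0) (the corresponding streams on pad 1).
+ */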
+
+int v4l2_subdev_routing_validate(struct v4l2_subdev *sd,
+ const struct v4l2_subdev_krouting *routing,
+ enum v4l2_subdev_routing_restriction disallow)
+{
+ u32 *remote_pads = NULL;
+ unsigned int i, j;
+ int ret = -EINVAL;
+
+ if (disallow & (V4L2_SUBDEV_ROUTING_NO_STREAM_MIX |
+ V4L2_SUBDEV_ROUTING_NO_MULTIPLEXING)) {
+ remote_pads = kcalloc(sd->entity.num_pads, sizeof(*remote_pads),
+ GFP_KERNEL);
+ if (!remote_pads)
+ return -ENOMEM;
+
+ for (i = 0; i < sd->entity.num_pads; ++i)
+ remote_pads[i] = U32_MAX;
+ }
+
+ for (i = 0; i < routing->num_routes; ++i) {
+ const struct v4l2_subdev_route *route = &routing->routes[i];
+
+ /* Validate the sink and source pad numbers. */
+ if (route->sink_pad >= sd->entity.num_pads ||
+ !(sd->entity.pads[route->sink_pad].flags & MEDIA_PAD_FL_SINK)) {
+ dev_dbg(sd->dev, "route %u sink (%u) is not a sink pad\n",
+ i, route->sink_pad);
+ goto out;
+ }
+
+ if (route->source_pad >= sd->entity.num_pads ||
+ !(sd->entity.pads[route->source_pad].flags & MEDIA_PAD_FL_SOURCE)) {
+ dev_dbg(sd->dev, "route %u source (%u) is not a source pad\n",
+ i, route->source_pad);
+ goto out;
+ }
+
+ /*
+ * V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX: all streams from a
+ * sink pad must be routed to a single source pad.
+ */
+ if (disallow & V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX) {
+ if (remote_pads[route->sink_pad] != U32_MAX &&
+ remote_pads[route->sink_pad] != route->source_pad) {
+ dev_dbg(sd->dev,
+ "route %u attempts to mix %s streams\n",
+ i, "sink");
+ goto out;
+ }
+ }
+
+ /*
+ * V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX: all streams on a
+ * source pad must originate from a single sink pad.
+ */
+ if (disallow & V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX) {
+ if (remote_pads[route->source_pad] != U32_MAX &&
+ remote_pads[route->source_pad] != route->sink_pad) {
+ dev_dbg(sd->dev,
+ "route %u attempts to mix %s streams\n",
+ i, "source");
+ goto out;
+ }
+ }
+
+ /*
+ * V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING: Pads on the sink
+ * side can not do stream multiplexing, i.e. there can be only
+ * a single stream in a sink pad.
+ */
+ if (disallow & V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING) {
+ if (remote_pads[route->sink_pad] != U32_MAX) {
+ dev_dbg(sd->dev,
+ "route %u attempts to multiplex on %s pad %u\n",
+ i, "sink", route->sink_pad);
+ goto out;
+ }
+ }
+
+ /*
+ * V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING: Pads on the
+ * source side can not do stream multiplexing, i.e. there can
+ * be only a single stream in a source pad.
+ */
+ if (disallow & V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING) {
+ if (remote_pads[route->source_pad] != U32_MAX) {
+ dev_dbg(sd->dev,
+ "route %u attempts to multiplex on %s pad %u\n",
+ i, "source", route->source_pad);
+ goto out;
+ }
+ }
+
+ if (remote_pads) {
+ remote_pads[route->sink_pad] = route->source_pad;
+ remote_pads[route->source_pad] = route->sink_pad;
+ }
+
+ for (j = i + 1; j < routing->num_routes; ++j) {
+ const struct v4l2_subdev_route *r = &routing->routes[j];
+
+ /*
+ * V4L2_SUBDEV_ROUTING_NO_1_TO_N: No two routes can
+ * originate from the same (sink) stream.
+ */
+ if ((disallow & V4L2_SUBDEV_ROUTING_NO_1_TO_N) &&
+ route->sink_pad == r->sink_pad &&
+ route->sink_stream == r->sink_stream) {
+ dev_dbg(sd->dev,
+ "routes %u and %u originate from same sink (%u/%u)\n",
+ i, j, route->sink_pad,
+ route->sink_stream);
+ goto out;
+ }
+
+ /*
+ * V4L2_SUBDEV_ROUTING_NO_N_TO_1: No two routes can end
+ * at the same (source) stream.
+ */
+ if ((disallow & V4L2_SUBDEV_ROUTING_NO_N_TO_1) &&
+ route->source_pad == r->source_pad &&
+ route->source_stream == r->source_stream) {
+ dev_dbg(sd->dev,
+ "routes %u and %u end at same source (%u/%u)\n",
+ i, j, route->source_pad,
+ route->source_stream);
+ goto out;
+ }
+ }
+ }
+
+ ret = 0;
+
+out:
+ kfree(remote_pads);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_routing_validate);
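+
+/*
+ * Example (hypothetical hardware): a bridge that cannot route streams from
+ * several sink pads to one source pad, and that carries at most one stream
+ * per sink pad, would validate a proposed routing with:
+ *
+ *	ret = v4l2_subdev_routing_validate(sd, routing,
+ *				V4L2_SUBDEV_ROUTING_NO_SOURCE_STREAM_MIX |
+ *				V4L2_SUBDEV_ROUTING_NO_SINK_MULTIPLEXING);
+ */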
+
+static int v4l2_subdev_enable_streams_fallback(struct v4l2_subdev *sd, u32 pad,
+ u64 streams_mask)
+{
+ struct device *dev = sd->entity.graph_obj.mdev->dev;
+ unsigned int i;
+ int ret;
+
+ /*
+	 * If the subdev doesn't implement pad-based stream enable, fall back
+ * on the .s_stream() operation. This can only be done for subdevs that
+ * have a single source pad, as sd->enabled_streams is global to the
+ * subdev.
+ */
+ if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE))
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < sd->entity.num_pads; ++i) {
+ if (i != pad && sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE)
+ return -EOPNOTSUPP;
+ }
+
+ if (sd->enabled_streams & streams_mask) {
+ dev_dbg(dev, "set of streams %#llx already enabled on %s:%u\n",
+ streams_mask, sd->entity.name, pad);
+ return -EALREADY;
+ }
+
+ /* Start streaming when the first streams are enabled. */
+ if (!sd->enabled_streams) {
+ ret = v4l2_subdev_call(sd, video, s_stream, 1);
+ if (ret)
+ return ret;
+ }
+
+ sd->enabled_streams |= streams_mask;
+
+ return 0;
+}
+
+int v4l2_subdev_enable_streams(struct v4l2_subdev *sd, u32 pad,
+ u64 streams_mask)
+{
+ struct device *dev = sd->entity.graph_obj.mdev->dev;
+ struct v4l2_subdev_state *state;
+ u64 found_streams = 0;
+ unsigned int i;
+ int ret;
+
+ /* A few basic sanity checks first. */
+ if (pad >= sd->entity.num_pads)
+ return -EINVAL;
+
+ if (!streams_mask)
+ return 0;
+
+ /* Fallback on .s_stream() if .enable_streams() isn't available. */
+ if (!sd->ops->pad || !sd->ops->pad->enable_streams)
+ return v4l2_subdev_enable_streams_fallback(sd, pad,
+ streams_mask);
+
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+
+ /*
+ * Verify that the requested streams exist and that they are not
+ * already enabled.
+ */
+ for (i = 0; i < state->stream_configs.num_configs; ++i) {
+ struct v4l2_subdev_stream_config *cfg =
+ &state->stream_configs.configs[i];
+
+ if (cfg->pad != pad || !(streams_mask & BIT_ULL(cfg->stream)))
+ continue;
+
+ found_streams |= BIT_ULL(cfg->stream);
+
+ if (cfg->enabled) {
+ dev_dbg(dev, "stream %u already enabled on %s:%u\n",
+ cfg->stream, sd->entity.name, pad);
+ ret = -EALREADY;
+ goto done;
+ }
+ }
+
+ if (found_streams != streams_mask) {
+ dev_dbg(dev, "streams 0x%llx not found on %s:%u\n",
+ streams_mask & ~found_streams, sd->entity.name, pad);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ dev_dbg(dev, "enable streams %u:%#llx\n", pad, streams_mask);
+
+ /* Call the .enable_streams() operation. */
+ ret = v4l2_subdev_call(sd, pad, enable_streams, state, pad,
+ streams_mask);
+ if (ret) {
+ dev_dbg(dev, "enable streams %u:%#llx failed: %d\n", pad,
+ streams_mask, ret);
+ goto done;
+ }
+
+ /* Mark the streams as enabled. */
+ for (i = 0; i < state->stream_configs.num_configs; ++i) {
+ struct v4l2_subdev_stream_config *cfg =
+ &state->stream_configs.configs[i];
+
+ if (cfg->pad == pad && (streams_mask & BIT_ULL(cfg->stream)))
+ cfg->enabled = true;
+ }
+
+done:
+ v4l2_subdev_unlock_state(state);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_enable_streams);
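+
+/*
+ * Typical use from a consumer (e.g. a bridge or receiver driver) when
+ * starting capture on a single-stream link; remote_sd and remote_pad are
+ * assumed to have been resolved from the media graph by the caller:
+ *
+ *	ret = v4l2_subdev_enable_streams(remote_sd, remote_pad, BIT_ULL(0));
+ *	if (ret)
+ *		return ret;
+ *
+ * The matching v4l2_subdev_disable_streams() call below undoes this on
+ * stream stop.
+ */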
+
+static int v4l2_subdev_disable_streams_fallback(struct v4l2_subdev *sd, u32 pad,
+ u64 streams_mask)
+{
+ struct device *dev = sd->entity.graph_obj.mdev->dev;
+ unsigned int i;
+ int ret;
+
+ /*
+ * If the subdev doesn't implement pad-based stream enable, fall back
+ * on the .s_stream() operation. This can only be done for subdevs that
+ * have a single source pad, as sd->enabled_streams is global to the
+ * subdev.
+ */
+ if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE))
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < sd->entity.num_pads; ++i) {
+ if (i != pad && sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE)
+ return -EOPNOTSUPP;
+ }
+
+ if ((sd->enabled_streams & streams_mask) != streams_mask) {
+ dev_dbg(dev, "set of streams %#llx already disabled on %s:%u\n",
+ streams_mask, sd->entity.name, pad);
+ return -EALREADY;
+ }
+
+ /* Stop streaming when the last streams are disabled. */
+ if (!(sd->enabled_streams & ~streams_mask)) {
+ ret = v4l2_subdev_call(sd, video, s_stream, 0);
+ if (ret)
+ return ret;
+ }
+
+ sd->enabled_streams &= ~streams_mask;
+
+ return 0;
+}
+
+int v4l2_subdev_disable_streams(struct v4l2_subdev *sd, u32 pad,
+ u64 streams_mask)
+{
+ struct device *dev = sd->entity.graph_obj.mdev->dev;
+ struct v4l2_subdev_state *state;
+ u64 found_streams = 0;
+ unsigned int i;
+ int ret;
+
+ /* A few basic sanity checks first. */
+ if (pad >= sd->entity.num_pads)
+ return -EINVAL;
+
+ if (!streams_mask)
+ return 0;
+
+ /* Fallback on .s_stream() if .disable_streams() isn't available. */
+ if (!sd->ops->pad || !sd->ops->pad->disable_streams)
+ return v4l2_subdev_disable_streams_fallback(sd, pad,
+ streams_mask);
+
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+
+ /*
+ * Verify that the requested streams exist and that they are not
+ * already disabled.
+ */
+ for (i = 0; i < state->stream_configs.num_configs; ++i) {
+ struct v4l2_subdev_stream_config *cfg =
+ &state->stream_configs.configs[i];
+
+ if (cfg->pad != pad || !(streams_mask & BIT_ULL(cfg->stream)))
+ continue;
+
+ found_streams |= BIT_ULL(cfg->stream);
+
+ if (!cfg->enabled) {
+ dev_dbg(dev, "stream %u already disabled on %s:%u\n",
+ cfg->stream, sd->entity.name, pad);
+ ret = -EALREADY;
+ goto done;
+ }
+ }
+
+ if (found_streams != streams_mask) {
+ dev_dbg(dev, "streams 0x%llx not found on %s:%u\n",
+ streams_mask & ~found_streams, sd->entity.name, pad);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ dev_dbg(dev, "disable streams %u:%#llx\n", pad, streams_mask);
+
+ /* Call the .disable_streams() operation. */
+ ret = v4l2_subdev_call(sd, pad, disable_streams, state, pad,
+ streams_mask);
+ if (ret) {
+ dev_dbg(dev, "disable streams %u:%#llx failed: %d\n", pad,
+ streams_mask, ret);
+ goto done;
+ }
+
+ /* Mark the streams as disabled. */
+ for (i = 0; i < state->stream_configs.num_configs; ++i) {
+ struct v4l2_subdev_stream_config *cfg =
+ &state->stream_configs.configs[i];
+
+ if (cfg->pad == pad && (streams_mask & BIT_ULL(cfg->stream)))
+ cfg->enabled = false;
+ }
+
+done:
+ v4l2_subdev_unlock_state(state);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_disable_streams);
+
+int v4l2_subdev_s_stream_helper(struct v4l2_subdev *sd, int enable)
+{
+ struct v4l2_subdev_state *state;
+ struct v4l2_subdev_route *route;
+ struct media_pad *pad;
+ u64 source_mask = 0;
+ int pad_index = -1;
+
+ /*
+ * Find the source pad. This helper is meant for subdevs that have a
+ * single source pad, so failures shouldn't happen, but catch them
+ * loudly nonetheless as they indicate a driver bug.
+ */
+ media_entity_for_each_pad(&sd->entity, pad) {
+ if (pad->flags & MEDIA_PAD_FL_SOURCE) {
+ pad_index = pad->index;
+ break;
+ }
+ }
+
+ if (WARN_ON(pad_index == -1))
+ return -EINVAL;
+
+ /*
+ * As there's a single source pad, just collect all the source streams.
+ */
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+
+ for_each_active_route(&state->routing, route)
+ source_mask |= BIT_ULL(route->source_stream);
+
+ v4l2_subdev_unlock_state(state);
+
+ if (enable)
+ return v4l2_subdev_enable_streams(sd, pad_index, source_mask);
+ else
+ return v4l2_subdev_disable_streams(sd, pad_index, source_mask);
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_s_stream_helper);
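+
+/*
+ * A streams-aware subdev with a single source pad can point its .s_stream()
+ * video op directly at this helper (mydrv_* is a hypothetical driver):
+ *
+ *	static const struct v4l2_subdev_video_ops mydrv_video_ops = {
+ *		.s_stream = v4l2_subdev_s_stream_helper,
+ *	};
+ */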
+
+#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */
+
+#endif /* CONFIG_MEDIA_CONTROLLER */
+
+void v4l2_subdev_init(struct v4l2_subdev *sd, const struct v4l2_subdev_ops *ops)
+{
+ INIT_LIST_HEAD(&sd->list);
+ BUG_ON(!ops);
+ sd->ops = ops;
+ sd->v4l2_dev = NULL;
+ sd->flags = 0;
+ sd->name[0] = '\0';
+ sd->grp_id = 0;
+ sd->dev_priv = NULL;
+ sd->host_priv = NULL;
+ sd->privacy_led = NULL;
+ INIT_LIST_HEAD(&sd->async_subdev_endpoint_list);
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ sd->entity.name = sd->name;
+ sd->entity.obj_type = MEDIA_ENTITY_TYPE_V4L2_SUBDEV;
+ sd->entity.function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
+#endif
+}
+EXPORT_SYMBOL(v4l2_subdev_init);
+
+void v4l2_subdev_notify_event(struct v4l2_subdev *sd,
+ const struct v4l2_event *ev)
+{
+ v4l2_event_queue(sd->devnode, ev);
+ v4l2_subdev_notify(sd, V4L2_DEVICE_NOTIFY_EVENT, (void *)ev);
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_notify_event);
+
+int v4l2_subdev_get_privacy_led(struct v4l2_subdev *sd)
+{
+#if IS_REACHABLE(CONFIG_LEDS_CLASS)
+ sd->privacy_led = led_get(sd->dev, "privacy-led");
+ if (IS_ERR(sd->privacy_led) && PTR_ERR(sd->privacy_led) != -ENOENT)
+ return dev_err_probe(sd->dev, PTR_ERR(sd->privacy_led),
+ "getting privacy LED\n");
+
+ if (!IS_ERR_OR_NULL(sd->privacy_led)) {
+ mutex_lock(&sd->privacy_led->led_access);
+ led_sysfs_disable(sd->privacy_led);
+ led_trigger_remove(sd->privacy_led);
+ led_set_brightness(sd->privacy_led, 0);
+ mutex_unlock(&sd->privacy_led->led_access);
+ }
+#endif
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_get_privacy_led);
+
+void v4l2_subdev_put_privacy_led(struct v4l2_subdev *sd)
+{
+#if IS_REACHABLE(CONFIG_LEDS_CLASS)
+ if (!IS_ERR_OR_NULL(sd->privacy_led)) {
+ mutex_lock(&sd->privacy_led->led_access);
+ led_sysfs_enable(sd->privacy_led);
+ mutex_unlock(&sd->privacy_led->led_access);
+ led_put(sd->privacy_led);
+ }
+#endif
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_put_privacy_led);
diff --git a/drivers/media/v4l2-core/v4l2-trace.c b/drivers/media/v4l2-core/v4l2-trace.c
new file mode 100644
index 0000000000..95f3b02e1f
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-trace.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <media/v4l2-common.h>
+#include <media/v4l2-fh.h>
+#include <media/videobuf2-v4l2.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/v4l2.h>
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_v4l2_buf_done);
+EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_v4l2_buf_queue);
+EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_v4l2_dqbuf);
+EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_v4l2_qbuf);
diff --git a/drivers/media/v4l2-core/v4l2-vp9.c b/drivers/media/v4l2-core/v4l2-vp9.c
new file mode 100644
index 0000000000..859589f1fd
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-vp9.c
@@ -0,0 +1,1850 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * V4L2 VP9 helpers.
+ *
+ * Copyright (C) 2021 Collabora, Ltd.
+ *
+ * Author: Andrzej Pietrasiewicz <andrzej.p@collabora.com>
+ */
+
+#include <linux/module.h>
+
+#include <media/v4l2-vp9.h>
+
+const u8 v4l2_vp9_kf_y_mode_prob[10][10][9] = {
+ {
+ /* above = dc */
+ { 137, 30, 42, 148, 151, 207, 70, 52, 91 }, /*left = dc */
+ { 92, 45, 102, 136, 116, 180, 74, 90, 100 }, /*left = v */
+ { 73, 32, 19, 187, 222, 215, 46, 34, 100 }, /*left = h */
+ { 91, 30, 32, 116, 121, 186, 93, 86, 94 }, /*left = d45 */
+ { 72, 35, 36, 149, 68, 206, 68, 63, 105 }, /*left = d135*/
+ { 73, 31, 28, 138, 57, 124, 55, 122, 151 }, /*left = d117*/
+ { 67, 23, 21, 140, 126, 197, 40, 37, 171 }, /*left = d153*/
+ { 86, 27, 28, 128, 154, 212, 45, 43, 53 }, /*left = d207*/
+ { 74, 32, 27, 107, 86, 160, 63, 134, 102 }, /*left = d63 */
+ { 59, 67, 44, 140, 161, 202, 78, 67, 119 }, /*left = tm */
+ }, { /* above = v */
+ { 63, 36, 126, 146, 123, 158, 60, 90, 96 }, /*left = dc */
+ { 43, 46, 168, 134, 107, 128, 69, 142, 92 }, /*left = v */
+ { 44, 29, 68, 159, 201, 177, 50, 57, 77 }, /*left = h */
+ { 58, 38, 76, 114, 97, 172, 78, 133, 92 }, /*left = d45 */
+ { 46, 41, 76, 140, 63, 184, 69, 112, 57 }, /*left = d135*/
+ { 38, 32, 85, 140, 46, 112, 54, 151, 133 }, /*left = d117*/
+ { 39, 27, 61, 131, 110, 175, 44, 75, 136 }, /*left = d153*/
+ { 52, 30, 74, 113, 130, 175, 51, 64, 58 }, /*left = d207*/
+ { 47, 35, 80, 100, 74, 143, 64, 163, 74 }, /*left = d63 */
+ { 36, 61, 116, 114, 128, 162, 80, 125, 82 }, /*left = tm */
+ }, { /* above = h */
+ { 82, 26, 26, 171, 208, 204, 44, 32, 105 }, /*left = dc */
+ { 55, 44, 68, 166, 179, 192, 57, 57, 108 }, /*left = v */
+ { 42, 26, 11, 199, 241, 228, 23, 15, 85 }, /*left = h */
+ { 68, 42, 19, 131, 160, 199, 55, 52, 83 }, /*left = d45 */
+ { 58, 50, 25, 139, 115, 232, 39, 52, 118 }, /*left = d135*/
+ { 50, 35, 33, 153, 104, 162, 64, 59, 131 }, /*left = d117*/
+ { 44, 24, 16, 150, 177, 202, 33, 19, 156 }, /*left = d153*/
+ { 55, 27, 12, 153, 203, 218, 26, 27, 49 }, /*left = d207*/
+ { 53, 49, 21, 110, 116, 168, 59, 80, 76 }, /*left = d63 */
+ { 38, 72, 19, 168, 203, 212, 50, 50, 107 }, /*left = tm */
+ }, { /* above = d45 */
+ { 103, 26, 36, 129, 132, 201, 83, 80, 93 }, /*left = dc */
+ { 59, 38, 83, 112, 103, 162, 98, 136, 90 }, /*left = v */
+ { 62, 30, 23, 158, 200, 207, 59, 57, 50 }, /*left = h */
+ { 67, 30, 29, 84, 86, 191, 102, 91, 59 }, /*left = d45 */
+ { 60, 32, 33, 112, 71, 220, 64, 89, 104 }, /*left = d135*/
+ { 53, 26, 34, 130, 56, 149, 84, 120, 103 }, /*left = d117*/
+ { 53, 21, 23, 133, 109, 210, 56, 77, 172 }, /*left = d153*/
+ { 77, 19, 29, 112, 142, 228, 55, 66, 36 }, /*left = d207*/
+ { 61, 29, 29, 93, 97, 165, 83, 175, 162 }, /*left = d63 */
+ { 47, 47, 43, 114, 137, 181, 100, 99, 95 }, /*left = tm */
+ }, { /* above = d135 */
+ { 69, 23, 29, 128, 83, 199, 46, 44, 101 }, /*left = dc */
+ { 53, 40, 55, 139, 69, 183, 61, 80, 110 }, /*left = v */
+ { 40, 29, 19, 161, 180, 207, 43, 24, 91 }, /*left = h */
+ { 60, 34, 19, 105, 61, 198, 53, 64, 89 }, /*left = d45 */
+ { 52, 31, 22, 158, 40, 209, 58, 62, 89 }, /*left = d135*/
+ { 44, 31, 29, 147, 46, 158, 56, 102, 198 }, /*left = d117*/
+ { 35, 19, 12, 135, 87, 209, 41, 45, 167 }, /*left = d153*/
+ { 55, 25, 21, 118, 95, 215, 38, 39, 66 }, /*left = d207*/
+ { 51, 38, 25, 113, 58, 164, 70, 93, 97 }, /*left = d63 */
+ { 47, 54, 34, 146, 108, 203, 72, 103, 151 }, /*left = tm */
+ }, { /* above = d117 */
+ { 64, 19, 37, 156, 66, 138, 49, 95, 133 }, /*left = dc */
+ { 46, 27, 80, 150, 55, 124, 55, 121, 135 }, /*left = v */
+ { 36, 23, 27, 165, 149, 166, 54, 64, 118 }, /*left = h */
+ { 53, 21, 36, 131, 63, 163, 60, 109, 81 }, /*left = d45 */
+ { 40, 26, 35, 154, 40, 185, 51, 97, 123 }, /*left = d135*/
+ { 35, 19, 34, 179, 19, 97, 48, 129, 124 }, /*left = d117*/
+ { 36, 20, 26, 136, 62, 164, 33, 77, 154 }, /*left = d153*/
+ { 45, 18, 32, 130, 90, 157, 40, 79, 91 }, /*left = d207*/
+ { 45, 26, 28, 129, 45, 129, 49, 147, 123 }, /*left = d63 */
+ { 38, 44, 51, 136, 74, 162, 57, 97, 121 }, /*left = tm */
+ }, { /* above = d153 */
+ { 75, 17, 22, 136, 138, 185, 32, 34, 166 }, /*left = dc */
+ { 56, 39, 58, 133, 117, 173, 48, 53, 187 }, /*left = v */
+ { 35, 21, 12, 161, 212, 207, 20, 23, 145 }, /*left = h */
+ { 56, 29, 19, 117, 109, 181, 55, 68, 112 }, /*left = d45 */
+ { 47, 29, 17, 153, 64, 220, 59, 51, 114 }, /*left = d135*/
+ { 46, 16, 24, 136, 76, 147, 41, 64, 172 }, /*left = d117*/
+ { 34, 17, 11, 108, 152, 187, 13, 15, 209 }, /*left = d153*/
+ { 51, 24, 14, 115, 133, 209, 32, 26, 104 }, /*left = d207*/
+ { 55, 30, 18, 122, 79, 179, 44, 88, 116 }, /*left = d63 */
+ { 37, 49, 25, 129, 168, 164, 41, 54, 148 }, /*left = tm */
+ }, { /* above = d207 */
+ { 82, 22, 32, 127, 143, 213, 39, 41, 70 }, /*left = dc */
+ { 62, 44, 61, 123, 105, 189, 48, 57, 64 }, /*left = v */
+ { 47, 25, 17, 175, 222, 220, 24, 30, 86 }, /*left = h */
+ { 68, 36, 17, 106, 102, 206, 59, 74, 74 }, /*left = d45 */
+ { 57, 39, 23, 151, 68, 216, 55, 63, 58 }, /*left = d135*/
+ { 49, 30, 35, 141, 70, 168, 82, 40, 115 }, /*left = d117*/
+ { 51, 25, 15, 136, 129, 202, 38, 35, 139 }, /*left = d153*/
+ { 68, 26, 16, 111, 141, 215, 29, 28, 28 }, /*left = d207*/
+ { 59, 39, 19, 114, 75, 180, 77, 104, 42 }, /*left = d63 */
+ { 40, 61, 26, 126, 152, 206, 61, 59, 93 }, /*left = tm */
+ }, { /* above = d63 */
+ { 78, 23, 39, 111, 117, 170, 74, 124, 94 }, /*left = dc */
+ { 48, 34, 86, 101, 92, 146, 78, 179, 134 }, /*left = v */
+ { 47, 22, 24, 138, 187, 178, 68, 69, 59 }, /*left = h */
+ { 56, 25, 33, 105, 112, 187, 95, 177, 129 }, /*left = d45 */
+ { 48, 31, 27, 114, 63, 183, 82, 116, 56 }, /*left = d135*/
+ { 43, 28, 37, 121, 63, 123, 61, 192, 169 }, /*left = d117*/
+ { 42, 17, 24, 109, 97, 177, 56, 76, 122 }, /*left = d153*/
+ { 58, 18, 28, 105, 139, 182, 70, 92, 63 }, /*left = d207*/
+ { 46, 23, 32, 74, 86, 150, 67, 183, 88 }, /*left = d63 */
+ { 36, 38, 48, 92, 122, 165, 88, 137, 91 }, /*left = tm */
+ }, { /* above = tm */
+ { 65, 70, 60, 155, 159, 199, 61, 60, 81 }, /*left = dc */
+ { 44, 78, 115, 132, 119, 173, 71, 112, 93 }, /*left = v */
+ { 39, 38, 21, 184, 227, 206, 42, 32, 64 }, /*left = h */
+ { 58, 47, 36, 124, 137, 193, 80, 82, 78 }, /*left = d45 */
+ { 49, 50, 35, 144, 95, 205, 63, 78, 59 }, /*left = d135*/
+ { 41, 53, 52, 148, 71, 142, 65, 128, 51 }, /*left = d117*/
+ { 40, 36, 28, 143, 143, 202, 40, 55, 137 }, /*left = d153*/
+ { 52, 34, 29, 129, 183, 227, 42, 35, 43 }, /*left = d207*/
+ { 42, 44, 44, 104, 105, 164, 64, 130, 80 }, /*left = d63 */
+ { 43, 81, 53, 140, 169, 204, 68, 84, 72 }, /*left = tm */
+ }
+};
+EXPORT_SYMBOL_GPL(v4l2_vp9_kf_y_mode_prob);
+
+const u8 v4l2_vp9_kf_partition_probs[16][3] = {
+ /* 8x8 -> 4x4 */
+ { 158, 97, 94 }, /* a/l both not split */
+ { 93, 24, 99 }, /* a split, l not split */
+ { 85, 119, 44 }, /* l split, a not split */
+ { 62, 59, 67 }, /* a/l both split */
+ /* 16x16 -> 8x8 */
+ { 149, 53, 53 }, /* a/l both not split */
+ { 94, 20, 48 }, /* a split, l not split */
+ { 83, 53, 24 }, /* l split, a not split */
+ { 52, 18, 18 }, /* a/l both split */
+ /* 32x32 -> 16x16 */
+ { 150, 40, 39 }, /* a/l both not split */
+ { 78, 12, 26 }, /* a split, l not split */
+ { 67, 33, 11 }, /* l split, a not split */
+ { 24, 7, 5 }, /* a/l both split */
+ /* 64x64 -> 32x32 */
+ { 174, 35, 49 }, /* a/l both not split */
+ { 68, 11, 27 }, /* a split, l not split */
+ { 57, 15, 9 }, /* l split, a not split */
+ { 12, 3, 3 }, /* a/l both split */
+};
+EXPORT_SYMBOL_GPL(v4l2_vp9_kf_partition_probs);
+
+const u8 v4l2_vp9_kf_uv_mode_prob[10][9] = {
+ { 144, 11, 54, 157, 195, 130, 46, 58, 108 }, /* y = dc */
+ { 118, 15, 123, 148, 131, 101, 44, 93, 131 }, /* y = v */
+ { 113, 12, 23, 188, 226, 142, 26, 32, 125 }, /* y = h */
+ { 120, 11, 50, 123, 163, 135, 64, 77, 103 }, /* y = d45 */
+ { 113, 9, 36, 155, 111, 157, 32, 44, 161 }, /* y = d135 */
+ { 116, 9, 55, 176, 76, 96, 37, 61, 149 }, /* y = d117 */
+ { 115, 9, 28, 141, 161, 167, 21, 25, 193 }, /* y = d153 */
+ { 120, 12, 32, 145, 195, 142, 32, 38, 86 }, /* y = d207 */
+ { 116, 12, 64, 120, 140, 125, 49, 115, 121 }, /* y = d63 */
+ { 102, 19, 66, 162, 182, 122, 35, 59, 128 } /* y = tm */
+};
+EXPORT_SYMBOL_GPL(v4l2_vp9_kf_uv_mode_prob);
+
+const struct v4l2_vp9_frame_context v4l2_vp9_default_probs = {
+ .tx8 = {
+ { 100 },
+ { 66 },
+ },
+ .tx16 = {
+ { 20, 152 },
+ { 15, 101 },
+ },
+ .tx32 = {
+ { 3, 136, 37 },
+ { 5, 52, 13 },
+ },
+ .coef = {
+ { /* tx = 4x4 */
+ { /* block Type 0 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 195, 29, 183 },
+ { 84, 49, 136 },
+ { 8, 42, 71 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 31, 107, 169 },
+ { 35, 99, 159 },
+ { 17, 82, 140 },
+ { 8, 66, 114 },
+ { 2, 44, 76 },
+ { 1, 19, 32 },
+ },
+ { /* Coeff Band 2 */
+ { 40, 132, 201 },
+ { 29, 114, 187 },
+ { 13, 91, 157 },
+ { 7, 75, 127 },
+ { 3, 58, 95 },
+ { 1, 28, 47 },
+ },
+ { /* Coeff Band 3 */
+ { 69, 142, 221 },
+ { 42, 122, 201 },
+ { 15, 91, 159 },
+ { 6, 67, 121 },
+ { 1, 42, 77 },
+ { 1, 17, 31 },
+ },
+ { /* Coeff Band 4 */
+ { 102, 148, 228 },
+ { 67, 117, 204 },
+ { 17, 82, 154 },
+ { 6, 59, 114 },
+ { 2, 39, 75 },
+ { 1, 15, 29 },
+ },
+ { /* Coeff Band 5 */
+ { 156, 57, 233 },
+ { 119, 57, 212 },
+ { 58, 48, 163 },
+ { 29, 40, 124 },
+ { 12, 30, 81 },
+ { 3, 12, 31 }
+ },
+ },
+ { /* Inter */
+ { /* Coeff Band 0 */
+ { 191, 107, 226 },
+ { 124, 117, 204 },
+ { 25, 99, 155 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 29, 148, 210 },
+ { 37, 126, 194 },
+ { 8, 93, 157 },
+ { 2, 68, 118 },
+ { 1, 39, 69 },
+ { 1, 17, 33 },
+ },
+ { /* Coeff Band 2 */
+ { 41, 151, 213 },
+ { 27, 123, 193 },
+ { 3, 82, 144 },
+ { 1, 58, 105 },
+ { 1, 32, 60 },
+ { 1, 13, 26 },
+ },
+ { /* Coeff Band 3 */
+ { 59, 159, 220 },
+ { 23, 126, 198 },
+ { 4, 88, 151 },
+ { 1, 66, 114 },
+ { 1, 38, 71 },
+ { 1, 18, 34 },
+ },
+ { /* Coeff Band 4 */
+ { 114, 136, 232 },
+ { 51, 114, 207 },
+ { 11, 83, 155 },
+ { 3, 56, 105 },
+ { 1, 33, 65 },
+ { 1, 17, 34 },
+ },
+ { /* Coeff Band 5 */
+ { 149, 65, 234 },
+ { 121, 57, 215 },
+ { 61, 49, 166 },
+ { 28, 36, 114 },
+ { 12, 25, 76 },
+ { 3, 16, 42 },
+ },
+ },
+ },
+ { /* block Type 1 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 214, 49, 220 },
+ { 132, 63, 188 },
+ { 42, 65, 137 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 85, 137, 221 },
+ { 104, 131, 216 },
+ { 49, 111, 192 },
+ { 21, 87, 155 },
+ { 2, 49, 87 },
+ { 1, 16, 28 },
+ },
+ { /* Coeff Band 2 */
+ { 89, 163, 230 },
+ { 90, 137, 220 },
+ { 29, 100, 183 },
+ { 10, 70, 135 },
+ { 2, 42, 81 },
+ { 1, 17, 33 },
+ },
+ { /* Coeff Band 3 */
+ { 108, 167, 237 },
+ { 55, 133, 222 },
+ { 15, 97, 179 },
+ { 4, 72, 135 },
+ { 1, 45, 85 },
+ { 1, 19, 38 },
+ },
+ { /* Coeff Band 4 */
+ { 124, 146, 240 },
+ { 66, 124, 224 },
+ { 17, 88, 175 },
+ { 4, 58, 122 },
+ { 1, 36, 75 },
+ { 1, 18, 37 },
+ },
+ { /* Coeff Band 5 */
+ { 141, 79, 241 },
+ { 126, 70, 227 },
+ { 66, 58, 182 },
+ { 30, 44, 136 },
+ { 12, 34, 96 },
+ { 2, 20, 47 },
+ },
+ },
+ { /* Inter */
+ { /* Coeff Band 0 */
+ { 229, 99, 249 },
+ { 143, 111, 235 },
+ { 46, 109, 192 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 82, 158, 236 },
+ { 94, 146, 224 },
+ { 25, 117, 191 },
+ { 9, 87, 149 },
+ { 3, 56, 99 },
+ { 1, 33, 57 },
+ },
+ { /* Coeff Band 2 */
+ { 83, 167, 237 },
+ { 68, 145, 222 },
+ { 10, 103, 177 },
+ { 2, 72, 131 },
+ { 1, 41, 79 },
+ { 1, 20, 39 },
+ },
+ { /* Coeff Band 3 */
+ { 99, 167, 239 },
+ { 47, 141, 224 },
+ { 10, 104, 178 },
+ { 2, 73, 133 },
+ { 1, 44, 85 },
+ { 1, 22, 47 },
+ },
+ { /* Coeff Band 4 */
+ { 127, 145, 243 },
+ { 71, 129, 228 },
+ { 17, 93, 177 },
+ { 3, 61, 124 },
+ { 1, 41, 84 },
+ { 1, 21, 52 },
+ },
+ { /* Coeff Band 5 */
+ { 157, 78, 244 },
+ { 140, 72, 231 },
+ { 69, 58, 184 },
+ { 31, 44, 137 },
+ { 14, 38, 105 },
+ { 8, 23, 61 },
+ },
+ },
+ },
+ },
+ { /* tx = 8x8 */
+ { /* block Type 0 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 125, 34, 187 },
+ { 52, 41, 133 },
+ { 6, 31, 56 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 37, 109, 153 },
+ { 51, 102, 147 },
+ { 23, 87, 128 },
+ { 8, 67, 101 },
+ { 1, 41, 63 },
+ { 1, 19, 29 },
+ },
+ { /* Coeff Band 2 */
+ { 31, 154, 185 },
+ { 17, 127, 175 },
+ { 6, 96, 145 },
+ { 2, 73, 114 },
+ { 1, 51, 82 },
+ { 1, 28, 45 },
+ },
+ { /* Coeff Band 3 */
+ { 23, 163, 200 },
+ { 10, 131, 185 },
+ { 2, 93, 148 },
+ { 1, 67, 111 },
+ { 1, 41, 69 },
+ { 1, 14, 24 },
+ },
+ { /* Coeff Band 4 */
+ { 29, 176, 217 },
+ { 12, 145, 201 },
+ { 3, 101, 156 },
+ { 1, 69, 111 },
+ { 1, 39, 63 },
+ { 1, 14, 23 },
+ },
+ { /* Coeff Band 5 */
+ { 57, 192, 233 },
+ { 25, 154, 215 },
+ { 6, 109, 167 },
+ { 3, 78, 118 },
+ { 1, 48, 69 },
+ { 1, 21, 29 },
+ },
+ },
+ { /* Inter */
+ { /* Coeff Band 0 */
+ { 202, 105, 245 },
+ { 108, 106, 216 },
+ { 18, 90, 144 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 33, 172, 219 },
+ { 64, 149, 206 },
+ { 14, 117, 177 },
+ { 5, 90, 141 },
+ { 2, 61, 95 },
+ { 1, 37, 57 },
+ },
+ { /* Coeff Band 2 */
+ { 33, 179, 220 },
+ { 11, 140, 198 },
+ { 1, 89, 148 },
+ { 1, 60, 104 },
+ { 1, 33, 57 },
+ { 1, 12, 21 },
+ },
+ { /* Coeff Band 3 */
+ { 30, 181, 221 },
+ { 8, 141, 198 },
+ { 1, 87, 145 },
+ { 1, 58, 100 },
+ { 1, 31, 55 },
+ { 1, 12, 20 },
+ },
+ { /* Coeff Band 4 */
+ { 32, 186, 224 },
+ { 7, 142, 198 },
+ { 1, 86, 143 },
+ { 1, 58, 100 },
+ { 1, 31, 55 },
+ { 1, 12, 22 },
+ },
+ { /* Coeff Band 5 */
+ { 57, 192, 227 },
+ { 20, 143, 204 },
+ { 3, 96, 154 },
+ { 1, 68, 112 },
+ { 1, 42, 69 },
+ { 1, 19, 32 },
+ },
+ },
+ },
+ { /* block Type 1 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 212, 35, 215 },
+ { 113, 47, 169 },
+ { 29, 48, 105 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 74, 129, 203 },
+ { 106, 120, 203 },
+ { 49, 107, 178 },
+ { 19, 84, 144 },
+ { 4, 50, 84 },
+ { 1, 15, 25 },
+ },
+ { /* Coeff Band 2 */
+ { 71, 172, 217 },
+ { 44, 141, 209 },
+ { 15, 102, 173 },
+ { 6, 76, 133 },
+ { 2, 51, 89 },
+ { 1, 24, 42 },
+ },
+ { /* Coeff Band 3 */
+ { 64, 185, 231 },
+ { 31, 148, 216 },
+ { 8, 103, 175 },
+ { 3, 74, 131 },
+ { 1, 46, 81 },
+ { 1, 18, 30 },
+ },
+ { /* Coeff Band 4 */
+ { 65, 196, 235 },
+ { 25, 157, 221 },
+ { 5, 105, 174 },
+ { 1, 67, 120 },
+ { 1, 38, 69 },
+ { 1, 15, 30 },
+ },
+ { /* Coeff Band 5 */
+ { 65, 204, 238 },
+ { 30, 156, 224 },
+ { 7, 107, 177 },
+ { 2, 70, 124 },
+ { 1, 42, 73 },
+ { 1, 18, 34 },
+ },
+ },
+ { /* Inter */
+ { /* Coeff Band 0 */
+ { 225, 86, 251 },
+ { 144, 104, 235 },
+ { 42, 99, 181 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 85, 175, 239 },
+ { 112, 165, 229 },
+ { 29, 136, 200 },
+ { 12, 103, 162 },
+ { 6, 77, 123 },
+ { 2, 53, 84 },
+ },
+ { /* Coeff Band 2 */
+ { 75, 183, 239 },
+ { 30, 155, 221 },
+ { 3, 106, 171 },
+ { 1, 74, 128 },
+ { 1, 44, 76 },
+ { 1, 17, 28 },
+ },
+ { /* Coeff Band 3 */
+ { 73, 185, 240 },
+ { 27, 159, 222 },
+ { 2, 107, 172 },
+ { 1, 75, 127 },
+ { 1, 42, 73 },
+ { 1, 17, 29 },
+ },
+ { /* Coeff Band 4 */
+ { 62, 190, 238 },
+ { 21, 159, 222 },
+ { 2, 107, 172 },
+ { 1, 72, 122 },
+ { 1, 40, 71 },
+ { 1, 18, 32 },
+ },
+ { /* Coeff Band 5 */
+ { 61, 199, 240 },
+ { 27, 161, 226 },
+ { 4, 113, 180 },
+ { 1, 76, 129 },
+ { 1, 46, 80 },
+ { 1, 23, 41 },
+ },
+ },
+ },
+ },
+ { /* tx = 16x16 */
+ { /* block Type 0 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 7, 27, 153 },
+ { 5, 30, 95 },
+ { 1, 16, 30 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 50, 75, 127 },
+ { 57, 75, 124 },
+ { 27, 67, 108 },
+ { 10, 54, 86 },
+ { 1, 33, 52 },
+ { 1, 12, 18 },
+ },
+ { /* Coeff Band 2 */
+ { 43, 125, 151 },
+ { 26, 108, 148 },
+ { 7, 83, 122 },
+ { 2, 59, 89 },
+ { 1, 38, 60 },
+ { 1, 17, 27 },
+ },
+ { /* Coeff Band 3 */
+ { 23, 144, 163 },
+ { 13, 112, 154 },
+ { 2, 75, 117 },
+ { 1, 50, 81 },
+ { 1, 31, 51 },
+ { 1, 14, 23 },
+ },
+ { /* Coeff Band 4 */
+ { 18, 162, 185 },
+ { 6, 123, 171 },
+ { 1, 78, 125 },
+ { 1, 51, 86 },
+ { 1, 31, 54 },
+ { 1, 14, 23 },
+ },
+ { /* Coeff Band 5 */
+ { 15, 199, 227 },
+ { 3, 150, 204 },
+ { 1, 91, 146 },
+ { 1, 55, 95 },
+ { 1, 30, 53 },
+ { 1, 11, 20 },
+ }
+ },
+ { /* Inter */
+ { /* Coeff Band 0 */
+ { 19, 55, 240 },
+ { 19, 59, 196 },
+ { 3, 52, 105 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 41, 166, 207 },
+ { 104, 153, 199 },
+ { 31, 123, 181 },
+ { 14, 101, 152 },
+ { 5, 72, 106 },
+ { 1, 36, 52 },
+ },
+ { /* Coeff Band 2 */
+ { 35, 176, 211 },
+ { 12, 131, 190 },
+ { 2, 88, 144 },
+ { 1, 60, 101 },
+ { 1, 36, 60 },
+ { 1, 16, 28 },
+ },
+ { /* Coeff Band 3 */
+ { 28, 183, 213 },
+ { 8, 134, 191 },
+ { 1, 86, 142 },
+ { 1, 56, 96 },
+ { 1, 30, 53 },
+ { 1, 12, 20 },
+ },
+ { /* Coeff Band 4 */
+ { 20, 190, 215 },
+ { 4, 135, 192 },
+ { 1, 84, 139 },
+ { 1, 53, 91 },
+ { 1, 28, 49 },
+ { 1, 11, 20 },
+ },
+ { /* Coeff Band 5 */
+ { 13, 196, 216 },
+ { 2, 137, 192 },
+ { 1, 86, 143 },
+ { 1, 57, 99 },
+ { 1, 32, 56 },
+ { 1, 13, 24 },
+ },
+ },
+ },
+ { /* block Type 1 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 211, 29, 217 },
+ { 96, 47, 156 },
+ { 22, 43, 87 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 78, 120, 193 },
+ { 111, 116, 186 },
+ { 46, 102, 164 },
+ { 15, 80, 128 },
+ { 2, 49, 76 },
+ { 1, 18, 28 },
+ },
+ { /* Coeff Band 2 */
+ { 71, 161, 203 },
+ { 42, 132, 192 },
+ { 10, 98, 150 },
+ { 3, 69, 109 },
+ { 1, 44, 70 },
+ { 1, 18, 29 },
+ },
+ { /* Coeff Band 3 */
+ { 57, 186, 211 },
+ { 30, 140, 196 },
+ { 4, 93, 146 },
+ { 1, 62, 102 },
+ { 1, 38, 65 },
+ { 1, 16, 27 },
+ },
+ { /* Coeff Band 4 */
+ { 47, 199, 217 },
+ { 14, 145, 196 },
+ { 1, 88, 142 },
+ { 1, 57, 98 },
+ { 1, 36, 62 },
+ { 1, 15, 26 },
+ },
+ { /* Coeff Band 5 */
+ { 26, 219, 229 },
+ { 5, 155, 207 },
+ { 1, 94, 151 },
+ { 1, 60, 104 },
+ { 1, 36, 62 },
+ { 1, 16, 28 },
+ }
+ },
+ { /* Inter */
+ { /* Coeff Band 0 */
+ { 233, 29, 248 },
+ { 146, 47, 220 },
+ { 43, 52, 140 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 100, 163, 232 },
+ { 179, 161, 222 },
+ { 63, 142, 204 },
+ { 37, 113, 174 },
+ { 26, 89, 137 },
+ { 18, 68, 97 },
+ },
+ { /* Coeff Band 2 */
+ { 85, 181, 230 },
+ { 32, 146, 209 },
+ { 7, 100, 164 },
+ { 3, 71, 121 },
+ { 1, 45, 77 },
+ { 1, 18, 30 },
+ },
+ { /* Coeff Band 3 */
+ { 65, 187, 230 },
+ { 20, 148, 207 },
+ { 2, 97, 159 },
+ { 1, 68, 116 },
+ { 1, 40, 70 },
+ { 1, 14, 29 },
+ },
+ { /* Coeff Band 4 */
+ { 40, 194, 227 },
+ { 8, 147, 204 },
+ { 1, 94, 155 },
+ { 1, 65, 112 },
+ { 1, 39, 66 },
+ { 1, 14, 26 },
+ },
+ { /* Coeff Band 5 */
+ { 16, 208, 228 },
+ { 3, 151, 207 },
+ { 1, 98, 160 },
+ { 1, 67, 117 },
+ { 1, 41, 74 },
+ { 1, 17, 31 },
+ },
+ },
+ },
+ },
+ { /* tx = 32x32 */
+ { /* block Type 0 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 17, 38, 140 },
+ { 7, 34, 80 },
+ { 1, 17, 29 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 37, 75, 128 },
+ { 41, 76, 128 },
+ { 26, 66, 116 },
+ { 12, 52, 94 },
+ { 2, 32, 55 },
+ { 1, 10, 16 },
+ },
+ { /* Coeff Band 2 */
+ { 50, 127, 154 },
+ { 37, 109, 152 },
+ { 16, 82, 121 },
+ { 5, 59, 85 },
+ { 1, 35, 54 },
+ { 1, 13, 20 },
+ },
+ { /* Coeff Band 3 */
+ { 40, 142, 167 },
+ { 17, 110, 157 },
+ { 2, 71, 112 },
+ { 1, 44, 72 },
+ { 1, 27, 45 },
+ { 1, 11, 17 },
+ },
+ { /* Coeff Band 4 */
+ { 30, 175, 188 },
+ { 9, 124, 169 },
+ { 1, 74, 116 },
+ { 1, 48, 78 },
+ { 1, 30, 49 },
+ { 1, 11, 18 },
+ },
+ { /* Coeff Band 5 */
+ { 10, 222, 223 },
+ { 2, 150, 194 },
+ { 1, 83, 128 },
+ { 1, 48, 79 },
+ { 1, 27, 45 },
+ { 1, 11, 17 },
+ },
+ },
+ { /* Inter */
+ { /* Coeff Band 0 */
+ { 36, 41, 235 },
+ { 29, 36, 193 },
+ { 10, 27, 111 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 85, 165, 222 },
+ { 177, 162, 215 },
+ { 110, 135, 195 },
+ { 57, 113, 168 },
+ { 23, 83, 120 },
+ { 10, 49, 61 },
+ },
+ { /* Coeff Band 2 */
+ { 85, 190, 223 },
+ { 36, 139, 200 },
+ { 5, 90, 146 },
+ { 1, 60, 103 },
+ { 1, 38, 65 },
+ { 1, 18, 30 },
+ },
+ { /* Coeff Band 3 */
+ { 72, 202, 223 },
+ { 23, 141, 199 },
+ { 2, 86, 140 },
+ { 1, 56, 97 },
+ { 1, 36, 61 },
+ { 1, 16, 27 },
+ },
+ { /* Coeff Band 4 */
+ { 55, 218, 225 },
+ { 13, 145, 200 },
+ { 1, 86, 141 },
+ { 1, 57, 99 },
+ { 1, 35, 61 },
+ { 1, 13, 22 },
+ },
+ { /* Coeff Band 5 */
+ { 15, 235, 212 },
+ { 1, 132, 184 },
+ { 1, 84, 139 },
+ { 1, 57, 97 },
+ { 1, 34, 56 },
+ { 1, 14, 23 },
+ },
+ },
+ },
+ { /* block Type 1 */
+ { /* Intra */
+ { /* Coeff Band 0 */
+ { 181, 21, 201 },
+ { 61, 37, 123 },
+ { 10, 38, 71 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 47, 106, 172 },
+ { 95, 104, 173 },
+ { 42, 93, 159 },
+ { 18, 77, 131 },
+ { 4, 50, 81 },
+ { 1, 17, 23 },
+ },
+ { /* Coeff Band 2 */
+ { 62, 147, 199 },
+ { 44, 130, 189 },
+ { 28, 102, 154 },
+ { 18, 75, 115 },
+ { 2, 44, 65 },
+ { 1, 12, 19 },
+ },
+ { /* Coeff Band 3 */
+ { 55, 153, 210 },
+ { 24, 130, 194 },
+ { 3, 93, 146 },
+ { 1, 61, 97 },
+ { 1, 31, 50 },
+ { 1, 10, 16 },
+ },
+ { /* Coeff Band 4 */
+ { 49, 186, 223 },
+ { 17, 148, 204 },
+ { 1, 96, 142 },
+ { 1, 53, 83 },
+ { 1, 26, 44 },
+ { 1, 11, 17 },
+ },
+ { /* Coeff Band 5 */
+ { 13, 217, 212 },
+ { 2, 136, 180 },
+ { 1, 78, 124 },
+ { 1, 50, 83 },
+ { 1, 29, 49 },
+ { 1, 14, 23 },
+ },
+ },
+ { /* Inter */
+ { /* Coeff Band 0 */
+ { 197, 13, 247 },
+ { 82, 17, 222 },
+ { 25, 17, 162 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ { 0, 0, 0 },
+ },
+ { /* Coeff Band 1 */
+ { 126, 186, 247 },
+ { 234, 191, 243 },
+ { 176, 177, 234 },
+ { 104, 158, 220 },
+ { 66, 128, 186 },
+ { 55, 90, 137 },
+ },
+ { /* Coeff Band 2 */
+ { 111, 197, 242 },
+ { 46, 158, 219 },
+ { 9, 104, 171 },
+ { 2, 65, 125 },
+ { 1, 44, 80 },
+ { 1, 17, 91 },
+ },
+ { /* Coeff Band 3 */
+ { 104, 208, 245 },
+ { 39, 168, 224 },
+ { 3, 109, 162 },
+ { 1, 79, 124 },
+ { 1, 50, 102 },
+ { 1, 43, 102 },
+ },
+ { /* Coeff Band 4 */
+ { 84, 220, 246 },
+ { 31, 177, 231 },
+ { 2, 115, 180 },
+ { 1, 79, 134 },
+ { 1, 55, 77 },
+ { 1, 60, 79 },
+ },
+ { /* Coeff Band 5 */
+ { 43, 243, 240 },
+ { 8, 180, 217 },
+ { 1, 115, 166 },
+ { 1, 84, 121 },
+ { 1, 51, 67 },
+ { 1, 16, 6 },
+ },
+ },
+ },
+ },
+ },
+
+ .skip = { 192, 128, 64 },
+ .inter_mode = {
+ { 2, 173, 34 },
+ { 7, 145, 85 },
+ { 7, 166, 63 },
+ { 7, 94, 66 },
+ { 8, 64, 46 },
+ { 17, 81, 31 },
+ { 25, 29, 30 },
+ },
+ .interp_filter = {
+ { 235, 162 },
+ { 36, 255 },
+ { 34, 3 },
+ { 149, 144 },
+ },
+ .is_inter = { 9, 102, 187, 225 },
+ .comp_mode = { 239, 183, 119, 96, 41 },
+ .single_ref = {
+ { 33, 16 },
+ { 77, 74 },
+ { 142, 142 },
+ { 172, 170 },
+ { 238, 247 },
+ },
+ .comp_ref = { 50, 126, 123, 221, 226 },
+ .y_mode = {
+ { 65, 32, 18, 144, 162, 194, 41, 51, 98 },
+ { 132, 68, 18, 165, 217, 196, 45, 40, 78 },
+ { 173, 80, 19, 176, 240, 193, 64, 35, 46 },
+ { 221, 135, 38, 194, 248, 121, 96, 85, 29 },
+ },
+ .uv_mode = {
+ { 120, 7, 76, 176, 208, 126, 28, 54, 103 } /* y = dc */,
+ { 48, 12, 154, 155, 139, 90, 34, 117, 119 } /* y = v */,
+ { 67, 6, 25, 204, 243, 158, 13, 21, 96 } /* y = h */,
+ { 97, 5, 44, 131, 176, 139, 48, 68, 97 } /* y = d45 */,
+ { 83, 5, 42, 156, 111, 152, 26, 49, 152 } /* y = d135 */,
+ { 80, 5, 58, 178, 74, 83, 33, 62, 145 } /* y = d117 */,
+ { 86, 5, 32, 154, 192, 168, 14, 22, 163 } /* y = d153 */,
+ { 85, 5, 32, 156, 216, 148, 19, 29, 73 } /* y = d207 */,
+ { 77, 7, 64, 116, 132, 122, 37, 126, 120 } /* y = d63 */,
+ { 101, 21, 107, 181, 192, 103, 19, 67, 125 } /* y = tm */
+ },
+ .partition = {
+ /* 8x8 -> 4x4 */
+ { 199, 122, 141 } /* a/l both not split */,
+ { 147, 63, 159 } /* a split, l not split */,
+ { 148, 133, 118 } /* l split, a not split */,
+ { 121, 104, 114 } /* a/l both split */,
+ /* 16x16 -> 8x8 */
+ { 174, 73, 87 } /* a/l both not split */,
+ { 92, 41, 83 } /* a split, l not split */,
+ { 82, 99, 50 } /* l split, a not split */,
+ { 53, 39, 39 } /* a/l both split */,
+ /* 32x32 -> 16x16 */
+ { 177, 58, 59 } /* a/l both not split */,
+ { 68, 26, 63 } /* a split, l not split */,
+ { 52, 79, 25 } /* l split, a not split */,
+ { 17, 14, 12 } /* a/l both split */,
+ /* 64x64 -> 32x32 */
+ { 222, 34, 30 } /* a/l both not split */,
+ { 72, 16, 44 } /* a split, l not split */,
+ { 58, 32, 12 } /* l split, a not split */,
+ { 10, 7, 6 } /* a/l both split */,
+ },
+
+ .mv = {
+ .joint = { 32, 64, 96 },
+ .sign = { 128, 128 },
+ .classes = {
+ { 224, 144, 192, 168, 192, 176, 192, 198, 198, 245 },
+ { 216, 128, 176, 160, 176, 176, 192, 198, 198, 208 },
+ },
+ .class0_bit = { 216, 208 },
+ .bits = {
+ { 136, 140, 148, 160, 176, 192, 224, 234, 234, 240},
+ { 136, 140, 148, 160, 176, 192, 224, 234, 234, 240},
+ },
+ .class0_fr = {
+ {
+ { 128, 128, 64 },
+ { 96, 112, 64 },
+ },
+ {
+ { 128, 128, 64 },
+ { 96, 112, 64 },
+ },
+ },
+ .fr = {
+ { 64, 96, 64 },
+ { 64, 96, 64 },
+ },
+ .class0_hp = { 160, 160 },
+ .hp = { 128, 128 },
+ },
+};
+EXPORT_SYMBOL_GPL(v4l2_vp9_default_probs);
+
+static u32 fastdiv(u32 dividend, u16 divisor)
+{
+#define DIV_INV(d) ((u32)(((1ULL << 32) + ((d) - 1)) / (d)))
+#define DIVS_INV(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9) \
+ DIV_INV(d0), DIV_INV(d1), DIV_INV(d2), DIV_INV(d3), \
+ DIV_INV(d4), DIV_INV(d5), DIV_INV(d6), DIV_INV(d7), \
+ DIV_INV(d8), DIV_INV(d9)
+
+ static const u32 inv[] = {
+ DIV_INV(2), DIV_INV(3), DIV_INV(4), DIV_INV(5),
+ DIV_INV(6), DIV_INV(7), DIV_INV(8), DIV_INV(9),
+ DIVS_INV(10, 11, 12, 13, 14, 15, 16, 17, 18, 19),
+ DIVS_INV(20, 21, 22, 23, 24, 25, 26, 27, 28, 29),
+ DIVS_INV(30, 31, 32, 33, 34, 35, 36, 37, 38, 39),
+ DIVS_INV(40, 41, 42, 43, 44, 45, 46, 47, 48, 49),
+ DIVS_INV(50, 51, 52, 53, 54, 55, 56, 57, 58, 59),
+ DIVS_INV(60, 61, 62, 63, 64, 65, 66, 67, 68, 69),
+ DIVS_INV(70, 71, 72, 73, 74, 75, 76, 77, 78, 79),
+ DIVS_INV(80, 81, 82, 83, 84, 85, 86, 87, 88, 89),
+ DIVS_INV(90, 91, 92, 93, 94, 95, 96, 97, 98, 99),
+ DIVS_INV(100, 101, 102, 103, 104, 105, 106, 107, 108, 109),
+ DIVS_INV(110, 111, 112, 113, 114, 115, 116, 117, 118, 119),
+ DIVS_INV(120, 121, 122, 123, 124, 125, 126, 127, 128, 129),
+ DIVS_INV(130, 131, 132, 133, 134, 135, 136, 137, 138, 139),
+ DIVS_INV(140, 141, 142, 143, 144, 145, 146, 147, 148, 149),
+ DIVS_INV(150, 151, 152, 153, 154, 155, 156, 157, 158, 159),
+ DIVS_INV(160, 161, 162, 163, 164, 165, 166, 167, 168, 169),
+ DIVS_INV(170, 171, 172, 173, 174, 175, 176, 177, 178, 179),
+ DIVS_INV(180, 181, 182, 183, 184, 185, 186, 187, 188, 189),
+ DIVS_INV(190, 191, 192, 193, 194, 195, 196, 197, 198, 199),
+ DIVS_INV(200, 201, 202, 203, 204, 205, 206, 207, 208, 209),
+ DIVS_INV(210, 211, 212, 213, 214, 215, 216, 217, 218, 219),
+ DIVS_INV(220, 221, 222, 223, 224, 225, 226, 227, 228, 229),
+ DIVS_INV(230, 231, 232, 233, 234, 235, 236, 237, 238, 239),
+ DIVS_INV(240, 241, 242, 243, 244, 245, 246, 247, 248, 249),
+ DIV_INV(250), DIV_INV(251), DIV_INV(252), DIV_INV(253),
+ DIV_INV(254), DIV_INV(255), DIV_INV(256),
+ };
+
+ if (divisor == 0)
+ return 0;
+ else if (divisor == 1)
+ return dividend;
+
+ if (WARN_ON(divisor - 2 >= ARRAY_SIZE(inv)))
+ return dividend;
+
+ return ((u64)dividend * inv[divisor - 2]) >> 32;
+}
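+
+/*
+ * fastdiv() replaces a runtime division with a multiplication by a
+ * precomputed fixed-point reciprocal. For example, DIV_INV(3) =
+ * ceil(2^32 / 3) = 0x55555556, so fastdiv(7, 3) computes
+ * ((u64)7 * 0x55555556) >> 32 = 2, matching 7 / 3.
+ */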
+
+/* 6.3.6 inv_recenter_nonneg(v, m) */
+static int inv_recenter_nonneg(int v, int m)
+{
+ if (v > 2 * m)
+ return v;
+
+ if (v & 1)
+ return m - ((v + 1) >> 1);
+
+ return m + (v >> 1);
+}
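+
+/*
+ * For example, with m = 10: v = 4 (even) maps to 10 + 2 = 12, v = 5 (odd)
+ * maps to 10 - 3 = 7, and v = 25 (> 2 * m) is returned unchanged. Even
+ * values re-center above m, odd values below it.
+ */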
+
+/*
+ * part of 6.3.5 inv_remap_prob(deltaProb, prob)
+ * delta = inv_map_table[deltaProb] done by userspace
+ */
+static int update_prob(int delta, int prob)
+{
+ if (!delta)
+ return prob;
+
+ return prob <= 128 ?
+ 1 + inv_recenter_nonneg(delta, prob - 1) :
+ 255 - inv_recenter_nonneg(delta, 255 - prob);
+}
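+
+/*
+ * For example, update_prob(10, 100) = 1 + inv_recenter_nonneg(10, 99) =
+ * 1 + (99 + 5) = 105, while update_prob(0, 100) leaves the probability
+ * at 100.
+ */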
+
+/* Counterpart to 6.3.2 tx_mode_probs() */
+static void update_tx_probs(struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(probs->tx8); i++) {
+ u8 *p8x8 = probs->tx8[i];
+ u8 *p16x16 = probs->tx16[i];
+ u8 *p32x32 = probs->tx32[i];
+ const u8 *d8x8 = deltas->tx8[i];
+ const u8 *d16x16 = deltas->tx16[i];
+ const u8 *d32x32 = deltas->tx32[i];
+
+ p8x8[0] = update_prob(d8x8[0], p8x8[0]);
+ p16x16[0] = update_prob(d16x16[0], p16x16[0]);
+ p16x16[1] = update_prob(d16x16[1], p16x16[1]);
+ p32x32[0] = update_prob(d32x32[0], p32x32[0]);
+ p32x32[1] = update_prob(d32x32[1], p32x32[1]);
+ p32x32[2] = update_prob(d32x32[2], p32x32[2]);
+ }
+}
+
+#define BAND_6(band) ((band) == 0 ? 3 : 6)
+
+static void update_coeff(const u8 deltas[6][6][3], u8 probs[6][6][3])
+{
+ int l, m, n;
+
+ for (l = 0; l < 6; l++)
+ for (m = 0; m < BAND_6(l); m++) {
+ u8 *p = probs[l][m];
+ const u8 *d = deltas[l][m];
+
+ for (n = 0; n < 3; n++)
+ p[n] = update_prob(d[n], p[n]);
+ }
+}
+
+/* Counterpart to 6.3.7 read_coef_probs() */
+static void update_coef_probs(struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_ctrl_vp9_compressed_hdr *deltas,
+ const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ int i, j, k;
+
+ for (i = 0; i < ARRAY_SIZE(probs->coef); i++) {
+ for (j = 0; j < ARRAY_SIZE(probs->coef[0]); j++)
+ for (k = 0; k < ARRAY_SIZE(probs->coef[0][0]); k++)
+ update_coeff(deltas->coef[i][j][k], probs->coef[i][j][k]);
+
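+ /*
+ * Per 6.3.7, coefficient probs are only read for transform sizes up
+ * to the largest one allowed by tx_mode, so stop once the tx size
+ * matching tx_mode has been processed.
+ */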
+ if (deltas->tx_mode == i)
+ break;
+ }
+}
+
+/* Counterpart to 6.3.8 read_skip_prob() */
+static void update_skip_probs(struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(probs->skip); i++)
+ probs->skip[i] = update_prob(deltas->skip[i], probs->skip[i]);
+}
+
+/* Counterpart to 6.3.9 read_inter_mode_probs() */
+static void update_inter_mode_probs(struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(probs->inter_mode); i++) {
+ u8 *p = probs->inter_mode[i];
+ const u8 *d = deltas->inter_mode[i];
+
+ p[0] = update_prob(d[0], p[0]);
+ p[1] = update_prob(d[1], p[1]);
+ p[2] = update_prob(d[2], p[2]);
+ }
+}
+
+/* Counterpart to 6.3.10 read_interp_filter_probs() */
+static void update_interp_filter_probs(struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(probs->interp_filter); i++) {
+ u8 *p = probs->interp_filter[i];
+ const u8 *d = deltas->interp_filter[i];
+
+ p[0] = update_prob(d[0], p[0]);
+ p[1] = update_prob(d[1], p[1]);
+ }
+}
+
+/* Counterpart to 6.3.11 read_is_inter_probs() */
+static void update_is_inter_probs(struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(probs->is_inter); i++)
+ probs->is_inter[i] = update_prob(deltas->is_inter[i], probs->is_inter[i]);
+}
+
+/* 6.3.12 frame_reference_mode() done entirely in userspace */
+
+/* Counterpart to 6.3.13 frame_reference_mode_probs() */
+static void
+update_frame_reference_mode_probs(unsigned int reference_mode,
+ struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
+{
+ int i;
+
+ if (reference_mode == V4L2_VP9_REFERENCE_MODE_SELECT)
+ for (i = 0; i < ARRAY_SIZE(probs->comp_mode); i++)
+ probs->comp_mode[i] = update_prob(deltas->comp_mode[i],
+ probs->comp_mode[i]);
+
+ if (reference_mode != V4L2_VP9_REFERENCE_MODE_COMPOUND_REFERENCE)
+ for (i = 0; i < ARRAY_SIZE(probs->single_ref); i++) {
+ u8 *p = probs->single_ref[i];
+ const u8 *d = deltas->single_ref[i];
+
+ p[0] = update_prob(d[0], p[0]);
+ p[1] = update_prob(d[1], p[1]);
+ }
+
+ if (reference_mode != V4L2_VP9_REFERENCE_MODE_SINGLE_REFERENCE)
+ for (i = 0; i < ARRAY_SIZE(probs->comp_ref); i++)
+ probs->comp_ref[i] = update_prob(deltas->comp_ref[i], probs->comp_ref[i]);
+}
+
+/* Counterpart to 6.3.14 read_y_mode_probs() */
+static void update_y_mode_probs(struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
+{
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(probs->y_mode); i++)
+ for (j = 0; j < ARRAY_SIZE(probs->y_mode[0]); ++j)
+ probs->y_mode[i][j] =
+ update_prob(deltas->y_mode[i][j], probs->y_mode[i][j]);
+}
+
+/* Counterpart to 6.3.15 read_partition_probs() */
+static void update_partition_probs(struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_ctrl_vp9_compressed_hdr *deltas)
+{
+ int i, j;
+
+ for (i = 0; i < 4; i++)
+ for (j = 0; j < 4; j++) {
+ u8 *p = probs->partition[i * 4 + j];
+ const u8 *d = deltas->partition[i * 4 + j];
+
+ p[0] = update_prob(d[0], p[0]);
+ p[1] = update_prob(d[1], p[1]);
+ p[2] = update_prob(d[2], p[2]);
+ }
+}
+
+static inline int update_mv_prob(int delta, int prob)
+{
+ if (!delta)
+ return prob;
+
+ return delta;
+}
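+
+/*
+ * Unlike update_prob(), MV probabilities are not re-centered: per 6.3.16
+ * a non-zero delta is the new probability itself (the (value << 1) | 1
+ * mapping has already been applied by userspace), and zero means
+ * "no update".
+ */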
+
+/* Counterpart to 6.3.16 mv_probs() */
+static void update_mv_probs(struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_ctrl_vp9_compressed_hdr *deltas,
+ const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ u8 *p = probs->mv.joint;
+ const u8 *d = deltas->mv.joint;
+ unsigned int i, j;
+
+ p[0] = update_mv_prob(d[0], p[0]);
+ p[1] = update_mv_prob(d[1], p[1]);
+ p[2] = update_mv_prob(d[2], p[2]);
+
+ for (i = 0; i < ARRAY_SIZE(probs->mv.sign); i++) {
+ p = probs->mv.sign;
+ d = deltas->mv.sign;
+ p[i] = update_mv_prob(d[i], p[i]);
+
+ p = probs->mv.classes[i];
+ d = deltas->mv.classes[i];
+ for (j = 0; j < ARRAY_SIZE(probs->mv.classes[0]); j++)
+ p[j] = update_mv_prob(d[j], p[j]);
+
+ p = probs->mv.class0_bit;
+ d = deltas->mv.class0_bit;
+ p[i] = update_mv_prob(d[i], p[i]);
+
+ p = probs->mv.bits[i];
+ d = deltas->mv.bits[i];
+ for (j = 0; j < ARRAY_SIZE(probs->mv.bits[0]); j++)
+ p[j] = update_mv_prob(d[j], p[j]);
+
+ for (j = 0; j < ARRAY_SIZE(probs->mv.class0_fr[0]); j++) {
+ p = probs->mv.class0_fr[i][j];
+ d = deltas->mv.class0_fr[i][j];
+
+ p[0] = update_mv_prob(d[0], p[0]);
+ p[1] = update_mv_prob(d[1], p[1]);
+ p[2] = update_mv_prob(d[2], p[2]);
+ }
+
+ p = probs->mv.fr[i];
+ d = deltas->mv.fr[i];
+ for (j = 0; j < ARRAY_SIZE(probs->mv.fr[i]); j++)
+ p[j] = update_mv_prob(d[j], p[j]);
+
+ if (dec_params->flags & V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV) {
+ p = probs->mv.class0_hp;
+ d = deltas->mv.class0_hp;
+ p[i] = update_mv_prob(d[i], p[i]);
+
+ p = probs->mv.hp;
+ d = deltas->mv.hp;
+ p[i] = update_mv_prob(d[i], p[i]);
+ }
+ }
+}
+
+/* Counterpart to 6.3 compressed_header(), but parsing has been done in userspace. */
+void v4l2_vp9_fw_update_probs(struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_ctrl_vp9_compressed_hdr *deltas,
+ const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ if (deltas->tx_mode == V4L2_VP9_TX_MODE_SELECT)
+ update_tx_probs(probs, deltas);
+
+ update_coef_probs(probs, deltas, dec_params);
+
+ update_skip_probs(probs, deltas);
+
+ if (dec_params->flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME ||
+ dec_params->flags & V4L2_VP9_FRAME_FLAG_INTRA_ONLY)
+ return;
+
+ update_inter_mode_probs(probs, deltas);
+
+ if (dec_params->interpolation_filter == V4L2_VP9_INTERP_FILTER_SWITCHABLE)
+ update_interp_filter_probs(probs, deltas);
+
+ update_is_inter_probs(probs, deltas);
+
+ update_frame_reference_mode_probs(dec_params->reference_mode, probs, deltas);
+
+ update_y_mode_probs(probs, deltas);
+
+ update_partition_probs(probs, deltas);
+
+ update_mv_probs(probs, deltas, dec_params);
+}
+EXPORT_SYMBOL_GPL(v4l2_vp9_fw_update_probs);
+
+u8 v4l2_vp9_reset_frame_ctx(const struct v4l2_ctrl_vp9_frame *dec_params,
+ struct v4l2_vp9_frame_context *frame_context)
+{
+ int i;
+ u8 fctx_idx = dec_params->frame_context_idx;
+
+ if (dec_params->flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME ||
+ dec_params->flags & V4L2_VP9_FRAME_FLAG_INTRA_ONLY ||
+ dec_params->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT) {
+ /*
+ * setup_past_independence()
+ * We do nothing here. Instead of storing default probs in some intermediate
+ * location and then copying from that location to appropriate contexts
+ * in save_probs() below, we skip that step and save default probs directly
+ * to appropriate contexts.
+ */
+ if (dec_params->flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME ||
+ dec_params->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT ||
+ dec_params->reset_frame_context == V4L2_VP9_RESET_FRAME_CTX_ALL)
+ for (i = 0; i < 4; ++i)
+ /* save_probs(i) */
+ memcpy(&frame_context[i], &v4l2_vp9_default_probs,
+ sizeof(v4l2_vp9_default_probs));
+ else if (dec_params->reset_frame_context == V4L2_VP9_RESET_FRAME_CTX_SPEC)
+ /* save_probs(fctx_idx) */
+ memcpy(&frame_context[fctx_idx], &v4l2_vp9_default_probs,
+ sizeof(v4l2_vp9_default_probs));
+ fctx_idx = 0;
+ }
+
+ return fctx_idx;
+}
+EXPORT_SYMBOL_GPL(v4l2_vp9_reset_frame_ctx);
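+
+/*
+ * A decoder typically keeps four struct v4l2_vp9_frame_context entries and
+ * calls this at the start of every frame; a minimal sketch (the ctx layout
+ * is driver-specific and only assumed here):
+ *
+ *	u8 idx = v4l2_vp9_reset_frame_ctx(dec_params, ctx->frame_context);
+ *	struct v4l2_vp9_frame_context *probs = &ctx->frame_context[idx];
+ */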
+
+/* 8.4.1 Merge prob process */
+static u8 merge_prob(u8 pre_prob, u32 ct0, u32 ct1, u16 count_sat, u32 max_update_factor)
+{
+ u32 den, prob, count, factor;
+
+ den = ct0 + ct1;
+ if (!den) {
+ /*
+ * prob = 128, count = 0, update_factor = 0
+ * Round2's argument: pre_prob * 256
+ * (pre_prob * 256 + 128) >> 8 == pre_prob
+ */
+ return pre_prob;
+ }
+
+ prob = clamp(((ct0 << 8) + (den >> 1)) / den, (u32)1, (u32)255);
+ count = min_t(u32, den, count_sat);
+ factor = fastdiv(max_update_factor * count, count_sat);
+
+ /*
+ * Round2(pre_prob * (256 - factor) + prob * factor, 8)
+ * Round2(pre_prob * 256 + (prob - pre_prob) * factor, 8)
+ * (pre_prob * 256 >> 8) + (((prob - pre_prob) * factor + 128) >> 8)
+ */
+ return pre_prob + (((prob - pre_prob) * factor + 128) >> 8);
+}
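+
+/*
+ * Worked example: merge_prob(128, 60, 40, 20, 128) sees den = 100,
+ * prob = (60 * 256 + 50) / 100 = 154, count saturates at 20, so
+ * factor = 128 * 20 / 20 = 128 and the result is
+ * 128 + (((154 - 128) * 128 + 128) >> 8) = 128 + 13 = 141.
+ */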
+
+static inline u8 noncoef_merge_prob(u8 pre_prob, u32 ct0, u32 ct1)
+{
+ return merge_prob(pre_prob, ct0, ct1, 20, 128);
+}
+
+/* 8.4.2 Merge probs process */
+/*
+ * merge_probs() is a recursive function in the spec. We avoid recursion in the kernel.
+ * That said, the "tree" parameter of merge_probs() controls how deep the recursion goes.
+ * It turns out that in all cases the recursive calls boil down to a short-ish series
+ * of merge_prob() invocations (note no "s").
+ *
+ * Variant A
+ * ---------
+ * merge_probs(small_token_tree, 2):
+ * merge_prob(p[1], c[0], c[1] + c[2])
+ * merge_prob(p[2], c[1], c[2])
+ *
+ * Variant B
+ * ---------
+ * merge_probs(binary_tree, 0) or
+ * merge_probs(tx_size_8_tree, 0):
+ * merge_prob(p[0], c[0], c[1])
+ *
+ * Variant C
+ * ---------
+ * merge_probs(inter_mode_tree, 0):
+ * merge_prob(p[0], c[2], c[1] + c[0] + c[3])
+ * merge_prob(p[1], c[0], c[1] + c[3])
+ * merge_prob(p[2], c[1], c[3])
+ *
+ * Variant D
+ * ---------
+ * merge_probs(intra_mode_tree, 0):
+ * merge_prob(p[0], c[0], c[1] + ... + c[9])
+ * merge_prob(p[1], c[9], c[1] + ... + c[8])
+ * merge_prob(p[2], c[1], c[2] + ... + c[8])
+ * merge_prob(p[3], c[2] + c[4] + c[5], c[3] + c[8] + c[6] + c[7])
+ * merge_prob(p[4], c[2], c[4] + c[5])
+ * merge_prob(p[5], c[4], c[5])
+ * merge_prob(p[6], c[3], c[8] + c[6] + c[7])
+ * merge_prob(p[7], c[8], c[6] + c[7])
+ * merge_prob(p[8], c[6], c[7])
+ *
+ * Variant E
+ * ---------
+ * merge_probs(partition_tree, 0) or
+ * merge_probs(tx_size_32_tree, 0) or
+ * merge_probs(mv_joint_tree, 0) or
+ * merge_probs(mv_fr_tree, 0):
+ * merge_prob(p[0], c[0], c[1] + c[2] + c[3])
+ * merge_prob(p[1], c[1], c[2] + c[3])
+ * merge_prob(p[2], c[2], c[3])
+ *
+ * Variant F
+ * ---------
+ * merge_probs(interp_filter_tree, 0) or
+ * merge_probs(tx_size_16_tree, 0):
+ * merge_prob(p[0], c[0], c[1] + c[2])
+ * merge_prob(p[1], c[1], c[2])
+ *
+ * Variant G
+ * ---------
+ * merge_probs(mv_class_tree, 0):
+ * merge_prob(p[0], c[0], c[1] + ... + c[10])
+ * merge_prob(p[1], c[1], c[2] + ... + c[10])
+ * merge_prob(p[2], c[2] + c[3], c[4] + ... + c[10])
+ * merge_prob(p[3], c[2], c[3])
+ * merge_prob(p[4], c[4] + c[5], c[6] + ... + c[10])
+ * merge_prob(p[5], c[4], c[5])
+ * merge_prob(p[6], c[6], c[7] + ... + c[10])
+ * merge_prob(p[7], c[7] + c[8], c[9] + c[10])
+ * merge_prob(p[8], c[7], c[8])
+ * merge_prob(p[9], c[9], c[10])
+ */
+
+static inline void merge_probs_variant_a(u8 *p, const u32 *c, u16 count_sat, u32 update_factor)
+{
+ p[1] = merge_prob(p[1], c[0], c[1] + c[2], count_sat, update_factor);
+ p[2] = merge_prob(p[2], c[1], c[2], count_sat, update_factor);
+}
+
+static inline void merge_probs_variant_b(u8 *p, const u32 *c, u16 count_sat, u32 update_factor)
+{
+ p[0] = merge_prob(p[0], c[0], c[1], count_sat, update_factor);
+}
+
+static inline void merge_probs_variant_c(u8 *p, const u32 *c)
+{
+ p[0] = noncoef_merge_prob(p[0], c[2], c[1] + c[0] + c[3]);
+ p[1] = noncoef_merge_prob(p[1], c[0], c[1] + c[3]);
+ p[2] = noncoef_merge_prob(p[2], c[1], c[3]);
+}
+
+static void merge_probs_variant_d(u8 *p, const u32 *c)
+{
+ u32 sum, s2;
+
+ sum = c[1] + c[2] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9];
+
+ p[0] = noncoef_merge_prob(p[0], c[0], sum);
+ sum -= c[9];
+ p[1] = noncoef_merge_prob(p[1], c[9], sum);
+ sum -= c[1];
+ p[2] = noncoef_merge_prob(p[2], c[1], sum);
+ s2 = c[2] + c[4] + c[5];
+ sum -= s2;
+ p[3] = noncoef_merge_prob(p[3], s2, sum);
+ s2 -= c[2];
+ p[4] = noncoef_merge_prob(p[4], c[2], s2);
+ p[5] = noncoef_merge_prob(p[5], c[4], c[5]);
+ sum -= c[3];
+ p[6] = noncoef_merge_prob(p[6], c[3], sum);
+ sum -= c[8];
+ p[7] = noncoef_merge_prob(p[7], c[8], sum);
+ p[8] = noncoef_merge_prob(p[8], c[6], c[7]);
+}
+
+static inline void merge_probs_variant_e(u8 *p, const u32 *c)
+{
+ p[0] = noncoef_merge_prob(p[0], c[0], c[1] + c[2] + c[3]);
+ p[1] = noncoef_merge_prob(p[1], c[1], c[2] + c[3]);
+ p[2] = noncoef_merge_prob(p[2], c[2], c[3]);
+}
+
+static inline void merge_probs_variant_f(u8 *p, const u32 *c)
+{
+ p[0] = noncoef_merge_prob(p[0], c[0], c[1] + c[2]);
+ p[1] = noncoef_merge_prob(p[1], c[1], c[2]);
+}
+
+static void merge_probs_variant_g(u8 *p, const u32 *c)
+{
+ u32 sum;
+
+ sum = c[1] + c[2] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9] + c[10];
+ p[0] = noncoef_merge_prob(p[0], c[0], sum);
+ sum -= c[1];
+ p[1] = noncoef_merge_prob(p[1], c[1], sum);
+ sum -= c[2] + c[3];
+ p[2] = noncoef_merge_prob(p[2], c[2] + c[3], sum);
+ p[3] = noncoef_merge_prob(p[3], c[2], c[3]);
+ sum -= c[4] + c[5];
+ p[4] = noncoef_merge_prob(p[4], c[4] + c[5], sum);
+ p[5] = noncoef_merge_prob(p[5], c[4], c[5]);
+ sum -= c[6];
+ p[6] = noncoef_merge_prob(p[6], c[6], sum);
+ p[7] = noncoef_merge_prob(p[7], c[7] + c[8], c[9] + c[10]);
+ p[8] = noncoef_merge_prob(p[8], c[7], c[8]);
+ p[9] = noncoef_merge_prob(p[9], c[9], c[10]);
+}
+
+/* 8.4.3 Coefficient probability adaptation process */
+static inline void adapt_probs_variant_a_coef(u8 *p, const u32 *c, u32 update_factor)
+{
+ merge_probs_variant_a(p, c, 24, update_factor);
+}
+
+static inline void adapt_probs_variant_b_coef(u8 *p, const u32 *c, u32 update_factor)
+{
+ merge_probs_variant_b(p, c, 24, update_factor);
+}
+
+static void _adapt_coeff(unsigned int i, unsigned int j, unsigned int k,
+ struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_vp9_frame_symbol_counts *counts,
+ u32 uf)
+{
+ s32 l, m;
+
+ for (l = 0; l < ARRAY_SIZE(probs->coef[0][0][0]); l++) {
+ for (m = 0; m < BAND_6(l); m++) {
+ u8 *p = probs->coef[i][j][k][l][m];
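+ /*
+ * Build the two-way branch count for the variant B merge
+ * from the EOB counters: c[0] = eob[...][1] ("more
+ * coefficients" events), c[1] = the remaining
+ * (end-of-block) events.
+ */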
+ const u32 counts_more_coefs[2] = {
+ *counts->eob[i][j][k][l][m][1],
+ *counts->eob[i][j][k][l][m][0] - *counts->eob[i][j][k][l][m][1],
+ };
+
+ adapt_probs_variant_a_coef(p, *counts->coeff[i][j][k][l][m], uf);
+ adapt_probs_variant_b_coef(p, counts_more_coefs, uf);
+ }
+ }
+}
+
+static void _adapt_coef_probs(struct v4l2_vp9_frame_context *probs,
+ const struct v4l2_vp9_frame_symbol_counts *counts,
+ unsigned int uf)
+{
+ unsigned int i, j, k;
+
+ for (i = 0; i < ARRAY_SIZE(probs->coef); i++)
+ for (j = 0; j < ARRAY_SIZE(probs->coef[0]); j++)
+ for (k = 0; k < ARRAY_SIZE(probs->coef[0][0]); k++)
+ _adapt_coeff(i, j, k, probs, counts, uf);
+}
+
+void v4l2_vp9_adapt_coef_probs(struct v4l2_vp9_frame_context *probs,
+ struct v4l2_vp9_frame_symbol_counts *counts,
+ bool use_128,
+ bool frame_is_intra)
+{
+ if (frame_is_intra) {
+ _adapt_coef_probs(probs, counts, 112);
+ } else {
+ if (use_128)
+ _adapt_coef_probs(probs, counts, 128);
+ else
+ _adapt_coef_probs(probs, counts, 112);
+ }
+}
+EXPORT_SYMBOL_GPL(v4l2_vp9_adapt_coef_probs);
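+
+/*
+ * Drivers call this once per decoded frame with the symbol counts gathered
+ * by the hardware; an illustrative sketch (count collection itself is
+ * driver-specific):
+ *
+ *	bool intra = dec_params->flags & (V4L2_VP9_FRAME_FLAG_KEY_FRAME |
+ *					  V4L2_VP9_FRAME_FLAG_INTRA_ONLY);
+ *	v4l2_vp9_adapt_coef_probs(probs, counts, use_128, intra);
+ */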
+
+/* 8.4.4 Non coefficient probability adaptation process, adapt_probs() */
+static inline void adapt_probs_variant_b(u8 *p, const u32 *c)
+{
+ merge_probs_variant_b(p, c, 20, 128);
+}
+
+static inline void adapt_probs_variant_c(u8 *p, const u32 *c)
+{
+ merge_probs_variant_c(p, c);
+}
+
+static inline void adapt_probs_variant_d(u8 *p, const u32 *c)
+{
+ merge_probs_variant_d(p, c);
+}
+
+static inline void adapt_probs_variant_e(u8 *p, const u32 *c)
+{
+ merge_probs_variant_e(p, c);
+}
+
+static inline void adapt_probs_variant_f(u8 *p, const u32 *c)
+{
+ merge_probs_variant_f(p, c);
+}
+
+static inline void adapt_probs_variant_g(u8 *p, const u32 *c)
+{
+ merge_probs_variant_g(p, c);
+}
+
+/* 8.4.4 Non coefficient probability adaptation process, adapt_prob() */
+static inline u8 adapt_prob(u8 prob, const u32 counts[2])
+{
+ return noncoef_merge_prob(prob, counts[0], counts[1]);
+}
+
+/* 8.4.4 Non coefficient probability adaptation process */
+void v4l2_vp9_adapt_noncoef_probs(struct v4l2_vp9_frame_context *probs,
+ struct v4l2_vp9_frame_symbol_counts *counts,
+ u8 reference_mode, u8 interpolation_filter, u8 tx_mode,
+ u32 flags)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(probs->is_inter); i++)
+ probs->is_inter[i] = adapt_prob(probs->is_inter[i], (*counts->intra_inter)[i]);
+
+ for (i = 0; i < ARRAY_SIZE(probs->comp_mode); i++)
+ probs->comp_mode[i] = adapt_prob(probs->comp_mode[i], (*counts->comp)[i]);
+
+ for (i = 0; i < ARRAY_SIZE(probs->comp_ref); i++)
+ probs->comp_ref[i] = adapt_prob(probs->comp_ref[i], (*counts->comp_ref)[i]);
+
+ if (reference_mode != V4L2_VP9_REFERENCE_MODE_COMPOUND_REFERENCE)
+ for (i = 0; i < ARRAY_SIZE(probs->single_ref); i++)
+ for (j = 0; j < ARRAY_SIZE(probs->single_ref[0]); j++)
+ probs->single_ref[i][j] = adapt_prob(probs->single_ref[i][j],
+ (*counts->single_ref)[i][j]);
+
+ for (i = 0; i < ARRAY_SIZE(probs->inter_mode); i++)
+ adapt_probs_variant_c(probs->inter_mode[i], (*counts->mv_mode)[i]);
+
+ for (i = 0; i < ARRAY_SIZE(probs->y_mode); i++)
+ adapt_probs_variant_d(probs->y_mode[i], (*counts->y_mode)[i]);
+
+ for (i = 0; i < ARRAY_SIZE(probs->uv_mode); i++)
+ adapt_probs_variant_d(probs->uv_mode[i], (*counts->uv_mode)[i]);
+
+ for (i = 0; i < ARRAY_SIZE(probs->partition); i++)
+ adapt_probs_variant_e(probs->partition[i], (*counts->partition)[i]);
+
+ for (i = 0; i < ARRAY_SIZE(probs->skip); i++)
+ probs->skip[i] = adapt_prob(probs->skip[i], (*counts->skip)[i]);
+
+ if (interpolation_filter == V4L2_VP9_INTERP_FILTER_SWITCHABLE)
+ for (i = 0; i < ARRAY_SIZE(probs->interp_filter); i++)
+ adapt_probs_variant_f(probs->interp_filter[i], (*counts->filter)[i]);
+
+ if (tx_mode == V4L2_VP9_TX_MODE_SELECT)
+ for (i = 0; i < ARRAY_SIZE(probs->tx8); i++) {
+ adapt_probs_variant_b(probs->tx8[i], (*counts->tx8p)[i]);
+ adapt_probs_variant_f(probs->tx16[i], (*counts->tx16p)[i]);
+ adapt_probs_variant_e(probs->tx32[i], (*counts->tx32p)[i]);
+ }
+
+ adapt_probs_variant_e(probs->mv.joint, *counts->mv_joint);
+
+ for (i = 0; i < ARRAY_SIZE(probs->mv.sign); i++) {
+ probs->mv.sign[i] = adapt_prob(probs->mv.sign[i], (*counts->sign)[i]);
+
+ adapt_probs_variant_g(probs->mv.classes[i], (*counts->classes)[i]);
+
+ probs->mv.class0_bit[i] = adapt_prob(probs->mv.class0_bit[i], (*counts->class0)[i]);
+
+ for (j = 0; j < ARRAY_SIZE(probs->mv.bits[0]); j++)
+ probs->mv.bits[i][j] = adapt_prob(probs->mv.bits[i][j],
+ (*counts->bits)[i][j]);
+
+ for (j = 0; j < ARRAY_SIZE(probs->mv.class0_fr[0]); j++)
+ adapt_probs_variant_e(probs->mv.class0_fr[i][j],
+ (*counts->class0_fp)[i][j]);
+
+ adapt_probs_variant_e(probs->mv.fr[i], (*counts->fp)[i]);
+
+ if (!(flags & V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV))
+ continue;
+
+ probs->mv.class0_hp[i] = adapt_prob(probs->mv.class0_hp[i],
+ (*counts->class0_hp)[i]);
+
+ probs->mv.hp[i] = adapt_prob(probs->mv.hp[i], (*counts->hp)[i]);
+ }
+}
+EXPORT_SYMBOL_GPL(v4l2_vp9_adapt_noncoef_probs);
+
+bool
+v4l2_vp9_seg_feat_enabled(const u8 *feature_enabled,
+ unsigned int feature,
+ unsigned int segid)
+{
+ u8 mask = V4L2_VP9_SEGMENT_FEATURE_ENABLED(feature);
+
+ return !!(feature_enabled[segid] & mask);
+}
+EXPORT_SYMBOL_GPL(v4l2_vp9_seg_feat_enabled);
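+
+/*
+ * For example, a driver can check whether the alternate quantizer feature
+ * is active for segment 3 with:
+ *
+ *	v4l2_vp9_seg_feat_enabled(seg->feature_enabled,
+ *				  V4L2_VP9_SEG_LVL_ALT_Q, 3)
+ *
+ * where seg points at the v4l2_vp9_segmentation control data.
+ */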
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("V4L2 VP9 Helpers");
+MODULE_AUTHOR("Andrzej Pietrasiewicz <andrzej.p@collabora.com>");
diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
new file mode 100644
index 0000000000..606a271bdd
--- /dev/null
+++ b/drivers/media/v4l2-core/videobuf-core.c
@@ -0,0 +1,1198 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * generic helper functions for handling video4linux capture buffers
+ *
+ * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
+ *
+ * Highly based on video-buf written originally by:
+ * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
+ * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org>
+ * (c) 2006 Ted Walther and John Sokol
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include <media/videobuf-core.h>
+#include <media/v4l2-common.h>
+
+#define MAGIC_BUFFER 0x20070728
+#define MAGIC_CHECK(is, should) \
+ do { \
+ if (unlikely((is) != (should))) { \
+ printk(KERN_ERR \
+ "magic mismatch: %x (expected %x)\n", \
+ is, should); \
+ BUG(); \
+ } \
+ } while (0)
+
+static int debug;
+module_param(debug, int, 0644);
+
+MODULE_DESCRIPTION("helper module to manage video4linux buffers");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>");
+MODULE_LICENSE("GPL");
+
+#define dprintk(level, fmt, arg...) \
+ do { \
+ if (debug >= level) \
+ printk(KERN_DEBUG "vbuf: " fmt, ## arg); \
+ } while (0)
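+
+/*
+ * Message verbosity is controlled by the "debug" module parameter, e.g.
+ * (assuming the module is loaded as videobuf-core):
+ *
+ *	echo 2 > /sys/module/videobuf_core/parameters/debug
+ */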
+
+/* --------------------------------------------------------------------- */
+
+#define CALL(q, f, arg...) \
+ ((q->int_ops->f) ? q->int_ops->f(arg) : 0)
+#define CALLPTR(q, f, arg...) \
+ ((q->int_ops->f) ? q->int_ops->f(arg) : NULL)
+
+struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q)
+{
+ struct videobuf_buffer *vb;
+
+ BUG_ON(q->msize < sizeof(*vb));
+
+ if (!q->int_ops || !q->int_ops->alloc_vb) {
+ printk(KERN_ERR "No specific ops defined!\n");
+ BUG();
+ }
+
+ vb = q->int_ops->alloc_vb(q->msize);
+ if (NULL != vb) {
+ init_waitqueue_head(&vb->done);
+ vb->magic = MAGIC_BUFFER;
+ }
+
+ return vb;
+}
+EXPORT_SYMBOL_GPL(videobuf_alloc_vb);
+
+static int state_neither_active_nor_queued(struct videobuf_queue *q,
+ struct videobuf_buffer *vb)
+{
+ unsigned long flags;
+ bool rc;
+
+ spin_lock_irqsave(q->irqlock, flags);
+ rc = vb->state != VIDEOBUF_ACTIVE && vb->state != VIDEOBUF_QUEUED;
+ spin_unlock_irqrestore(q->irqlock, flags);
+ return rc;
+}
+
+int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb,
+ int non_blocking, int intr)
+{
+ bool is_ext_locked;
+ int ret = 0;
+
+ MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
+
+ if (non_blocking) {
+ if (state_neither_active_nor_queued(q, vb))
+ return 0;
+ return -EAGAIN;
+ }
+
+ is_ext_locked = q->ext_lock && mutex_is_locked(q->ext_lock);
+
+ /*
+ * Release the vdev lock to prevent this wait from blocking outside
+ * access to the device.
+ */
+ if (is_ext_locked)
+ mutex_unlock(q->ext_lock);
+ if (intr)
+ ret = wait_event_interruptible(vb->done,
+ state_neither_active_nor_queued(q, vb));
+ else
+ wait_event(vb->done, state_neither_active_nor_queued(q, vb));
+ /* Relock */
+ if (is_ext_locked)
+ mutex_lock(q->ext_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(videobuf_waiton);
+
+int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
+ struct v4l2_framebuffer *fbuf)
+{
+ MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
+ MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
+
+ return CALL(q, iolock, q, vb, fbuf);
+}
+EXPORT_SYMBOL_GPL(videobuf_iolock);
+
+void *videobuf_queue_to_vaddr(struct videobuf_queue *q,
+ struct videobuf_buffer *buf)
+{
+ if (q->int_ops->vaddr)
+ return q->int_ops->vaddr(buf);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(videobuf_queue_to_vaddr);
+
+/* --------------------------------------------------------------------- */
+
+
+void videobuf_queue_core_init(struct videobuf_queue *q,
+ const struct videobuf_queue_ops *ops,
+ struct device *dev,
+ spinlock_t *irqlock,
+ enum v4l2_buf_type type,
+ enum v4l2_field field,
+ unsigned int msize,
+ void *priv,
+ struct videobuf_qtype_ops *int_ops,
+ struct mutex *ext_lock)
+{
+ BUG_ON(!q);
+ memset(q, 0, sizeof(*q));
+ q->irqlock = irqlock;
+ q->ext_lock = ext_lock;
+ q->dev = dev;
+ q->type = type;
+ q->field = field;
+ q->msize = msize;
+ q->ops = ops;
+ q->priv_data = priv;
+ q->int_ops = int_ops;
+
+ /* All buffer operations are mandatory */
+ BUG_ON(!q->ops->buf_setup);
+ BUG_ON(!q->ops->buf_prepare);
+ BUG_ON(!q->ops->buf_queue);
+ BUG_ON(!q->ops->buf_release);
+
+ /* Lock is mandatory for queue_cancel to work */
+ BUG_ON(!irqlock);
+
+ /* Having implementations for the abstract methods is mandatory */
+ BUG_ON(!q->int_ops);
+
+ mutex_init(&q->vb_lock);
+ init_waitqueue_head(&q->wait);
+ INIT_LIST_HEAD(&q->stream);
+}
+EXPORT_SYMBOL_GPL(videobuf_queue_core_init);
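+
+/*
+ * This core init is not called by drivers directly: each memory-type
+ * backend (videobuf-vmalloc, videobuf-dma-sg, videobuf-dma-contig) wraps
+ * it with its own int_ops, and drivers use the backend's init helper,
+ * e.g. videobuf_queue_vmalloc_init().
+ */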
+
+/* Locking: only used by bttv, and unsafely so; find a way to remove this. */
+int videobuf_queue_is_busy(struct videobuf_queue *q)
+{
+ int i;
+
+ MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
+
+ if (q->streaming) {
+ dprintk(1, "busy: streaming active\n");
+ return 1;
+ }
+ if (q->reading) {
+ dprintk(1, "busy: pending read #1\n");
+ return 1;
+ }
+ if (q->read_buf) {
+ dprintk(1, "busy: pending read #2\n");
+ return 1;
+ }
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ if (NULL == q->bufs[i])
+ continue;
+ if (q->bufs[i]->map) {
+ dprintk(1, "busy: buffer #%d mapped\n", i);
+ return 1;
+ }
+ if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
+ dprintk(1, "busy: buffer #%d queued\n", i);
+ return 1;
+ }
+ if (q->bufs[i]->state == VIDEOBUF_ACTIVE) {
+ dprintk(1, "busy: buffer #%d active\n", i);
+ return 1;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(videobuf_queue_is_busy);
+
+/*
+ * __videobuf_free() - free all the buffers and their control structures
+ *
+ * This function can only be called if streaming/reading is off, i.e. no buffers
+ * are under control of the driver.
+ */
+/* Locking: Caller holds q->vb_lock */
+static int __videobuf_free(struct videobuf_queue *q)
+{
+ int i;
+
+ dprintk(1, "%s\n", __func__);
+ if (!q)
+ return 0;
+
+ if (q->streaming || q->reading) {
+ dprintk(1, "Cannot free buffers when streaming or reading\n");
+ return -EBUSY;
+ }
+
+ MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
+
+ for (i = 0; i < VIDEO_MAX_FRAME; i++)
+ if (q->bufs[i] && q->bufs[i]->map) {
+ dprintk(1, "Cannot free mmapped buffers\n");
+ return -EBUSY;
+ }
+
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ if (NULL == q->bufs[i])
+ continue;
+ q->ops->buf_release(q, q->bufs[i]);
+ kfree(q->bufs[i]);
+ q->bufs[i] = NULL;
+ }
+
+ return 0;
+}
+
+/* Locking: Caller holds q->vb_lock */
+void videobuf_queue_cancel(struct videobuf_queue *q)
+{
+ unsigned long flags = 0;
+ int i;
+
+ q->streaming = 0;
+ q->reading = 0;
+ wake_up_interruptible_sync(&q->wait);
+
+ /* remove queued buffers from list */
+ spin_lock_irqsave(q->irqlock, flags);
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ if (NULL == q->bufs[i])
+ continue;
+ if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
+ list_del(&q->bufs[i]->queue);
+ q->bufs[i]->state = VIDEOBUF_ERROR;
+ wake_up_all(&q->bufs[i]->done);
+ }
+ }
+ spin_unlock_irqrestore(q->irqlock, flags);
+
+ /* free all buffers + clear queue */
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ if (NULL == q->bufs[i])
+ continue;
+ q->ops->buf_release(q, q->bufs[i]);
+ }
+ INIT_LIST_HEAD(&q->stream);
+}
+EXPORT_SYMBOL_GPL(videobuf_queue_cancel);
+
+/* --------------------------------------------------------------------- */
+
+/* Locking: Caller holds q->vb_lock */
+enum v4l2_field videobuf_next_field(struct videobuf_queue *q)
+{
+ enum v4l2_field field = q->field;
+
+ BUG_ON(V4L2_FIELD_ANY == field);
+
+ if (V4L2_FIELD_ALTERNATE == field) {
+ if (V4L2_FIELD_TOP == q->last) {
+ field = V4L2_FIELD_BOTTOM;
+ q->last = V4L2_FIELD_BOTTOM;
+ } else {
+ field = V4L2_FIELD_TOP;
+ q->last = V4L2_FIELD_TOP;
+ }
+ }
+ return field;
+}
+EXPORT_SYMBOL_GPL(videobuf_next_field);
+
+/* Locking: Caller holds q->vb_lock */
+static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
+ struct videobuf_buffer *vb, enum v4l2_buf_type type)
+{
+ MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
+ MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
+
+ b->index = vb->i;
+ b->type = type;
+
+ b->memory = vb->memory;
+ switch (b->memory) {
+ case V4L2_MEMORY_MMAP:
+ b->m.offset = vb->boff;
+ b->length = vb->bsize;
+ break;
+ case V4L2_MEMORY_USERPTR:
+ b->m.userptr = vb->baddr;
+ b->length = vb->bsize;
+ break;
+ case V4L2_MEMORY_OVERLAY:
+ b->m.offset = vb->boff;
+ break;
+ case V4L2_MEMORY_DMABUF:
+ /* DMABUF is not handled in videobuf framework */
+ break;
+ }
+
+ b->flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ if (vb->map)
+ b->flags |= V4L2_BUF_FLAG_MAPPED;
+
+ switch (vb->state) {
+ case VIDEOBUF_PREPARED:
+ case VIDEOBUF_QUEUED:
+ case VIDEOBUF_ACTIVE:
+ b->flags |= V4L2_BUF_FLAG_QUEUED;
+ break;
+ case VIDEOBUF_ERROR:
+ b->flags |= V4L2_BUF_FLAG_ERROR;
+ fallthrough;
+ case VIDEOBUF_DONE:
+ b->flags |= V4L2_BUF_FLAG_DONE;
+ break;
+ case VIDEOBUF_NEEDS_INIT:
+ case VIDEOBUF_IDLE:
+ /* nothing */
+ break;
+ }
+
+ b->field = vb->field;
+ v4l2_buffer_set_timestamp(b, vb->ts);
+ b->bytesused = vb->size;
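+ /* field_count counts fields; the frame sequence number is half of it */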
+ b->sequence = vb->field_count >> 1;
+}
+
+int videobuf_mmap_free(struct videobuf_queue *q)
+{
+ int ret;
+ videobuf_queue_lock(q);
+ ret = __videobuf_free(q);
+ videobuf_queue_unlock(q);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(videobuf_mmap_free);
+
+/* Locking: Caller holds q->vb_lock */
+int __videobuf_mmap_setup(struct videobuf_queue *q,
+ unsigned int bcount, unsigned int bsize,
+ enum v4l2_memory memory)
+{
+ unsigned int i;
+ int err;
+
+ MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
+
+ err = __videobuf_free(q);
+ if (0 != err)
+ return err;
+
+ /* Allocate and initialize buffers */
+ for (i = 0; i < bcount; i++) {
+ q->bufs[i] = videobuf_alloc_vb(q);
+
+ if (NULL == q->bufs[i])
+ break;
+
+ q->bufs[i]->i = i;
+ q->bufs[i]->memory = memory;
+ q->bufs[i]->bsize = bsize;
+ switch (memory) {
+ case V4L2_MEMORY_MMAP:
+ q->bufs[i]->boff = PAGE_ALIGN(bsize) * i;
+ break;
+ case V4L2_MEMORY_USERPTR:
+ case V4L2_MEMORY_OVERLAY:
+ case V4L2_MEMORY_DMABUF:
+ /* nothing */
+ break;
+ }
+ }
+
+ if (!i)
+ return -ENOMEM;
+
+ dprintk(1, "mmap setup: %d buffers, %d bytes each\n", i, bsize);
+
+ return i;
+}
+EXPORT_SYMBOL_GPL(__videobuf_mmap_setup);
+
+int videobuf_mmap_setup(struct videobuf_queue *q,
+ unsigned int bcount, unsigned int bsize,
+ enum v4l2_memory memory)
+{
+ int ret;
+ videobuf_queue_lock(q);
+ ret = __videobuf_mmap_setup(q, bcount, bsize, memory);
+ videobuf_queue_unlock(q);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(videobuf_mmap_setup);
+
+int videobuf_reqbufs(struct videobuf_queue *q,
+ struct v4l2_requestbuffers *req)
+{
+ unsigned int size, count;
+ int retval;
+
+ if (req->memory != V4L2_MEMORY_MMAP &&
+ req->memory != V4L2_MEMORY_USERPTR &&
+ req->memory != V4L2_MEMORY_OVERLAY) {
+ dprintk(1, "reqbufs: memory type invalid\n");
+ return -EINVAL;
+ }
+
+ videobuf_queue_lock(q);
+ if (req->type != q->type) {
+ dprintk(1, "reqbufs: queue type invalid\n");
+ retval = -EINVAL;
+ goto done;
+ }
+
+ if (q->streaming) {
+ dprintk(1, "reqbufs: streaming already exists\n");
+ retval = -EBUSY;
+ goto done;
+ }
+ if (!list_empty(&q->stream)) {
+ dprintk(1, "reqbufs: stream running\n");
+ retval = -EBUSY;
+ goto done;
+ }
+
+ if (req->count == 0) {
+ dprintk(1, "reqbufs: count invalid (%d)\n", req->count);
+ retval = __videobuf_free(q);
+ goto done;
+ }
+
+ count = req->count;
+ if (count > VIDEO_MAX_FRAME)
+ count = VIDEO_MAX_FRAME;
+ size = 0;
+ q->ops->buf_setup(q, &count, &size);
+ dprintk(1, "reqbufs: bufs=%d, size=0x%x [%u pages total]\n",
+ count, size,
+ (unsigned int)((count * PAGE_ALIGN(size)) >> PAGE_SHIFT));
+
+ retval = __videobuf_mmap_setup(q, count, size, req->memory);
+ if (retval < 0) {
+ dprintk(1, "reqbufs: mmap setup returned %d\n", retval);
+ goto done;
+ }
+
+ req->count = retval;
+ retval = 0;
+
+ done:
+ videobuf_queue_unlock(q);
+ return retval;
+}
+EXPORT_SYMBOL_GPL(videobuf_reqbufs);
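+
+/*
+ * Typical use from a driver's VIDIOC_REQBUFS handler; an illustrative
+ * sketch (struct my_fh and its queue member are made up for the example):
+ *
+ *	static int vidioc_reqbufs(struct file *file, void *priv,
+ *				  struct v4l2_requestbuffers *req)
+ *	{
+ *		struct my_fh *fh = priv;
+ *
+ *		return videobuf_reqbufs(&fh->queue, req);
+ *	}
+ */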
+
+int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b)
+{
+ int ret = -EINVAL;
+
+ videobuf_queue_lock(q);
+ if (unlikely(b->type != q->type)) {
+ dprintk(1, "querybuf: Wrong type.\n");
+ goto done;
+ }
+ if (unlikely(b->index >= VIDEO_MAX_FRAME)) {
+ dprintk(1, "querybuf: index out of range.\n");
+ goto done;
+ }
+ if (unlikely(NULL == q->bufs[b->index])) {
+ dprintk(1, "querybuf: buffer is null.\n");
+ goto done;
+ }
+
+ videobuf_status(q, b, q->bufs[b->index], q->type);
+
+ ret = 0;
+done:
+ videobuf_queue_unlock(q);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(videobuf_querybuf);
+
+int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
+{
+ struct videobuf_buffer *buf;
+ enum v4l2_field field;
+ unsigned long flags = 0;
+ int retval;
+
+ MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
+
+ if (b->memory == V4L2_MEMORY_MMAP)
+ mmap_read_lock(current->mm);
+
+ videobuf_queue_lock(q);
+ retval = -EBUSY;
+ if (q->reading) {
+ dprintk(1, "qbuf: Reading running...\n");
+ goto done;
+ }
+ retval = -EINVAL;
+ if (b->type != q->type) {
+ dprintk(1, "qbuf: Wrong type.\n");
+ goto done;
+ }
+ if (b->index >= VIDEO_MAX_FRAME) {
+ dprintk(1, "qbuf: index out of range.\n");
+ goto done;
+ }
+ buf = q->bufs[b->index];
+ if (NULL == buf) {
+ dprintk(1, "qbuf: buffer is null.\n");
+ goto done;
+ }
+ MAGIC_CHECK(buf->magic, MAGIC_BUFFER);
+ if (buf->memory != b->memory) {
+ dprintk(1, "qbuf: memory type is wrong.\n");
+ goto done;
+ }
+ if (buf->state != VIDEOBUF_NEEDS_INIT && buf->state != VIDEOBUF_IDLE) {
+ dprintk(1, "qbuf: buffer is already queued or active.\n");
+ goto done;
+ }
+
+ switch (b->memory) {
+ case V4L2_MEMORY_MMAP:
+ if (0 == buf->baddr) {
+ dprintk(1, "qbuf: mmap requested but buffer addr is zero!\n");
+ goto done;
+ }
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT
+ || q->type == V4L2_BUF_TYPE_VBI_OUTPUT
+ || q->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT
+ || q->type == V4L2_BUF_TYPE_SDR_OUTPUT) {
+ buf->size = b->bytesused;
+ buf->field = b->field;
+ buf->ts = v4l2_buffer_get_timestamp(b);
+ }
+ break;
+ case V4L2_MEMORY_USERPTR:
+ if (b->length < buf->bsize) {
+ dprintk(1, "qbuf: buffer length is not enough\n");
+ goto done;
+ }
+ if (VIDEOBUF_NEEDS_INIT != buf->state &&
+ buf->baddr != b->m.userptr)
+ q->ops->buf_release(q, buf);
+ buf->baddr = b->m.userptr;
+ break;
+ case V4L2_MEMORY_OVERLAY:
+ buf->boff = b->m.offset;
+ break;
+ default:
+ dprintk(1, "qbuf: wrong memory type\n");
+ goto done;
+ }
+
+ dprintk(1, "qbuf: requesting next field\n");
+ field = videobuf_next_field(q);
+ retval = q->ops->buf_prepare(q, buf, field);
+ if (0 != retval) {
+ dprintk(1, "qbuf: buffer_prepare returned %d\n", retval);
+ goto done;
+ }
+
+ list_add_tail(&buf->stream, &q->stream);
+ if (q->streaming) {
+ spin_lock_irqsave(q->irqlock, flags);
+ q->ops->buf_queue(q, buf);
+ spin_unlock_irqrestore(q->irqlock, flags);
+ }
+ dprintk(1, "qbuf: succeeded\n");
+ retval = 0;
+ wake_up_interruptible_sync(&q->wait);
+
+done:
+ videobuf_queue_unlock(q);
+
+ if (b->memory == V4L2_MEMORY_MMAP)
+ mmap_read_unlock(current->mm);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(videobuf_qbuf);
+
+/* Locking: Caller holds q->vb_lock */
+static int stream_next_buffer_check_queue(struct videobuf_queue *q, int noblock)
+{
+ int retval;
+
+checks:
+ if (!q->streaming) {
+ dprintk(1, "next_buffer: Not streaming\n");
+ retval = -EINVAL;
+ goto done;
+ }
+
+ if (list_empty(&q->stream)) {
+ if (noblock) {
+ retval = -EAGAIN;
+ dprintk(2, "next_buffer: no buffers to dequeue\n");
+ goto done;
+ } else {
+ dprintk(2, "next_buffer: waiting on buffer\n");
+
+ /* Drop lock to avoid deadlock with qbuf */
+ videobuf_queue_unlock(q);
+
+ /* Checking list_empty and streaming without the lock
+ * is safe here: we jump back to the "checks" label and
+ * re-validate both under the lock before proceeding */
+ retval = wait_event_interruptible(q->wait,
+ !list_empty(&q->stream) || !q->streaming);
+ videobuf_queue_lock(q);
+
+ if (retval)
+ goto done;
+
+ goto checks;
+ }
+ }
+
+ retval = 0;
+
+done:
+ return retval;
+}
+
+/* Locking: Caller holds q->vb_lock */
+static int stream_next_buffer(struct videobuf_queue *q,
+ struct videobuf_buffer **vb, int nonblocking)
+{
+ int retval;
+ struct videobuf_buffer *buf = NULL;
+
+ retval = stream_next_buffer_check_queue(q, nonblocking);
+ if (retval)
+ goto done;
+
+ buf = list_entry(q->stream.next, struct videobuf_buffer, stream);
+ retval = videobuf_waiton(q, buf, nonblocking, 1);
+ if (retval < 0)
+ goto done;
+
+ *vb = buf;
+done:
+ return retval;
+}
+
+int videobuf_dqbuf(struct videobuf_queue *q,
+ struct v4l2_buffer *b, int nonblocking)
+{
+ struct videobuf_buffer *buf = NULL;
+ int retval;
+
+ MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
+
+ memset(b, 0, sizeof(*b));
+ videobuf_queue_lock(q);
+
+ retval = stream_next_buffer(q, &buf, nonblocking);
+ if (retval < 0) {
+ dprintk(1, "dqbuf: next_buffer error: %i\n", retval);
+ goto done;
+ }
+
+ switch (buf->state) {
+ case VIDEOBUF_ERROR:
+ dprintk(1, "dqbuf: state is error\n");
+ break;
+ case VIDEOBUF_DONE:
+ dprintk(1, "dqbuf: state is done\n");
+ break;
+ default:
+ dprintk(1, "dqbuf: state invalid\n");
+ retval = -EINVAL;
+ goto done;
+ }
+ CALL(q, sync, q, buf);
+ videobuf_status(q, b, buf, q->type);
+ list_del(&buf->stream);
+ buf->state = VIDEOBUF_IDLE;
+ b->flags &= ~V4L2_BUF_FLAG_DONE;
+done:
+ videobuf_queue_unlock(q);
+ return retval;
+}
+EXPORT_SYMBOL_GPL(videobuf_dqbuf);
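+
+/*
+ * The expected calling sequence, as a sketch: queue buffers with
+ * videobuf_qbuf(), kick the queue with videobuf_streamon() (defined
+ * below), then loop on videobuf_dqbuf() until done. "q", "b" and the
+ * loop condition are illustrative only:
+ *
+ *	videobuf_qbuf(q, b);
+ *	videobuf_streamon(q);
+ *	while (capturing)
+ *		videobuf_dqbuf(q, b, file->f_flags & O_NONBLOCK);
+ *	videobuf_streamoff(q);
+ */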
+
+int videobuf_streamon(struct videobuf_queue *q)
+{
+ struct videobuf_buffer *buf;
+ unsigned long flags = 0;
+ int retval;
+
+ videobuf_queue_lock(q);
+ retval = -EBUSY;
+ if (q->reading)
+ goto done;
+ retval = 0;
+ if (q->streaming)
+ goto done;
+ q->streaming = 1;
+ spin_lock_irqsave(q->irqlock, flags);
+ list_for_each_entry(buf, &q->stream, stream)
+ if (buf->state == VIDEOBUF_PREPARED)
+ q->ops->buf_queue(q, buf);
+ spin_unlock_irqrestore(q->irqlock, flags);
+
+ wake_up_interruptible_sync(&q->wait);
+done:
+ videobuf_queue_unlock(q);
+ return retval;
+}
+EXPORT_SYMBOL_GPL(videobuf_streamon);
+
+/* Locking: Caller holds q->vb_lock */
+static int __videobuf_streamoff(struct videobuf_queue *q)
+{
+ if (!q->streaming)
+ return -EINVAL;
+
+ videobuf_queue_cancel(q);
+
+ return 0;
+}
+
+int videobuf_streamoff(struct videobuf_queue *q)
+{
+ int retval;
+
+ videobuf_queue_lock(q);
+ retval = __videobuf_streamoff(q);
+ videobuf_queue_unlock(q);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(videobuf_streamoff);
+
+/* Locking: Caller holds q->vb_lock */
+static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q,
+ char __user *data,
+ size_t count, loff_t *ppos)
+{
+ enum v4l2_field field;
+ unsigned long flags = 0;
+ int retval;
+
+ MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
+
+ /* setup stuff */
+ q->read_buf = videobuf_alloc_vb(q);
+ if (NULL == q->read_buf)
+ return -ENOMEM;
+
+ q->read_buf->memory = V4L2_MEMORY_USERPTR;
+ q->read_buf->baddr = (unsigned long)data;
+ q->read_buf->bsize = count;
+
+ field = videobuf_next_field(q);
+ retval = q->ops->buf_prepare(q, q->read_buf, field);
+ if (0 != retval)
+ goto done;
+
+ /* start capture & wait */
+ spin_lock_irqsave(q->irqlock, flags);
+ q->ops->buf_queue(q, q->read_buf);
+ spin_unlock_irqrestore(q->irqlock, flags);
+ retval = videobuf_waiton(q, q->read_buf, 0, 0);
+ if (0 == retval) {
+ CALL(q, sync, q, q->read_buf);
+ if (VIDEOBUF_ERROR == q->read_buf->state)
+ retval = -EIO;
+ else
+ retval = q->read_buf->size;
+ }
+
+done:
+ /* cleanup */
+ q->ops->buf_release(q, q->read_buf);
+ kfree(q->read_buf);
+ q->read_buf = NULL;
+ return retval;
+}
+
+static int __videobuf_copy_to_user(struct videobuf_queue *q,
+ struct videobuf_buffer *buf,
+ char __user *data, size_t count,
+ int nonblocking)
+{
+ void *vaddr = CALLPTR(q, vaddr, buf);
+
+ /* copy to userspace */
+ if (count > buf->size - q->read_off)
+ count = buf->size - q->read_off;
+
+ if (copy_to_user(data, vaddr + q->read_off, count))
+ return -EFAULT;
+
+ return count;
+}
+
+static int __videobuf_copy_stream(struct videobuf_queue *q,
+ struct videobuf_buffer *buf,
+ char __user *data, size_t count, size_t pos,
+ int vbihack, int nonblocking)
+{
+ unsigned int *fc = CALLPTR(q, vaddr, buf);
+
+ if (vbihack) {
+ /* dirty, undocumented hack -- pass the frame counter
+ * within the last four bytes of each vbi data block.
+ * We need it to maintain backward compatibility with
+ * the vbi decoding software out there ... */
+ fc += (buf->size >> 2) - 1;
+ *fc = buf->field_count >> 1;
+ dprintk(1, "vbihack: %d\n", *fc);
+ }
+
+ /* copy stuff using the common method */
+ count = __videobuf_copy_to_user(q, buf, data, count, nonblocking);
+
+ if ((count == -EFAULT) && (pos == 0))
+ return -EFAULT;
+
+ return count;
+}
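+
+/*
+ * A vbi consumer that wants the frame counter smuggled in above would
+ * read it back from the last four bytes of each block; a sketch with
+ * hypothetical "vbi_buf"/"vbi_size" names, assuming the block size is
+ * a multiple of four:
+ *
+ *	unsigned int fc;
+ *
+ *	memcpy(&fc, vbi_buf + vbi_size - sizeof(fc), sizeof(fc));
+ */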
+
+ssize_t videobuf_read_one(struct videobuf_queue *q,
+ char __user *data, size_t count, loff_t *ppos,
+ int nonblocking)
+{
+ enum v4l2_field field;
+ unsigned long flags = 0;
+ unsigned size = 0, nbufs = 1;
+ int retval;
+
+ MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
+
+ videobuf_queue_lock(q);
+
+ q->ops->buf_setup(q, &nbufs, &size);
+
+ if (NULL == q->read_buf &&
+ count >= size &&
+ !nonblocking) {
+ retval = videobuf_read_zerocopy(q, data, count, ppos);
+ if (retval >= 0 || retval == -EIO)
+ /* ok, all done */
+ goto done;
+ /* fallback to kernel bounce buffer on failures */
+ }
+
+ if (NULL == q->read_buf) {
+ /* need to capture a new frame */
+ retval = -ENOMEM;
+ q->read_buf = videobuf_alloc_vb(q);
+
+ dprintk(1, "video alloc=0x%p\n", q->read_buf);
+ if (NULL == q->read_buf)
+ goto done;
+ q->read_buf->memory = V4L2_MEMORY_USERPTR;
+ q->read_buf->bsize = count; /* preferred size */
+ field = videobuf_next_field(q);
+ retval = q->ops->buf_prepare(q, q->read_buf, field);
+
+ if (0 != retval) {
+ kfree(q->read_buf);
+ q->read_buf = NULL;
+ goto done;
+ }
+
+ spin_lock_irqsave(q->irqlock, flags);
+ q->ops->buf_queue(q, q->read_buf);
+ spin_unlock_irqrestore(q->irqlock, flags);
+
+ q->read_off = 0;
+ }
+
+ /* wait until capture is done */
+ retval = videobuf_waiton(q, q->read_buf, nonblocking, 1);
+ if (0 != retval)
+ goto done;
+
+ CALL(q, sync, q, q->read_buf);
+
+ if (VIDEOBUF_ERROR == q->read_buf->state) {
+ /* catch I/O errors */
+ q->ops->buf_release(q, q->read_buf);
+ kfree(q->read_buf);
+ q->read_buf = NULL;
+ retval = -EIO;
+ goto done;
+ }
+
+ /* Copy to userspace */
+ retval = __videobuf_copy_to_user(q, q->read_buf, data, count, nonblocking);
+ if (retval < 0)
+ goto done;
+
+ q->read_off += retval;
+ if (q->read_off == q->read_buf->size) {
+ /* all data copied, cleanup */
+ q->ops->buf_release(q, q->read_buf);
+ kfree(q->read_buf);
+ q->read_buf = NULL;
+ }
+
+done:
+ videobuf_queue_unlock(q);
+ return retval;
+}
+EXPORT_SYMBOL_GPL(videobuf_read_one);
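+
+/*
+ * Wiring this into a driver's read() file operation is a one-liner; a
+ * sketch with hypothetical "mycam" names:
+ *
+ *	static ssize_t mycam_read(struct file *file, char __user *data,
+ *				  size_t count, loff_t *ppos)
+ *	{
+ *		struct mycam_dev *dev = video_drvdata(file);
+ *
+ *		return videobuf_read_one(&dev->vb_queue, data, count,
+ *					 ppos, file->f_flags & O_NONBLOCK);
+ *	}
+ */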
+
+/* Locking: Caller holds q->vb_lock */
+static int __videobuf_read_start(struct videobuf_queue *q)
+{
+ enum v4l2_field field;
+ unsigned long flags = 0;
+ unsigned int count = 0, size = 0;
+ int err, i;
+
+ q->ops->buf_setup(q, &count, &size);
+ if (count < 2)
+ count = 2;
+ if (count > VIDEO_MAX_FRAME)
+ count = VIDEO_MAX_FRAME;
+ size = PAGE_ALIGN(size);
+
+ err = __videobuf_mmap_setup(q, count, size, V4L2_MEMORY_USERPTR);
+ if (err < 0)
+ return err;
+
+ count = err;
+
+ for (i = 0; i < count; i++) {
+ field = videobuf_next_field(q);
+ err = q->ops->buf_prepare(q, q->bufs[i], field);
+ if (err)
+ return err;
+ list_add_tail(&q->bufs[i]->stream, &q->stream);
+ }
+ spin_lock_irqsave(q->irqlock, flags);
+ for (i = 0; i < count; i++)
+ q->ops->buf_queue(q, q->bufs[i]);
+ spin_unlock_irqrestore(q->irqlock, flags);
+ q->reading = 1;
+ return 0;
+}
+
+static void __videobuf_read_stop(struct videobuf_queue *q)
+{
+ int i;
+
+ videobuf_queue_cancel(q);
+ __videobuf_free(q);
+ INIT_LIST_HEAD(&q->stream);
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ if (NULL == q->bufs[i])
+ continue;
+ kfree(q->bufs[i]);
+ q->bufs[i] = NULL;
+ }
+ q->read_buf = NULL;
+}
+
+int videobuf_read_start(struct videobuf_queue *q)
+{
+ int rc;
+
+ videobuf_queue_lock(q);
+ rc = __videobuf_read_start(q);
+ videobuf_queue_unlock(q);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(videobuf_read_start);
+
+void videobuf_read_stop(struct videobuf_queue *q)
+{
+ videobuf_queue_lock(q);
+ __videobuf_read_stop(q);
+ videobuf_queue_unlock(q);
+}
+EXPORT_SYMBOL_GPL(videobuf_read_stop);
+
+void videobuf_stop(struct videobuf_queue *q)
+{
+ videobuf_queue_lock(q);
+
+ if (q->streaming)
+ __videobuf_streamoff(q);
+
+ if (q->reading)
+ __videobuf_read_stop(q);
+
+ videobuf_queue_unlock(q);
+}
+EXPORT_SYMBOL_GPL(videobuf_stop);
+
+ssize_t videobuf_read_stream(struct videobuf_queue *q,
+ char __user *data, size_t count, loff_t *ppos,
+ int vbihack, int nonblocking)
+{
+ int rc, retval;
+ unsigned long flags = 0;
+
+ MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
+
+ dprintk(2, "%s\n", __func__);
+ videobuf_queue_lock(q);
+ retval = -EBUSY;
+ if (q->streaming)
+ goto done;
+ if (!q->reading) {
+ retval = __videobuf_read_start(q);
+ if (retval < 0)
+ goto done;
+ }
+
+ retval = 0;
+ while (count > 0) {
+ /* get / wait for data */
+ if (NULL == q->read_buf) {
+ q->read_buf = list_entry(q->stream.next,
+ struct videobuf_buffer,
+ stream);
+ list_del(&q->read_buf->stream);
+ q->read_off = 0;
+ }
+ rc = videobuf_waiton(q, q->read_buf, nonblocking, 1);
+ if (rc < 0) {
+ if (0 == retval)
+ retval = rc;
+ break;
+ }
+
+ if (q->read_buf->state == VIDEOBUF_DONE) {
+ rc = __videobuf_copy_stream(q, q->read_buf, data + retval, count,
+ retval, vbihack, nonblocking);
+ if (rc < 0) {
+ retval = rc;
+ break;
+ }
+ retval += rc;
+ count -= rc;
+ q->read_off += rc;
+ } else {
+ /* some error */
+ q->read_off = q->read_buf->size;
+ if (0 == retval)
+ retval = -EIO;
+ }
+
+ /* requeue buffer when done with copying */
+ if (q->read_off == q->read_buf->size) {
+ list_add_tail(&q->read_buf->stream,
+ &q->stream);
+ spin_lock_irqsave(q->irqlock, flags);
+ q->ops->buf_queue(q, q->read_buf);
+ spin_unlock_irqrestore(q->irqlock, flags);
+ q->read_buf = NULL;
+ }
+ if (retval < 0)
+ break;
+ }
+
+done:
+ videobuf_queue_unlock(q);
+ return retval;
+}
+EXPORT_SYMBOL_GPL(videobuf_read_stream);
+
+__poll_t videobuf_poll_stream(struct file *file,
+ struct videobuf_queue *q,
+ poll_table *wait)
+{
+ __poll_t req_events = poll_requested_events(wait);
+ struct videobuf_buffer *buf = NULL;
+ __poll_t rc = 0;
+
+ videobuf_queue_lock(q);
+ if (q->streaming) {
+ if (!list_empty(&q->stream))
+ buf = list_entry(q->stream.next,
+ struct videobuf_buffer, stream);
+ } else if (req_events & (EPOLLIN | EPOLLRDNORM)) {
+ if (!q->reading)
+ __videobuf_read_start(q);
+ if (!q->reading) {
+ rc = EPOLLERR;
+ } else if (NULL == q->read_buf) {
+ q->read_buf = list_entry(q->stream.next,
+ struct videobuf_buffer,
+ stream);
+ list_del(&q->read_buf->stream);
+ q->read_off = 0;
+ }
+ buf = q->read_buf;
+ }
+ if (buf)
+ poll_wait(file, &buf->done, wait);
+ else
+ rc = EPOLLERR;
+
+ if (0 == rc) {
+ if (buf->state == VIDEOBUF_DONE ||
+ buf->state == VIDEOBUF_ERROR) {
+ switch (q->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ case V4L2_BUF_TYPE_VBI_OUTPUT:
+ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
+ case V4L2_BUF_TYPE_SDR_OUTPUT:
+ rc = EPOLLOUT | EPOLLWRNORM;
+ break;
+ default:
+ rc = EPOLLIN | EPOLLRDNORM;
+ break;
+ }
+ }
+ }
+ videobuf_queue_unlock(q);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(videobuf_poll_stream);
+
+int videobuf_mmap_mapper(struct videobuf_queue *q, struct vm_area_struct *vma)
+{
+ int rc = -EINVAL;
+ int i;
+
+ MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
+
+ if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED)) {
+ dprintk(1, "mmap appl bug: PROT_WRITE and MAP_SHARED are required\n");
+ return -EINVAL;
+ }
+
+ videobuf_queue_lock(q);
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ struct videobuf_buffer *buf = q->bufs[i];
+
+ if (buf && buf->memory == V4L2_MEMORY_MMAP &&
+ buf->boff == (vma->vm_pgoff << PAGE_SHIFT)) {
+ rc = CALL(q, mmap_mapper, q, buf, vma);
+ break;
+ }
+ }
+ videobuf_queue_unlock(q);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(videobuf_mmap_mapper);
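+
+/*
+ * The poll() and mmap() file operations forward here the same way; a
+ * sketch (hypothetical "mycam" names again):
+ *
+ *	static __poll_t mycam_poll(struct file *file, poll_table *wait)
+ *	{
+ *		struct mycam_dev *dev = video_drvdata(file);
+ *
+ *		return videobuf_poll_stream(file, &dev->vb_queue, wait);
+ *	}
+ *
+ *	static int mycam_mmap(struct file *file, struct vm_area_struct *vma)
+ *	{
+ *		struct mycam_dev *dev = video_drvdata(file);
+ *
+ *		return videobuf_mmap_mapper(&dev->vb_queue, vma);
+ *	}
+ */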
diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c
new file mode 100644
index 0000000000..4c2ec7a0d8
--- /dev/null
+++ b/drivers/media/v4l2-core/videobuf-dma-contig.c
@@ -0,0 +1,402 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * helper functions for physically contiguous capture buffers
+ *
+ * The functions support hardware that lacks scatter-gather capability
+ * (i.e. the buffers must be linear in physical memory)
+ *
+ * Copyright (c) 2008 Magnus Damm
+ *
+ * Based on videobuf-vmalloc.c,
+ * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <media/videobuf-dma-contig.h>
+
+struct videobuf_dma_contig_memory {
+ u32 magic;
+ void *vaddr;
+ dma_addr_t dma_handle;
+ unsigned long size;
+};
+
+#define MAGIC_DC_MEM 0x0733ac61
+#define MAGIC_CHECK(is, should) \
+ if (unlikely((is) != (should))) { \
+ pr_err("magic mismatch: %x expected %x\n", (is), (should)); \
+ BUG(); \
+ }
+
+static int __videobuf_dc_alloc(struct device *dev,
+ struct videobuf_dma_contig_memory *mem,
+ unsigned long size)
+{
+ mem->size = size;
+ mem->vaddr = dma_alloc_coherent(dev, mem->size, &mem->dma_handle,
+ GFP_KERNEL);
+ if (!mem->vaddr) {
+ dev_err(dev, "memory alloc size %ld failed\n", mem->size);
+ return -ENOMEM;
+ }
+
+ dev_dbg(dev, "dma mapped data is at %p (%ld)\n", mem->vaddr, mem->size);
+
+ return 0;
+}
+
+static void __videobuf_dc_free(struct device *dev,
+ struct videobuf_dma_contig_memory *mem)
+{
+ dma_free_coherent(dev, mem->size, mem->vaddr, mem->dma_handle);
+
+ mem->vaddr = NULL;
+}
+
+static void videobuf_vm_open(struct vm_area_struct *vma)
+{
+ struct videobuf_mapping *map = vma->vm_private_data;
+
+ dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
+ map, map->count, vma->vm_start, vma->vm_end);
+
+ map->count++;
+}
+
+static void videobuf_vm_close(struct vm_area_struct *vma)
+{
+ struct videobuf_mapping *map = vma->vm_private_data;
+ struct videobuf_queue *q = map->q;
+ int i;
+
+ dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
+ map, map->count, vma->vm_start, vma->vm_end);
+
+ map->count--;
+ if (0 == map->count) {
+ struct videobuf_dma_contig_memory *mem;
+
+ dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
+ videobuf_queue_lock(q);
+
+ /* We need first to cancel streams, before unmapping */
+ if (q->streaming)
+ videobuf_queue_cancel(q);
+
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ if (NULL == q->bufs[i])
+ continue;
+
+ if (q->bufs[i]->map != map)
+ continue;
+
+ mem = q->bufs[i]->priv;
+ if (mem) {
+ /* This callback is called only if the kernel has
+ allocated the memory and that memory is mmapped;
+ in that case the memory must be freed here so the
+ mapping is actually torn down.
+ */
+
+ MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
+
+ /* dma_free_coherent() is not atomic - it
+ can't be called with IRQs disabled
+ */
+ dev_dbg(q->dev, "buf[%d] freeing %p\n",
+ i, mem->vaddr);
+
+ __videobuf_dc_free(q->dev, mem);
+ mem->vaddr = NULL;
+ }
+
+ q->bufs[i]->map = NULL;
+ q->bufs[i]->baddr = 0;
+ }
+
+ kfree(map);
+
+ videobuf_queue_unlock(q);
+ }
+}
+
+static const struct vm_operations_struct videobuf_vm_ops = {
+ .open = videobuf_vm_open,
+ .close = videobuf_vm_close,
+};
+
+/**
+ * videobuf_dma_contig_user_put() - reset pointer to user space buffer
+ * @mem: per-buffer private videobuf-dma-contig data
+ *
+ * This function resets the DMA handle and size recorded for a
+ * user space buffer
+ */
+static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
+{
+ mem->dma_handle = 0;
+ mem->size = 0;
+}
+
+/**
+ * videobuf_dma_contig_user_get() - setup user space memory pointer
+ * @mem: per-buffer private videobuf-dma-contig data
+ * @vb: video buffer to map
+ *
+ * This function validates and sets up a pointer to user space memory.
+ * Only physically contiguous pfn-mapped memory is accepted.
+ *
+ * Returns 0 if successful.
+ */
+static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
+ struct videobuf_buffer *vb)
+{
+ unsigned long untagged_baddr = untagged_addr(vb->baddr);
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ unsigned long prev_pfn, this_pfn;
+ unsigned long pages_done, user_address;
+ unsigned int offset;
+ int ret;
+
+ offset = untagged_baddr & ~PAGE_MASK;
+ mem->size = PAGE_ALIGN(vb->size + offset);
+ ret = -EINVAL;
+
+ mmap_read_lock(mm);
+
+ vma = find_vma(mm, untagged_baddr);
+ if (!vma)
+ goto out_up;
+
+ if ((untagged_baddr + mem->size) > vma->vm_end)
+ goto out_up;
+
+ pages_done = 0;
+ prev_pfn = 0; /* kill warning */
+ user_address = untagged_baddr;
+
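+ /* Walk the region one page at a time: each pfn must
+ * immediately follow the previous one, i.e. the user
+ * buffer has to be physically contiguous. */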
+ while (pages_done < (mem->size >> PAGE_SHIFT)) {
+ ret = follow_pfn(vma, user_address, &this_pfn);
+ if (ret)
+ break;
+
+ if (pages_done == 0)
+ mem->dma_handle = (this_pfn << PAGE_SHIFT) + offset;
+ else if (this_pfn != (prev_pfn + 1))
+ ret = -EFAULT;
+
+ if (ret)
+ break;
+
+ prev_pfn = this_pfn;
+ user_address += PAGE_SIZE;
+ pages_done++;
+ }
+
+out_up:
+ mmap_read_unlock(mm);
+
+ return ret;
+}
+
+static struct videobuf_buffer *__videobuf_alloc(size_t size)
+{
+ struct videobuf_dma_contig_memory *mem;
+ struct videobuf_buffer *vb;
+
+ vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
+ if (vb) {
+ vb->priv = ((char *)vb) + size;
+ mem = vb->priv;
+ mem->magic = MAGIC_DC_MEM;
+ }
+
+ return vb;
+}
+
+static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
+{
+ struct videobuf_dma_contig_memory *mem = buf->priv;
+
+ BUG_ON(!mem);
+ MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
+
+ return mem->vaddr;
+}
+
+static int __videobuf_iolock(struct videobuf_queue *q,
+ struct videobuf_buffer *vb,
+ struct v4l2_framebuffer *fbuf)
+{
+ struct videobuf_dma_contig_memory *mem = vb->priv;
+
+ BUG_ON(!mem);
+ MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
+
+ switch (vb->memory) {
+ case V4L2_MEMORY_MMAP:
+ dev_dbg(q->dev, "%s memory method MMAP\n", __func__);
+
+ /* All handling should be done by __videobuf_mmap_mapper() */
+ if (!mem->vaddr) {
+ dev_err(q->dev, "memory is not allocated/mmapped.\n");
+ return -EINVAL;
+ }
+ break;
+ case V4L2_MEMORY_USERPTR:
+ dev_dbg(q->dev, "%s memory method USERPTR\n", __func__);
+
+ /* handle pointer from user space */
+ if (vb->baddr)
+ return videobuf_dma_contig_user_get(mem, vb);
+
+ /* allocate memory for the read() method */
+ if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size)))
+ return -ENOMEM;
+ break;
+ case V4L2_MEMORY_OVERLAY:
+ default:
+ dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __videobuf_mmap_mapper(struct videobuf_queue *q,
+ struct videobuf_buffer *buf,
+ struct vm_area_struct *vma)
+{
+ struct videobuf_dma_contig_memory *mem;
+ struct videobuf_mapping *map;
+ int retval;
+
+ dev_dbg(q->dev, "%s\n", __func__);
+
+ /* create mapping + update buffer list */
+ map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ buf->map = map;
+ map->q = q;
+
+ buf->baddr = vma->vm_start;
+
+ mem = buf->priv;
+ BUG_ON(!mem);
+ MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
+
+ if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize)))
+ goto error;
+
+ /* the "vm_pgoff" is just used in v4l2 to find the
+ * corresponding buffer data structure which is allocated
+ * earlier and it does not mean the offset from the physical
+ * buffer start address as usual. So set it to 0 to pass
+ * the sanity check in dma_mmap_coherent().
+ */
+ vma->vm_pgoff = 0;
+ retval = dma_mmap_coherent(q->dev, vma, mem->vaddr, mem->dma_handle,
+ mem->size);
+ if (retval) {
+ dev_err(q->dev, "mmap: remap failed with error %d. ",
+ retval);
+ dma_free_coherent(q->dev, mem->size,
+ mem->vaddr, mem->dma_handle);
+ goto error;
+ }
+
+ vma->vm_ops = &videobuf_vm_ops;
+ vm_flags_set(vma, VM_DONTEXPAND);
+ vma->vm_private_data = map;
+
+ dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
+ map, q, vma->vm_start, vma->vm_end,
+ (long int)buf->bsize, vma->vm_pgoff, buf->i);
+
+ videobuf_vm_open(vma);
+
+ return 0;
+
+error:
+ kfree(map);
+ return -ENOMEM;
+}
+
+static struct videobuf_qtype_ops qops = {
+ .magic = MAGIC_QTYPE_OPS,
+ .alloc_vb = __videobuf_alloc,
+ .iolock = __videobuf_iolock,
+ .mmap_mapper = __videobuf_mmap_mapper,
+ .vaddr = __videobuf_to_vaddr,
+};
+
+void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
+ const struct videobuf_queue_ops *ops,
+ struct device *dev,
+ spinlock_t *irqlock,
+ enum v4l2_buf_type type,
+ enum v4l2_field field,
+ unsigned int msize,
+ void *priv,
+ struct mutex *ext_lock)
+{
+ videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
+ priv, &qops, ext_lock);
+}
+EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);
+
+dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf)
+{
+ struct videobuf_dma_contig_memory *mem = buf->priv;
+
+ BUG_ON(!mem);
+ MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
+
+ return mem->dma_handle;
+}
+EXPORT_SYMBOL_GPL(videobuf_to_dma_contig);
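+
+/*
+ * Typical use from a contig-DMA driver, as a sketch (all "mycam" names
+ * hypothetical): initialize the queue once, then program the capture
+ * engine with the buffer's bus address from the buf_queue() callback:
+ *
+ *	videobuf_queue_dma_contig_init(&dev->vb_queue, &mycam_qops,
+ *				       &pdev->dev, &dev->slock,
+ *				       V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ *				       V4L2_FIELD_NONE,
+ *				       sizeof(struct mycam_buffer),
+ *				       dev, NULL);
+ *
+ *	static void mycam_buf_queue(struct videobuf_queue *q,
+ *				    struct videobuf_buffer *vb)
+ *	{
+ *		dma_addr_t addr = videobuf_to_dma_contig(vb);
+ *
+ *		// write "addr" to the DMA engine (hardware specific)
+ *	}
+ */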
+
+void videobuf_dma_contig_free(struct videobuf_queue *q,
+ struct videobuf_buffer *buf)
+{
+ struct videobuf_dma_contig_memory *mem = buf->priv;
+
+ /* mmapped memory can't be freed here, otherwise mmapped region
+ would be released, while still needed. In this case, the memory
+ release should happen inside videobuf_vm_close().
+ So, it should free memory only if the memory were allocated for
+ read() operation.
+ */
+ if (buf->memory != V4L2_MEMORY_USERPTR)
+ return;
+
+ if (!mem)
+ return;
+
+ MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
+
+ /* handle user space pointer case */
+ if (buf->baddr) {
+ videobuf_dma_contig_user_put(mem);
+ return;
+ }
+
+ /* read() method */
+ if (mem->vaddr) {
+ __videobuf_dc_free(q->dev, mem);
+ mem->vaddr = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(videobuf_dma_contig_free);
+
+MODULE_DESCRIPTION("helper module to manage video4linux dma contig buffers");
+MODULE_AUTHOR("Magnus Damm");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
new file mode 100644
index 0000000000..405b89ea10
--- /dev/null
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -0,0 +1,681 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * helper functions for SG DMA video4linux capture buffers
+ *
+ * The functions expect the hardware to be able to scatter-gather
+ * (i.e. the buffers are not linear in physical memory, but fragmented
+ * into PAGE_SIZE chunks). They also assume the driver does not need
+ * to touch the video data.
+ *
+ * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
+ *
+ * Highly based on video-buf written originally by:
+ * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
+ * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org>
+ * (c) 2006 Ted Walther and John Sokol
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/sched/mm.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pgtable.h>
+
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/pagemap.h>
+#include <linux/scatterlist.h>
+#include <asm/page.h>
+
+#include <media/videobuf-dma-sg.h>
+
+#define MAGIC_DMABUF 0x19721112
+#define MAGIC_SG_MEM 0x17890714
+
+#define MAGIC_CHECK(is, should) \
+ if (unlikely((is) != (should))) { \
+ printk(KERN_ERR "magic mismatch: %x (expected %x)\n", \
+ is, should); \
+ BUG(); \
+ }
+
+static int debug;
+module_param(debug, int, 0644);
+
+MODULE_DESCRIPTION("helper module to manage video4linux dma sg buffers");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>");
+MODULE_LICENSE("GPL");
+
+#define dprintk(level, fmt, arg...) \
+ if (debug >= level) \
+ printk(KERN_DEBUG "vbuf-sg: " fmt , ## arg)
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Return a scatterlist for some page-aligned vmalloc()'ed memory
+ * block (NULL on errors). Memory for the scatterlist is allocated
+ * using vzalloc. The caller must free the memory with vfree.
+ */
+static struct scatterlist *videobuf_vmalloc_to_sg(unsigned char *virt,
+ int nr_pages)
+{
+ struct scatterlist *sglist;
+ struct page *pg;
+ int i;
+
+ sglist = vzalloc(array_size(nr_pages, sizeof(*sglist)));
+ if (NULL == sglist)
+ return NULL;
+ sg_init_table(sglist, nr_pages);
+ for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {
+ pg = vmalloc_to_page(virt);
+ if (NULL == pg)
+ goto err;
+ BUG_ON(PageHighMem(pg));
+ sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
+ }
+ return sglist;
+
+err:
+ vfree(sglist);
+ return NULL;
+}
+
+/*
+ * Return a scatterlist for an array of userpages (NULL on errors).
+ * Memory for the scatterlist is allocated using vmalloc. The caller
+ * must free the memory with vfree.
+ */
+static struct scatterlist *videobuf_pages_to_sg(struct page **pages,
+ int nr_pages, int offset, size_t size)
+{
+ struct scatterlist *sglist;
+ int i;
+
+ if (NULL == pages[0])
+ return NULL;
+ sglist = vmalloc(array_size(nr_pages, sizeof(*sglist)));
+ if (NULL == sglist)
+ return NULL;
+ sg_init_table(sglist, nr_pages);
+
+ if (PageHighMem(pages[0]))
+ /* DMA to highmem pages might not work */
+ goto highmem;
+ sg_set_page(&sglist[0], pages[0],
+ min_t(size_t, PAGE_SIZE - offset, size), offset);
+ size -= min_t(size_t, PAGE_SIZE - offset, size);
+ for (i = 1; i < nr_pages; i++) {
+ if (NULL == pages[i])
+ goto nopage;
+ if (PageHighMem(pages[i]))
+ goto highmem;
+ sg_set_page(&sglist[i], pages[i], min_t(size_t, PAGE_SIZE, size), 0);
+ size -= min_t(size_t, PAGE_SIZE, size);
+ }
+ return sglist;
+
+nopage:
+ dprintk(2, "sgl: oops - no page\n");
+ vfree(sglist);
+ return NULL;
+
+highmem:
+ dprintk(2, "sgl: oops - highmem page\n");
+ vfree(sglist);
+ return NULL;
+}
+
+/* --------------------------------------------------------------------- */
+
+struct videobuf_dmabuf *videobuf_to_dma(struct videobuf_buffer *buf)
+{
+ struct videobuf_dma_sg_memory *mem = buf->priv;
+ BUG_ON(!mem);
+
+ MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
+
+ return &mem->dma;
+}
+EXPORT_SYMBOL_GPL(videobuf_to_dma);
+
+static void videobuf_dma_init(struct videobuf_dmabuf *dma)
+{
+ memset(dma, 0, sizeof(*dma));
+ dma->magic = MAGIC_DMABUF;
+}
+
+static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
+ int direction, unsigned long data, unsigned long size)
+{
+ unsigned int gup_flags = FOLL_LONGTERM;
+ unsigned long first, last;
+ int err;
+
+ dma->direction = direction;
+ switch (dma->direction) {
+ case DMA_FROM_DEVICE:
+ gup_flags |= FOLL_WRITE;
+ break;
+ case DMA_TO_DEVICE:
+ break;
+ default:
+ BUG();
+ }
+
+ first = (data & PAGE_MASK) >> PAGE_SHIFT;
+ last = ((data+size-1) & PAGE_MASK) >> PAGE_SHIFT;
+ dma->offset = data & ~PAGE_MASK;
+ dma->size = size;
+ dma->nr_pages = last-first+1;
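+ /* e.g. with 4 KiB pages, data = 0x10000800 and size = 0x2000
+ * span page indices 0x10000..0x10002: first = 0x10000,
+ * last = 0x10002, so nr_pages = 3 and offset = 0x800 */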
+ dma->pages = kmalloc_array(dma->nr_pages, sizeof(struct page *),
+ GFP_KERNEL);
+ if (NULL == dma->pages)
+ return -ENOMEM;
+
+ dprintk(1, "init user [0x%lx+0x%lx => %lu pages]\n",
+ data, size, dma->nr_pages);
+
+ err = pin_user_pages(data & PAGE_MASK, dma->nr_pages, gup_flags,
+ dma->pages);
+
+ if (err != dma->nr_pages) {
+ dma->nr_pages = (err >= 0) ? err : 0;
+ dprintk(1, "pin_user_pages: err=%d [%lu]\n", err,
+ dma->nr_pages);
+ return err < 0 ? err : -EINVAL;
+ }
+ return 0;
+}
+
+static int videobuf_dma_init_user(struct videobuf_dmabuf *dma, int direction,
+ unsigned long data, unsigned long size)
+{
+ int ret;
+
+ mmap_read_lock(current->mm);
+ ret = videobuf_dma_init_user_locked(dma, direction, data, size);
+ mmap_read_unlock(current->mm);
+
+ return ret;
+}
+
+static int videobuf_dma_init_kernel(struct videobuf_dmabuf *dma, int direction,
+ unsigned long nr_pages)
+{
+ int i;
+
+ dprintk(1, "init kernel [%lu pages]\n", nr_pages);
+
+ dma->direction = direction;
+ dma->vaddr_pages = kcalloc(nr_pages, sizeof(*dma->vaddr_pages),
+ GFP_KERNEL);
+ if (!dma->vaddr_pages)
+ return -ENOMEM;
+
+ dma->dma_addr = kcalloc(nr_pages, sizeof(*dma->dma_addr), GFP_KERNEL);
+ if (!dma->dma_addr) {
+ kfree(dma->vaddr_pages);
+ return -ENOMEM;
+ }
+ for (i = 0; i < nr_pages; i++) {
+ void *addr;
+
+ addr = dma_alloc_coherent(dma->dev, PAGE_SIZE,
+ &(dma->dma_addr[i]), GFP_KERNEL);
+ if (addr == NULL)
+ goto out_free_pages;
+
+ dma->vaddr_pages[i] = virt_to_page(addr);
+ }
+ dma->vaddr = vmap(dma->vaddr_pages, nr_pages, VM_MAP | VM_IOREMAP,
+ PAGE_KERNEL);
+ if (NULL == dma->vaddr) {
+ dprintk(1, "vmalloc_32(%lu pages) failed\n", nr_pages);
+ goto out_free_pages;
+ }
+
+ dprintk(1, "vmalloc is at addr %p, size=%lu\n",
+ dma->vaddr, nr_pages << PAGE_SHIFT);
+
+ memset(dma->vaddr, 0, nr_pages << PAGE_SHIFT);
+ dma->nr_pages = nr_pages;
+
+ return 0;
+out_free_pages:
+ while (i > 0) {
+ void *addr;
+
+ i--;
+ addr = page_address(dma->vaddr_pages[i]);
+ dma_free_coherent(dma->dev, PAGE_SIZE, addr, dma->dma_addr[i]);
+ }
+ kfree(dma->dma_addr);
+ dma->dma_addr = NULL;
+ kfree(dma->vaddr_pages);
+ dma->vaddr_pages = NULL;
+
+ return -ENOMEM;
+
+}
+
+static int videobuf_dma_init_overlay(struct videobuf_dmabuf *dma, int direction,
+ dma_addr_t addr, unsigned long nr_pages)
+{
+ dprintk(1, "init overlay [%lu pages @ bus 0x%lx]\n",
+ nr_pages, (unsigned long)addr);
+ dma->direction = direction;
+
+ if (0 == addr)
+ return -EINVAL;
+
+ dma->bus_addr = addr;
+ dma->nr_pages = nr_pages;
+
+ return 0;
+}
+
+static int videobuf_dma_map(struct device *dev, struct videobuf_dmabuf *dma)
+{
+ MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
+ BUG_ON(0 == dma->nr_pages);
+
+ if (dma->pages) {
+ dma->sglist = videobuf_pages_to_sg(dma->pages, dma->nr_pages,
+ dma->offset, dma->size);
+ }
+ if (dma->vaddr) {
+ dma->sglist = videobuf_vmalloc_to_sg(dma->vaddr,
+ dma->nr_pages);
+ }
+ if (dma->bus_addr) {
+ dma->sglist = vmalloc(sizeof(*dma->sglist));
+ if (NULL != dma->sglist) {
+ dma->sglen = 1;
+ sg_dma_address(&dma->sglist[0]) = dma->bus_addr
+ & PAGE_MASK;
+ dma->sglist[0].offset = dma->bus_addr & ~PAGE_MASK;
+ sg_dma_len(&dma->sglist[0]) = dma->nr_pages * PAGE_SIZE;
+ }
+ }
+ if (NULL == dma->sglist) {
+ dprintk(1, "scatterlist is NULL\n");
+ return -ENOMEM;
+ }
+ if (!dma->bus_addr) {
+ dma->sglen = dma_map_sg(dev, dma->sglist,
+ dma->nr_pages, dma->direction);
+ if (0 == dma->sglen) {
+ printk(KERN_WARNING
+ "%s: videobuf_map_sg failed\n", __func__);
+ vfree(dma->sglist);
+ dma->sglist = NULL;
+ dma->sglen = 0;
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+int videobuf_dma_unmap(struct device *dev, struct videobuf_dmabuf *dma)
+{
+ MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
+
+ if (!dma->sglen)
+ return 0;
+
+ dma_unmap_sg(dev, dma->sglist, dma->nr_pages, dma->direction);
+
+ vfree(dma->sglist);
+ dma->sglist = NULL;
+ dma->sglen = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(videobuf_dma_unmap);
+
+int videobuf_dma_free(struct videobuf_dmabuf *dma)
+{
+ int i;
+ MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
+ BUG_ON(dma->sglen);
+
+ if (dma->pages) {
+ unpin_user_pages_dirty_lock(dma->pages, dma->nr_pages,
+ dma->direction == DMA_FROM_DEVICE);
+ kfree(dma->pages);
+ dma->pages = NULL;
+ }
+
+ if (dma->dma_addr) {
+ for (i = 0; i < dma->nr_pages; i++) {
+ void *addr;
+
+ addr = page_address(dma->vaddr_pages[i]);
+ dma_free_coherent(dma->dev, PAGE_SIZE, addr,
+ dma->dma_addr[i]);
+ }
+ kfree(dma->dma_addr);
+ dma->dma_addr = NULL;
+ kfree(dma->vaddr_pages);
+ dma->vaddr_pages = NULL;
+ vunmap(dma->vaddr);
+ dma->vaddr = NULL;
+ }
+
+ if (dma->bus_addr)
+ dma->bus_addr = 0;
+ dma->direction = DMA_NONE;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(videobuf_dma_free);
+
+/* --------------------------------------------------------------------- */
+
+static void videobuf_vm_open(struct vm_area_struct *vma)
+{
+ struct videobuf_mapping *map = vma->vm_private_data;
+
+ dprintk(2, "vm_open %p [count=%d,vma=%08lx-%08lx]\n", map,
+ map->count, vma->vm_start, vma->vm_end);
+
+ map->count++;
+}
+
+static void videobuf_vm_close(struct vm_area_struct *vma)
+{
+ struct videobuf_mapping *map = vma->vm_private_data;
+ struct videobuf_queue *q = map->q;
+ struct videobuf_dma_sg_memory *mem;
+ int i;
+
+ dprintk(2, "vm_close %p [count=%d,vma=%08lx-%08lx]\n", map,
+ map->count, vma->vm_start, vma->vm_end);
+
+ map->count--;
+ if (0 == map->count) {
+ dprintk(1, "munmap %p q=%p\n", map, q);
+ videobuf_queue_lock(q);
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ if (NULL == q->bufs[i])
+ continue;
+ mem = q->bufs[i]->priv;
+ if (!mem)
+ continue;
+
+ MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
+
+ if (q->bufs[i]->map != map)
+ continue;
+ q->bufs[i]->map = NULL;
+ q->bufs[i]->baddr = 0;
+ q->ops->buf_release(q, q->bufs[i]);
+ }
+ videobuf_queue_unlock(q);
+ kfree(map);
+ }
+}
+
+/*
+ * Get an anonymous page for the mapping. Make sure we can DMA to that
+ * memory location with 32bit PCI devices (i.e. don't use highmem for
+ * now ...). Bounce buffers don't work very well for the data rates
+ * video capture has.
+ */
+static vm_fault_t videobuf_vm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct page *page;
+
+ dprintk(3, "fault: fault @ %08lx [vma %08lx-%08lx]\n",
+ vmf->address, vma->vm_start, vma->vm_end);
+
+ page = alloc_page(GFP_USER | __GFP_DMA32);
+ if (!page)
+ return VM_FAULT_OOM;
+ clear_user_highpage(page, vmf->address);
+ vmf->page = page;
+
+ return 0;
+}
+
+static const struct vm_operations_struct videobuf_vm_ops = {
+ .open = videobuf_vm_open,
+ .close = videobuf_vm_close,
+ .fault = videobuf_vm_fault,
+};
+
+/* ---------------------------------------------------------------------
+ * SG handlers for the generic methods
+ */
+
+/* The allocated area consists of 3 parts:
+ struct videobuf_buffer
+ struct <driver>_buffer (cx88_buffer, saa7134_buf, ...)
+ struct videobuf_dma_sg_memory
+ */
+
+static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
+{
+ struct videobuf_dma_sg_memory *mem;
+ struct videobuf_buffer *vb;
+
+ vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
+ if (!vb)
+ return vb;
+
+ mem = vb->priv = ((char *)vb) + size;
+ mem->magic = MAGIC_SG_MEM;
+
+ videobuf_dma_init(&mem->dma);
+
+ dprintk(1, "%s: allocated at %p(%ld+%ld) & %p(%ld)\n",
+ __func__, vb, (long)sizeof(*vb), (long)size - sizeof(*vb),
+ mem, (long)sizeof(*mem));
+
+ return vb;
+}
+
+static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
+{
+ struct videobuf_dma_sg_memory *mem = buf->priv;
+ BUG_ON(!mem);
+
+ MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
+
+ return mem->dma.vaddr;
+}
+
+static int __videobuf_iolock(struct videobuf_queue *q,
+ struct videobuf_buffer *vb,
+ struct v4l2_framebuffer *fbuf)
+{
+ struct videobuf_dma_sg_memory *mem = vb->priv;
+ unsigned long pages;
+ dma_addr_t bus;
+ int err;
+
+ BUG_ON(!mem);
+
+ MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
+
+ if (!mem->dma.dev)
+ mem->dma.dev = q->dev;
+ else
+ WARN_ON(mem->dma.dev != q->dev);
+
+ switch (vb->memory) {
+ case V4L2_MEMORY_MMAP:
+ case V4L2_MEMORY_USERPTR:
+ if (0 == vb->baddr) {
+ /* no userspace addr -- kernel bounce buffer */
+ pages = PAGE_ALIGN(vb->size) >> PAGE_SHIFT;
+ err = videobuf_dma_init_kernel(&mem->dma,
+ DMA_FROM_DEVICE,
+ pages);
+ if (0 != err)
+ return err;
+ } else if (vb->memory == V4L2_MEMORY_USERPTR) {
+ /* dma directly to userspace */
+ err = videobuf_dma_init_user(&mem->dma,
+ DMA_FROM_DEVICE,
+ vb->baddr, vb->bsize);
+ if (0 != err)
+ return err;
+ } else {
+ /* NOTE: HACK: videobuf_iolock on V4L2_MEMORY_MMAP
+ buffers can only be called from videobuf_qbuf(), which
+ already takes current->mm->mmap_lock; to prevent a
+ locking inversion, don't take it here */
+
+ err = videobuf_dma_init_user_locked(&mem->dma,
+ DMA_FROM_DEVICE,
+ vb->baddr, vb->bsize);
+ if (0 != err)
+ return err;
+ }
+ break;
+ case V4L2_MEMORY_OVERLAY:
+ if (NULL == fbuf)
+ return -EINVAL;
+ /* FIXME: need sanity checks for vb->boff */
+ /*
+ * Using a double cast to avoid compiler warnings when
+ * building for PAE. The compiler doesn't like directly casting
+ * a 32 bit ptr to a 64 bit integer.
+ */
+ bus = (dma_addr_t)(unsigned long)fbuf->base + vb->boff;
+ pages = PAGE_ALIGN(vb->size) >> PAGE_SHIFT;
+ err = videobuf_dma_init_overlay(&mem->dma, DMA_FROM_DEVICE,
+ bus, pages);
+ if (0 != err)
+ return err;
+ break;
+ default:
+ BUG();
+ }
+ err = videobuf_dma_map(q->dev, &mem->dma);
+ if (0 != err)
+ return err;
+
+ return 0;
+}
+
+static int __videobuf_sync(struct videobuf_queue *q,
+ struct videobuf_buffer *buf)
+{
+ struct videobuf_dma_sg_memory *mem = buf->priv;
+ BUG_ON(!mem || !mem->dma.sglen);
+
+ MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
+ MAGIC_CHECK(mem->dma.magic, MAGIC_DMABUF);
+
+ dma_sync_sg_for_cpu(q->dev, mem->dma.sglist,
+ mem->dma.nr_pages, mem->dma.direction);
+
+ return 0;
+}
+
+static int __videobuf_mmap_mapper(struct videobuf_queue *q,
+ struct videobuf_buffer *buf,
+ struct vm_area_struct *vma)
+{
+ struct videobuf_dma_sg_memory *mem = buf->priv;
+ struct videobuf_mapping *map;
+ unsigned int first, last, size = 0, i;
+ int retval;
+
+ retval = -EINVAL;
+
+ BUG_ON(!mem);
+ MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
+
+ /* look for first buffer to map */
+ for (first = 0; first < VIDEO_MAX_FRAME; first++) {
+ if (buf == q->bufs[first]) {
+ size = PAGE_ALIGN(q->bufs[first]->bsize);
+ break;
+ }
+ }
+
+ /* paranoia, should never happen since buf is always valid. */
+ if (!size) {
+ dprintk(1, "mmap app bug: offset invalid [offset=0x%lx]\n",
+ (vma->vm_pgoff << PAGE_SHIFT));
+ goto done;
+ }
+
+ last = first;
+
+ /* create mapping + update buffer list */
+ retval = -ENOMEM;
+ map = kmalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
+ if (NULL == map)
+ goto done;
+
+ size = 0;
+ for (i = first; i <= last; i++) {
+ if (NULL == q->bufs[i])
+ continue;
+ q->bufs[i]->map = map;
+ q->bufs[i]->baddr = vma->vm_start + size;
+ size += PAGE_ALIGN(q->bufs[i]->bsize);
+ }
+
+ map->count = 1;
+ map->q = q;
+ vma->vm_ops = &videobuf_vm_ops;
+ /* using shared anonymous pages */
+ vm_flags_mod(vma, VM_DONTEXPAND | VM_DONTDUMP, VM_IO);
+ vma->vm_private_data = map;
+ dprintk(1, "mmap %p: q=%p %08lx-%08lx pgoff %08lx bufs %d-%d\n",
+ map, q, vma->vm_start, vma->vm_end, vma->vm_pgoff, first, last);
+ retval = 0;
+
+done:
+ return retval;
+}
+
+static struct videobuf_qtype_ops sg_ops = {
+ .magic = MAGIC_QTYPE_OPS,
+
+ .alloc_vb = __videobuf_alloc_vb,
+ .iolock = __videobuf_iolock,
+ .sync = __videobuf_sync,
+ .mmap_mapper = __videobuf_mmap_mapper,
+ .vaddr = __videobuf_to_vaddr,
+};
+
+void *videobuf_sg_alloc(size_t size)
+{
+ struct videobuf_queue q;
+
+ /* Required to make the generic handler call __videobuf_alloc_vb */
+ q.int_ops = &sg_ops;
+
+ q.msize = size;
+
+ return videobuf_alloc_vb(&q);
+}
+EXPORT_SYMBOL_GPL(videobuf_sg_alloc);
+
+void videobuf_queue_sg_init(struct videobuf_queue *q,
+ const struct videobuf_queue_ops *ops,
+ struct device *dev,
+ spinlock_t *irqlock,
+ enum v4l2_buf_type type,
+ enum v4l2_field field,
+ unsigned int msize,
+ void *priv,
+ struct mutex *ext_lock)
+{
+ videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
+ priv, &sg_ops, ext_lock);
+}
+EXPORT_SYMBOL_GPL(videobuf_queue_sg_init);
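+
+/*
+ * A sketch of how an SG-capable driver consumes this (hypothetical
+ * "mycam" names): once a buffer is prepared, videobuf_to_dma() exposes
+ * the mapped scatterlist for the DMA engine:
+ *
+ *	static void mycam_buf_queue(struct videobuf_queue *q,
+ *				    struct videobuf_buffer *vb)
+ *	{
+ *		struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
+ *
+ *		// dma->sglist/dma->sglen describe the mapped pages;
+ *		// build the hardware descriptor chain from them
+ *	}
+ */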
+
diff --git a/drivers/media/v4l2-core/videobuf-vmalloc.c b/drivers/media/v4l2-core/videobuf-vmalloc.c
new file mode 100644
index 0000000000..85c7090606
--- /dev/null
+++ b/drivers/media/v4l2-core/videobuf-vmalloc.c
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * helper functions for vmalloc video4linux capture buffers
+ *
+ * The functions expect the hardware to be able to scatter-gather
+ * (i.e. the buffers are not linear in physical memory, but fragmented
+ * into PAGE_SIZE chunks). They also assume the driver does not need
+ * to touch the video data.
+ *
+ * (c) 2007 Mauro Carvalho Chehab <mchehab@kernel.org>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pgtable.h>
+
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+#include <linux/pagemap.h>
+#include <asm/page.h>
+
+#include <media/videobuf-vmalloc.h>
+
+#define MAGIC_DMABUF 0x17760309
+#define MAGIC_VMAL_MEM 0x18221223
+
+#define MAGIC_CHECK(is, should) \
+ if (unlikely((is) != (should))) { \
+ printk(KERN_ERR "magic mismatch: %x (expected %x)\n", \
+ is, should); \
+ BUG(); \
+ }
+
+static int debug;
+module_param(debug, int, 0644);
+
+MODULE_DESCRIPTION("helper module to manage video4linux vmalloc buffers");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>");
+MODULE_LICENSE("GPL");
+
+#define dprintk(level, fmt, arg...) \
+ if (debug >= level) \
+ printk(KERN_DEBUG "vbuf-vmalloc: " fmt , ## arg)
+
+
+/***************************************************************************/
+
+static void videobuf_vm_open(struct vm_area_struct *vma)
+{
+ struct videobuf_mapping *map = vma->vm_private_data;
+
+ dprintk(2, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", map,
+ map->count, vma->vm_start, vma->vm_end);
+
+ map->count++;
+}
+
+static void videobuf_vm_close(struct vm_area_struct *vma)
+{
+ struct videobuf_mapping *map = vma->vm_private_data;
+ struct videobuf_queue *q = map->q;
+ int i;
+
+ dprintk(2, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", map,
+ map->count, vma->vm_start, vma->vm_end);
+
+ map->count--;
+ if (0 == map->count) {
+ struct videobuf_vmalloc_memory *mem;
+
+ dprintk(1, "munmap %p q=%p\n", map, q);
+ videobuf_queue_lock(q);
+
+ /* We need first to cancel streams, before unmapping */
+ if (q->streaming)
+ videobuf_queue_cancel(q);
+
+ for (i = 0; i < VIDEO_MAX_FRAME; i++) {
+ if (NULL == q->bufs[i])
+ continue;
+
+ if (q->bufs[i]->map != map)
+ continue;
+
+ mem = q->bufs[i]->priv;
+ if (mem) {
+ /* This callback is called only if the kernel has
+ allocated the memory and that memory is mmapped;
+ in that case the memory must be freed here so the
+ mapping is actually torn down.
+ */
+
+ MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
+
+ /* vfree is not atomic - can't be
+ called with IRQ's disabled
+ */
+ dprintk(1, "%s: buf[%d] freeing (%p)\n",
+ __func__, i, mem->vaddr);
+
+ vfree(mem->vaddr);
+ mem->vaddr = NULL;
+ }
+
+ q->bufs[i]->map = NULL;
+ q->bufs[i]->baddr = 0;
+ }
+
+ kfree(map);
+
+ videobuf_queue_unlock(q);
+ }
+}
+
+static const struct vm_operations_struct videobuf_vm_ops = {
+ .open = videobuf_vm_open,
+ .close = videobuf_vm_close,
+};
+
+/* ---------------------------------------------------------------------
+ * vmalloc handlers for the generic methods
+ */
+
+/* The allocated area consists of 3 parts:
+ struct videobuf_buffer
+ struct <driver>_buffer (cx88_buffer, saa7134_buf, ...)
+ struct videobuf_vmalloc_memory
+ */
+
+static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
+{
+ struct videobuf_vmalloc_memory *mem;
+ struct videobuf_buffer *vb;
+
+ vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
+ if (!vb)
+ return vb;
+
+ mem = vb->priv = ((char *)vb) + size;
+ mem->magic = MAGIC_VMAL_MEM;
+
+ dprintk(1, "%s: allocated at %p(%ld+%ld) & %p(%ld)\n",
+ __func__, vb, (long)sizeof(*vb), (long)size - sizeof(*vb),
+ mem, (long)sizeof(*mem));
+
+ return vb;
+}
+
+static int __videobuf_iolock(struct videobuf_queue *q,
+ struct videobuf_buffer *vb,
+ struct v4l2_framebuffer *fbuf)
+{
+ struct videobuf_vmalloc_memory *mem = vb->priv;
+ int pages;
+
+ BUG_ON(!mem);
+
+ MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
+
+ switch (vb->memory) {
+ case V4L2_MEMORY_MMAP:
+ dprintk(1, "%s memory method MMAP\n", __func__);
+
+ /* All handling should be done by __videobuf_mmap_mapper() */
+ if (!mem->vaddr) {
+ printk(KERN_ERR "memory is not allocated/mmapped.\n");
+ return -EINVAL;
+ }
+ break;
+ case V4L2_MEMORY_USERPTR:
+ pages = PAGE_ALIGN(vb->size);
+
+ dprintk(1, "%s memory method USERPTR\n", __func__);
+
+ if (vb->baddr) {
+ printk(KERN_ERR "USERPTR is currently not supported\n");
+ return -EINVAL;
+ }
+
+ /* The only USERPTR currently supported is the one needed
+ * for the read() method.
+ */
+
+ mem->vaddr = vmalloc_user(pages);
+ if (!mem->vaddr) {
+ printk(KERN_ERR "vmalloc (%d pages) failed\n", pages);
+ return -ENOMEM;
+ }
+ dprintk(1, "vmalloc is at addr %p (%d pages)\n",
+ mem->vaddr, pages);
+ break;
+ case V4L2_MEMORY_OVERLAY:
+ default:
+ dprintk(1, "%s memory method OVERLAY/unknown\n", __func__);
+
+ /* Currently, doesn't support V4L2_MEMORY_OVERLAY */
+ printk(KERN_ERR "Memory method currently unsupported.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __videobuf_mmap_mapper(struct videobuf_queue *q,
+ struct videobuf_buffer *buf,
+ struct vm_area_struct *vma)
+{
+ struct videobuf_vmalloc_memory *mem;
+ struct videobuf_mapping *map;
+ int retval, pages;
+
+ dprintk(1, "%s\n", __func__);
+
+ /* create mapping + update buffer list */
+ map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
+ if (NULL == map)
+ return -ENOMEM;
+
+ buf->map = map;
+ map->q = q;
+
+ buf->baddr = vma->vm_start;
+
+ mem = buf->priv;
+ BUG_ON(!mem);
+ MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
+
+ pages = PAGE_ALIGN(vma->vm_end - vma->vm_start);
+ mem->vaddr = vmalloc_user(pages);
+ if (!mem->vaddr) {
+ printk(KERN_ERR "vmalloc (%d pages) failed\n", pages);
+ goto error;
+ }
+ dprintk(1, "vmalloc is at addr %p (%d pages)\n", mem->vaddr, pages);
+
+ /* Try to remap memory */
+ retval = remap_vmalloc_range(vma, mem->vaddr, 0);
+ if (retval < 0) {
+ printk(KERN_ERR "mmap: remap failed with error %d. ", retval);
+ vfree(mem->vaddr);
+ goto error;
+ }
+
+ vma->vm_ops = &videobuf_vm_ops;
+ vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
+ vma->vm_private_data = map;
+
+ dprintk(1, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
+ map, q, vma->vm_start, vma->vm_end,
+ (long int)buf->bsize,
+ vma->vm_pgoff, buf->i);
+
+ videobuf_vm_open(vma);
+
+ return 0;
+
+error:
+ mem = NULL;
+ kfree(map);
+ return -ENOMEM;
+}
+
+static struct videobuf_qtype_ops qops = {
+ .magic = MAGIC_QTYPE_OPS,
+
+ .alloc_vb = __videobuf_alloc_vb,
+ .iolock = __videobuf_iolock,
+ .mmap_mapper = __videobuf_mmap_mapper,
+ .vaddr = videobuf_to_vmalloc,
+};
+
+void videobuf_queue_vmalloc_init(struct videobuf_queue *q,
+ const struct videobuf_queue_ops *ops,
+ struct device *dev,
+ spinlock_t *irqlock,
+ enum v4l2_buf_type type,
+ enum v4l2_field field,
+ unsigned int msize,
+ void *priv,
+ struct mutex *ext_lock)
+{
+ videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
+ priv, &qops, ext_lock);
+}
+EXPORT_SYMBOL_GPL(videobuf_queue_vmalloc_init);
+
+void *videobuf_to_vmalloc(struct videobuf_buffer *buf)
+{
+ struct videobuf_vmalloc_memory *mem = buf->priv;
+ BUG_ON(!mem);
+ MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
+
+ return mem->vaddr;
+}
+EXPORT_SYMBOL_GPL(videobuf_to_vmalloc);
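+
+/*
+ * Sketch of the typical consumer (names illustrative): drivers that
+ * copy frames in software, e.g. USB webcams, fill the buffer through
+ * its kernel mapping and then complete it:
+ *
+ *	void *vaddr = videobuf_to_vmalloc(vb);
+ *
+ *	memcpy(vaddr, frame_data, vb->size);
+ *	vb->state = VIDEOBUF_DONE;
+ *	wake_up(&vb->done);
+ */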
+
+void videobuf_vmalloc_free(struct videobuf_buffer *buf)
+{
+ struct videobuf_vmalloc_memory *mem = buf->priv;
+
+ /* mmapped memory can't be freed here, otherwise mmapped region
+ would be released, while still needed. In this case, the memory
+ release should happen inside videobuf_vm_close().
+ So, it should free memory only if the memory were allocated for
+ read() operation.
+ */
+ if ((buf->memory != V4L2_MEMORY_USERPTR) || buf->baddr)
+ return;
+
+ if (!mem)
+ return;
+
+ MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
+
+ vfree(mem->vaddr);
+ mem->vaddr = NULL;
+}
+EXPORT_SYMBOL_GPL(videobuf_vmalloc_free);
+