From ace9429bb58fd418f0c81d4c2835699bddf6bde6 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Thu, 11 Apr 2024 10:27:49 +0200
Subject: Adding upstream version 6.6.15.

Signed-off-by: Daniel Baumann
---
 drivers/media/common/Kconfig | 31 +
 drivers/media/common/Makefile | 10 +
 drivers/media/common/b2c2/Kconfig | 25 +
 drivers/media/common/b2c2/Makefile | 8 +
 drivers/media/common/b2c2/flexcop-common.h | 186 ++
 drivers/media/common/b2c2/flexcop-eeprom.c | 147 +
 drivers/media/common/b2c2/flexcop-fe-tuner.c | 721 +++++
 drivers/media/common/b2c2/flexcop-hw-filter.c | 245 ++
 drivers/media/common/b2c2/flexcop-i2c.c | 280 ++
 drivers/media/common/b2c2/flexcop-misc.c | 85 +
 drivers/media/common/b2c2/flexcop-reg.h | 168 ++
 drivers/media/common/b2c2/flexcop-sram.c | 364 +++
 drivers/media/common/b2c2/flexcop.c | 311 ++
 drivers/media/common/b2c2/flexcop.h | 31 +
 drivers/media/common/b2c2/flexcop_ibi_value_be.h | 456 +++
 drivers/media/common/b2c2/flexcop_ibi_value_le.h | 456 +++
 drivers/media/common/cx2341x.c | 1778 +++++++++++
 drivers/media/common/cypress_firmware.c | 131 +
 drivers/media/common/cypress_firmware.h | 29 +
 drivers/media/common/saa7146/Kconfig | 10 +
 drivers/media/common/saa7146/Makefile | 6 +
 drivers/media/common/saa7146/saa7146_core.c | 566 ++++
 drivers/media/common/saa7146/saa7146_fops.c | 435 +++
 drivers/media/common/saa7146/saa7146_hlp.c | 771 +++++
 drivers/media/common/saa7146/saa7146_i2c.c | 421 +++
 drivers/media/common/saa7146/saa7146_vbi.c | 447 +++
 drivers/media/common/saa7146/saa7146_video.c | 725 +++++
 drivers/media/common/siano/Kconfig | 33 +
 drivers/media/common/siano/Makefile | 13 +
 drivers/media/common/siano/sms-cards.c | 348 +++
 drivers/media/common/siano/sms-cards.h | 127 +
 drivers/media/common/siano/smscoreapi.c | 2183 ++++++++++++++
 drivers/media/common/siano/smscoreapi.h | 1168 ++++++++
 drivers/media/common/siano/smsdvb-debugfs.c | 437 +++
 drivers/media/common/siano/smsdvb-main.c | 1271 ++++++++
 drivers/media/common/siano/smsdvb.h | 115 +
 drivers/media/common/siano/smsendian.c | 92 +
 drivers/media/common/siano/smsendian.h | 21 +
 drivers/media/common/siano/smsir.c | 97 +
 drivers/media/common/siano/smsir.h | 48 +
 drivers/media/common/ttpci-eeprom.c | 159 +
 drivers/media/common/ttpci-eeprom.h | 22 +
 drivers/media/common/tveeprom.c | 759 +++++
 drivers/media/common/uvc.c | 183 ++
 drivers/media/common/v4l2-tpg/Kconfig | 3 +
 drivers/media/common/v4l2-tpg/Makefile | 4 +
 drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c | 1409 +++++++++
 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c | 2697 +++++++++++++++++
 drivers/media/common/videobuf2/Kconfig | 32 +
 drivers/media/common/videobuf2/Makefile | 17 +
 drivers/media/common/videobuf2/frame_vector.c | 197 ++
 drivers/media/common/videobuf2/vb2-trace.c | 10 +
 drivers/media/common/videobuf2/videobuf2-core.c | 3076 ++++++++++++++++++++
 .../media/common/videobuf2/videobuf2-dma-contig.c | 865 ++++++
 drivers/media/common/videobuf2/videobuf2-dma-sg.c | 679 +++++
 drivers/media/common/videobuf2/videobuf2-dvb.c | 341 +++
 drivers/media/common/videobuf2/videobuf2-memops.c | 131 +
 drivers/media/common/videobuf2/videobuf2-v4l2.c | 1332 +++++++++
 drivers/media/common/videobuf2/videobuf2-vmalloc.c | 443 +++
 59 files changed, 27155 insertions(+)
 create mode 100644 drivers/media/common/Kconfig
 create mode 100644 drivers/media/common/Makefile
 create mode 100644 drivers/media/common/b2c2/Kconfig
 create mode 100644 drivers/media/common/b2c2/Makefile
 create mode 100644 drivers/media/common/b2c2/flexcop-common.h
 create mode 100644 drivers/media/common/b2c2/flexcop-eeprom.c
 create mode 100644 drivers/media/common/b2c2/flexcop-fe-tuner.c
 create mode 100644 drivers/media/common/b2c2/flexcop-hw-filter.c
 create mode 100644 drivers/media/common/b2c2/flexcop-i2c.c
 create mode 100644 drivers/media/common/b2c2/flexcop-misc.c
 create mode 100644 drivers/media/common/b2c2/flexcop-reg.h
 create mode 100644 drivers/media/common/b2c2/flexcop-sram.c
 create mode 100644 drivers/media/common/b2c2/flexcop.c
 create mode 100644 drivers/media/common/b2c2/flexcop.h
 create mode 100644 drivers/media/common/b2c2/flexcop_ibi_value_be.h
 create mode 100644 drivers/media/common/b2c2/flexcop_ibi_value_le.h
 create mode 100644 drivers/media/common/cx2341x.c
 create mode 100644 drivers/media/common/cypress_firmware.c
 create mode 100644 drivers/media/common/cypress_firmware.h
 create mode 100644 drivers/media/common/saa7146/Kconfig
 create mode 100644 drivers/media/common/saa7146/Makefile
 create mode 100644 drivers/media/common/saa7146/saa7146_core.c
 create mode 100644 drivers/media/common/saa7146/saa7146_fops.c
 create mode 100644 drivers/media/common/saa7146/saa7146_hlp.c
 create mode 100644 drivers/media/common/saa7146/saa7146_i2c.c
 create mode 100644 drivers/media/common/saa7146/saa7146_vbi.c
 create mode 100644 drivers/media/common/saa7146/saa7146_video.c
 create mode 100644 drivers/media/common/siano/Kconfig
 create mode 100644 drivers/media/common/siano/Makefile
 create mode 100644 drivers/media/common/siano/sms-cards.c
 create mode 100644 drivers/media/common/siano/sms-cards.h
 create mode 100644 drivers/media/common/siano/smscoreapi.c
 create mode 100644 drivers/media/common/siano/smscoreapi.h
 create mode 100644 drivers/media/common/siano/smsdvb-debugfs.c
 create mode 100644 drivers/media/common/siano/smsdvb-main.c
 create mode 100644 drivers/media/common/siano/smsdvb.h
 create mode 100644 drivers/media/common/siano/smsendian.c
 create mode 100644 drivers/media/common/siano/smsendian.h
 create mode 100644 drivers/media/common/siano/smsir.c
 create mode 100644 drivers/media/common/siano/smsir.h
 create mode 100644 drivers/media/common/ttpci-eeprom.c
 create mode 100644 drivers/media/common/ttpci-eeprom.h
 create mode 100644 drivers/media/common/tveeprom.c
 create mode 100644 drivers/media/common/uvc.c
 create mode 100644 drivers/media/common/v4l2-tpg/Kconfig
 create mode 100644 drivers/media/common/v4l2-tpg/Makefile
 create mode 100644 drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c
 create mode 100644 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
 create mode 100644 drivers/media/common/videobuf2/Kconfig
 create mode 100644 drivers/media/common/videobuf2/Makefile
 create mode 100644 drivers/media/common/videobuf2/frame_vector.c
 create mode 100644 drivers/media/common/videobuf2/vb2-trace.c
 create mode 100644 drivers/media/common/videobuf2/videobuf2-core.c
 create mode 100644 drivers/media/common/videobuf2/videobuf2-dma-contig.c
 create mode 100644 drivers/media/common/videobuf2/videobuf2-dma-sg.c
 create mode 100644 drivers/media/common/videobuf2/videobuf2-dvb.c
 create mode 100644 drivers/media/common/videobuf2/videobuf2-memops.c
 create mode 100644 drivers/media/common/videobuf2/videobuf2-v4l2.c
 create mode 100644 drivers/media/common/videobuf2/videobuf2-vmalloc.c

diff --git a/drivers/media/common/Kconfig b/drivers/media/common/Kconfig
new file mode 100644
index 0000000000..adcb665538
--- /dev/null
+++ b/drivers/media/common/Kconfig
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Used by common drivers, when they need to ask
questions +config MEDIA_COMMON_OPTIONS + bool + +comment "common driver options" + depends on MEDIA_COMMON_OPTIONS + +config CYPRESS_FIRMWARE + tristate + depends on USB + +config TTPCI_EEPROM + tristate + depends on I2C + +config UVC_COMMON + tristate + +config VIDEO_CX2341X + tristate + +config VIDEO_TVEEPROM + tristate + depends on I2C + +source "drivers/media/common/b2c2/Kconfig" +source "drivers/media/common/saa7146/Kconfig" +source "drivers/media/common/siano/Kconfig" +source "drivers/media/common/v4l2-tpg/Kconfig" +source "drivers/media/common/videobuf2/Kconfig" diff --git a/drivers/media/common/Makefile b/drivers/media/common/Makefile new file mode 100644 index 0000000000..c5ab905e7c --- /dev/null +++ b/drivers/media/common/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-y += b2c2/ saa7146/ siano/ v4l2-tpg/ videobuf2/ + +# Please keep it alphabetically sorted by Kconfig name +# (e. g. LC_ALL=C sort Makefile) +obj-$(CONFIG_CYPRESS_FIRMWARE) += cypress_firmware.o +obj-$(CONFIG_TTPCI_EEPROM) += ttpci-eeprom.o +obj-$(CONFIG_UVC_COMMON) += uvc.o +obj-$(CONFIG_VIDEO_CX2341X) += cx2341x.o +obj-$(CONFIG_VIDEO_TVEEPROM) += tveeprom.o diff --git a/drivers/media/common/b2c2/Kconfig b/drivers/media/common/b2c2/Kconfig new file mode 100644 index 0000000000..27284797e7 --- /dev/null +++ b/drivers/media/common/b2c2/Kconfig @@ -0,0 +1,25 @@ +# SPDX-License-Identifier: GPL-2.0-only +config DVB_B2C2_FLEXCOP + tristate + depends on DVB_CORE && I2C + depends on DVB_B2C2_FLEXCOP_PCI || DVB_B2C2_FLEXCOP_USB + default y + select DVB_PLL if MEDIA_SUBDRV_AUTOSELECT + select DVB_STV0299 if MEDIA_SUBDRV_AUTOSELECT + select DVB_MT352 if MEDIA_SUBDRV_AUTOSELECT + select DVB_MT312 if MEDIA_SUBDRV_AUTOSELECT + select DVB_NXT200X if MEDIA_SUBDRV_AUTOSELECT + select DVB_STV0297 if MEDIA_SUBDRV_AUTOSELECT + select DVB_BCM3510 if MEDIA_SUBDRV_AUTOSELECT + select DVB_LGDT330X if MEDIA_SUBDRV_AUTOSELECT + select DVB_S5H1420 if MEDIA_SUBDRV_AUTOSELECT + select DVB_TUNER_ITD1000 if MEDIA_SUBDRV_AUTOSELECT + select DVB_ISL6421 if MEDIA_SUBDRV_AUTOSELECT + select DVB_CX24120 if MEDIA_SUBDRV_AUTOSELECT + select DVB_CX24123 if MEDIA_SUBDRV_AUTOSELECT + select MEDIA_TUNER_SIMPLE if MEDIA_SUBDRV_AUTOSELECT + select DVB_TUNER_CX24113 if MEDIA_SUBDRV_AUTOSELECT + +# Selected via the PCI or USB flexcop drivers +config DVB_B2C2_FLEXCOP_DEBUG + bool diff --git a/drivers/media/common/b2c2/Makefile b/drivers/media/common/b2c2/Makefile new file mode 100644 index 0000000000..0e32b77f34 --- /dev/null +++ b/drivers/media/common/b2c2/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0 +b2c2-flexcop-objs += flexcop.o flexcop-fe-tuner.o flexcop-i2c.o +b2c2-flexcop-objs += flexcop-sram.o flexcop-eeprom.o flexcop-misc.o +b2c2-flexcop-objs += flexcop-hw-filter.o +obj-$(CONFIG_DVB_B2C2_FLEXCOP) += b2c2-flexcop.o + +ccflags-y += -I $(srctree)/drivers/media/dvb-frontends/ +ccflags-y += -I $(srctree)/drivers/media/tuners/ diff --git a/drivers/media/common/b2c2/flexcop-common.h b/drivers/media/common/b2c2/flexcop-common.h new file mode 100644 index 0000000000..f944c59cf4 --- /dev/null +++ b/drivers/media/common/b2c2/flexcop-common.h @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III + * flexcop-common.h - common header file for device-specific source files + * see flexcop.c for copyright information + */ +#ifndef __FLEXCOP_COMMON_H__ +#define __FLEXCOP_COMMON_H__ + +#include +#include +#include + +#include "flexcop-reg.h" + 
+#include +#include +#include +#include + +#define FC_MAX_FEED 256 + +#ifndef FC_LOG_PREFIX +#warning please define a log prefix for your file, using a default one +#define FC_LOG_PREFIX "b2c2-undef" +#endif + +/* Steal from usb.h */ +#undef err +#define err(format, arg...) \ + printk(KERN_ERR FC_LOG_PREFIX ": " format "\n" , ## arg) +#undef info +#define info(format, arg...) \ + printk(KERN_INFO FC_LOG_PREFIX ": " format "\n" , ## arg) +#undef warn +#define warn(format, arg...) \ + printk(KERN_WARNING FC_LOG_PREFIX ": " format "\n" , ## arg) + +struct flexcop_dma { + struct pci_dev *pdev; + + u8 *cpu_addr0; + dma_addr_t dma_addr0; + u8 *cpu_addr1; + dma_addr_t dma_addr1; + u32 size; /* size of each address in bytes */ +}; + +struct flexcop_i2c_adapter { + struct flexcop_device *fc; + struct i2c_adapter i2c_adap; + + u8 no_base_addr; + flexcop_i2c_port_t port; +}; + +/* Control structure for data definitions that are common to + * the B2C2-based PCI and USB devices. + */ +struct flexcop_device { + /* general */ + struct device *dev; /* for firmware_class */ + +#define FC_STATE_DVB_INIT 0x01 +#define FC_STATE_I2C_INIT 0x02 +#define FC_STATE_FE_INIT 0x04 + int init_state; + + /* device information */ + int has_32_hw_pid_filter; + flexcop_revision_t rev; + flexcop_device_type_t dev_type; + flexcop_bus_t bus_type; + + /* dvb stuff */ + struct dvb_adapter dvb_adapter; + struct dvb_frontend *fe; + struct dvb_net dvbnet; + struct dvb_demux demux; + struct dmxdev dmxdev; + struct dmx_frontend hw_frontend; + struct dmx_frontend mem_frontend; + int (*fe_sleep) (struct dvb_frontend *); + + struct flexcop_i2c_adapter fc_i2c_adap[3]; + struct mutex i2c_mutex; + struct module *owner; + + /* options and status */ + int extra_feedcount; + int feedcount; + int pid_filtering; + int fullts_streaming_state; + int skip_6_hw_pid_filter; + + /* bus specific callbacks */ + flexcop_ibi_value(*read_ibi_reg) (struct flexcop_device *, + flexcop_ibi_register); + int (*write_ibi_reg) (struct flexcop_device *, + flexcop_ibi_register, flexcop_ibi_value); + int (*i2c_request) (struct flexcop_i2c_adapter *, + flexcop_access_op_t, u8 chipaddr, u8 addr, u8 *buf, u16 len); + int (*stream_control) (struct flexcop_device *, int); + int (*get_mac_addr) (struct flexcop_device *fc, int extended); + void *bus_specific; +}; + +/* exported prototypes */ + +/* from flexcop.c */ +void flexcop_pass_dmx_data(struct flexcop_device *fc, u8 *buf, u32 len); +void flexcop_pass_dmx_packets(struct flexcop_device *fc, u8 *buf, u32 no); + +struct flexcop_device *flexcop_device_kmalloc(size_t bus_specific_len); +void flexcop_device_kfree(struct flexcop_device *); + +int flexcop_device_initialize(struct flexcop_device *); +void flexcop_device_exit(struct flexcop_device *fc); +void flexcop_reset_block_300(struct flexcop_device *fc); + +/* from flexcop-dma.c */ +int flexcop_dma_allocate(struct pci_dev *pdev, + struct flexcop_dma *dma, u32 size); +void flexcop_dma_free(struct flexcop_dma *dma); + +int flexcop_dma_control_timer_irq(struct flexcop_device *fc, + flexcop_dma_index_t no, int onoff); +int flexcop_dma_control_size_irq(struct flexcop_device *fc, + flexcop_dma_index_t no, int onoff); +int flexcop_dma_config(struct flexcop_device *fc, struct flexcop_dma *dma, + flexcop_dma_index_t dma_idx); +int flexcop_dma_xfer_control(struct flexcop_device *fc, + flexcop_dma_index_t dma_idx, flexcop_dma_addr_index_t index, + int onoff); +int flexcop_dma_config_timer(struct flexcop_device *fc, + flexcop_dma_index_t dma_idx, u8 cycles); + +/* from 
flexcop-eeprom.c */ +/* the PCI part uses this call to get the MAC address, the USB part has its own */ +int flexcop_eeprom_check_mac_addr(struct flexcop_device *fc, int extended); + +/* from flexcop-i2c.c */ +/* the PCI part uses this a i2c_request callback, whereas the usb part has its own + * one. We have it in flexcop-i2c.c, because it is going via the actual + * I2C-channel of the flexcop. + */ +int flexcop_i2c_request(struct flexcop_i2c_adapter*, flexcop_access_op_t, + u8 chipaddr, u8 addr, u8 *buf, u16 len); + +/* from flexcop-sram.c */ +int flexcop_sram_set_dest(struct flexcop_device *fc, flexcop_sram_dest_t dest, + flexcop_sram_dest_target_t target); +void flexcop_wan_set_speed(struct flexcop_device *fc, flexcop_wan_speed_t s); +void flexcop_sram_ctrl(struct flexcop_device *fc, + int usb_wan, int sramdma, int maximumfill); + +/* global prototypes for the flexcop-chip */ +/* from flexcop-fe-tuner.c */ +int flexcop_frontend_init(struct flexcop_device *fc); +void flexcop_frontend_exit(struct flexcop_device *fc); + +/* from flexcop-i2c.c */ +int flexcop_i2c_init(struct flexcop_device *fc); +void flexcop_i2c_exit(struct flexcop_device *fc); + +/* from flexcop-sram.c */ +int flexcop_sram_init(struct flexcop_device *fc); + +/* from flexcop-misc.c */ +void flexcop_determine_revision(struct flexcop_device *fc); +void flexcop_device_name(struct flexcop_device *fc, + const char *prefix, const char *suffix); +void flexcop_dump_reg(struct flexcop_device *fc, + flexcop_ibi_register reg, int num); + +/* from flexcop-hw-filter.c */ +int flexcop_pid_feed_control(struct flexcop_device *fc, + struct dvb_demux_feed *dvbdmxfeed, int onoff); +void flexcop_hw_filter_init(struct flexcop_device *fc); + +void flexcop_smc_ctrl(struct flexcop_device *fc, int onoff); + +void flexcop_set_mac_filter(struct flexcop_device *fc, u8 mac[6]); +void flexcop_mac_filter_ctrl(struct flexcop_device *fc, int onoff); + +#endif diff --git a/drivers/media/common/b2c2/flexcop-eeprom.c b/drivers/media/common/b2c2/flexcop-eeprom.c new file mode 100644 index 0000000000..0f2151cd36 --- /dev/null +++ b/drivers/media/common/b2c2/flexcop-eeprom.c @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III + * flexcop-eeprom.c - eeprom access methods (currently only MAC address reading) + * see flexcop.c for copyright information + */ +#include "flexcop.h" + +#if 0 +/*EEPROM (Skystar2 has one "24LC08B" chip on board) */ +static int eeprom_write(struct adapter *adapter, u16 addr, u8 *buf, u16 len) +{ + return flex_i2c_write(adapter, 0x20000000, 0x50, addr, buf, len); +} + +static int eeprom_lrc_write(struct adapter *adapter, u32 addr, + u32 len, u8 *wbuf, u8 *rbuf, int retries) +{ +int i; + +for (i = 0; i < retries; i++) { + if (eeprom_write(adapter, addr, wbuf, len) == len) { + if (eeprom_lrc_read(adapter, addr, len, rbuf, retries) == 1) + return 1; + } + } + return 0; +} + +/* These functions could be used to unlock SkyStar2 cards. 
*/ + +static int eeprom_writeKey(struct adapter *adapter, u8 *key, u32 len) +{ + u8 rbuf[20]; + u8 wbuf[20]; + + if (len != 16) + return 0; + + memcpy(wbuf, key, len); + wbuf[16] = 0; + wbuf[17] = 0; + wbuf[18] = 0; + wbuf[19] = calc_lrc(wbuf, 19); + return eeprom_lrc_write(adapter, 0x3e4, 20, wbuf, rbuf, 4); +} + +static int eeprom_readKey(struct adapter *adapter, u8 *key, u32 len) +{ + u8 buf[20]; + + if (len != 16) + return 0; + + if (eeprom_lrc_read(adapter, 0x3e4, 20, buf, 4) == 0) + return 0; + + memcpy(key, buf, len); + return 1; +} + +static char eeprom_set_mac_addr(struct adapter *adapter, char type, u8 *mac) +{ + u8 tmp[8]; + + if (type != 0) { + tmp[0] = mac[0]; + tmp[1] = mac[1]; + tmp[2] = mac[2]; + tmp[3] = mac[5]; + tmp[4] = mac[6]; + tmp[5] = mac[7]; + } else { + tmp[0] = mac[0]; + tmp[1] = mac[1]; + tmp[2] = mac[2]; + tmp[3] = mac[3]; + tmp[4] = mac[4]; + tmp[5] = mac[5]; + } + + tmp[6] = 0; + tmp[7] = calc_lrc(tmp, 7); + + if (eeprom_write(adapter, 0x3f8, tmp, 8) == 8) + return 1; + return 0; +} + +static int flexcop_eeprom_read(struct flexcop_device *fc, + u16 addr, u8 *buf, u16 len) +{ + return fc->i2c_request(fc,FC_READ,FC_I2C_PORT_EEPROM,0x50,addr,buf,len); +} + +#endif + +static u8 calc_lrc(u8 *buf, int len) +{ + int i; + u8 sum = 0; + for (i = 0; i < len; i++) + sum = sum ^ buf[i]; + return sum; +} + +static int flexcop_eeprom_request(struct flexcop_device *fc, + flexcop_access_op_t op, u16 addr, u8 *buf, u16 len, int retries) +{ + int i,ret = 0; + u8 chipaddr = 0x50 | ((addr >> 8) & 3); + for (i = 0; i < retries; i++) { + ret = fc->i2c_request(&fc->fc_i2c_adap[1], op, chipaddr, + addr & 0xff, buf, len); + if (ret == 0) + break; + } + return ret; +} + +static int flexcop_eeprom_lrc_read(struct flexcop_device *fc, u16 addr, + u8 *buf, u16 len, int retries) +{ + int ret = flexcop_eeprom_request(fc, FC_READ, addr, buf, len, retries); + if (ret == 0) + if (calc_lrc(buf, len - 1) != buf[len - 1]) + ret = -EINVAL; + return ret; +} + +/* JJ's comment about extended == 1: it is not presently used anywhere but was + * added to the low-level functions for possible support of EUI64 */ +int flexcop_eeprom_check_mac_addr(struct flexcop_device *fc, int extended) +{ + u8 buf[8]; + int ret = 0; + + if ((ret = flexcop_eeprom_lrc_read(fc,0x3f8,buf,8,4)) == 0) { + if (extended != 0) { + err("TODO: extended (EUI64) MAC addresses aren't completely supported yet"); + ret = -EINVAL; + } else + memcpy(fc->dvb_adapter.proposed_mac,buf,6); + } + return ret; +} +EXPORT_SYMBOL(flexcop_eeprom_check_mac_addr); diff --git a/drivers/media/common/b2c2/flexcop-fe-tuner.c b/drivers/media/common/b2c2/flexcop-fe-tuner.c new file mode 100644 index 0000000000..aac1aadb0c --- /dev/null +++ b/drivers/media/common/b2c2/flexcop-fe-tuner.c @@ -0,0 +1,721 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III + * flexcop-fe-tuner.c - methods for frontend attachment and DiSEqC controlling + * see flexcop.c for copyright information + */ +#include +#include "flexcop.h" +#include "mt312.h" +#include "stv0299.h" +#include "s5h1420.h" +#include "itd1000.h" +#include "cx24113.h" +#include "cx24123.h" +#include "isl6421.h" +#include "cx24120.h" +#include "mt352.h" +#include "bcm3510.h" +#include "nxt200x.h" +#include "dvb-pll.h" +#include "lgdt330x.h" +#include "tuner-simple.h" +#include "stv0297.h" + + +/* Can we use the specified front-end? Remember that if we are compiled + * into the kernel we can't call code that's in modules. 
*/ +#define FE_SUPPORTED(fe) IS_REACHABLE(CONFIG_DVB_ ## fe) + +#if FE_SUPPORTED(BCM3510) || (FE_SUPPORTED(CX24120) && FE_SUPPORTED(ISL6421)) +static int flexcop_fe_request_firmware(struct dvb_frontend *fe, + const struct firmware **fw, char *name) +{ + struct flexcop_device *fc = fe->dvb->priv; + + return request_firmware(fw, name, fc->dev); +} +#endif + +/* lnb control */ +#if (FE_SUPPORTED(MT312) || FE_SUPPORTED(STV0299)) && FE_SUPPORTED(PLL) +static int flexcop_set_voltage(struct dvb_frontend *fe, + enum fe_sec_voltage voltage) +{ + struct flexcop_device *fc = fe->dvb->priv; + flexcop_ibi_value v; + deb_tuner("polarity/voltage = %u\n", voltage); + + v = fc->read_ibi_reg(fc, misc_204); + switch (voltage) { + case SEC_VOLTAGE_OFF: + v.misc_204.ACPI1_sig = 1; + break; + case SEC_VOLTAGE_13: + v.misc_204.ACPI1_sig = 0; + v.misc_204.LNB_L_H_sig = 0; + break; + case SEC_VOLTAGE_18: + v.misc_204.ACPI1_sig = 0; + v.misc_204.LNB_L_H_sig = 1; + break; + default: + err("unknown SEC_VOLTAGE value"); + return -EINVAL; + } + return fc->write_ibi_reg(fc, misc_204, v); +} +#endif + +#if FE_SUPPORTED(S5H1420) || FE_SUPPORTED(STV0299) || FE_SUPPORTED(MT312) +static int __maybe_unused flexcop_sleep(struct dvb_frontend* fe) +{ + struct flexcop_device *fc = fe->dvb->priv; + if (fc->fe_sleep) + return fc->fe_sleep(fe); + return 0; +} +#endif + +/* SkyStar2 DVB-S rev 2.3 */ +#if FE_SUPPORTED(MT312) && FE_SUPPORTED(PLL) +static int flexcop_set_tone(struct dvb_frontend *fe, enum fe_sec_tone_mode tone) +{ +/* u16 wz_half_period_for_45_mhz[] = { 0x01ff, 0x0154, 0x00ff, 0x00cc }; */ + struct flexcop_device *fc = fe->dvb->priv; + flexcop_ibi_value v; + u16 ax; + v.raw = 0; + deb_tuner("tone = %u\n",tone); + + switch (tone) { + case SEC_TONE_ON: + ax = 0x01ff; + break; + case SEC_TONE_OFF: + ax = 0; + break; + default: + err("unknown SEC_TONE value"); + return -EINVAL; + } + + v.lnb_switch_freq_200.LNB_CTLPrescaler_sig = 1; /* divide by 2 */ + v.lnb_switch_freq_200.LNB_CTLHighCount_sig = ax; + v.lnb_switch_freq_200.LNB_CTLLowCount_sig = ax == 0 ? 0x1ff : ax; + return fc->write_ibi_reg(fc,lnb_switch_freq_200,v); +} + +static void flexcop_diseqc_send_bit(struct dvb_frontend* fe, int data) +{ + flexcop_set_tone(fe, SEC_TONE_ON); + udelay(data ? 500 : 1000); + flexcop_set_tone(fe, SEC_TONE_OFF); + udelay(data ? 
1000 : 500); +} + +static void flexcop_diseqc_send_byte(struct dvb_frontend* fe, int data) +{ + int i, par = 1, d; + for (i = 7; i >= 0; i--) { + d = (data >> i) & 1; + par ^= d; + flexcop_diseqc_send_bit(fe, d); + } + flexcop_diseqc_send_bit(fe, par); +} + +static int flexcop_send_diseqc_msg(struct dvb_frontend *fe, + int len, u8 *msg, unsigned long burst) +{ + int i; + + flexcop_set_tone(fe, SEC_TONE_OFF); + mdelay(16); + + for (i = 0; i < len; i++) + flexcop_diseqc_send_byte(fe,msg[i]); + mdelay(16); + + if (burst != -1) { + if (burst) + flexcop_diseqc_send_byte(fe, 0xff); + else { + flexcop_set_tone(fe, SEC_TONE_ON); + mdelay(12); + udelay(500); + flexcop_set_tone(fe, SEC_TONE_OFF); + } + msleep(20); + } + return 0; +} + +static int flexcop_diseqc_send_master_cmd(struct dvb_frontend *fe, + struct dvb_diseqc_master_cmd *cmd) +{ + return flexcop_send_diseqc_msg(fe, cmd->msg_len, cmd->msg, 0); +} + +static int flexcop_diseqc_send_burst(struct dvb_frontend *fe, + enum fe_sec_mini_cmd minicmd) +{ + return flexcop_send_diseqc_msg(fe, 0, NULL, minicmd); +} + +static struct mt312_config skystar23_samsung_tbdu18132_config = { + .demod_address = 0x0e, +}; + +static int skystar2_rev23_attach(struct flexcop_device *fc, + struct i2c_adapter *i2c) +{ + struct dvb_frontend_ops *ops; + + fc->fe = dvb_attach(mt312_attach, &skystar23_samsung_tbdu18132_config, i2c); + if (!fc->fe) + return 0; + + if (!dvb_attach(dvb_pll_attach, fc->fe, 0x61, i2c, + DVB_PLL_SAMSUNG_TBDU18132)) + return 0; + + ops = &fc->fe->ops; + ops->diseqc_send_master_cmd = flexcop_diseqc_send_master_cmd; + ops->diseqc_send_burst = flexcop_diseqc_send_burst; + ops->set_tone = flexcop_set_tone; + ops->set_voltage = flexcop_set_voltage; + fc->fe_sleep = ops->sleep; + ops->sleep = flexcop_sleep; + return 1; +} +#else +#define skystar2_rev23_attach NULL +#endif + +/* SkyStar2 DVB-S rev 2.6 */ +#if FE_SUPPORTED(STV0299) && FE_SUPPORTED(PLL) +static int samsung_tbmu24112_set_symbol_rate(struct dvb_frontend *fe, + u32 srate, u32 ratio) +{ + u8 aclk = 0; + u8 bclk = 0; + + if (srate < 1500000) { + aclk = 0xb7; bclk = 0x47; + } else if (srate < 3000000) { + aclk = 0xb7; bclk = 0x4b; + } else if (srate < 7000000) { + aclk = 0xb7; bclk = 0x4f; + } else if (srate < 14000000) { + aclk = 0xb7; bclk = 0x53; + } else if (srate < 30000000) { + aclk = 0xb6; bclk = 0x53; + } else if (srate < 45000000) { + aclk = 0xb4; bclk = 0x51; + } + + stv0299_writereg(fe, 0x13, aclk); + stv0299_writereg(fe, 0x14, bclk); + stv0299_writereg(fe, 0x1f, (ratio >> 16) & 0xff); + stv0299_writereg(fe, 0x20, (ratio >> 8) & 0xff); + stv0299_writereg(fe, 0x21, ratio & 0xf0); + return 0; +} + +static u8 samsung_tbmu24112_inittab[] = { + 0x01, 0x15, + 0x02, 0x30, + 0x03, 0x00, + 0x04, 0x7D, + 0x05, 0x35, + 0x06, 0x02, + 0x07, 0x00, + 0x08, 0xC3, + 0x0C, 0x00, + 0x0D, 0x81, + 0x0E, 0x23, + 0x0F, 0x12, + 0x10, 0x7E, + 0x11, 0x84, + 0x12, 0xB9, + 0x13, 0x88, + 0x14, 0x89, + 0x15, 0xC9, + 0x16, 0x00, + 0x17, 0x5C, + 0x18, 0x00, + 0x19, 0x00, + 0x1A, 0x00, + 0x1C, 0x00, + 0x1D, 0x00, + 0x1E, 0x00, + 0x1F, 0x3A, + 0x20, 0x2E, + 0x21, 0x80, + 0x22, 0xFF, + 0x23, 0xC1, + 0x28, 0x00, + 0x29, 0x1E, + 0x2A, 0x14, + 0x2B, 0x0F, + 0x2C, 0x09, + 0x2D, 0x05, + 0x31, 0x1F, + 0x32, 0x19, + 0x33, 0xFE, + 0x34, 0x93, + 0xff, 0xff, +}; + +static struct stv0299_config samsung_tbmu24112_config = { + .demod_address = 0x68, + .inittab = samsung_tbmu24112_inittab, + .mclk = 88000000UL, + .invert = 0, + .skip_reinit = 0, + .lock_output = STV0299_LOCKOUTPUT_LK, + .volt13_op0_op1 = STV0299_VOLT13_OP1, + 
.min_delay_ms = 100, + .set_symbol_rate = samsung_tbmu24112_set_symbol_rate, +}; + +static int skystar2_rev26_attach(struct flexcop_device *fc, + struct i2c_adapter *i2c) +{ + fc->fe = dvb_attach(stv0299_attach, &samsung_tbmu24112_config, i2c); + if (!fc->fe) + return 0; + + if (!dvb_attach(dvb_pll_attach, fc->fe, 0x61, i2c, + DVB_PLL_SAMSUNG_TBMU24112)) + return 0; + + fc->fe->ops.set_voltage = flexcop_set_voltage; + fc->fe_sleep = fc->fe->ops.sleep; + fc->fe->ops.sleep = flexcop_sleep; + return 1; + +} +#else +#define skystar2_rev26_attach NULL +#endif + +/* SkyStar2 DVB-S rev 2.7 */ +#if FE_SUPPORTED(S5H1420) && FE_SUPPORTED(ISL6421) && FE_SUPPORTED(TUNER_ITD1000) +static struct s5h1420_config skystar2_rev2_7_s5h1420_config = { + .demod_address = 0x53, + .invert = 1, + .repeated_start_workaround = 1, + .serial_mpeg = 1, +}; + +static struct itd1000_config skystar2_rev2_7_itd1000_config = { + .i2c_address = 0x61, +}; + +static int skystar2_rev27_attach(struct flexcop_device *fc, + struct i2c_adapter *i2c) +{ + flexcop_ibi_value r108; + struct i2c_adapter *i2c_tuner; + + /* enable no_base_addr - no repeated start when reading */ + fc->fc_i2c_adap[0].no_base_addr = 1; + fc->fe = dvb_attach(s5h1420_attach, &skystar2_rev2_7_s5h1420_config, + i2c); + if (!fc->fe) + goto fail; + + i2c_tuner = s5h1420_get_tuner_i2c_adapter(fc->fe); + if (!i2c_tuner) + goto fail; + + fc->fe_sleep = fc->fe->ops.sleep; + fc->fe->ops.sleep = flexcop_sleep; + + /* enable no_base_addr - no repeated start when reading */ + fc->fc_i2c_adap[2].no_base_addr = 1; + if (!dvb_attach(isl6421_attach, fc->fe, &fc->fc_i2c_adap[2].i2c_adap, + 0x08, 1, 1, false)) { + err("ISL6421 could NOT be attached"); + goto fail_isl; + } + info("ISL6421 successfully attached"); + + /* the ITD1000 requires a lower i2c clock - is it a problem ? */ + r108.raw = 0x00000506; + fc->write_ibi_reg(fc, tw_sm_c_108, r108); + if (!dvb_attach(itd1000_attach, fc->fe, i2c_tuner, + &skystar2_rev2_7_itd1000_config)) { + err("ITD1000 could NOT be attached"); + /* Should i2c clock be restored? 
*/ + goto fail_isl; + } + info("ITD1000 successfully attached"); + + return 1; + +fail_isl: + fc->fc_i2c_adap[2].no_base_addr = 0; +fail: + /* for the next devices we need it again */ + fc->fc_i2c_adap[0].no_base_addr = 0; + return 0; +} +#else +#define skystar2_rev27_attach NULL +#endif + +/* SkyStar2 rev 2.8 */ +#if FE_SUPPORTED(CX24123) && FE_SUPPORTED(ISL6421) && FE_SUPPORTED(TUNER_CX24113) +static struct cx24123_config skystar2_rev2_8_cx24123_config = { + .demod_address = 0x55, + .dont_use_pll = 1, + .agc_callback = cx24113_agc_callback, +}; + +static const struct cx24113_config skystar2_rev2_8_cx24113_config = { + .i2c_addr = 0x54, + .xtal_khz = 10111, +}; + +static int skystar2_rev28_attach(struct flexcop_device *fc, + struct i2c_adapter *i2c) +{ + struct i2c_adapter *i2c_tuner; + + fc->fe = dvb_attach(cx24123_attach, &skystar2_rev2_8_cx24123_config, + i2c); + if (!fc->fe) + return 0; + + i2c_tuner = cx24123_get_tuner_i2c_adapter(fc->fe); + if (!i2c_tuner) + return 0; + + if (!dvb_attach(cx24113_attach, fc->fe, &skystar2_rev2_8_cx24113_config, + i2c_tuner)) { + err("CX24113 could NOT be attached"); + return 0; + } + info("CX24113 successfully attached"); + + fc->fc_i2c_adap[2].no_base_addr = 1; + if (!dvb_attach(isl6421_attach, fc->fe, &fc->fc_i2c_adap[2].i2c_adap, + 0x08, 0, 0, false)) { + err("ISL6421 could NOT be attached"); + fc->fc_i2c_adap[2].no_base_addr = 0; + return 0; + } + info("ISL6421 successfully attached"); + /* TODO on i2c_adap[1] addr 0x11 (EEPROM) there seems to be an + * IR-receiver (PIC16F818) - but the card has no input for that ??? */ + return 1; +} +#else +#define skystar2_rev28_attach NULL +#endif + +/* AirStar DVB-T */ +#if FE_SUPPORTED(MT352) && FE_SUPPORTED(PLL) +static int samsung_tdtc9251dh0_demod_init(struct dvb_frontend *fe) +{ + static u8 mt352_clock_config[] = { 0x89, 0x18, 0x2d }; + static u8 mt352_reset[] = { 0x50, 0x80 }; + static u8 mt352_adc_ctl_1_cfg[] = { 0x8E, 0x40 }; + static u8 mt352_agc_cfg[] = { 0x67, 0x28, 0xa1 }; + static u8 mt352_capt_range_cfg[] = { 0x75, 0x32 }; + + mt352_write(fe, mt352_clock_config, sizeof(mt352_clock_config)); + udelay(2000); + mt352_write(fe, mt352_reset, sizeof(mt352_reset)); + mt352_write(fe, mt352_adc_ctl_1_cfg, sizeof(mt352_adc_ctl_1_cfg)); + mt352_write(fe, mt352_agc_cfg, sizeof(mt352_agc_cfg)); + mt352_write(fe, mt352_capt_range_cfg, sizeof(mt352_capt_range_cfg)); + return 0; +} + +static struct mt352_config samsung_tdtc9251dh0_config = { + .demod_address = 0x0f, + .demod_init = samsung_tdtc9251dh0_demod_init, +}; + +static int airstar_dvbt_attach(struct flexcop_device *fc, + struct i2c_adapter *i2c) +{ + fc->fe = dvb_attach(mt352_attach, &samsung_tdtc9251dh0_config, i2c); + if (!fc->fe) + return 0; + + return !!dvb_attach(dvb_pll_attach, fc->fe, 0x61, NULL, + DVB_PLL_SAMSUNG_TDTC9251DH0); +} +#else +#define airstar_dvbt_attach NULL +#endif + +/* AirStar ATSC 1st generation */ +#if FE_SUPPORTED(BCM3510) +static struct bcm3510_config air2pc_atsc_first_gen_config = { + .demod_address = 0x0f, + .request_firmware = flexcop_fe_request_firmware, +}; + +static int airstar_atsc1_attach(struct flexcop_device *fc, + struct i2c_adapter *i2c) +{ + fc->fe = dvb_attach(bcm3510_attach, &air2pc_atsc_first_gen_config, i2c); + return fc->fe != NULL; +} +#else +#define airstar_atsc1_attach NULL +#endif + +/* AirStar ATSC 2nd generation */ +#if FE_SUPPORTED(NXT200X) && FE_SUPPORTED(PLL) +static const struct nxt200x_config samsung_tbmv_config = { + .demod_address = 0x0a, +}; + +static int airstar_atsc2_attach(struct 
flexcop_device *fc, + struct i2c_adapter *i2c) +{ + fc->fe = dvb_attach(nxt200x_attach, &samsung_tbmv_config, i2c); + if (!fc->fe) + return 0; + + return !!dvb_attach(dvb_pll_attach, fc->fe, 0x61, NULL, + DVB_PLL_SAMSUNG_TBMV); +} +#else +#define airstar_atsc2_attach NULL +#endif + +/* AirStar ATSC 3rd generation */ +#if FE_SUPPORTED(LGDT330X) +static struct lgdt330x_config air2pc_atsc_hd5000_config = { + .demod_chip = LGDT3303, + .serial_mpeg = 0x04, + .clock_polarity_flip = 1, +}; + +static int airstar_atsc3_attach(struct flexcop_device *fc, + struct i2c_adapter *i2c) +{ + fc->fe = dvb_attach(lgdt330x_attach, &air2pc_atsc_hd5000_config, + 0x59, i2c); + if (!fc->fe) + return 0; + + return !!dvb_attach(simple_tuner_attach, fc->fe, i2c, 0x61, + TUNER_LG_TDVS_H06XF); +} +#else +#define airstar_atsc3_attach NULL +#endif + +/* CableStar2 DVB-C */ +#if FE_SUPPORTED(STV0297) && FE_SUPPORTED(PLL) +static u8 alps_tdee4_stv0297_inittab[] = { + 0x80, 0x01, + 0x80, 0x00, + 0x81, 0x01, + 0x81, 0x00, + 0x00, 0x48, + 0x01, 0x58, + 0x03, 0x00, + 0x04, 0x00, + 0x07, 0x00, + 0x08, 0x00, + 0x30, 0xff, + 0x31, 0x9d, + 0x32, 0xff, + 0x33, 0x00, + 0x34, 0x29, + 0x35, 0x55, + 0x36, 0x80, + 0x37, 0x6e, + 0x38, 0x9c, + 0x40, 0x1a, + 0x41, 0xfe, + 0x42, 0x33, + 0x43, 0x00, + 0x44, 0xff, + 0x45, 0x00, + 0x46, 0x00, + 0x49, 0x04, + 0x4a, 0x51, + 0x4b, 0xf8, + 0x52, 0x30, + 0x53, 0x06, + 0x59, 0x06, + 0x5a, 0x5e, + 0x5b, 0x04, + 0x61, 0x49, + 0x62, 0x0a, + 0x70, 0xff, + 0x71, 0x04, + 0x72, 0x00, + 0x73, 0x00, + 0x74, 0x0c, + 0x80, 0x20, + 0x81, 0x00, + 0x82, 0x30, + 0x83, 0x00, + 0x84, 0x04, + 0x85, 0x22, + 0x86, 0x08, + 0x87, 0x1b, + 0x88, 0x00, + 0x89, 0x00, + 0x90, 0x00, + 0x91, 0x04, + 0xa0, 0x86, + 0xa1, 0x00, + 0xa2, 0x00, + 0xb0, 0x91, + 0xb1, 0x0b, + 0xc0, 0x5b, + 0xc1, 0x10, + 0xc2, 0x12, + 0xd0, 0x02, + 0xd1, 0x00, + 0xd2, 0x00, + 0xd3, 0x00, + 0xd4, 0x02, + 0xd5, 0x00, + 0xde, 0x00, + 0xdf, 0x01, + 0xff, 0xff, +}; + +static struct stv0297_config alps_tdee4_stv0297_config = { + .demod_address = 0x1c, + .inittab = alps_tdee4_stv0297_inittab, +}; + +static int cablestar2_attach(struct flexcop_device *fc, + struct i2c_adapter *i2c) +{ + fc->fc_i2c_adap[0].no_base_addr = 1; + fc->fe = dvb_attach(stv0297_attach, &alps_tdee4_stv0297_config, i2c); + if (!fc->fe) + goto fail; + + /* This tuner doesn't use the stv0297's I2C gate, but instead the + * tuner is connected to a different flexcop I2C adapter. 
*/ + if (fc->fe->ops.i2c_gate_ctrl) + fc->fe->ops.i2c_gate_ctrl(fc->fe, 0); + fc->fe->ops.i2c_gate_ctrl = NULL; + + if (!dvb_attach(dvb_pll_attach, fc->fe, 0x61, + &fc->fc_i2c_adap[2].i2c_adap, DVB_PLL_TDEE4)) + goto fail; + + return 1; + +fail: + /* Reset for next frontend to try */ + fc->fc_i2c_adap[0].no_base_addr = 0; + return 0; +} +#else +#define cablestar2_attach NULL +#endif + +/* SkyStar S2 PCI DVB-S/S2 card based on Conexant cx24120/cx24118 */ +#if FE_SUPPORTED(CX24120) && FE_SUPPORTED(ISL6421) +static const struct cx24120_config skystar2_rev3_3_cx24120_config = { + .i2c_addr = 0x55, + .xtal_khz = 10111, + .initial_mpeg_config = { 0xa1, 0x76, 0x07 }, + .request_firmware = flexcop_fe_request_firmware, + .i2c_wr_max = 4, +}; + +static int skystarS2_rev33_attach(struct flexcop_device *fc, + struct i2c_adapter *i2c) +{ + fc->fe = dvb_attach(cx24120_attach, + &skystar2_rev3_3_cx24120_config, i2c); + if (!fc->fe) + return 0; + + fc->dev_type = FC_SKYS2_REV33; + fc->fc_i2c_adap[2].no_base_addr = 1; + if (!dvb_attach(isl6421_attach, fc->fe, &fc->fc_i2c_adap[2].i2c_adap, + 0x08, 0, 0, false)) { + err("ISL6421 could NOT be attached!"); + fc->fc_i2c_adap[2].no_base_addr = 0; + return 0; + } + info("ISL6421 successfully attached."); + + if (fc->has_32_hw_pid_filter) + fc->skip_6_hw_pid_filter = 1; + + return 1; +} +#else +#define skystarS2_rev33_attach NULL +#endif + +static struct { + flexcop_device_type_t type; + int (*attach)(struct flexcop_device *, struct i2c_adapter *); +} flexcop_frontends[] = { + { FC_SKY_REV27, skystar2_rev27_attach }, + { FC_SKY_REV28, skystar2_rev28_attach }, + { FC_SKY_REV26, skystar2_rev26_attach }, + { FC_AIR_DVBT, airstar_dvbt_attach }, + { FC_AIR_ATSC2, airstar_atsc2_attach }, + { FC_AIR_ATSC3, airstar_atsc3_attach }, + { FC_AIR_ATSC1, airstar_atsc1_attach }, + { FC_CABLE, cablestar2_attach }, + { FC_SKY_REV23, skystar2_rev23_attach }, + { FC_SKYS2_REV33, skystarS2_rev33_attach }, +}; + +/* try to figure out the frontend */ +int flexcop_frontend_init(struct flexcop_device *fc) +{ + int i; + for (i = 0; i < ARRAY_SIZE(flexcop_frontends); i++) { + if (!flexcop_frontends[i].attach) + continue; + /* type needs to be set before, because of some workarounds + * done based on the probed card type */ + fc->dev_type = flexcop_frontends[i].type; + if (flexcop_frontends[i].attach(fc, &fc->fc_i2c_adap[0].i2c_adap)) + goto fe_found; + /* Clean up partially attached frontend */ + if (fc->fe) { + dvb_frontend_detach(fc->fe); + fc->fe = NULL; + } + } + fc->dev_type = FC_UNK; + err("no frontend driver found for this B2C2/FlexCop adapter"); + return -ENODEV; + +fe_found: + info("found '%s' .", fc->fe->ops.info.name); + if (dvb_register_frontend(&fc->dvb_adapter, fc->fe)) { + err("frontend registration failed!"); + dvb_frontend_detach(fc->fe); + fc->fe = NULL; + return -EINVAL; + } + fc->init_state |= FC_STATE_FE_INIT; + return 0; +} + +void flexcop_frontend_exit(struct flexcop_device *fc) +{ + if (fc->init_state & FC_STATE_FE_INIT) { + dvb_unregister_frontend(fc->fe); + dvb_frontend_detach(fc->fe); + } + fc->init_state &= ~FC_STATE_FE_INIT; +} diff --git a/drivers/media/common/b2c2/flexcop-hw-filter.c b/drivers/media/common/b2c2/flexcop-hw-filter.c new file mode 100644 index 0000000000..c5a3345c99 --- /dev/null +++ b/drivers/media/common/b2c2/flexcop-hw-filter.c @@ -0,0 +1,245 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III + * flexcop-hw-filter.c - pid and mac address filtering and control functions + * 
see flexcop.c for copyright information + */ +#include "flexcop.h" + +static void flexcop_rcv_data_ctrl(struct flexcop_device *fc, int onoff) +{ + flexcop_set_ibi_value(ctrl_208, Rcv_Data_sig, onoff); + deb_ts("rcv_data is now: '%s'\n", onoff ? "on" : "off"); +} + +void flexcop_smc_ctrl(struct flexcop_device *fc, int onoff) +{ + flexcop_set_ibi_value(ctrl_208, SMC_Enable_sig, onoff); +} + +static void flexcop_null_filter_ctrl(struct flexcop_device *fc, int onoff) +{ + flexcop_set_ibi_value(ctrl_208, Null_filter_sig, onoff); +} + +void flexcop_set_mac_filter(struct flexcop_device *fc, u8 mac[6]) +{ + flexcop_ibi_value v418, v41c; + v41c = fc->read_ibi_reg(fc, mac_address_41c); + + v418.mac_address_418.MAC1 = mac[0]; + v418.mac_address_418.MAC2 = mac[1]; + v418.mac_address_418.MAC3 = mac[2]; + v418.mac_address_418.MAC6 = mac[3]; + v41c.mac_address_41c.MAC7 = mac[4]; + v41c.mac_address_41c.MAC8 = mac[5]; + + fc->write_ibi_reg(fc, mac_address_418, v418); + fc->write_ibi_reg(fc, mac_address_41c, v41c); +} + +void flexcop_mac_filter_ctrl(struct flexcop_device *fc, int onoff) +{ + flexcop_set_ibi_value(ctrl_208, MAC_filter_Mode_sig, onoff); +} + +static void flexcop_pid_group_filter(struct flexcop_device *fc, + u16 pid, u16 mask) +{ + /* index_reg_310.extra_index_reg need to 0 or 7 to work */ + flexcop_ibi_value v30c; + v30c.pid_filter_30c_ext_ind_0_7.Group_PID = pid; + v30c.pid_filter_30c_ext_ind_0_7.Group_mask = mask; + fc->write_ibi_reg(fc, pid_filter_30c, v30c); +} + +static void flexcop_pid_group_filter_ctrl(struct flexcop_device *fc, int onoff) +{ + flexcop_set_ibi_value(ctrl_208, Mask_filter_sig, onoff); +} + +/* this fancy define reduces the code size of the quite similar PID controlling of + * the first 6 PIDs + */ + +#define pid_ctrl(vregname,field,enablefield,trans_field,transval) \ + flexcop_ibi_value vpid = fc->read_ibi_reg(fc, vregname), \ +v208 = fc->read_ibi_reg(fc, ctrl_208); \ +vpid.vregname.field = onoff ? pid : 0x1fff; \ +vpid.vregname.trans_field = transval; \ +v208.ctrl_208.enablefield = onoff; \ +fc->write_ibi_reg(fc, vregname, vpid); \ +fc->write_ibi_reg(fc, ctrl_208, v208) + +static void flexcop_pid_Stream1_PID_ctrl(struct flexcop_device *fc, + u16 pid, int onoff) +{ + pid_ctrl(pid_filter_300, Stream1_PID, Stream1_filter_sig, + Stream1_trans, 0); +} + +static void flexcop_pid_Stream2_PID_ctrl(struct flexcop_device *fc, + u16 pid, int onoff) +{ + pid_ctrl(pid_filter_300, Stream2_PID, Stream2_filter_sig, + Stream2_trans, 0); +} + +static void flexcop_pid_PCR_PID_ctrl(struct flexcop_device *fc, + u16 pid, int onoff) +{ + pid_ctrl(pid_filter_304, PCR_PID, PCR_filter_sig, PCR_trans, 0); +} + +static void flexcop_pid_PMT_PID_ctrl(struct flexcop_device *fc, + u16 pid, int onoff) +{ + pid_ctrl(pid_filter_304, PMT_PID, PMT_filter_sig, PMT_trans, 0); +} + +static void flexcop_pid_EMM_PID_ctrl(struct flexcop_device *fc, + u16 pid, int onoff) +{ + pid_ctrl(pid_filter_308, EMM_PID, EMM_filter_sig, EMM_trans, 0); +} + +static void flexcop_pid_ECM_PID_ctrl(struct flexcop_device *fc, + u16 pid, int onoff) +{ + pid_ctrl(pid_filter_308, ECM_PID, ECM_filter_sig, ECM_trans, 0); +} + +static void flexcop_pid_control(struct flexcop_device *fc, + int index, u16 pid, int onoff) +{ + if (pid == 0x2000) + return; + + deb_ts("setting pid: %5d %04x at index %d '%s'\n", + pid, pid, index, onoff ? "on" : "off"); + + /* First 6 can be buggy - skip over them if option set */ + if (fc->skip_6_hw_pid_filter) + index += 6; + + /* We could use bit magic here to reduce source code size. 
+ * I decided against it, but to use the real register names */ + switch (index) { + case 0: + flexcop_pid_Stream1_PID_ctrl(fc, pid, onoff); + break; + case 1: + flexcop_pid_Stream2_PID_ctrl(fc, pid, onoff); + break; + case 2: + flexcop_pid_PCR_PID_ctrl(fc, pid, onoff); + break; + case 3: + flexcop_pid_PMT_PID_ctrl(fc, pid, onoff); + break; + case 4: + flexcop_pid_EMM_PID_ctrl(fc, pid, onoff); + break; + case 5: + flexcop_pid_ECM_PID_ctrl(fc, pid, onoff); + break; + default: + if (fc->has_32_hw_pid_filter && index < 38) { + flexcop_ibi_value vpid, vid; + + /* set the index */ + vid = fc->read_ibi_reg(fc, index_reg_310); + vid.index_reg_310.index_reg = index - 6; + fc->write_ibi_reg(fc, index_reg_310, vid); + + vpid = fc->read_ibi_reg(fc, pid_n_reg_314); + vpid.pid_n_reg_314.PID = onoff ? pid : 0x1fff; + vpid.pid_n_reg_314.PID_enable_bit = onoff; + fc->write_ibi_reg(fc, pid_n_reg_314, vpid); + } + break; + } +} + +static int flexcop_toggle_fullts_streaming(struct flexcop_device *fc, int onoff) +{ + if (fc->fullts_streaming_state != onoff) { + deb_ts("%s full TS transfer\n",onoff ? "enabling" : "disabling"); + flexcop_pid_group_filter(fc, 0, 0x1fe0 * (!onoff)); + flexcop_pid_group_filter_ctrl(fc, onoff); + fc->fullts_streaming_state = onoff; + } + return 0; +} + +int flexcop_pid_feed_control(struct flexcop_device *fc, + struct dvb_demux_feed *dvbdmxfeed, int onoff) +{ + int max_pid_filter = 6; + + max_pid_filter -= 6 * fc->skip_6_hw_pid_filter; + max_pid_filter += 32 * fc->has_32_hw_pid_filter; + + fc->feedcount += onoff ? 1 : -1; /* the number of PIDs/Feed currently requested */ + if (dvbdmxfeed->index >= max_pid_filter) + fc->extra_feedcount += onoff ? 1 : -1; + + /* toggle complete-TS-streaming when: + * - pid_filtering is not enabled and it is the first or last feed requested + * - pid_filtering is enabled, + * - but the number of requested feeds is exceeded + * - or the requested pid is 0x2000 */ + + if (!fc->pid_filtering && fc->feedcount == onoff) + flexcop_toggle_fullts_streaming(fc, onoff); + + if (fc->pid_filtering) { + flexcop_pid_control \ + (fc, dvbdmxfeed->index, dvbdmxfeed->pid, onoff); + + if (fc->extra_feedcount > 0) + flexcop_toggle_fullts_streaming(fc, 1); + else if (dvbdmxfeed->pid == 0x2000) + flexcop_toggle_fullts_streaming(fc, onoff); + else + flexcop_toggle_fullts_streaming(fc, 0); + } + + /* if it was the first or last feed request change the stream-status */ + if (fc->feedcount == onoff) { + flexcop_rcv_data_ctrl(fc, onoff); + if (fc->stream_control) /* device specific stream control */ + fc->stream_control(fc, onoff); + + /* feeding stopped -> reset the flexcop filter*/ + if (onoff == 0) { + flexcop_reset_block_300(fc); + flexcop_hw_filter_init(fc); + } + } + return 0; +} +EXPORT_SYMBOL(flexcop_pid_feed_control); + +void flexcop_hw_filter_init(struct flexcop_device *fc) +{ + int i; + flexcop_ibi_value v; + int max_pid_filter = 6; + + max_pid_filter -= 6 * fc->skip_6_hw_pid_filter; + max_pid_filter += 32 * fc->has_32_hw_pid_filter; + + for (i = 0; i < max_pid_filter; i++) + flexcop_pid_control(fc, i, 0x1fff, 0); + + flexcop_pid_group_filter(fc, 0, 0x1fe0); + flexcop_pid_group_filter_ctrl(fc, 0); + + v = fc->read_ibi_reg(fc, pid_filter_308); + v.pid_filter_308.EMM_filter_4 = 1; + v.pid_filter_308.EMM_filter_6 = 0; + fc->write_ibi_reg(fc, pid_filter_308, v); + + flexcop_null_filter_ctrl(fc, 1); +} diff --git a/drivers/media/common/b2c2/flexcop-i2c.c b/drivers/media/common/b2c2/flexcop-i2c.c new file mode 100644 index 0000000000..1f1eaa8078 --- /dev/null +++ 
b/drivers/media/common/b2c2/flexcop-i2c.c @@ -0,0 +1,280 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III + * flexcop-i2c.c - flexcop internal 2Wire bus (I2C) and dvb i2c initialization + * see flexcop.c for copyright information + */ +#include "flexcop.h" + +#define FC_MAX_I2C_RETRIES 100000 + +static int flexcop_i2c_operation(struct flexcop_device *fc, + flexcop_ibi_value *r100) +{ + int i; + flexcop_ibi_value r; + + r100->tw_sm_c_100.working_start = 1; + deb_i2c("r100 before: %08x\n",r100->raw); + + fc->write_ibi_reg(fc, tw_sm_c_100, ibi_zero); + fc->write_ibi_reg(fc, tw_sm_c_100, *r100); /* initiating i2c operation */ + + for (i = 0; i < FC_MAX_I2C_RETRIES; i++) { + r = fc->read_ibi_reg(fc, tw_sm_c_100); + + if (!r.tw_sm_c_100.no_base_addr_ack_error) { + if (r.tw_sm_c_100.st_done) { + *r100 = r; + deb_i2c("i2c success\n"); + return 0; + } + } else { + deb_i2c("suffering from an i2c ack_error\n"); + return -EREMOTEIO; + } + } + deb_i2c("tried %d times i2c operation, never finished or too many ack errors.\n", + i); + return -EREMOTEIO; +} + +static int flexcop_i2c_read4(struct flexcop_i2c_adapter *i2c, + flexcop_ibi_value r100, u8 *buf) +{ + flexcop_ibi_value r104; + int len = r100.tw_sm_c_100.total_bytes, + /* remember total_bytes is buflen-1 */ + ret; + + /* work-around to have CableStar2 and SkyStar2 rev 2.7 work + * correctly: + * + * the ITD1000 is behind an i2c-gate which closes automatically + * after an i2c-transaction the STV0297 needs 2 consecutive reads + * one with no_base_addr = 0 and one with 1 + * + * those two work-arounds are conflictin: we check for the card + * type, it is set when probing the ITD1000 */ + if (i2c->fc->dev_type == FC_SKY_REV27) + r100.tw_sm_c_100.no_base_addr_ack_error = i2c->no_base_addr; + + ret = flexcop_i2c_operation(i2c->fc, &r100); + if (ret != 0) { + deb_i2c("Retrying operation\n"); + r100.tw_sm_c_100.no_base_addr_ack_error = i2c->no_base_addr; + ret = flexcop_i2c_operation(i2c->fc, &r100); + } + if (ret != 0) { + deb_i2c("read failed. %d\n", ret); + return ret; + } + + buf[0] = r100.tw_sm_c_100.data1_reg; + + if (len > 0) { + r104 = i2c->fc->read_ibi_reg(i2c->fc, tw_sm_c_104); + deb_i2c("read: r100: %08x, r104: %08x\n", r100.raw, r104.raw); + + /* there is at least one more byte, otherwise we wouldn't be here */ + buf[1] = r104.tw_sm_c_104.data2_reg; + if (len > 1) buf[2] = r104.tw_sm_c_104.data3_reg; + if (len > 2) buf[3] = r104.tw_sm_c_104.data4_reg; + } + return 0; +} + +static int flexcop_i2c_write4(struct flexcop_device *fc, + flexcop_ibi_value r100, u8 *buf) +{ + flexcop_ibi_value r104; + int len = r100.tw_sm_c_100.total_bytes; /* remember total_bytes is buflen-1 */ + r104.raw = 0; + + /* there is at least one byte, otherwise we wouldn't be here */ + r100.tw_sm_c_100.data1_reg = buf[0]; + r104.tw_sm_c_104.data2_reg = len > 0 ? buf[1] : 0; + r104.tw_sm_c_104.data3_reg = len > 1 ? buf[2] : 0; + r104.tw_sm_c_104.data4_reg = len > 2 ? 
buf[3] : 0; + + deb_i2c("write: r100: %08x, r104: %08x\n", r100.raw, r104.raw); + + /* write the additional i2c data before doing the actual i2c operation */ + fc->write_ibi_reg(fc, tw_sm_c_104, r104); + return flexcop_i2c_operation(fc, &r100); +} + +int flexcop_i2c_request(struct flexcop_i2c_adapter *i2c, + flexcop_access_op_t op, u8 chipaddr, + u8 start_addr, u8 *buf, u16 size) +{ + int ret; + int len = size; + u8 *p; + u8 addr = start_addr; + + u16 bytes_to_transfer; + flexcop_ibi_value r100; + + deb_i2c("port %d %s(%02x): register %02x, size: %d\n", + i2c->port, + op == FC_READ ? "rd" : "wr", + chipaddr, start_addr, size); + r100.raw = 0; + r100.tw_sm_c_100.chipaddr = chipaddr; + r100.tw_sm_c_100.twoWS_rw = op; + r100.tw_sm_c_100.twoWS_port_reg = i2c->port; + + /* in that case addr is the only value -> + * we write it twice as baseaddr and val0 + * BBTI is doing it like that for ISL6421 at least */ + if (i2c->no_base_addr && len == 0 && op == FC_WRITE) { + buf = &start_addr; + len = 1; + } + + p = buf; + + while (len != 0) { + bytes_to_transfer = len > 4 ? 4 : len; + + r100.tw_sm_c_100.total_bytes = bytes_to_transfer - 1; + r100.tw_sm_c_100.baseaddr = addr; + + if (op == FC_READ) + ret = flexcop_i2c_read4(i2c, r100, p); + else + ret = flexcop_i2c_write4(i2c->fc, r100, p); + + if (ret < 0) + return ret; + + p += bytes_to_transfer; + addr += bytes_to_transfer; + len -= bytes_to_transfer; + } + deb_i2c_dump("port %d %s(%02x): register %02x: %*ph\n", + i2c->port, + op == FC_READ ? "rd" : "wr", + chipaddr, start_addr, size, buf); + + return 0; +} +/* exported for PCI i2c */ +EXPORT_SYMBOL(flexcop_i2c_request); + +/* master xfer callback for demodulator */ +static int flexcop_master_xfer(struct i2c_adapter *i2c_adap, + struct i2c_msg msgs[], int num) +{ + struct flexcop_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap); + int i, ret = 0; + + /* Some drivers use 1 byte or 0 byte reads as probes, which this + * driver doesn't support. These probes will always fail, so this + * hack makes them always succeed. If one knew how, it would of + * course be better to actually do the read. 
*/ + if (num == 1 && msgs[0].flags == I2C_M_RD && msgs[0].len <= 1) + return 1; + + if (mutex_lock_interruptible(&i2c->fc->i2c_mutex)) + return -ERESTARTSYS; + + for (i = 0; i < num; i++) { + /* reading */ + if (i+1 < num && (msgs[i+1].flags == I2C_M_RD)) { + ret = i2c->fc->i2c_request(i2c, FC_READ, msgs[i].addr, + msgs[i].buf[0], msgs[i+1].buf, + msgs[i+1].len); + i++; /* skip the following message */ + } else /* writing */ + ret = i2c->fc->i2c_request(i2c, FC_WRITE, msgs[i].addr, + msgs[i].buf[0], &msgs[i].buf[1], + msgs[i].len - 1); + if (ret < 0) { + deb_i2c("i2c master_xfer failed"); + break; + } + } + + mutex_unlock(&i2c->fc->i2c_mutex); + + if (ret == 0) + ret = num; + return ret; +} + +static u32 flexcop_i2c_func(struct i2c_adapter *adapter) +{ + return I2C_FUNC_I2C; +} + +static struct i2c_algorithm flexcop_algo = { + .master_xfer = flexcop_master_xfer, + .functionality = flexcop_i2c_func, +}; + +int flexcop_i2c_init(struct flexcop_device *fc) +{ + int ret; + mutex_init(&fc->i2c_mutex); + + fc->fc_i2c_adap[0].fc = fc; + fc->fc_i2c_adap[1].fc = fc; + fc->fc_i2c_adap[2].fc = fc; + fc->fc_i2c_adap[0].port = FC_I2C_PORT_DEMOD; + fc->fc_i2c_adap[1].port = FC_I2C_PORT_EEPROM; + fc->fc_i2c_adap[2].port = FC_I2C_PORT_TUNER; + + strscpy(fc->fc_i2c_adap[0].i2c_adap.name, "B2C2 FlexCop I2C to demod", + sizeof(fc->fc_i2c_adap[0].i2c_adap.name)); + strscpy(fc->fc_i2c_adap[1].i2c_adap.name, "B2C2 FlexCop I2C to eeprom", + sizeof(fc->fc_i2c_adap[1].i2c_adap.name)); + strscpy(fc->fc_i2c_adap[2].i2c_adap.name, "B2C2 FlexCop I2C to tuner", + sizeof(fc->fc_i2c_adap[2].i2c_adap.name)); + + i2c_set_adapdata(&fc->fc_i2c_adap[0].i2c_adap, &fc->fc_i2c_adap[0]); + i2c_set_adapdata(&fc->fc_i2c_adap[1].i2c_adap, &fc->fc_i2c_adap[1]); + i2c_set_adapdata(&fc->fc_i2c_adap[2].i2c_adap, &fc->fc_i2c_adap[2]); + + fc->fc_i2c_adap[0].i2c_adap.algo = + fc->fc_i2c_adap[1].i2c_adap.algo = + fc->fc_i2c_adap[2].i2c_adap.algo = &flexcop_algo; + fc->fc_i2c_adap[0].i2c_adap.algo_data = + fc->fc_i2c_adap[1].i2c_adap.algo_data = + fc->fc_i2c_adap[2].i2c_adap.algo_data = NULL; + fc->fc_i2c_adap[0].i2c_adap.dev.parent = + fc->fc_i2c_adap[1].i2c_adap.dev.parent = + fc->fc_i2c_adap[2].i2c_adap.dev.parent = fc->dev; + + ret = i2c_add_adapter(&fc->fc_i2c_adap[0].i2c_adap); + if (ret < 0) + return ret; + + ret = i2c_add_adapter(&fc->fc_i2c_adap[1].i2c_adap); + if (ret < 0) + goto adap_1_failed; + + ret = i2c_add_adapter(&fc->fc_i2c_adap[2].i2c_adap); + if (ret < 0) + goto adap_2_failed; + + fc->init_state |= FC_STATE_I2C_INIT; + return 0; + +adap_2_failed: + i2c_del_adapter(&fc->fc_i2c_adap[1].i2c_adap); +adap_1_failed: + i2c_del_adapter(&fc->fc_i2c_adap[0].i2c_adap); + return ret; +} + +void flexcop_i2c_exit(struct flexcop_device *fc) +{ + if (fc->init_state & FC_STATE_I2C_INIT) { + i2c_del_adapter(&fc->fc_i2c_adap[2].i2c_adap); + i2c_del_adapter(&fc->fc_i2c_adap[1].i2c_adap); + i2c_del_adapter(&fc->fc_i2c_adap[0].i2c_adap); + } + fc->init_state &= ~FC_STATE_I2C_INIT; +} diff --git a/drivers/media/common/b2c2/flexcop-misc.c b/drivers/media/common/b2c2/flexcop-misc.c new file mode 100644 index 0000000000..83d01d3a81 --- /dev/null +++ b/drivers/media/common/b2c2/flexcop-misc.c @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III + * flexcop-misc.c - miscellaneous functions + * see flexcop.c for copyright information + */ +#include "flexcop.h" + +void flexcop_determine_revision(struct flexcop_device *fc) +{ + flexcop_ibi_value v = 
fc->read_ibi_reg(fc,misc_204); + + switch (v.misc_204.Rev_N_sig_revision_hi) { + case 0x2: + deb_info("found a FlexCopII.\n"); + fc->rev = FLEXCOP_II; + break; + case 0x3: + deb_info("found a FlexCopIIb.\n"); + fc->rev = FLEXCOP_IIB; + break; + case 0x0: + deb_info("found a FlexCopIII.\n"); + fc->rev = FLEXCOP_III; + break; + default: + err("unknown FlexCop Revision: %x. Please report this to linux-dvb@linuxtv.org.", + v.misc_204.Rev_N_sig_revision_hi); + break; + } + + if ((fc->has_32_hw_pid_filter = v.misc_204.Rev_N_sig_caps)) + deb_info("this FlexCop has the additional 32 hardware pid filter.\n"); + else + deb_info("this FlexCop has the 6 basic main hardware pid filter.\n"); + /* bus parts have to decide if hw pid filtering is used or not. */ +} + +static const char *flexcop_revision_names[] = { + "Unknown chip", + "FlexCopII", + "FlexCopIIb", + "FlexCopIII", +}; + +static const char *flexcop_device_names[] = { + [FC_UNK] = "Unknown device", + [FC_CABLE] = "Cable2PC/CableStar 2 DVB-C", + [FC_AIR_DVBT] = "Air2PC/AirStar 2 DVB-T", + [FC_AIR_ATSC1] = "Air2PC/AirStar 2 ATSC 1st generation", + [FC_AIR_ATSC2] = "Air2PC/AirStar 2 ATSC 2nd generation", + [FC_AIR_ATSC3] = "Air2PC/AirStar 2 ATSC 3rd generation (HD5000)", + [FC_SKY_REV23] = "Sky2PC/SkyStar 2 DVB-S rev 2.3 (old version)", + [FC_SKY_REV26] = "Sky2PC/SkyStar 2 DVB-S rev 2.6", + [FC_SKY_REV27] = "Sky2PC/SkyStar 2 DVB-S rev 2.7a/u", + [FC_SKY_REV28] = "Sky2PC/SkyStar 2 DVB-S rev 2.8", + [FC_SKYS2_REV33] = "Sky2PC/SkyStar S2 DVB-S/S2 rev 3.3", +}; + +static const char *flexcop_bus_names[] = { + "USB", + "PCI", +}; + +void flexcop_device_name(struct flexcop_device *fc, + const char *prefix, const char *suffix) +{ + info("%s '%s' at the '%s' bus controlled by a '%s' %s", + prefix, flexcop_device_names[fc->dev_type], + flexcop_bus_names[fc->bus_type], + flexcop_revision_names[fc->rev], suffix); +} + +void flexcop_dump_reg(struct flexcop_device *fc, + flexcop_ibi_register reg, int num) +{ + flexcop_ibi_value v; + int i; + for (i = 0; i < num; i++) { + v = fc->read_ibi_reg(fc, reg+4*i); + deb_rdump("0x%03x: %08x, ", reg+4*i, v.raw); + } + deb_rdump("\n"); +} +EXPORT_SYMBOL(flexcop_dump_reg); diff --git a/drivers/media/common/b2c2/flexcop-reg.h b/drivers/media/common/b2c2/flexcop-reg.h new file mode 100644 index 0000000000..dd7c962db5 --- /dev/null +++ b/drivers/media/common/b2c2/flexcop-reg.h @@ -0,0 +1,168 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III + * flexcop-reg.h - register abstraction for FlexCopII, FlexCopIIb and FlexCopIII + * see flexcop.c for copyright information + */ +#ifndef __FLEXCOP_REG_H__ +#define __FLEXCOP_REG_H__ + +typedef enum { + FLEXCOP_UNK = 0, + FLEXCOP_II, + FLEXCOP_IIB, + FLEXCOP_III, +} flexcop_revision_t; + +typedef enum { + FC_UNK = 0, + FC_CABLE, + FC_AIR_DVBT, + FC_AIR_ATSC1, + FC_AIR_ATSC2, + FC_AIR_ATSC3, + FC_SKY_REV23, + FC_SKY_REV26, + FC_SKY_REV27, + FC_SKY_REV28, + FC_SKYS2_REV33, +} flexcop_device_type_t; + +typedef enum { + FC_USB = 0, + FC_PCI, +} flexcop_bus_t; + +/* FlexCop IBI Registers */ +#if defined(__LITTLE_ENDIAN) +#include "flexcop_ibi_value_le.h" +#else +#if defined(__BIG_ENDIAN) +#include "flexcop_ibi_value_be.h" +#else +#error no endian defined +#endif +#endif + +#define fc_data_Tag_ID_DVB 0x3e +#define fc_data_Tag_ID_ATSC 0x3f +#define fc_data_Tag_ID_IDSB 0x8b + +#define fc_key_code_default 0x1 +#define fc_key_code_even 0x2 +#define fc_key_code_odd 0x3 + +extern flexcop_ibi_value ibi_zero; + +typedef enum { + 
FC_I2C_PORT_DEMOD = 1, + FC_I2C_PORT_EEPROM = 2, + FC_I2C_PORT_TUNER = 3, +} flexcop_i2c_port_t; + +typedef enum { + FC_WRITE = 0, + FC_READ = 1, +} flexcop_access_op_t; + +typedef enum { + FC_SRAM_DEST_NET = 1, + FC_SRAM_DEST_CAI = 2, + FC_SRAM_DEST_CAO = 4, + FC_SRAM_DEST_MEDIA = 8 +} flexcop_sram_dest_t; + +typedef enum { + FC_SRAM_DEST_TARGET_WAN_USB = 0, + FC_SRAM_DEST_TARGET_DMA1 = 1, + FC_SRAM_DEST_TARGET_DMA2 = 2, + FC_SRAM_DEST_TARGET_FC3_CA = 3 +} flexcop_sram_dest_target_t; + +typedef enum { + FC_SRAM_2_32KB = 0, /* 64KB */ + FC_SRAM_1_32KB = 1, /* 32KB - default for FCII */ + FC_SRAM_1_128KB = 2, /* 128KB */ + FC_SRAM_1_48KB = 3, /* 48KB - default for FCIII */ +} flexcop_sram_type_t; + +typedef enum { + FC_WAN_SPEED_4MBITS = 0, + FC_WAN_SPEED_8MBITS = 1, + FC_WAN_SPEED_12MBITS = 2, + FC_WAN_SPEED_16MBITS = 3, +} flexcop_wan_speed_t; + +typedef enum { + FC_DMA_1 = 1, + FC_DMA_2 = 2, +} flexcop_dma_index_t; + +typedef enum { + FC_DMA_SUBADDR_0 = 1, + FC_DMA_SUBADDR_1 = 2, +} flexcop_dma_addr_index_t; + +/* names of the particular registers */ +typedef enum { + dma1_000 = 0x000, + dma1_004 = 0x004, + dma1_008 = 0x008, + dma1_00c = 0x00c, + dma2_010 = 0x010, + dma2_014 = 0x014, + dma2_018 = 0x018, + dma2_01c = 0x01c, + + tw_sm_c_100 = 0x100, + tw_sm_c_104 = 0x104, + tw_sm_c_108 = 0x108, + tw_sm_c_10c = 0x10c, + tw_sm_c_110 = 0x110, + + lnb_switch_freq_200 = 0x200, + misc_204 = 0x204, + ctrl_208 = 0x208, + irq_20c = 0x20c, + sw_reset_210 = 0x210, + misc_214 = 0x214, + mbox_v8_to_host_218 = 0x218, + mbox_host_to_v8_21c = 0x21c, + + pid_filter_300 = 0x300, + pid_filter_304 = 0x304, + pid_filter_308 = 0x308, + pid_filter_30c = 0x30c, + index_reg_310 = 0x310, + pid_n_reg_314 = 0x314, + mac_low_reg_318 = 0x318, + mac_high_reg_31c = 0x31c, + + data_tag_400 = 0x400, + card_id_408 = 0x408, + card_id_40c = 0x40c, + mac_address_418 = 0x418, + mac_address_41c = 0x41c, + + ci_600 = 0x600, + pi_604 = 0x604, + pi_608 = 0x608, + dvb_reg_60c = 0x60c, + + sram_ctrl_reg_700 = 0x700, + net_buf_reg_704 = 0x704, + cai_buf_reg_708 = 0x708, + cao_buf_reg_70c = 0x70c, + media_buf_reg_710 = 0x710, + sram_dest_reg_714 = 0x714, + net_buf_reg_718 = 0x718, + wan_ctrl_reg_71c = 0x71c, +} flexcop_ibi_register; + +#define flexcop_set_ibi_value(reg,attr,val) { \ + flexcop_ibi_value v = fc->read_ibi_reg(fc,reg); \ + v.reg.attr = val; \ + fc->write_ibi_reg(fc,reg,v); \ +} + +#endif diff --git a/drivers/media/common/b2c2/flexcop-sram.c b/drivers/media/common/b2c2/flexcop-sram.c new file mode 100644 index 0000000000..d97962eb01 --- /dev/null +++ b/drivers/media/common/b2c2/flexcop-sram.c @@ -0,0 +1,364 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III + * flexcop-sram.c - functions for controlling the SRAM + * see flexcop.c for copyright information + */ +#include "flexcop.h" + +static void flexcop_sram_set_chip(struct flexcop_device *fc, + flexcop_sram_type_t type) +{ + flexcop_set_ibi_value(wan_ctrl_reg_71c, sram_chip, type); +} + +int flexcop_sram_init(struct flexcop_device *fc) +{ + switch (fc->rev) { + case FLEXCOP_II: + case FLEXCOP_IIB: + flexcop_sram_set_chip(fc, FC_SRAM_1_32KB); + break; + case FLEXCOP_III: + flexcop_sram_set_chip(fc, FC_SRAM_1_48KB); + break; + default: + return -EINVAL; + } + return 0; +} + +int flexcop_sram_set_dest(struct flexcop_device *fc, flexcop_sram_dest_t dest, + flexcop_sram_dest_target_t target) +{ + flexcop_ibi_value v; + v = fc->read_ibi_reg(fc, sram_dest_reg_714); + + if (fc->rev != FLEXCOP_III && target == 
FC_SRAM_DEST_TARGET_FC3_CA) { + err("SRAM destination target not available on FlexCopII(b)\n"); + return -EINVAL; + } + deb_sram("sram dest: %x target: %x\n", dest, target); + + if (dest & FC_SRAM_DEST_NET) + v.sram_dest_reg_714.NET_Dest = target; + if (dest & FC_SRAM_DEST_CAI) + v.sram_dest_reg_714.CAI_Dest = target; + if (dest & FC_SRAM_DEST_CAO) + v.sram_dest_reg_714.CAO_Dest = target; + if (dest & FC_SRAM_DEST_MEDIA) + v.sram_dest_reg_714.MEDIA_Dest = target; + + fc->write_ibi_reg(fc,sram_dest_reg_714,v); + udelay(1000); /* TODO: is this delay really necessary? */ + + return 0; +} +EXPORT_SYMBOL(flexcop_sram_set_dest); + +void flexcop_wan_set_speed(struct flexcop_device *fc, flexcop_wan_speed_t s) +{ + flexcop_set_ibi_value(wan_ctrl_reg_71c,wan_speed_sig,s); +} +EXPORT_SYMBOL(flexcop_wan_set_speed); + +void flexcop_sram_ctrl(struct flexcop_device *fc, int usb_wan, int sramdma, int maximumfill) +{ + flexcop_ibi_value v = fc->read_ibi_reg(fc,sram_dest_reg_714); + v.sram_dest_reg_714.ctrl_usb_wan = usb_wan; + v.sram_dest_reg_714.ctrl_sramdma = sramdma; + v.sram_dest_reg_714.ctrl_maximumfill = maximumfill; + fc->write_ibi_reg(fc,sram_dest_reg_714,v); +} +EXPORT_SYMBOL(flexcop_sram_ctrl); + +#if 0 +static void flexcop_sram_write(struct adapter *adapter, u32 bank, u32 addr, u8 *buf, u32 len) +{ + int i, retries; + u32 command; + + for (i = 0; i < len; i++) { + command = bank | addr | 0x04000000 | (*buf << 0x10); + + retries = 2; + + while (((read_reg_dw(adapter, 0x700) & 0x80000000) != 0) && (retries > 0)) { + mdelay(1); + retries--; + } + + if (retries == 0) + printk("%s: SRAM timeout\n", __func__); + + write_reg_dw(adapter, 0x700, command); + + buf++; + addr++; + } +} + +static void flex_sram_read(struct adapter *adapter, u32 bank, u32 addr, u8 *buf, u32 len) +{ + int i, retries; + u32 command, value; + + for (i = 0; i < len; i++) { + command = bank | addr | 0x04008000; + + retries = 10000; + + while (((read_reg_dw(adapter, 0x700) & 0x80000000) != 0) && (retries > 0)) { + mdelay(1); + retries--; + } + + if (retries == 0) + printk("%s: SRAM timeout\n", __func__); + + write_reg_dw(adapter, 0x700, command); + + retries = 10000; + + while (((read_reg_dw(adapter, 0x700) & 0x80000000) != 0) && (retries > 0)) { + mdelay(1); + retries--; + } + + if (retries == 0) + printk("%s: SRAM timeout\n", __func__); + + value = read_reg_dw(adapter, 0x700) >> 0x10; + + *buf = (value & 0xff); + + addr++; + buf++; + } +} + +static void sram_write_chunk(struct adapter *adapter, u32 addr, u8 *buf, u16 len) +{ + u32 bank; + + bank = 0; + + if (adapter->dw_sram_type == 0x20000) { + bank = (addr & 0x18000) << 0x0d; + } + + if (adapter->dw_sram_type == 0x00000) { + if ((addr >> 0x0f) == 0) + bank = 0x20000000; + else + bank = 0x10000000; + } + flex_sram_write(adapter, bank, addr & 0x7fff, buf, len); +} + +static void sram_read_chunk(struct adapter *adapter, u32 addr, u8 *buf, u16 len) +{ + u32 bank; + bank = 0; + + if (adapter->dw_sram_type == 0x20000) { + bank = (addr & 0x18000) << 0x0d; + } + + if (adapter->dw_sram_type == 0x00000) { + if ((addr >> 0x0f) == 0) + bank = 0x20000000; + else + bank = 0x10000000; + } + flex_sram_read(adapter, bank, addr & 0x7fff, buf, len); +} + +static void sram_read(struct adapter *adapter, u32 addr, u8 *buf, u32 len) +{ + u32 length; + while (len != 0) { + length = len; + /* check if the address range belongs to the same + * 32K memory chip. 
If not, the data is read + * from one chip at a time */ + if ((addr >> 0x0f) != ((addr + len - 1) >> 0x0f)) { + length = (((addr >> 0x0f) + 1) << 0x0f) - addr; + } + + sram_read_chunk(adapter, addr, buf, length); + addr = addr + length; + buf = buf + length; + len = len - length; + } +} + +static void sram_write(struct adapter *adapter, u32 addr, u8 *buf, u32 len) +{ + u32 length; + while (len != 0) { + length = len; + + /* check if the address range belongs to the same + * 32K memory chip. If not, the data is + * written to one chip at a time */ + if ((addr >> 0x0f) != ((addr + len - 1) >> 0x0f)) { + length = (((addr >> 0x0f) + 1) << 0x0f) - addr; + } + + sram_write_chunk(adapter, addr, buf, length); + addr = addr + length; + buf = buf + length; + len = len - length; + } +} + +static void sram_set_size(struct adapter *adapter, u32 mask) +{ + write_reg_dw(adapter, 0x71c, + (mask | (~0x30000 & read_reg_dw(adapter, 0x71c)))); +} + +static void sram_init(struct adapter *adapter) +{ + u32 tmp; + tmp = read_reg_dw(adapter, 0x71c); + write_reg_dw(adapter, 0x71c, 1); + + if (read_reg_dw(adapter, 0x71c) != 0) { + write_reg_dw(adapter, 0x71c, tmp); + adapter->dw_sram_type = tmp & 0x30000; + ddprintk("%s: dw_sram_type = %x\n", __func__, adapter->dw_sram_type); + } else { + adapter->dw_sram_type = 0x10000; + ddprintk("%s: dw_sram_type = %x\n", __func__, adapter->dw_sram_type); + } +} + +static int sram_test_location(struct adapter *adapter, u32 mask, u32 addr) +{ + u8 tmp1, tmp2; + dprintk("%s: mask = %x, addr = %x\n", __func__, mask, addr); + + sram_set_size(adapter, mask); + sram_init(adapter); + + tmp2 = 0xa5; + tmp1 = 0x4f; + + sram_write(adapter, addr, &tmp2, 1); + sram_write(adapter, addr + 4, &tmp1, 1); + + tmp2 = 0; + mdelay(20); + + sram_read(adapter, addr, &tmp2, 1); + sram_read(adapter, addr, &tmp2, 1); + + dprintk("%s: wrote 0xa5, read 0x%2x\n", __func__, tmp2); + + if (tmp2 != 0xa5) + return 0; + + tmp2 = 0x5a; + tmp1 = 0xf4; + + sram_write(adapter, addr, &tmp2, 1); + sram_write(adapter, addr + 4, &tmp1, 1); + + tmp2 = 0; + mdelay(20); + + sram_read(adapter, addr, &tmp2, 1); + sram_read(adapter, addr, &tmp2, 1); + + dprintk("%s: wrote 0x5a, read 0x%2x\n", __func__, tmp2); + + if (tmp2 != 0x5a) + return 0; + return 1; +} + +static u32 sram_length(struct adapter *adapter) +{ + if (adapter->dw_sram_type == 0x10000) + return 32768; /* 32K */ + if (adapter->dw_sram_type == 0x00000) + return 65536; /* 64K */ + if (adapter->dw_sram_type == 0x20000) + return 131072; /* 128K */ + return 32768; /* 32K */ +} + +/* FlexcopII can work with 32K, 64K or 128K of external SRAM memory. + - for 128K there are 4x32K chips at bank 0,1,2,3. + - for 64K there are 2x32K chips at bank 1,2. + - for 32K there is one 32K chip at bank 0. + + FlexCop works only with one bank at a time. The bank is selected + by bits 28-29 of the 0x700 register. + + bank 0 covers addresses 0x00000-0x07fff + bank 1 covers addresses 0x08000-0x0ffff + bank 2 covers addresses 0x10000-0x17fff + bank 3 covers addresses 0x18000-0x1ffff */ + +static int flexcop_sram_detect(struct flexcop_device *fc) +{ + flexcop_ibi_value r208, r71c_0, vr71c_1; + r208 = fc->read_ibi_reg(fc, ctrl_208); + fc->write_ibi_reg(fc, ctrl_208, ibi_zero); + + r71c_0 = fc->read_ibi_reg(fc, wan_ctrl_reg_71c); + write_reg_dw(adapter, 0x71c, 1); + tmp3 = read_reg_dw(adapter, 0x71c); + dprintk("%s: tmp3 = %x\n", __func__, tmp3); + write_reg_dw(adapter, 0x71c, tmp2); + + // check for internal SRAM ??? 
+ tmp3--; + if (tmp3 != 0) { + sram_set_size(adapter, 0x10000); + sram_init(adapter); + write_reg_dw(adapter, 0x208, tmp); + dprintk("%s: sram size = 32K\n", __func__); + return 32; + } + + if (sram_test_location(adapter, 0x20000, 0x18000) != 0) { + sram_set_size(adapter, 0x20000); + sram_init(adapter); + write_reg_dw(adapter, 0x208, tmp); + dprintk("%s: sram size = 128K\n", __func__); + return 128; + } + + if (sram_test_location(adapter, 0x00000, 0x10000) != 0) { + sram_set_size(adapter, 0x00000); + sram_init(adapter); + write_reg_dw(adapter, 0x208, tmp); + dprintk("%s: sram size = 64K\n", __func__); + return 64; + } + + if (sram_test_location(adapter, 0x10000, 0x00000) != 0) { + sram_set_size(adapter, 0x10000); + sram_init(adapter); + write_reg_dw(adapter, 0x208, tmp); + dprintk("%s: sram size = 32K\n", __func__); + return 32; + } + + sram_set_size(adapter, 0x10000); + sram_init(adapter); + write_reg_dw(adapter, 0x208, tmp); + dprintk("%s: SRAM detection failed. Set to 32K \n", __func__); + return 0; +} + +static void sll_detect_sram_size(struct adapter *adapter) +{ + sram_detect_for_flex2(adapter); +} + +#endif diff --git a/drivers/media/common/b2c2/flexcop.c b/drivers/media/common/b2c2/flexcop.c new file mode 100644 index 0000000000..e7a88a2d24 --- /dev/null +++ b/drivers/media/common/b2c2/flexcop.c @@ -0,0 +1,311 @@ +// SPDX-License-Identifier: LGPL-2.1-or-later +/* + * Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III + * flexcop.c - main module part + * Copyright (C) 2004-9 Patrick Boettcher + * based on skystar2-driver Copyright (C) 2003 Vadim Catana, skystar@moldova.cc + * + * Acknowledgements: + * John Jurrius from BBTI, Inc. for extensive support + * with code examples and data books + * Bjarne Steinsbo, bjarne at steinsbo.com (some ideas for rewriting) + * + * Contributions to the skystar2-driver have been done by + * Vincenzo Di Massa, hawk.it at tiscalinet.it (several DiSEqC fixes) + * Roberto Ragusa, r.ragusa at libero.it (polishing, restyling the code) + * Uwe Bugla, uwe.bugla at gmx.de (doing tests, restyling code, writing docu) + * Niklas Peinecke, peinecke at gdv.uni-hannover.de (hardware pid/mac + * filtering) + */ + +#include "flexcop.h" + +#define DRIVER_NAME "B2C2 FlexcopII/II(b)/III digital TV receiver chip" +#define DRIVER_AUTHOR "Patrick Boettcher demux->priv; + return flexcop_pid_feed_control(fc, dvbdmxfeed, 1); +} + +static int flexcop_dvb_stop_feed(struct dvb_demux_feed *dvbdmxfeed) +{ + struct flexcop_device *fc = dvbdmxfeed->demux->priv; + return flexcop_pid_feed_control(fc, dvbdmxfeed, 0); +} + +static int flexcop_dvb_init(struct flexcop_device *fc) +{ + int ret = dvb_register_adapter(&fc->dvb_adapter, + "FlexCop Digital TV device", fc->owner, + fc->dev, adapter_nr); + if (ret < 0) { + err("error registering DVB adapter"); + return ret; + } + fc->dvb_adapter.priv = fc; + + fc->demux.dmx.capabilities = (DMX_TS_FILTERING | DMX_SECTION_FILTERING + | DMX_MEMORY_BASED_FILTERING); + fc->demux.priv = fc; + fc->demux.filternum = fc->demux.feednum = FC_MAX_FEED; + fc->demux.start_feed = flexcop_dvb_start_feed; + fc->demux.stop_feed = flexcop_dvb_stop_feed; + fc->demux.write_to_decoder = NULL; + + ret = dvb_dmx_init(&fc->demux); + if (ret < 0) { + err("dvb_dmx failed: error %d", ret); + goto err_dmx; + } + + fc->hw_frontend.source = DMX_FRONTEND_0; + + fc->dmxdev.filternum = fc->demux.feednum; + fc->dmxdev.demux = &fc->demux.dmx; + fc->dmxdev.capabilities = 0; + ret = dvb_dmxdev_init(&fc->dmxdev, &fc->dvb_adapter); + if (ret < 0) { + 
err("dvb_dmxdev_init failed: error %d", ret); + goto err_dmx_dev; + } + + ret = fc->demux.dmx.add_frontend(&fc->demux.dmx, &fc->hw_frontend); + if (ret < 0) { + err("adding hw_frontend to dmx failed: error %d", ret); + goto err_dmx_add_hw_frontend; + } + + fc->mem_frontend.source = DMX_MEMORY_FE; + ret = fc->demux.dmx.add_frontend(&fc->demux.dmx, &fc->mem_frontend); + if (ret < 0) { + err("adding mem_frontend to dmx failed: error %d", ret); + goto err_dmx_add_mem_frontend; + } + + ret = fc->demux.dmx.connect_frontend(&fc->demux.dmx, &fc->hw_frontend); + if (ret < 0) { + err("connect frontend failed: error %d", ret); + goto err_connect_frontend; + } + + ret = dvb_net_init(&fc->dvb_adapter, &fc->dvbnet, &fc->demux.dmx); + if (ret < 0) { + err("dvb_net_init failed: error %d", ret); + goto err_net; + } + + fc->init_state |= FC_STATE_DVB_INIT; + return 0; + +err_net: + fc->demux.dmx.disconnect_frontend(&fc->demux.dmx); +err_connect_frontend: + fc->demux.dmx.remove_frontend(&fc->demux.dmx, &fc->mem_frontend); +err_dmx_add_mem_frontend: + fc->demux.dmx.remove_frontend(&fc->demux.dmx, &fc->hw_frontend); +err_dmx_add_hw_frontend: + dvb_dmxdev_release(&fc->dmxdev); +err_dmx_dev: + dvb_dmx_release(&fc->demux); +err_dmx: + dvb_unregister_adapter(&fc->dvb_adapter); + return ret; +} + +static void flexcop_dvb_exit(struct flexcop_device *fc) +{ + if (fc->init_state & FC_STATE_DVB_INIT) { + dvb_net_release(&fc->dvbnet); + + fc->demux.dmx.close(&fc->demux.dmx); + fc->demux.dmx.remove_frontend(&fc->demux.dmx, + &fc->mem_frontend); + fc->demux.dmx.remove_frontend(&fc->demux.dmx, + &fc->hw_frontend); + dvb_dmxdev_release(&fc->dmxdev); + dvb_dmx_release(&fc->demux); + dvb_unregister_adapter(&fc->dvb_adapter); + deb_info("deinitialized dvb stuff\n"); + } + fc->init_state &= ~FC_STATE_DVB_INIT; +} + +/* these methods are necessary to achieve the long-term-goal of hiding the + * struct flexcop_device from the bus-parts */ +void flexcop_pass_dmx_data(struct flexcop_device *fc, u8 *buf, u32 len) +{ + dvb_dmx_swfilter(&fc->demux, buf, len); +} +EXPORT_SYMBOL(flexcop_pass_dmx_data); + +void flexcop_pass_dmx_packets(struct flexcop_device *fc, u8 *buf, u32 no) +{ + dvb_dmx_swfilter_packets(&fc->demux, buf, no); +} +EXPORT_SYMBOL(flexcop_pass_dmx_packets); + +static void flexcop_reset(struct flexcop_device *fc) +{ + flexcop_ibi_value v210, v204; + + /* reset the flexcop itself */ + fc->write_ibi_reg(fc,ctrl_208,ibi_zero); + + v210.raw = 0; + v210.sw_reset_210.reset_block_000 = 1; + v210.sw_reset_210.reset_block_100 = 1; + v210.sw_reset_210.reset_block_200 = 1; + v210.sw_reset_210.reset_block_300 = 1; + v210.sw_reset_210.reset_block_400 = 1; + v210.sw_reset_210.reset_block_500 = 1; + v210.sw_reset_210.reset_block_600 = 1; + v210.sw_reset_210.reset_block_700 = 1; + v210.sw_reset_210.Block_reset_enable = 0xb2; + v210.sw_reset_210.Special_controls = 0xc259; + fc->write_ibi_reg(fc,sw_reset_210,v210); + msleep(1); + + /* reset the peripheral devices */ + + v204 = fc->read_ibi_reg(fc,misc_204); + v204.misc_204.Per_reset_sig = 0; + fc->write_ibi_reg(fc,misc_204,v204); + msleep(1); + v204.misc_204.Per_reset_sig = 1; + fc->write_ibi_reg(fc,misc_204,v204); +} + +void flexcop_reset_block_300(struct flexcop_device *fc) +{ + flexcop_ibi_value v208_save = fc->read_ibi_reg(fc, ctrl_208), + v210 = fc->read_ibi_reg(fc, sw_reset_210); + + deb_rdump("208: %08x, 210: %08x\n", v208_save.raw, v210.raw); + fc->write_ibi_reg(fc,ctrl_208,ibi_zero); + + v210.sw_reset_210.reset_block_300 = 1; + v210.sw_reset_210.Block_reset_enable = 0xb2; + + 
fc->write_ibi_reg(fc,sw_reset_210,v210); + fc->write_ibi_reg(fc,ctrl_208,v208_save); +} + +struct flexcop_device *flexcop_device_kmalloc(size_t bus_specific_len) +{ + void *bus; + struct flexcop_device *fc = kzalloc(sizeof(struct flexcop_device), + GFP_KERNEL); + if (!fc) { + err("no memory"); + return NULL; + } + + bus = kzalloc(bus_specific_len, GFP_KERNEL); + if (!bus) { + err("no memory"); + kfree(fc); + return NULL; + } + + fc->bus_specific = bus; + + return fc; +} +EXPORT_SYMBOL(flexcop_device_kmalloc); + +void flexcop_device_kfree(struct flexcop_device *fc) +{ + kfree(fc->bus_specific); + kfree(fc); +} +EXPORT_SYMBOL(flexcop_device_kfree); + +int flexcop_device_initialize(struct flexcop_device *fc) +{ + int ret; + ibi_zero.raw = 0; + + flexcop_reset(fc); + flexcop_determine_revision(fc); + flexcop_sram_init(fc); + flexcop_hw_filter_init(fc); + flexcop_smc_ctrl(fc, 0); + + ret = flexcop_dvb_init(fc); + if (ret) + goto error; + + /* i2c has to be done before doing EEProm stuff - + * because the EEProm is accessed via i2c */ + ret = flexcop_i2c_init(fc); + if (ret) + goto error; + + /* do the MAC address reading after initializing the dvb_adapter */ + if (fc->get_mac_addr(fc, 0) == 0) { + u8 *b = fc->dvb_adapter.proposed_mac; + info("MAC address = %pM", b); + flexcop_set_mac_filter(fc,b); + flexcop_mac_filter_ctrl(fc,1); + } else + warn("reading of MAC address failed.\n"); + + ret = flexcop_frontend_init(fc); + if (ret) + goto error; + + flexcop_device_name(fc,"initialization of","complete"); + return 0; + +error: + flexcop_device_exit(fc); + return ret; +} +EXPORT_SYMBOL(flexcop_device_initialize); + +void flexcop_device_exit(struct flexcop_device *fc) +{ + flexcop_frontend_exit(fc); + flexcop_i2c_exit(fc); + flexcop_dvb_exit(fc); +} +EXPORT_SYMBOL(flexcop_device_exit); + +static int flexcop_module_init(void) +{ + info(DRIVER_NAME " loaded successfully"); + return 0; +} + +static void flexcop_module_cleanup(void) +{ + info(DRIVER_NAME " unloaded successfully"); +} + +module_init(flexcop_module_init); +module_exit(flexcop_module_cleanup); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_NAME); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/common/b2c2/flexcop.h b/drivers/media/common/b2c2/flexcop.h new file mode 100644 index 0000000000..05e595f896 --- /dev/null +++ b/drivers/media/common/b2c2/flexcop.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III + * flexcop.h - private header file for all flexcop-chip-source files + * see flexcop.c for copyright information + */ +#ifndef __FLEXCOP_H__ +#define __FLEXCOP_H__ + +#define FC_LOG_PREFIX "b2c2-flexcop" +#include "flexcop-common.h" + +extern int b2c2_flexcop_debug; + +/* debug */ +#ifdef CONFIG_DVB_B2C2_FLEXCOP_DEBUG +#define dprintk(level, args...) \ + do { if ((b2c2_flexcop_debug & (level))) printk(args); } while (0) +#else +#define dprintk(level, args...) no_printk(args) +#endif + +#define deb_info(args...) dprintk(0x01, args) +#define deb_tuner(args...) dprintk(0x02, args) +#define deb_i2c(args...) dprintk(0x04, args) +#define deb_ts(args...) dprintk(0x08, args) +#define deb_sram(args...) dprintk(0x10, args) +#define deb_rdump(args...) dprintk(0x20, args) +#define deb_i2c_dump(args...) 
dprintk(0x40, args) + +#endif diff --git a/drivers/media/common/b2c2/flexcop_ibi_value_be.h b/drivers/media/common/b2c2/flexcop_ibi_value_be.h new file mode 100644 index 0000000000..c97a0d6d7b --- /dev/null +++ b/drivers/media/common/b2c2/flexcop_ibi_value_be.h @@ -0,0 +1,456 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III + * register descriptions + * see flexcop.c for copyright information + */ +/* This file is automatically generated, do not edit things here. */ +#ifndef __FLEXCOP_IBI_VALUE_INCLUDED__ +#define __FLEXCOP_IBI_VALUE_INCLUDED__ + +typedef union { + u32 raw; + + struct { + u32 dma_address0 :30; + u32 dma_0No_update : 1; + u32 dma_0start : 1; + } dma_0x0; + + struct { + u32 dma_addr_size :24; + u32 DMA_maxpackets : 8; + } dma_0x4_remap; + + struct { + u32 dma_addr_size :24; + u32 unused : 1; + u32 dma1timer : 7; + } dma_0x4_read; + + struct { + u32 dma_addr_size :24; + u32 dmatimer : 7; + u32 unused : 1; + } dma_0x4_write; + + struct { + u32 dma_cur_addr :30; + u32 unused : 2; + } dma_0x8; + + struct { + u32 dma_address1 :30; + u32 remap_enable : 1; + u32 dma_1start : 1; + } dma_0xc; + + struct { + u32 st_done : 1; + u32 no_base_addr_ack_error : 1; + u32 twoWS_port_reg : 2; + u32 total_bytes : 2; + u32 twoWS_rw : 1; + u32 working_start : 1; + u32 data1_reg : 8; + u32 baseaddr : 8; + u32 reserved1 : 1; + u32 chipaddr : 7; + } tw_sm_c_100; + + struct { + u32 unused : 6; + u32 force_stop : 1; + u32 exlicit_stops : 1; + u32 data4_reg : 8; + u32 data3_reg : 8; + u32 data2_reg : 8; + } tw_sm_c_104; + + struct { + u32 reserved2 :19; + u32 tlo1 : 5; + u32 reserved1 : 2; + u32 thi1 : 6; + } tw_sm_c_108; + + struct { + u32 reserved2 :19; + u32 tlo1 : 5; + u32 reserved1 : 2; + u32 thi1 : 6; + } tw_sm_c_10c; + + struct { + u32 reserved2 :19; + u32 tlo1 : 5; + u32 reserved1 : 2; + u32 thi1 : 6; + } tw_sm_c_110; + + struct { + u32 LNB_CTLPrescaler_sig : 2; + u32 LNB_CTLLowCount_sig :15; + u32 LNB_CTLHighCount_sig :15; + } lnb_switch_freq_200; + + struct { + u32 Rev_N_sig_reserved2 : 1; + u32 Rev_N_sig_caps : 1; + u32 Rev_N_sig_reserved1 : 2; + u32 Rev_N_sig_revision_hi : 4; + u32 reserved :20; + u32 Per_reset_sig : 1; + u32 LNB_L_H_sig : 1; + u32 ACPI3_sig : 1; + u32 ACPI1_sig : 1; + } misc_204; + + struct { + u32 unused : 9; + u32 Mailbox_from_V8_Enable_sig : 1; + u32 DMA2_Size_IRQ_Enable_sig : 1; + u32 DMA1_Size_IRQ_Enable_sig : 1; + u32 DMA2_Timer_Enable_sig : 1; + u32 DMA2_IRQ_Enable_sig : 1; + u32 DMA1_Timer_Enable_sig : 1; + u32 DMA1_IRQ_Enable_sig : 1; + u32 Rcv_Data_sig : 1; + u32 MAC_filter_Mode_sig : 1; + u32 Multi2_Enable_sig : 1; + u32 Per_CA_Enable_sig : 1; + u32 SMC_Enable_sig : 1; + u32 CA_Enable_sig : 1; + u32 WAN_CA_Enable_sig : 1; + u32 WAN_Enable_sig : 1; + u32 Mask_filter_sig : 1; + u32 Null_filter_sig : 1; + u32 ECM_filter_sig : 1; + u32 EMM_filter_sig : 1; + u32 PMT_filter_sig : 1; + u32 PCR_filter_sig : 1; + u32 Stream2_filter_sig : 1; + u32 Stream1_filter_sig : 1; + } ctrl_208; + + struct { + u32 reserved :21; + u32 Transport_Error : 1; + u32 LLC_SNAP_FLAG_set : 1; + u32 Continuity_error_flag : 1; + u32 Data_receiver_error : 1; + u32 Mailbox_from_V8_Status_sig : 1; + u32 DMA2_Size_IRQ_Status : 1; + u32 DMA1_Size_IRQ_Status : 1; + u32 DMA2_Timer_Status : 1; + u32 DMA2_IRQ_Status : 1; + u32 DMA1_Timer_Status : 1; + u32 DMA1_IRQ_Status : 1; + } irq_20c; + + struct { + u32 Special_controls :16; + u32 Block_reset_enable : 8; + u32 reset_block_700 : 1; + u32 reset_block_600 : 1; + u32 reset_block_500 : 
1; + u32 reset_block_400 : 1; + u32 reset_block_300 : 1; + u32 reset_block_200 : 1; + u32 reset_block_100 : 1; + u32 reset_block_000 : 1; + } sw_reset_210; + + struct { + u32 unused2 :20; + u32 polarity_PS_ERR_sig : 1; + u32 polarity_PS_SYNC_sig : 1; + u32 polarity_PS_VALID_sig : 1; + u32 polarity_PS_CLK_sig : 1; + u32 unused1 : 3; + u32 s2p_sel_sig : 1; + u32 section_pkg_enable_sig : 1; + u32 halt_V8_sig : 1; + u32 v2WS_oe_sig : 1; + u32 vuart_oe_sig : 1; + } misc_214; + + struct { + u32 Mailbox_from_V8 :32; + } mbox_v8_to_host_218; + + struct { + u32 sysramaccess_busmuster : 1; + u32 sysramaccess_write : 1; + u32 unused : 7; + u32 sysramaccess_addr :15; + u32 sysramaccess_data : 8; + } mbox_host_to_v8_21c; + + struct { + u32 debug_fifo_problem : 1; + u32 debug_flag_write_status00 : 1; + u32 Stream2_trans : 1; + u32 Stream2_PID :13; + u32 debug_flag_pid_saved : 1; + u32 MAC_Multicast_filter : 1; + u32 Stream1_trans : 1; + u32 Stream1_PID :13; + } pid_filter_300; + + struct { + u32 reserved : 2; + u32 PMT_trans : 1; + u32 PMT_PID :13; + u32 debug_overrun2 : 1; + u32 debug_overrun3 : 1; + u32 PCR_trans : 1; + u32 PCR_PID :13; + } pid_filter_304; + + struct { + u32 reserved : 2; + u32 ECM_trans : 1; + u32 ECM_PID :13; + u32 EMM_filter_6 : 1; + u32 EMM_filter_4 : 1; + u32 EMM_trans : 1; + u32 EMM_PID :13; + } pid_filter_308; + + struct { + u32 unused2 : 3; + u32 Group_mask :13; + u32 unused1 : 2; + u32 Group_trans : 1; + u32 Group_PID :13; + } pid_filter_30c_ext_ind_0_7; + + struct { + u32 unused :15; + u32 net_master_read :17; + } pid_filter_30c_ext_ind_1; + + struct { + u32 unused :15; + u32 net_master_write :17; + } pid_filter_30c_ext_ind_2; + + struct { + u32 unused :15; + u32 next_net_master_write :17; + } pid_filter_30c_ext_ind_3; + + struct { + u32 reserved2 : 5; + u32 stack_read :10; + u32 reserved1 : 6; + u32 state_write :10; + u32 unused1 : 1; + } pid_filter_30c_ext_ind_4; + + struct { + u32 unused :22; + u32 stack_cnt :10; + } pid_filter_30c_ext_ind_5; + + struct { + u32 unused : 4; + u32 data_size_reg :12; + u32 write_status4 : 2; + u32 write_status1 : 2; + u32 pid_fsm_save_reg300 : 2; + u32 pid_fsm_save_reg4 : 2; + u32 pid_fsm_save_reg3 : 2; + u32 pid_fsm_save_reg2 : 2; + u32 pid_fsm_save_reg1 : 2; + u32 pid_fsm_save_reg0 : 2; + } pid_filter_30c_ext_ind_6; + + struct { + u32 unused :22; + u32 pass_alltables : 1; + u32 AB_select : 1; + u32 extra_index_reg : 3; + u32 index_reg : 5; + } index_reg_310; + + struct { + u32 reserved :17; + u32 PID_enable_bit : 1; + u32 PID_trans : 1; + u32 PID :13; + } pid_n_reg_314; + + struct { + u32 reserved : 6; + u32 HighAB_bit : 1; + u32 Enable_bit : 1; + u32 A6_byte : 8; + u32 A5_byte : 8; + u32 A4_byte : 8; + } mac_low_reg_318; + + struct { + u32 reserved : 8; + u32 A3_byte : 8; + u32 A2_byte : 8; + u32 A1_byte : 8; + } mac_high_reg_31c; + + struct { + u32 data_Tag_ID :16; + u32 reserved :16; + } data_tag_400; + + struct { + u32 Card_IDbyte3 : 8; + u32 Card_IDbyte4 : 8; + u32 Card_IDbyte5 : 8; + u32 Card_IDbyte6 : 8; + } card_id_408; + + struct { + u32 Card_IDbyte1 : 8; + u32 Card_IDbyte2 : 8; + } card_id_40c; + + struct { + u32 MAC6 : 8; + u32 MAC3 : 8; + u32 MAC2 : 8; + u32 MAC1 : 8; + } mac_address_418; + + struct { + u32 reserved :16; + u32 MAC8 : 8; + u32 MAC7 : 8; + } mac_address_41c; + + struct { + u32 reserved :21; + u32 txbuffempty : 1; + u32 ReceiveByteFrameError : 1; + u32 ReceiveDataReady : 1; + u32 transmitter_data_byte : 8; + } ci_600; + + struct { + u32 pi_component_reg : 3; + u32 pi_rw : 1; + u32 pi_ha :20; + u32 pi_d : 8; + } 
pi_604; + + struct { + u32 pi_busy_n : 1; + u32 pi_wait_n : 1; + u32 pi_timeout_status : 1; + u32 pi_CiMax_IRQ_n : 1; + u32 config_cclk : 1; + u32 config_cs_n : 1; + u32 config_wr_n : 1; + u32 config_Prog_n : 1; + u32 config_Init_stat : 1; + u32 config_Done_stat : 1; + u32 pcmcia_b_mod_pwr_n : 1; + u32 pcmcia_a_mod_pwr_n : 1; + u32 reserved : 3; + u32 Timer_addr : 5; + u32 unused : 1; + u32 timer_data : 7; + u32 Timer_Load_req : 1; + u32 Timer_Read_req : 1; + u32 oncecycle_read : 1; + u32 serialReset : 1; + } pi_608; + + struct { + u32 reserved : 6; + u32 rw_flag : 1; + u32 dvb_en : 1; + u32 key_array_row : 5; + u32 key_array_col : 3; + u32 key_code : 2; + u32 key_enable : 1; + u32 PID :13; + } dvb_reg_60c; + + struct { + u32 start_sram_ibi : 1; + u32 reserved2 : 1; + u32 ce_pin_reg : 1; + u32 oe_pin_reg : 1; + u32 reserved1 : 3; + u32 sc_xfer_bit : 1; + u32 sram_data : 8; + u32 sram_rw : 1; + u32 sram_addr :15; + } sram_ctrl_reg_700; + + struct { + u32 net_addr_write :16; + u32 net_addr_read :16; + } net_buf_reg_704; + + struct { + u32 cai_cnt : 4; + u32 reserved2 : 6; + u32 cai_write :11; + u32 reserved1 : 5; + u32 cai_read :11; + } cai_buf_reg_708; + + struct { + u32 cao_cnt : 4; + u32 reserved2 : 6; + u32 cap_write :11; + u32 reserved1 : 5; + u32 cao_read :11; + } cao_buf_reg_70c; + + struct { + u32 media_cnt : 4; + u32 reserved2 : 6; + u32 media_write :11; + u32 reserved1 : 5; + u32 media_read :11; + } media_buf_reg_710; + + struct { + u32 reserved :17; + u32 ctrl_maximumfill : 1; + u32 ctrl_sramdma : 1; + u32 ctrl_usb_wan : 1; + u32 cao_ovflow_error : 1; + u32 cai_ovflow_error : 1; + u32 media_ovflow_error : 1; + u32 net_ovflow_error : 1; + u32 MEDIA_Dest : 2; + u32 CAO_Dest : 2; + u32 CAI_Dest : 2; + u32 NET_Dest : 2; + } sram_dest_reg_714; + + struct { + u32 reserved3 :11; + u32 net_addr_write : 1; + u32 reserved2 : 3; + u32 net_addr_read : 1; + u32 reserved1 : 4; + u32 net_cnt :12; + } net_buf_reg_718; + + struct { + u32 reserved3 : 4; + u32 wan_pkt_frame : 4; + u32 reserved2 : 4; + u32 sram_memmap : 2; + u32 sram_chip : 2; + u32 wan_wait_state : 8; + u32 reserved1 : 6; + u32 wan_speed_sig : 2; + } wan_ctrl_reg_71c; +} flexcop_ibi_value; + +#endif diff --git a/drivers/media/common/b2c2/flexcop_ibi_value_le.h b/drivers/media/common/b2c2/flexcop_ibi_value_le.h new file mode 100644 index 0000000000..5db3b46f21 --- /dev/null +++ b/drivers/media/common/b2c2/flexcop_ibi_value_le.h @@ -0,0 +1,456 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III + * register descriptions + * see flexcop.c for copyright information + */ +/* This file is automatically generated, do not edit things here. 
*/ +#ifndef __FLEXCOP_IBI_VALUE_INCLUDED__ +#define __FLEXCOP_IBI_VALUE_INCLUDED__ + +typedef union { + u32 raw; + + struct { + u32 dma_0start : 1; + u32 dma_0No_update : 1; + u32 dma_address0 :30; + } dma_0x0; + + struct { + u32 DMA_maxpackets : 8; + u32 dma_addr_size :24; + } dma_0x4_remap; + + struct { + u32 dma1timer : 7; + u32 unused : 1; + u32 dma_addr_size :24; + } dma_0x4_read; + + struct { + u32 unused : 1; + u32 dmatimer : 7; + u32 dma_addr_size :24; + } dma_0x4_write; + + struct { + u32 unused : 2; + u32 dma_cur_addr :30; + } dma_0x8; + + struct { + u32 dma_1start : 1; + u32 remap_enable : 1; + u32 dma_address1 :30; + } dma_0xc; + + struct { + u32 chipaddr : 7; + u32 reserved1 : 1; + u32 baseaddr : 8; + u32 data1_reg : 8; + u32 working_start : 1; + u32 twoWS_rw : 1; + u32 total_bytes : 2; + u32 twoWS_port_reg : 2; + u32 no_base_addr_ack_error : 1; + u32 st_done : 1; + } tw_sm_c_100; + + struct { + u32 data2_reg : 8; + u32 data3_reg : 8; + u32 data4_reg : 8; + u32 exlicit_stops : 1; + u32 force_stop : 1; + u32 unused : 6; + } tw_sm_c_104; + + struct { + u32 thi1 : 6; + u32 reserved1 : 2; + u32 tlo1 : 5; + u32 reserved2 :19; + } tw_sm_c_108; + + struct { + u32 thi1 : 6; + u32 reserved1 : 2; + u32 tlo1 : 5; + u32 reserved2 :19; + } tw_sm_c_10c; + + struct { + u32 thi1 : 6; + u32 reserved1 : 2; + u32 tlo1 : 5; + u32 reserved2 :19; + } tw_sm_c_110; + + struct { + u32 LNB_CTLHighCount_sig :15; + u32 LNB_CTLLowCount_sig :15; + u32 LNB_CTLPrescaler_sig : 2; + } lnb_switch_freq_200; + + struct { + u32 ACPI1_sig : 1; + u32 ACPI3_sig : 1; + u32 LNB_L_H_sig : 1; + u32 Per_reset_sig : 1; + u32 reserved :20; + u32 Rev_N_sig_revision_hi : 4; + u32 Rev_N_sig_reserved1 : 2; + u32 Rev_N_sig_caps : 1; + u32 Rev_N_sig_reserved2 : 1; + } misc_204; + + struct { + u32 Stream1_filter_sig : 1; + u32 Stream2_filter_sig : 1; + u32 PCR_filter_sig : 1; + u32 PMT_filter_sig : 1; + u32 EMM_filter_sig : 1; + u32 ECM_filter_sig : 1; + u32 Null_filter_sig : 1; + u32 Mask_filter_sig : 1; + u32 WAN_Enable_sig : 1; + u32 WAN_CA_Enable_sig : 1; + u32 CA_Enable_sig : 1; + u32 SMC_Enable_sig : 1; + u32 Per_CA_Enable_sig : 1; + u32 Multi2_Enable_sig : 1; + u32 MAC_filter_Mode_sig : 1; + u32 Rcv_Data_sig : 1; + u32 DMA1_IRQ_Enable_sig : 1; + u32 DMA1_Timer_Enable_sig : 1; + u32 DMA2_IRQ_Enable_sig : 1; + u32 DMA2_Timer_Enable_sig : 1; + u32 DMA1_Size_IRQ_Enable_sig : 1; + u32 DMA2_Size_IRQ_Enable_sig : 1; + u32 Mailbox_from_V8_Enable_sig : 1; + u32 unused : 9; + } ctrl_208; + + struct { + u32 DMA1_IRQ_Status : 1; + u32 DMA1_Timer_Status : 1; + u32 DMA2_IRQ_Status : 1; + u32 DMA2_Timer_Status : 1; + u32 DMA1_Size_IRQ_Status : 1; + u32 DMA2_Size_IRQ_Status : 1; + u32 Mailbox_from_V8_Status_sig : 1; + u32 Data_receiver_error : 1; + u32 Continuity_error_flag : 1; + u32 LLC_SNAP_FLAG_set : 1; + u32 Transport_Error : 1; + u32 reserved :21; + } irq_20c; + + struct { + u32 reset_block_000 : 1; + u32 reset_block_100 : 1; + u32 reset_block_200 : 1; + u32 reset_block_300 : 1; + u32 reset_block_400 : 1; + u32 reset_block_500 : 1; + u32 reset_block_600 : 1; + u32 reset_block_700 : 1; + u32 Block_reset_enable : 8; + u32 Special_controls :16; + } sw_reset_210; + + struct { + u32 vuart_oe_sig : 1; + u32 v2WS_oe_sig : 1; + u32 halt_V8_sig : 1; + u32 section_pkg_enable_sig : 1; + u32 s2p_sel_sig : 1; + u32 unused1 : 3; + u32 polarity_PS_CLK_sig : 1; + u32 polarity_PS_VALID_sig : 1; + u32 polarity_PS_SYNC_sig : 1; + u32 polarity_PS_ERR_sig : 1; + u32 unused2 :20; + } misc_214; + + struct { + u32 Mailbox_from_V8 :32; + } 
mbox_v8_to_host_218; + + struct { + u32 sysramaccess_data : 8; + u32 sysramaccess_addr :15; + u32 unused : 7; + u32 sysramaccess_write : 1; + u32 sysramaccess_busmuster : 1; + } mbox_host_to_v8_21c; + + struct { + u32 Stream1_PID :13; + u32 Stream1_trans : 1; + u32 MAC_Multicast_filter : 1; + u32 debug_flag_pid_saved : 1; + u32 Stream2_PID :13; + u32 Stream2_trans : 1; + u32 debug_flag_write_status00 : 1; + u32 debug_fifo_problem : 1; + } pid_filter_300; + + struct { + u32 PCR_PID :13; + u32 PCR_trans : 1; + u32 debug_overrun3 : 1; + u32 debug_overrun2 : 1; + u32 PMT_PID :13; + u32 PMT_trans : 1; + u32 reserved : 2; + } pid_filter_304; + + struct { + u32 EMM_PID :13; + u32 EMM_trans : 1; + u32 EMM_filter_4 : 1; + u32 EMM_filter_6 : 1; + u32 ECM_PID :13; + u32 ECM_trans : 1; + u32 reserved : 2; + } pid_filter_308; + + struct { + u32 Group_PID :13; + u32 Group_trans : 1; + u32 unused1 : 2; + u32 Group_mask :13; + u32 unused2 : 3; + } pid_filter_30c_ext_ind_0_7; + + struct { + u32 net_master_read :17; + u32 unused :15; + } pid_filter_30c_ext_ind_1; + + struct { + u32 net_master_write :17; + u32 unused :15; + } pid_filter_30c_ext_ind_2; + + struct { + u32 next_net_master_write :17; + u32 unused :15; + } pid_filter_30c_ext_ind_3; + + struct { + u32 unused1 : 1; + u32 state_write :10; + u32 reserved1 : 6; + u32 stack_read :10; + u32 reserved2 : 5; + } pid_filter_30c_ext_ind_4; + + struct { + u32 stack_cnt :10; + u32 unused :22; + } pid_filter_30c_ext_ind_5; + + struct { + u32 pid_fsm_save_reg0 : 2; + u32 pid_fsm_save_reg1 : 2; + u32 pid_fsm_save_reg2 : 2; + u32 pid_fsm_save_reg3 : 2; + u32 pid_fsm_save_reg4 : 2; + u32 pid_fsm_save_reg300 : 2; + u32 write_status1 : 2; + u32 write_status4 : 2; + u32 data_size_reg :12; + u32 unused : 4; + } pid_filter_30c_ext_ind_6; + + struct { + u32 index_reg : 5; + u32 extra_index_reg : 3; + u32 AB_select : 1; + u32 pass_alltables : 1; + u32 unused :22; + } index_reg_310; + + struct { + u32 PID :13; + u32 PID_trans : 1; + u32 PID_enable_bit : 1; + u32 reserved :17; + } pid_n_reg_314; + + struct { + u32 A4_byte : 8; + u32 A5_byte : 8; + u32 A6_byte : 8; + u32 Enable_bit : 1; + u32 HighAB_bit : 1; + u32 reserved : 6; + } mac_low_reg_318; + + struct { + u32 A1_byte : 8; + u32 A2_byte : 8; + u32 A3_byte : 8; + u32 reserved : 8; + } mac_high_reg_31c; + + struct { + u32 reserved :16; + u32 data_Tag_ID :16; + } data_tag_400; + + struct { + u32 Card_IDbyte6 : 8; + u32 Card_IDbyte5 : 8; + u32 Card_IDbyte4 : 8; + u32 Card_IDbyte3 : 8; + } card_id_408; + + struct { + u32 Card_IDbyte2 : 8; + u32 Card_IDbyte1 : 8; + } card_id_40c; + + struct { + u32 MAC1 : 8; + u32 MAC2 : 8; + u32 MAC3 : 8; + u32 MAC6 : 8; + } mac_address_418; + + struct { + u32 MAC7 : 8; + u32 MAC8 : 8; + u32 reserved :16; + } mac_address_41c; + + struct { + u32 transmitter_data_byte : 8; + u32 ReceiveDataReady : 1; + u32 ReceiveByteFrameError : 1; + u32 txbuffempty : 1; + u32 reserved :21; + } ci_600; + + struct { + u32 pi_d : 8; + u32 pi_ha :20; + u32 pi_rw : 1; + u32 pi_component_reg : 3; + } pi_604; + + struct { + u32 serialReset : 1; + u32 oncecycle_read : 1; + u32 Timer_Read_req : 1; + u32 Timer_Load_req : 1; + u32 timer_data : 7; + u32 unused : 1; + u32 Timer_addr : 5; + u32 reserved : 3; + u32 pcmcia_a_mod_pwr_n : 1; + u32 pcmcia_b_mod_pwr_n : 1; + u32 config_Done_stat : 1; + u32 config_Init_stat : 1; + u32 config_Prog_n : 1; + u32 config_wr_n : 1; + u32 config_cs_n : 1; + u32 config_cclk : 1; + u32 pi_CiMax_IRQ_n : 1; + u32 pi_timeout_status : 1; + u32 pi_wait_n : 1; + u32 pi_busy_n : 1; + } 
pi_608; + + struct { + u32 PID :13; + u32 key_enable : 1; + u32 key_code : 2; + u32 key_array_col : 3; + u32 key_array_row : 5; + u32 dvb_en : 1; + u32 rw_flag : 1; + u32 reserved : 6; + } dvb_reg_60c; + + struct { + u32 sram_addr :15; + u32 sram_rw : 1; + u32 sram_data : 8; + u32 sc_xfer_bit : 1; + u32 reserved1 : 3; + u32 oe_pin_reg : 1; + u32 ce_pin_reg : 1; + u32 reserved2 : 1; + u32 start_sram_ibi : 1; + } sram_ctrl_reg_700; + + struct { + u32 net_addr_read :16; + u32 net_addr_write :16; + } net_buf_reg_704; + + struct { + u32 cai_read :11; + u32 reserved1 : 5; + u32 cai_write :11; + u32 reserved2 : 6; + u32 cai_cnt : 4; + } cai_buf_reg_708; + + struct { + u32 cao_read :11; + u32 reserved1 : 5; + u32 cap_write :11; + u32 reserved2 : 6; + u32 cao_cnt : 4; + } cao_buf_reg_70c; + + struct { + u32 media_read :11; + u32 reserved1 : 5; + u32 media_write :11; + u32 reserved2 : 6; + u32 media_cnt : 4; + } media_buf_reg_710; + + struct { + u32 NET_Dest : 2; + u32 CAI_Dest : 2; + u32 CAO_Dest : 2; + u32 MEDIA_Dest : 2; + u32 net_ovflow_error : 1; + u32 media_ovflow_error : 1; + u32 cai_ovflow_error : 1; + u32 cao_ovflow_error : 1; + u32 ctrl_usb_wan : 1; + u32 ctrl_sramdma : 1; + u32 ctrl_maximumfill : 1; + u32 reserved :17; + } sram_dest_reg_714; + + struct { + u32 net_cnt :12; + u32 reserved1 : 4; + u32 net_addr_read : 1; + u32 reserved2 : 3; + u32 net_addr_write : 1; + u32 reserved3 :11; + } net_buf_reg_718; + + struct { + u32 wan_speed_sig : 2; + u32 reserved1 : 6; + u32 wan_wait_state : 8; + u32 sram_chip : 2; + u32 sram_memmap : 2; + u32 reserved2 : 4; + u32 wan_pkt_frame : 4; + u32 reserved3 : 4; + } wan_ctrl_reg_71c; +} flexcop_ibi_value; + +#endif diff --git a/drivers/media/common/cx2341x.c b/drivers/media/common/cx2341x.c new file mode 100644 index 0000000000..1392bd6b00 --- /dev/null +++ b/drivers/media/common/cx2341x.c @@ -0,0 +1,1778 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * cx2341x - generic code for cx23415/6/8 based devices + * + * Copyright (C) 2006 Hans Verkuil + */ + + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +MODULE_DESCRIPTION("cx23415/6/8 driver"); +MODULE_AUTHOR("Hans Verkuil"); +MODULE_LICENSE("GPL"); + +static int debug; +module_param(debug, int, 0644); +MODULE_PARM_DESC(debug, "Debug level (0-1)"); + +/********************** COMMON CODE *********************/ + +/* definitions for audio properties bits 29-28 */ +#define CX2341X_AUDIO_ENCODING_METHOD_MPEG 0 +#define CX2341X_AUDIO_ENCODING_METHOD_AC3 1 +#define CX2341X_AUDIO_ENCODING_METHOD_LPCM 2 + +static const char *cx2341x_get_name(u32 id) +{ + switch (id) { + case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE: + return "Spatial Filter Mode"; + case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER: + return "Spatial Filter"; + case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE: + return "Spatial Luma Filter Type"; + case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE: + return "Spatial Chroma Filter Type"; + case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE: + return "Temporal Filter Mode"; + case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER: + return "Temporal Filter"; + case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE: + return "Median Filter Type"; + case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP: + return "Median Luma Filter Maximum"; + case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM: + return "Median Luma Filter Minimum"; + case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP: + return "Median Chroma 
Filter Maximum"; + case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM: + return "Median Chroma Filter Minimum"; + case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS: + return "Insert Navigation Packets"; + } + return NULL; +} + +static const char **cx2341x_get_menu(u32 id) +{ + static const char *cx2341x_video_spatial_filter_mode_menu[] = { + "Manual", + "Auto", + NULL + }; + + static const char *cx2341x_video_luma_spatial_filter_type_menu[] = { + "Off", + "1D Horizontal", + "1D Vertical", + "2D H/V Separable", + "2D Symmetric non-separable", + NULL + }; + + static const char *cx2341x_video_chroma_spatial_filter_type_menu[] = { + "Off", + "1D Horizontal", + NULL + }; + + static const char *cx2341x_video_temporal_filter_mode_menu[] = { + "Manual", + "Auto", + NULL + }; + + static const char *cx2341x_video_median_filter_type_menu[] = { + "Off", + "Horizontal", + "Vertical", + "Horizontal/Vertical", + "Diagonal", + NULL + }; + + switch (id) { + case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE: + return cx2341x_video_spatial_filter_mode_menu; + case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE: + return cx2341x_video_luma_spatial_filter_type_menu; + case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE: + return cx2341x_video_chroma_spatial_filter_type_menu; + case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE: + return cx2341x_video_temporal_filter_mode_menu; + case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE: + return cx2341x_video_median_filter_type_menu; + } + return NULL; +} + +static void cx2341x_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type, + s32 *min, s32 *max, s32 *step, s32 *def, u32 *flags) +{ + *name = cx2341x_get_name(id); + *flags = 0; + + switch (id) { + case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE: + case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE: + case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE: + case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE: + case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE: + *type = V4L2_CTRL_TYPE_MENU; + *min = 0; + *step = 0; + break; + case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS: + *type = V4L2_CTRL_TYPE_BOOLEAN; + *min = 0; + *max = *step = 1; + break; + default: + *type = V4L2_CTRL_TYPE_INTEGER; + break; + } + switch (id) { + case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE: + case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE: + case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE: + *flags |= V4L2_CTRL_FLAG_UPDATE; + break; + case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER: + case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER: + case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP: + case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM: + case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP: + case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM: + *flags |= V4L2_CTRL_FLAG_SLIDER; + break; + case V4L2_CID_MPEG_VIDEO_ENCODING: + *flags |= V4L2_CTRL_FLAG_READ_ONLY; + break; + } +} + + +/********************** OLD CODE *********************/ + +/* Must be sorted from low to high control ID! 
*/ +const u32 cx2341x_mpeg_ctrls[] = { + V4L2_CID_CODEC_CLASS, + V4L2_CID_MPEG_STREAM_TYPE, + V4L2_CID_MPEG_STREAM_VBI_FMT, + V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ, + V4L2_CID_MPEG_AUDIO_ENCODING, + V4L2_CID_MPEG_AUDIO_L2_BITRATE, + V4L2_CID_MPEG_AUDIO_MODE, + V4L2_CID_MPEG_AUDIO_MODE_EXTENSION, + V4L2_CID_MPEG_AUDIO_EMPHASIS, + V4L2_CID_MPEG_AUDIO_CRC, + V4L2_CID_MPEG_AUDIO_MUTE, + V4L2_CID_MPEG_AUDIO_AC3_BITRATE, + V4L2_CID_MPEG_VIDEO_ENCODING, + V4L2_CID_MPEG_VIDEO_ASPECT, + V4L2_CID_MPEG_VIDEO_B_FRAMES, + V4L2_CID_MPEG_VIDEO_GOP_SIZE, + V4L2_CID_MPEG_VIDEO_GOP_CLOSURE, + V4L2_CID_MPEG_VIDEO_BITRATE_MODE, + V4L2_CID_MPEG_VIDEO_BITRATE, + V4L2_CID_MPEG_VIDEO_BITRATE_PEAK, + V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION, + V4L2_CID_MPEG_VIDEO_MUTE, + V4L2_CID_MPEG_VIDEO_MUTE_YUV, + V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE, + V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER, + V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE, + V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE, + V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE, + V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER, + V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE, + V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM, + V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP, + V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM, + V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP, + V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS, + 0 +}; +EXPORT_SYMBOL(cx2341x_mpeg_ctrls); + +static const struct cx2341x_mpeg_params default_params = { + /* misc */ + .capabilities = 0, + .port = CX2341X_PORT_MEMORY, + .width = 720, + .height = 480, + .is_50hz = 0, + + /* stream */ + .stream_type = V4L2_MPEG_STREAM_TYPE_MPEG2_PS, + .stream_vbi_fmt = V4L2_MPEG_STREAM_VBI_FMT_NONE, + .stream_insert_nav_packets = 0, + + /* audio */ + .audio_sampling_freq = V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000, + .audio_encoding = V4L2_MPEG_AUDIO_ENCODING_LAYER_2, + .audio_l2_bitrate = V4L2_MPEG_AUDIO_L2_BITRATE_224K, + .audio_ac3_bitrate = V4L2_MPEG_AUDIO_AC3_BITRATE_224K, + .audio_mode = V4L2_MPEG_AUDIO_MODE_STEREO, + .audio_mode_extension = V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4, + .audio_emphasis = V4L2_MPEG_AUDIO_EMPHASIS_NONE, + .audio_crc = V4L2_MPEG_AUDIO_CRC_NONE, + .audio_mute = 0, + + /* video */ + .video_encoding = V4L2_MPEG_VIDEO_ENCODING_MPEG_2, + .video_aspect = V4L2_MPEG_VIDEO_ASPECT_4x3, + .video_b_frames = 2, + .video_gop_size = 12, + .video_gop_closure = 1, + .video_bitrate_mode = V4L2_MPEG_VIDEO_BITRATE_MODE_VBR, + .video_bitrate = 6000000, + .video_bitrate_peak = 8000000, + .video_temporal_decimation = 0, + .video_mute = 0, + .video_mute_yuv = 0x008080, /* YCbCr value for black */ + + /* encoding filters */ + .video_spatial_filter_mode = + V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL, + .video_spatial_filter = 0, + .video_luma_spatial_filter_type = + V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_HOR, + .video_chroma_spatial_filter_type = + V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR, + .video_temporal_filter_mode = + V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL, + .video_temporal_filter = 8, + .video_median_filter_type = + V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF, + .video_luma_median_filter_top = 255, + .video_luma_median_filter_bottom = 0, + .video_chroma_median_filter_top = 255, + .video_chroma_median_filter_bottom = 0, +}; +/* Map the control ID to the correct field in the cx2341x_mpeg_params + struct. Return -EINVAL if the ID is unknown, else return 0. 
*/ +static int cx2341x_get_ctrl(const struct cx2341x_mpeg_params *params, + struct v4l2_ext_control *ctrl) +{ + switch (ctrl->id) { + case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: + ctrl->value = params->audio_sampling_freq; + break; + case V4L2_CID_MPEG_AUDIO_ENCODING: + ctrl->value = params->audio_encoding; + break; + case V4L2_CID_MPEG_AUDIO_L2_BITRATE: + ctrl->value = params->audio_l2_bitrate; + break; + case V4L2_CID_MPEG_AUDIO_AC3_BITRATE: + ctrl->value = params->audio_ac3_bitrate; + break; + case V4L2_CID_MPEG_AUDIO_MODE: + ctrl->value = params->audio_mode; + break; + case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION: + ctrl->value = params->audio_mode_extension; + break; + case V4L2_CID_MPEG_AUDIO_EMPHASIS: + ctrl->value = params->audio_emphasis; + break; + case V4L2_CID_MPEG_AUDIO_CRC: + ctrl->value = params->audio_crc; + break; + case V4L2_CID_MPEG_AUDIO_MUTE: + ctrl->value = params->audio_mute; + break; + case V4L2_CID_MPEG_VIDEO_ENCODING: + ctrl->value = params->video_encoding; + break; + case V4L2_CID_MPEG_VIDEO_ASPECT: + ctrl->value = params->video_aspect; + break; + case V4L2_CID_MPEG_VIDEO_B_FRAMES: + ctrl->value = params->video_b_frames; + break; + case V4L2_CID_MPEG_VIDEO_GOP_SIZE: + ctrl->value = params->video_gop_size; + break; + case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: + ctrl->value = params->video_gop_closure; + break; + case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: + ctrl->value = params->video_bitrate_mode; + break; + case V4L2_CID_MPEG_VIDEO_BITRATE: + ctrl->value = params->video_bitrate; + break; + case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK: + ctrl->value = params->video_bitrate_peak; + break; + case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: + ctrl->value = params->video_temporal_decimation; + break; + case V4L2_CID_MPEG_VIDEO_MUTE: + ctrl->value = params->video_mute; + break; + case V4L2_CID_MPEG_VIDEO_MUTE_YUV: + ctrl->value = params->video_mute_yuv; + break; + case V4L2_CID_MPEG_STREAM_TYPE: + ctrl->value = params->stream_type; + break; + case V4L2_CID_MPEG_STREAM_VBI_FMT: + ctrl->value = params->stream_vbi_fmt; + break; + case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE: + ctrl->value = params->video_spatial_filter_mode; + break; + case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER: + ctrl->value = params->video_spatial_filter; + break; + case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE: + ctrl->value = params->video_luma_spatial_filter_type; + break; + case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE: + ctrl->value = params->video_chroma_spatial_filter_type; + break; + case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE: + ctrl->value = params->video_temporal_filter_mode; + break; + case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER: + ctrl->value = params->video_temporal_filter; + break; + case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE: + ctrl->value = params->video_median_filter_type; + break; + case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP: + ctrl->value = params->video_luma_median_filter_top; + break; + case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM: + ctrl->value = params->video_luma_median_filter_bottom; + break; + case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP: + ctrl->value = params->video_chroma_median_filter_top; + break; + case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM: + ctrl->value = params->video_chroma_median_filter_bottom; + break; + case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS: + ctrl->value = params->stream_insert_nav_packets; + break; + default: + return -EINVAL; + } + return 0; +} + +/* Map 
the control ID to the correct field in the cx2341x_mpeg_params + struct. Return -EINVAL if the ID is unknown, else return 0. */ +static int cx2341x_set_ctrl(struct cx2341x_mpeg_params *params, int busy, + struct v4l2_ext_control *ctrl) +{ + switch (ctrl->id) { + case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: + if (busy) + return -EBUSY; + params->audio_sampling_freq = ctrl->value; + break; + case V4L2_CID_MPEG_AUDIO_ENCODING: + if (busy) + return -EBUSY; + if (params->capabilities & CX2341X_CAP_HAS_AC3) + if (ctrl->value != V4L2_MPEG_AUDIO_ENCODING_LAYER_2 && + ctrl->value != V4L2_MPEG_AUDIO_ENCODING_AC3) + return -ERANGE; + params->audio_encoding = ctrl->value; + break; + case V4L2_CID_MPEG_AUDIO_L2_BITRATE: + if (busy) + return -EBUSY; + params->audio_l2_bitrate = ctrl->value; + break; + case V4L2_CID_MPEG_AUDIO_AC3_BITRATE: + if (busy) + return -EBUSY; + if (!(params->capabilities & CX2341X_CAP_HAS_AC3)) + return -EINVAL; + params->audio_ac3_bitrate = ctrl->value; + break; + case V4L2_CID_MPEG_AUDIO_MODE: + params->audio_mode = ctrl->value; + break; + case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION: + params->audio_mode_extension = ctrl->value; + break; + case V4L2_CID_MPEG_AUDIO_EMPHASIS: + params->audio_emphasis = ctrl->value; + break; + case V4L2_CID_MPEG_AUDIO_CRC: + params->audio_crc = ctrl->value; + break; + case V4L2_CID_MPEG_AUDIO_MUTE: + params->audio_mute = ctrl->value; + break; + case V4L2_CID_MPEG_VIDEO_ASPECT: + params->video_aspect = ctrl->value; + break; + case V4L2_CID_MPEG_VIDEO_B_FRAMES: { + int b = ctrl->value + 1; + int gop = params->video_gop_size; + params->video_b_frames = ctrl->value; + params->video_gop_size = b * ((gop + b - 1) / b); + /* Max GOP size = 34 */ + while (params->video_gop_size > 34) + params->video_gop_size -= b; + break; + } + case V4L2_CID_MPEG_VIDEO_GOP_SIZE: { + int b = params->video_b_frames + 1; + int gop = ctrl->value; + params->video_gop_size = b * ((gop + b - 1) / b); + /* Max GOP size = 34 */ + while (params->video_gop_size > 34) + params->video_gop_size -= b; + ctrl->value = params->video_gop_size; + break; + } + case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: + params->video_gop_closure = ctrl->value; + break; + case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: + if (busy) + return -EBUSY; + /* MPEG-1 only allows CBR */ + if (params->video_encoding == V4L2_MPEG_VIDEO_ENCODING_MPEG_1 && + ctrl->value != V4L2_MPEG_VIDEO_BITRATE_MODE_CBR) + return -EINVAL; + params->video_bitrate_mode = ctrl->value; + break; + case V4L2_CID_MPEG_VIDEO_BITRATE: + if (busy) + return -EBUSY; + params->video_bitrate = ctrl->value; + break; + case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK: + if (busy) + return -EBUSY; + params->video_bitrate_peak = ctrl->value; + break; + case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: + params->video_temporal_decimation = ctrl->value; + break; + case V4L2_CID_MPEG_VIDEO_MUTE: + params->video_mute = (ctrl->value != 0); + break; + case V4L2_CID_MPEG_VIDEO_MUTE_YUV: + params->video_mute_yuv = ctrl->value; + break; + case V4L2_CID_MPEG_STREAM_TYPE: + if (busy) + return -EBUSY; + params->stream_type = ctrl->value; + params->video_encoding = + (params->stream_type == V4L2_MPEG_STREAM_TYPE_MPEG1_SS || + params->stream_type == V4L2_MPEG_STREAM_TYPE_MPEG1_VCD) ? 
+ V4L2_MPEG_VIDEO_ENCODING_MPEG_1 : + V4L2_MPEG_VIDEO_ENCODING_MPEG_2; + if (params->video_encoding == V4L2_MPEG_VIDEO_ENCODING_MPEG_1) + /* MPEG-1 implies CBR */ + params->video_bitrate_mode = + V4L2_MPEG_VIDEO_BITRATE_MODE_CBR; + break; + case V4L2_CID_MPEG_STREAM_VBI_FMT: + params->stream_vbi_fmt = ctrl->value; + break; + case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE: + params->video_spatial_filter_mode = ctrl->value; + break; + case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER: + params->video_spatial_filter = ctrl->value; + break; + case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE: + params->video_luma_spatial_filter_type = ctrl->value; + break; + case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE: + params->video_chroma_spatial_filter_type = ctrl->value; + break; + case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE: + params->video_temporal_filter_mode = ctrl->value; + break; + case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER: + params->video_temporal_filter = ctrl->value; + break; + case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE: + params->video_median_filter_type = ctrl->value; + break; + case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP: + params->video_luma_median_filter_top = ctrl->value; + break; + case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM: + params->video_luma_median_filter_bottom = ctrl->value; + break; + case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP: + params->video_chroma_median_filter_top = ctrl->value; + break; + case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM: + params->video_chroma_median_filter_bottom = ctrl->value; + break; + case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS: + params->stream_insert_nav_packets = ctrl->value; + break; + default: + return -EINVAL; + } + return 0; +} + +static int cx2341x_ctrl_query_fill(struct v4l2_queryctrl *qctrl, + s32 min, s32 max, s32 step, s32 def) +{ + const char *name; + + switch (qctrl->id) { + /* MPEG controls */ + case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE: + case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER: + case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE: + case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE: + case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE: + case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER: + case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE: + case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP: + case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM: + case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP: + case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM: + case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS: + cx2341x_ctrl_fill(qctrl->id, &name, &qctrl->type, + &min, &max, &step, &def, &qctrl->flags); + qctrl->minimum = min; + qctrl->maximum = max; + qctrl->step = step; + qctrl->default_value = def; + qctrl->reserved[0] = qctrl->reserved[1] = 0; + strscpy(qctrl->name, name, sizeof(qctrl->name)); + return 0; + + default: + return v4l2_ctrl_query_fill(qctrl, min, max, step, def); + } +} + +int cx2341x_ctrl_query(const struct cx2341x_mpeg_params *params, + struct v4l2_queryctrl *qctrl) +{ + int err; + + switch (qctrl->id) { + case V4L2_CID_CODEC_CLASS: + return v4l2_ctrl_query_fill(qctrl, 0, 0, 0, 0); + case V4L2_CID_MPEG_STREAM_TYPE: + return v4l2_ctrl_query_fill(qctrl, + V4L2_MPEG_STREAM_TYPE_MPEG2_PS, + V4L2_MPEG_STREAM_TYPE_MPEG2_SVCD, 1, + V4L2_MPEG_STREAM_TYPE_MPEG2_PS); + + case V4L2_CID_MPEG_STREAM_VBI_FMT: + if (params->capabilities & 
CX2341X_CAP_HAS_SLICED_VBI) + return v4l2_ctrl_query_fill(qctrl, + V4L2_MPEG_STREAM_VBI_FMT_NONE, + V4L2_MPEG_STREAM_VBI_FMT_IVTV, 1, + V4L2_MPEG_STREAM_VBI_FMT_NONE); + return cx2341x_ctrl_query_fill(qctrl, + V4L2_MPEG_STREAM_VBI_FMT_NONE, + V4L2_MPEG_STREAM_VBI_FMT_NONE, 1, + default_params.stream_vbi_fmt); + + case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: + return v4l2_ctrl_query_fill(qctrl, + V4L2_MPEG_AUDIO_SAMPLING_FREQ_44100, + V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000, 1, + V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000); + + case V4L2_CID_MPEG_AUDIO_ENCODING: + if (params->capabilities & CX2341X_CAP_HAS_AC3) { + /* + * The state of L2 & AC3 bitrate controls can change + * when this control changes, but v4l2_ctrl_query_fill() + * already sets V4L2_CTRL_FLAG_UPDATE for + * V4L2_CID_MPEG_AUDIO_ENCODING, so we don't here. + */ + return v4l2_ctrl_query_fill(qctrl, + V4L2_MPEG_AUDIO_ENCODING_LAYER_2, + V4L2_MPEG_AUDIO_ENCODING_AC3, 1, + default_params.audio_encoding); + } + + return v4l2_ctrl_query_fill(qctrl, + V4L2_MPEG_AUDIO_ENCODING_LAYER_2, + V4L2_MPEG_AUDIO_ENCODING_LAYER_2, 1, + default_params.audio_encoding); + + case V4L2_CID_MPEG_AUDIO_L2_BITRATE: + err = v4l2_ctrl_query_fill(qctrl, + V4L2_MPEG_AUDIO_L2_BITRATE_192K, + V4L2_MPEG_AUDIO_L2_BITRATE_384K, 1, + default_params.audio_l2_bitrate); + if (err) + return err; + if (params->capabilities & CX2341X_CAP_HAS_AC3 && + params->audio_encoding != V4L2_MPEG_AUDIO_ENCODING_LAYER_2) + qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; + return 0; + + case V4L2_CID_MPEG_AUDIO_MODE: + return v4l2_ctrl_query_fill(qctrl, + V4L2_MPEG_AUDIO_MODE_STEREO, + V4L2_MPEG_AUDIO_MODE_MONO, 1, + V4L2_MPEG_AUDIO_MODE_STEREO); + + case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION: + err = v4l2_ctrl_query_fill(qctrl, + V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4, + V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_16, 1, + V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4); + if (err == 0 && + params->audio_mode != V4L2_MPEG_AUDIO_MODE_JOINT_STEREO) + qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; + return err; + + case V4L2_CID_MPEG_AUDIO_EMPHASIS: + return v4l2_ctrl_query_fill(qctrl, + V4L2_MPEG_AUDIO_EMPHASIS_NONE, + V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17, 1, + V4L2_MPEG_AUDIO_EMPHASIS_NONE); + + case V4L2_CID_MPEG_AUDIO_CRC: + return v4l2_ctrl_query_fill(qctrl, + V4L2_MPEG_AUDIO_CRC_NONE, + V4L2_MPEG_AUDIO_CRC_CRC16, 1, + V4L2_MPEG_AUDIO_CRC_NONE); + + case V4L2_CID_MPEG_AUDIO_MUTE: + return v4l2_ctrl_query_fill(qctrl, 0, 1, 1, 0); + + case V4L2_CID_MPEG_AUDIO_AC3_BITRATE: + err = v4l2_ctrl_query_fill(qctrl, + V4L2_MPEG_AUDIO_AC3_BITRATE_48K, + V4L2_MPEG_AUDIO_AC3_BITRATE_448K, 1, + default_params.audio_ac3_bitrate); + if (err) + return err; + if (params->capabilities & CX2341X_CAP_HAS_AC3) { + if (params->audio_encoding != + V4L2_MPEG_AUDIO_ENCODING_AC3) + qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; + } else + qctrl->flags |= V4L2_CTRL_FLAG_DISABLED; + return 0; + + case V4L2_CID_MPEG_VIDEO_ENCODING: + /* this setting is read-only for the cx2341x since the + V4L2_CID_MPEG_STREAM_TYPE really determines the + MPEG-1/2 setting */ + err = v4l2_ctrl_query_fill(qctrl, + V4L2_MPEG_VIDEO_ENCODING_MPEG_1, + V4L2_MPEG_VIDEO_ENCODING_MPEG_2, 1, + V4L2_MPEG_VIDEO_ENCODING_MPEG_2); + if (err == 0) + qctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY; + return err; + + case V4L2_CID_MPEG_VIDEO_ASPECT: + return v4l2_ctrl_query_fill(qctrl, + V4L2_MPEG_VIDEO_ASPECT_1x1, + V4L2_MPEG_VIDEO_ASPECT_221x100, 1, + V4L2_MPEG_VIDEO_ASPECT_4x3); + + case V4L2_CID_MPEG_VIDEO_B_FRAMES: + return v4l2_ctrl_query_fill(qctrl, 0, 33, 1, 2); + + case 
V4L2_CID_MPEG_VIDEO_GOP_SIZE: + return v4l2_ctrl_query_fill(qctrl, 1, 34, 1, + params->is_50hz ? 12 : 15); + + case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: + return v4l2_ctrl_query_fill(qctrl, 0, 1, 1, 1); + + case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: + err = v4l2_ctrl_query_fill(qctrl, + V4L2_MPEG_VIDEO_BITRATE_MODE_VBR, + V4L2_MPEG_VIDEO_BITRATE_MODE_CBR, 1, + V4L2_MPEG_VIDEO_BITRATE_MODE_VBR); + if (err == 0 && + params->video_encoding == V4L2_MPEG_VIDEO_ENCODING_MPEG_1) + qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; + return err; + + case V4L2_CID_MPEG_VIDEO_BITRATE: + return v4l2_ctrl_query_fill(qctrl, 0, 27000000, 1, 6000000); + + case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK: + err = v4l2_ctrl_query_fill(qctrl, 0, 27000000, 1, 8000000); + if (err == 0 && + params->video_bitrate_mode == + V4L2_MPEG_VIDEO_BITRATE_MODE_CBR) + qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; + return err; + + case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: + return v4l2_ctrl_query_fill(qctrl, 0, 255, 1, 0); + + case V4L2_CID_MPEG_VIDEO_MUTE: + return v4l2_ctrl_query_fill(qctrl, 0, 1, 1, 0); + + case V4L2_CID_MPEG_VIDEO_MUTE_YUV: /* Init YUV (really YCbCr) to black */ + return v4l2_ctrl_query_fill(qctrl, 0, 0xffffff, 1, 0x008080); + + /* CX23415/6 specific */ + case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE: + return cx2341x_ctrl_query_fill(qctrl, + V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL, + V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO, 1, + default_params.video_spatial_filter_mode); + + case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER: + cx2341x_ctrl_query_fill(qctrl, 0, 15, 1, + default_params.video_spatial_filter); + qctrl->flags |= V4L2_CTRL_FLAG_SLIDER; + if (params->video_spatial_filter_mode == + V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO) + qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; + return 0; + + case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE: + cx2341x_ctrl_query_fill(qctrl, + V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_OFF, + V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_SYM_NON_SEPARABLE, + 1, + default_params.video_luma_spatial_filter_type); + if (params->video_spatial_filter_mode == + V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO) + qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; + return 0; + + case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE: + cx2341x_ctrl_query_fill(qctrl, + V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_OFF, + V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR, + 1, + default_params.video_chroma_spatial_filter_type); + if (params->video_spatial_filter_mode == + V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO) + qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; + return 0; + + case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE: + return cx2341x_ctrl_query_fill(qctrl, + V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL, + V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO, 1, + default_params.video_temporal_filter_mode); + + case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER: + cx2341x_ctrl_query_fill(qctrl, 0, 31, 1, + default_params.video_temporal_filter); + qctrl->flags |= V4L2_CTRL_FLAG_SLIDER; + if (params->video_temporal_filter_mode == + V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO) + qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; + return 0; + + case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE: + return cx2341x_ctrl_query_fill(qctrl, + V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF, + V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_DIAG, 1, + default_params.video_median_filter_type); + + case 
V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP: + cx2341x_ctrl_query_fill(qctrl, 0, 255, 1, + default_params.video_luma_median_filter_top); + qctrl->flags |= V4L2_CTRL_FLAG_SLIDER; + if (params->video_median_filter_type == + V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF) + qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; + return 0; + + case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM: + cx2341x_ctrl_query_fill(qctrl, 0, 255, 1, + default_params.video_luma_median_filter_bottom); + qctrl->flags |= V4L2_CTRL_FLAG_SLIDER; + if (params->video_median_filter_type == + V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF) + qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; + return 0; + + case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP: + cx2341x_ctrl_query_fill(qctrl, 0, 255, 1, + default_params.video_chroma_median_filter_top); + qctrl->flags |= V4L2_CTRL_FLAG_SLIDER; + if (params->video_median_filter_type == + V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF) + qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; + return 0; + + case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM: + cx2341x_ctrl_query_fill(qctrl, 0, 255, 1, + default_params.video_chroma_median_filter_bottom); + qctrl->flags |= V4L2_CTRL_FLAG_SLIDER; + if (params->video_median_filter_type == + V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF) + qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; + return 0; + + case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS: + return cx2341x_ctrl_query_fill(qctrl, 0, 1, 1, + default_params.stream_insert_nav_packets); + + default: + return -EINVAL; + + } +} +EXPORT_SYMBOL(cx2341x_ctrl_query); + +const char * const *cx2341x_ctrl_get_menu(const struct cx2341x_mpeg_params *p, u32 id) +{ + static const char * const mpeg_stream_type_without_ts[] = { + "MPEG-2 Program Stream", + "", + "MPEG-1 System Stream", + "MPEG-2 DVD-compatible Stream", + "MPEG-1 VCD-compatible Stream", + "MPEG-2 SVCD-compatible Stream", + NULL + }; + + static const char *mpeg_stream_type_with_ts[] = { + "MPEG-2 Program Stream", + "MPEG-2 Transport Stream", + "MPEG-1 System Stream", + "MPEG-2 DVD-compatible Stream", + "MPEG-1 VCD-compatible Stream", + "MPEG-2 SVCD-compatible Stream", + NULL + }; + + static const char *mpeg_audio_encoding_l2_ac3[] = { + "", + "MPEG-1/2 Layer II", + "", + "", + "AC-3", + NULL + }; + + switch (id) { + case V4L2_CID_MPEG_STREAM_TYPE: + return (p->capabilities & CX2341X_CAP_HAS_TS) ? + mpeg_stream_type_with_ts : mpeg_stream_type_without_ts; + case V4L2_CID_MPEG_AUDIO_ENCODING: + return (p->capabilities & CX2341X_CAP_HAS_AC3) ? + mpeg_audio_encoding_l2_ac3 : v4l2_ctrl_get_menu(id); + case V4L2_CID_MPEG_AUDIO_L1_BITRATE: + case V4L2_CID_MPEG_AUDIO_L3_BITRATE: + return NULL; + case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE: + case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE: + case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE: + case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE: + case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE: + return cx2341x_get_menu(id); + default: + return v4l2_ctrl_get_menu(id); + } +} +EXPORT_SYMBOL(cx2341x_ctrl_get_menu); + +static void cx2341x_calc_audio_properties(struct cx2341x_mpeg_params *params) +{ + params->audio_properties = + (params->audio_sampling_freq << 0) | + (params->audio_mode << 8) | + (params->audio_mode_extension << 10) | + (((params->audio_emphasis == V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17) + ? 
3 : params->audio_emphasis) << 12) | + (params->audio_crc << 14); + + if ((params->capabilities & CX2341X_CAP_HAS_AC3) && + params->audio_encoding == V4L2_MPEG_AUDIO_ENCODING_AC3) { + params->audio_properties |= + /* Not sure if this MPEG Layer II setting is required */ + ((3 - V4L2_MPEG_AUDIO_ENCODING_LAYER_2) << 2) | + (params->audio_ac3_bitrate << 4) | + (CX2341X_AUDIO_ENCODING_METHOD_AC3 << 28); + } else { + /* Assuming MPEG Layer II */ + params->audio_properties |= + ((3 - params->audio_encoding) << 2) | + ((1 + params->audio_l2_bitrate) << 4); + } +} + +/* Check for correctness of the ctrl's value based on the data from + struct v4l2_queryctrl and the available menu items. Note that + menu_items may be NULL, in that case it is ignored. */ +static int v4l2_ctrl_check(struct v4l2_ext_control *ctrl, struct v4l2_queryctrl *qctrl, + const char * const *menu_items) +{ + if (qctrl->flags & V4L2_CTRL_FLAG_DISABLED) + return -EINVAL; + if (qctrl->flags & V4L2_CTRL_FLAG_GRABBED) + return -EBUSY; + if (qctrl->type == V4L2_CTRL_TYPE_STRING) + return 0; + if (qctrl->type == V4L2_CTRL_TYPE_BUTTON || + qctrl->type == V4L2_CTRL_TYPE_INTEGER64 || + qctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS) + return 0; + if (ctrl->value < qctrl->minimum || ctrl->value > qctrl->maximum) + return -ERANGE; + if (qctrl->type == V4L2_CTRL_TYPE_MENU && menu_items != NULL) { + if (menu_items[ctrl->value] == NULL || + menu_items[ctrl->value][0] == '\0') + return -EINVAL; + } + if (qctrl->type == V4L2_CTRL_TYPE_BITMASK && + (ctrl->value & ~qctrl->maximum)) + return -ERANGE; + return 0; +} + +int cx2341x_ext_ctrls(struct cx2341x_mpeg_params *params, int busy, + struct v4l2_ext_controls *ctrls, unsigned int cmd) +{ + int err = 0; + int i; + + if (cmd == VIDIOC_G_EXT_CTRLS) { + for (i = 0; i < ctrls->count; i++) { + struct v4l2_ext_control *ctrl = ctrls->controls + i; + + err = cx2341x_get_ctrl(params, ctrl); + if (err) { + ctrls->error_idx = i; + break; + } + } + return err; + } + for (i = 0; i < ctrls->count; i++) { + struct v4l2_ext_control *ctrl = ctrls->controls + i; + struct v4l2_queryctrl qctrl; + const char * const *menu_items = NULL; + + qctrl.id = ctrl->id; + err = cx2341x_ctrl_query(params, &qctrl); + if (err) + break; + if (qctrl.type == V4L2_CTRL_TYPE_MENU) + menu_items = cx2341x_ctrl_get_menu(params, qctrl.id); + err = v4l2_ctrl_check(ctrl, &qctrl, menu_items); + if (err) + break; + err = cx2341x_set_ctrl(params, busy, ctrl); + if (err) + break; + } + if (err == 0 && + params->video_bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR && + params->video_bitrate_peak < params->video_bitrate) { + err = -ERANGE; + ctrls->error_idx = ctrls->count; + } + if (err) + ctrls->error_idx = i; + else + cx2341x_calc_audio_properties(params); + return err; +} +EXPORT_SYMBOL(cx2341x_ext_ctrls); + +void cx2341x_fill_defaults(struct cx2341x_mpeg_params *p) +{ + *p = default_params; + cx2341x_calc_audio_properties(p); +} +EXPORT_SYMBOL(cx2341x_fill_defaults); + +static int cx2341x_api(void *priv, cx2341x_mbox_func func, + u32 cmd, int args, ...) 
+{ + u32 data[CX2341X_MBOX_MAX_DATA]; + va_list vargs; + int i; + + va_start(vargs, args); + + for (i = 0; i < args; i++) + data[i] = va_arg(vargs, int); + va_end(vargs); + return func(priv, cmd, args, 0, data); +} + +#define CMP_FIELD(__old, __new, __field) (__old->__field != __new->__field) + +int cx2341x_update(void *priv, cx2341x_mbox_func func, + const struct cx2341x_mpeg_params *old, + const struct cx2341x_mpeg_params *new) +{ + static int mpeg_stream_type[] = { + 0, /* MPEG-2 PS */ + 1, /* MPEG-2 TS */ + 2, /* MPEG-1 SS */ + 14, /* DVD */ + 11, /* VCD */ + 12, /* SVCD */ + }; + int err; + + cx2341x_api(priv, func, CX2341X_ENC_SET_OUTPUT_PORT, 2, new->port, 0); + + if (!old || + CMP_FIELD(old, new, is_50hz)) { + err = cx2341x_api(priv, func, CX2341X_ENC_SET_FRAME_RATE, 1, + new->is_50hz); + if (err) + return err; + } + + if (!old || + CMP_FIELD(old, new, width) || + CMP_FIELD(old, new, height) || + CMP_FIELD(old, new, video_encoding)) { + u16 w = new->width; + u16 h = new->height; + + if (new->video_encoding == V4L2_MPEG_VIDEO_ENCODING_MPEG_1) { + w /= 2; + h /= 2; + } + err = cx2341x_api(priv, func, CX2341X_ENC_SET_FRAME_SIZE, 2, + h, w); + if (err) + return err; + } + if (!old || + CMP_FIELD(old, new, stream_type)) { + err = cx2341x_api(priv, func, CX2341X_ENC_SET_STREAM_TYPE, 1, + mpeg_stream_type[new->stream_type]); + if (err) + return err; + } + if (!old || + CMP_FIELD(old, new, video_aspect)) { + err = cx2341x_api(priv, func, CX2341X_ENC_SET_ASPECT_RATIO, 1, + 1 + new->video_aspect); + if (err) + return err; + } + if (!old || + CMP_FIELD(old, new, video_b_frames) || + CMP_FIELD(old, new, video_gop_size)) { + err = cx2341x_api(priv, func, CX2341X_ENC_SET_GOP_PROPERTIES, 2, + new->video_gop_size, new->video_b_frames + 1); + if (err) + return err; + } + if (!old || + CMP_FIELD(old, new, video_gop_closure)) { + err = cx2341x_api(priv, func, CX2341X_ENC_SET_GOP_CLOSURE, 1, + new->video_gop_closure); + if (err) + return err; + } + if (!old || + CMP_FIELD(old, new, audio_properties)) { + err = cx2341x_api(priv, func, CX2341X_ENC_SET_AUDIO_PROPERTIES, + 1, new->audio_properties); + if (err) + return err; + } + if (!old || + CMP_FIELD(old, new, audio_mute)) { + err = cx2341x_api(priv, func, CX2341X_ENC_MUTE_AUDIO, 1, + new->audio_mute); + if (err) + return err; + } + if (!old || + CMP_FIELD(old, new, video_bitrate_mode) || + CMP_FIELD(old, new, video_bitrate) || + CMP_FIELD(old, new, video_bitrate_peak)) { + err = cx2341x_api(priv, func, CX2341X_ENC_SET_BIT_RATE, 5, + new->video_bitrate_mode, new->video_bitrate, + new->video_bitrate_peak / 400, 0, 0); + if (err) + return err; + } + if (!old || + CMP_FIELD(old, new, video_spatial_filter_mode) || + CMP_FIELD(old, new, video_temporal_filter_mode) || + CMP_FIELD(old, new, video_median_filter_type)) { + err = cx2341x_api(priv, func, CX2341X_ENC_SET_DNR_FILTER_MODE, + 2, + new->video_spatial_filter_mode | + (new->video_temporal_filter_mode << 1), + new->video_median_filter_type); + if (err) + return err; + } + if (!old || + CMP_FIELD(old, new, video_luma_median_filter_bottom) || + CMP_FIELD(old, new, video_luma_median_filter_top) || + CMP_FIELD(old, new, video_chroma_median_filter_bottom) || + CMP_FIELD(old, new, video_chroma_median_filter_top)) { + err = cx2341x_api(priv, func, CX2341X_ENC_SET_CORING_LEVELS, 4, + new->video_luma_median_filter_bottom, + new->video_luma_median_filter_top, + new->video_chroma_median_filter_bottom, + new->video_chroma_median_filter_top); + if (err) + return err; + } + if (!old || + CMP_FIELD(old, new, 
video_luma_spatial_filter_type) || + CMP_FIELD(old, new, video_chroma_spatial_filter_type)) { + err = cx2341x_api(priv, func, + CX2341X_ENC_SET_SPATIAL_FILTER_TYPE, + 2, new->video_luma_spatial_filter_type, + new->video_chroma_spatial_filter_type); + if (err) + return err; + } + if (!old || + CMP_FIELD(old, new, video_spatial_filter) || + CMP_FIELD(old, new, video_temporal_filter)) { + err = cx2341x_api(priv, func, CX2341X_ENC_SET_DNR_FILTER_PROPS, + 2, new->video_spatial_filter, + new->video_temporal_filter); + if (err) + return err; + } + if (!old || + CMP_FIELD(old, new, video_temporal_decimation)) { + err = cx2341x_api(priv, func, CX2341X_ENC_SET_FRAME_DROP_RATE, + 1, new->video_temporal_decimation); + if (err) + return err; + } + if (!old || + CMP_FIELD(old, new, video_mute) || + (new->video_mute && CMP_FIELD(old, new, video_mute_yuv))) { + err = cx2341x_api(priv, func, CX2341X_ENC_MUTE_VIDEO, 1, + new->video_mute | (new->video_mute_yuv << 8)); + if (err) + return err; + } + if (!old || + CMP_FIELD(old, new, stream_insert_nav_packets)) { + err = cx2341x_api(priv, func, CX2341X_ENC_MISC, 2, + 7, new->stream_insert_nav_packets); + if (err) + return err; + } + return 0; +} +EXPORT_SYMBOL(cx2341x_update); + +static const char *cx2341x_menu_item(const struct cx2341x_mpeg_params *p, u32 id) +{ + const char * const *menu = cx2341x_ctrl_get_menu(p, id); + struct v4l2_ext_control ctrl; + + if (menu == NULL) + goto invalid; + ctrl.id = id; + if (cx2341x_get_ctrl(p, &ctrl)) + goto invalid; + while (ctrl.value-- && *menu) menu++; + if (*menu == NULL) + goto invalid; + return *menu; + +invalid: + return ""; +} + +void cx2341x_log_status(const struct cx2341x_mpeg_params *p, const char *prefix) +{ + int is_mpeg1 = p->video_encoding == V4L2_MPEG_VIDEO_ENCODING_MPEG_1; + + /* Stream */ + printk(KERN_INFO "%s: Stream: %s", + prefix, + cx2341x_menu_item(p, V4L2_CID_MPEG_STREAM_TYPE)); + if (p->stream_insert_nav_packets) + printk(KERN_CONT " (with navigation packets)"); + printk(KERN_CONT "\n"); + printk(KERN_INFO "%s: VBI Format: %s\n", + prefix, + cx2341x_menu_item(p, V4L2_CID_MPEG_STREAM_VBI_FMT)); + + /* Video */ + printk(KERN_INFO "%s: Video: %dx%d, %d fps%s\n", + prefix, + p->width / (is_mpeg1 ? 2 : 1), p->height / (is_mpeg1 ? 2 : 1), + p->is_50hz ? 25 : 30, + (p->video_mute) ? " (muted)" : ""); + printk(KERN_INFO "%s: Video: %s, %s, %s, %d", + prefix, + cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_ENCODING), + cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_ASPECT), + cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_BITRATE_MODE), + p->video_bitrate); + if (p->video_bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) + printk(KERN_CONT ", Peak %d", p->video_bitrate_peak); + printk(KERN_CONT "\n"); + printk(KERN_INFO + "%s: Video: GOP Size %d, %d B-Frames, %sGOP Closure\n", + prefix, + p->video_gop_size, p->video_b_frames, + p->video_gop_closure ? "" : "No "); + if (p->video_temporal_decimation) + printk(KERN_INFO "%s: Video: Temporal Decimation %d\n", + prefix, p->video_temporal_decimation); + + /* Audio */ + printk(KERN_INFO "%s: Audio: %s, %s, %s, %s%s", + prefix, + cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ), + cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_ENCODING), + cx2341x_menu_item(p, + p->audio_encoding == V4L2_MPEG_AUDIO_ENCODING_AC3 + ? V4L2_CID_MPEG_AUDIO_AC3_BITRATE + : V4L2_CID_MPEG_AUDIO_L2_BITRATE), + cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_MODE), + p->audio_mute ? 
" (muted)" : ""); + if (p->audio_mode == V4L2_MPEG_AUDIO_MODE_JOINT_STEREO) + printk(KERN_CONT ", %s", cx2341x_menu_item(p, + V4L2_CID_MPEG_AUDIO_MODE_EXTENSION)); + printk(KERN_CONT ", %s, %s\n", + cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_EMPHASIS), + cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_CRC)); + + /* Encoding filters */ + printk(KERN_INFO "%s: Spatial Filter: %s, Luma %s, Chroma %s, %d\n", + prefix, + cx2341x_menu_item(p, + V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE), + cx2341x_menu_item(p, + V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE), + cx2341x_menu_item(p, + V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE), + p->video_spatial_filter); + + printk(KERN_INFO "%s: Temporal Filter: %s, %d\n", + prefix, + cx2341x_menu_item(p, + V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE), + p->video_temporal_filter); + printk(KERN_INFO + "%s: Median Filter: %s, Luma [%d, %d], Chroma [%d, %d]\n", + prefix, + cx2341x_menu_item(p, + V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE), + p->video_luma_median_filter_bottom, + p->video_luma_median_filter_top, + p->video_chroma_median_filter_bottom, + p->video_chroma_median_filter_top); +} +EXPORT_SYMBOL(cx2341x_log_status); + + + +/********************** NEW CODE *********************/ + +static inline struct cx2341x_handler *to_cxhdl(struct v4l2_ctrl *ctrl) +{ + return container_of(ctrl->handler, struct cx2341x_handler, hdl); +} + +static int cx2341x_hdl_api(struct cx2341x_handler *hdl, + u32 cmd, int args, ...) +{ + u32 data[CX2341X_MBOX_MAX_DATA]; + va_list vargs; + int i; + + va_start(vargs, args); + + for (i = 0; i < args; i++) + data[i] = va_arg(vargs, int); + va_end(vargs); + return hdl->func(hdl->priv, cmd, args, 0, data); +} + +/* ctrl->handler->lock is held, so it is safe to access cur.val */ +static inline int cx2341x_neq(struct v4l2_ctrl *ctrl) +{ + return ctrl && ctrl->val != ctrl->cur.val; +} + +static int cx2341x_try_ctrl(struct v4l2_ctrl *ctrl) +{ + struct cx2341x_handler *hdl = to_cxhdl(ctrl); + s32 val = ctrl->val; + + switch (ctrl->id) { + case V4L2_CID_MPEG_VIDEO_B_FRAMES: { + /* video gop cluster */ + int b = val + 1; + int gop = hdl->video_gop_size->val; + + gop = b * ((gop + b - 1) / b); + + /* Max GOP size = 34 */ + while (gop > 34) + gop -= b; + hdl->video_gop_size->val = gop; + break; + } + + case V4L2_CID_MPEG_STREAM_TYPE: + /* stream type cluster */ + hdl->video_encoding->val = + (hdl->stream_type->val == V4L2_MPEG_STREAM_TYPE_MPEG1_SS || + hdl->stream_type->val == V4L2_MPEG_STREAM_TYPE_MPEG1_VCD) ? 
+ V4L2_MPEG_VIDEO_ENCODING_MPEG_1 : + V4L2_MPEG_VIDEO_ENCODING_MPEG_2; + if (hdl->video_encoding->val == V4L2_MPEG_VIDEO_ENCODING_MPEG_1) + /* MPEG-1 implies CBR */ + hdl->video_bitrate_mode->val = + V4L2_MPEG_VIDEO_BITRATE_MODE_CBR; + /* peak bitrate shall be >= normal bitrate */ + if (hdl->video_bitrate_mode->val == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR && + hdl->video_bitrate_peak->val < hdl->video_bitrate->val) + hdl->video_bitrate_peak->val = hdl->video_bitrate->val; + break; + } + return 0; +} + +static int cx2341x_s_ctrl(struct v4l2_ctrl *ctrl) +{ + static const int mpeg_stream_type[] = { + 0, /* MPEG-2 PS */ + 1, /* MPEG-2 TS */ + 2, /* MPEG-1 SS */ + 14, /* DVD */ + 11, /* VCD */ + 12, /* SVCD */ + }; + struct cx2341x_handler *hdl = to_cxhdl(ctrl); + s32 val = ctrl->val; + u32 props; + int err; + + switch (ctrl->id) { + case V4L2_CID_MPEG_STREAM_VBI_FMT: + if (hdl->ops && hdl->ops->s_stream_vbi_fmt) + return hdl->ops->s_stream_vbi_fmt(hdl, val); + return 0; + + case V4L2_CID_MPEG_VIDEO_ASPECT: + return cx2341x_hdl_api(hdl, + CX2341X_ENC_SET_ASPECT_RATIO, 1, val + 1); + + case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: + return cx2341x_hdl_api(hdl, CX2341X_ENC_SET_GOP_CLOSURE, 1, val); + + case V4L2_CID_MPEG_AUDIO_MUTE: + return cx2341x_hdl_api(hdl, CX2341X_ENC_MUTE_AUDIO, 1, val); + + case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: + return cx2341x_hdl_api(hdl, + CX2341X_ENC_SET_FRAME_DROP_RATE, 1, val); + + case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS: + return cx2341x_hdl_api(hdl, CX2341X_ENC_MISC, 2, 7, val); + + case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: + /* audio properties cluster */ + props = (hdl->audio_sampling_freq->val << 0) | + (hdl->audio_mode->val << 8) | + (hdl->audio_mode_extension->val << 10) | + (hdl->audio_crc->val << 14); + if (hdl->audio_emphasis->val == V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17) + props |= 3 << 12; + else + props |= hdl->audio_emphasis->val << 12; + + if (hdl->audio_encoding->val == V4L2_MPEG_AUDIO_ENCODING_AC3) { + props |= +#if 1 + /* Not sure if this MPEG Layer II setting is required */ + ((3 - V4L2_MPEG_AUDIO_ENCODING_LAYER_2) << 2) | +#endif + (hdl->audio_ac3_bitrate->val << 4) | + (CX2341X_AUDIO_ENCODING_METHOD_AC3 << 28); + } else { + /* Assuming MPEG Layer II */ + props |= + ((3 - hdl->audio_encoding->val) << 2) | + ((1 + hdl->audio_l2_bitrate->val) << 4); + } + err = cx2341x_hdl_api(hdl, + CX2341X_ENC_SET_AUDIO_PROPERTIES, 1, props); + if (err) + return err; + + hdl->audio_properties = props; + if (hdl->audio_ac3_bitrate) { + int is_ac3 = hdl->audio_encoding->val == + V4L2_MPEG_AUDIO_ENCODING_AC3; + + v4l2_ctrl_activate(hdl->audio_ac3_bitrate, is_ac3); + v4l2_ctrl_activate(hdl->audio_l2_bitrate, !is_ac3); + } + v4l2_ctrl_activate(hdl->audio_mode_extension, + hdl->audio_mode->val == V4L2_MPEG_AUDIO_MODE_JOINT_STEREO); + if (cx2341x_neq(hdl->audio_sampling_freq) && + hdl->ops && hdl->ops->s_audio_sampling_freq) + return hdl->ops->s_audio_sampling_freq(hdl, hdl->audio_sampling_freq->val); + if (cx2341x_neq(hdl->audio_mode) && + hdl->ops && hdl->ops->s_audio_mode) + return hdl->ops->s_audio_mode(hdl, hdl->audio_mode->val); + return 0; + + case V4L2_CID_MPEG_VIDEO_B_FRAMES: + /* video gop cluster */ + return cx2341x_hdl_api(hdl, CX2341X_ENC_SET_GOP_PROPERTIES, 2, + hdl->video_gop_size->val, + hdl->video_b_frames->val + 1); + + case V4L2_CID_MPEG_STREAM_TYPE: + /* stream type cluster */ + err = cx2341x_hdl_api(hdl, + CX2341X_ENC_SET_STREAM_TYPE, 1, mpeg_stream_type[val]); + if (err) + return err; + + err = cx2341x_hdl_api(hdl, CX2341X_ENC_SET_BIT_RATE, 5, + 
hdl->video_bitrate_mode->val, + hdl->video_bitrate->val, + hdl->video_bitrate_peak->val / 400, 0, 0); + if (err) + return err; + + v4l2_ctrl_activate(hdl->video_bitrate_mode, + hdl->video_encoding->val != V4L2_MPEG_VIDEO_ENCODING_MPEG_1); + v4l2_ctrl_activate(hdl->video_bitrate_peak, + hdl->video_bitrate_mode->val != V4L2_MPEG_VIDEO_BITRATE_MODE_CBR); + if (cx2341x_neq(hdl->video_encoding) && + hdl->ops && hdl->ops->s_video_encoding) + return hdl->ops->s_video_encoding(hdl, hdl->video_encoding->val); + return 0; + + case V4L2_CID_MPEG_VIDEO_MUTE: + /* video mute cluster */ + return cx2341x_hdl_api(hdl, CX2341X_ENC_MUTE_VIDEO, 1, + hdl->video_mute->val | + (hdl->video_mute_yuv->val << 8)); + + case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE: { + int active_filter; + + /* video filter mode */ + err = cx2341x_hdl_api(hdl, CX2341X_ENC_SET_DNR_FILTER_MODE, 2, + hdl->video_spatial_filter_mode->val | + (hdl->video_temporal_filter_mode->val << 1), + hdl->video_median_filter_type->val); + if (err) + return err; + + active_filter = hdl->video_spatial_filter_mode->val != + V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO; + v4l2_ctrl_activate(hdl->video_spatial_filter, active_filter); + v4l2_ctrl_activate(hdl->video_luma_spatial_filter_type, active_filter); + v4l2_ctrl_activate(hdl->video_chroma_spatial_filter_type, active_filter); + active_filter = hdl->video_temporal_filter_mode->val != + V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO; + v4l2_ctrl_activate(hdl->video_temporal_filter, active_filter); + active_filter = hdl->video_median_filter_type->val != + V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF; + v4l2_ctrl_activate(hdl->video_luma_median_filter_bottom, active_filter); + v4l2_ctrl_activate(hdl->video_luma_median_filter_top, active_filter); + v4l2_ctrl_activate(hdl->video_chroma_median_filter_bottom, active_filter); + v4l2_ctrl_activate(hdl->video_chroma_median_filter_top, active_filter); + return 0; + } + + case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE: + /* video filter type cluster */ + return cx2341x_hdl_api(hdl, + CX2341X_ENC_SET_SPATIAL_FILTER_TYPE, 2, + hdl->video_luma_spatial_filter_type->val, + hdl->video_chroma_spatial_filter_type->val); + + case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER: + /* video filter cluster */ + return cx2341x_hdl_api(hdl, CX2341X_ENC_SET_DNR_FILTER_PROPS, 2, + hdl->video_spatial_filter->val, + hdl->video_temporal_filter->val); + + case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP: + /* video median cluster */ + return cx2341x_hdl_api(hdl, CX2341X_ENC_SET_CORING_LEVELS, 4, + hdl->video_luma_median_filter_bottom->val, + hdl->video_luma_median_filter_top->val, + hdl->video_chroma_median_filter_bottom->val, + hdl->video_chroma_median_filter_top->val); + } + return -EINVAL; +} + +static const struct v4l2_ctrl_ops cx2341x_ops = { + .try_ctrl = cx2341x_try_ctrl, + .s_ctrl = cx2341x_s_ctrl, +}; + +static struct v4l2_ctrl *cx2341x_ctrl_new_custom(struct v4l2_ctrl_handler *hdl, + u32 id, s32 min, s32 max, s32 step, s32 def) +{ + struct v4l2_ctrl_config cfg; + + memset(&cfg, 0, sizeof(cfg)); + cx2341x_ctrl_fill(id, &cfg.name, &cfg.type, &min, &max, &step, &def, &cfg.flags); + cfg.ops = &cx2341x_ops; + cfg.id = id; + cfg.min = min; + cfg.max = max; + cfg.def = def; + if (cfg.type == V4L2_CTRL_TYPE_MENU) { + cfg.step = 0; + cfg.menu_skip_mask = step; + cfg.qmenu = cx2341x_get_menu(id); + } else { + cfg.step = step; + cfg.menu_skip_mask = 0; + } + return v4l2_ctrl_new_custom(hdl, &cfg, NULL); +} + +static struct v4l2_ctrl 
*cx2341x_ctrl_new_std(struct v4l2_ctrl_handler *hdl, + u32 id, s32 min, s32 max, s32 step, s32 def) +{ + return v4l2_ctrl_new_std(hdl, &cx2341x_ops, id, min, max, step, def); +} + +static struct v4l2_ctrl *cx2341x_ctrl_new_menu(struct v4l2_ctrl_handler *hdl, + u32 id, s32 max, s32 mask, s32 def) +{ + return v4l2_ctrl_new_std_menu(hdl, &cx2341x_ops, id, max, mask, def); +} + +int cx2341x_handler_init(struct cx2341x_handler *cxhdl, + unsigned nr_of_controls_hint) +{ + struct v4l2_ctrl_handler *hdl = &cxhdl->hdl; + u32 caps = cxhdl->capabilities; + int has_sliced_vbi = caps & CX2341X_CAP_HAS_SLICED_VBI; + int has_ac3 = caps & CX2341X_CAP_HAS_AC3; + int has_ts = caps & CX2341X_CAP_HAS_TS; + + cxhdl->width = 720; + cxhdl->height = 480; + + v4l2_ctrl_handler_init(hdl, nr_of_controls_hint); + + /* Add controls in ascending control ID order for fastest + insertion time. */ + cxhdl->stream_type = cx2341x_ctrl_new_menu(hdl, + V4L2_CID_MPEG_STREAM_TYPE, + V4L2_MPEG_STREAM_TYPE_MPEG2_SVCD, has_ts ? 0 : 2, + V4L2_MPEG_STREAM_TYPE_MPEG2_PS); + cxhdl->stream_vbi_fmt = cx2341x_ctrl_new_menu(hdl, + V4L2_CID_MPEG_STREAM_VBI_FMT, + V4L2_MPEG_STREAM_VBI_FMT_IVTV, has_sliced_vbi ? 0 : 2, + V4L2_MPEG_STREAM_VBI_FMT_NONE); + cxhdl->audio_sampling_freq = cx2341x_ctrl_new_menu(hdl, + V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ, + V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000, 0, + V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000); + cxhdl->audio_encoding = cx2341x_ctrl_new_menu(hdl, + V4L2_CID_MPEG_AUDIO_ENCODING, + V4L2_MPEG_AUDIO_ENCODING_AC3, has_ac3 ? ~0x12 : ~0x2, + V4L2_MPEG_AUDIO_ENCODING_LAYER_2); + cxhdl->audio_l2_bitrate = cx2341x_ctrl_new_menu(hdl, + V4L2_CID_MPEG_AUDIO_L2_BITRATE, + V4L2_MPEG_AUDIO_L2_BITRATE_384K, 0x1ff, + V4L2_MPEG_AUDIO_L2_BITRATE_224K); + cxhdl->audio_mode = cx2341x_ctrl_new_menu(hdl, + V4L2_CID_MPEG_AUDIO_MODE, + V4L2_MPEG_AUDIO_MODE_MONO, 0, + V4L2_MPEG_AUDIO_MODE_STEREO); + cxhdl->audio_mode_extension = cx2341x_ctrl_new_menu(hdl, + V4L2_CID_MPEG_AUDIO_MODE_EXTENSION, + V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_16, 0, + V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4); + cxhdl->audio_emphasis = cx2341x_ctrl_new_menu(hdl, + V4L2_CID_MPEG_AUDIO_EMPHASIS, + V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17, 0, + V4L2_MPEG_AUDIO_EMPHASIS_NONE); + cxhdl->audio_crc = cx2341x_ctrl_new_menu(hdl, + V4L2_CID_MPEG_AUDIO_CRC, + V4L2_MPEG_AUDIO_CRC_CRC16, 0, + V4L2_MPEG_AUDIO_CRC_NONE); + + cx2341x_ctrl_new_std(hdl, V4L2_CID_MPEG_AUDIO_MUTE, 0, 1, 1, 0); + if (has_ac3) + cxhdl->audio_ac3_bitrate = cx2341x_ctrl_new_menu(hdl, + V4L2_CID_MPEG_AUDIO_AC3_BITRATE, + V4L2_MPEG_AUDIO_AC3_BITRATE_448K, 0x03, + V4L2_MPEG_AUDIO_AC3_BITRATE_224K); + cxhdl->video_encoding = cx2341x_ctrl_new_menu(hdl, + V4L2_CID_MPEG_VIDEO_ENCODING, + V4L2_MPEG_VIDEO_ENCODING_MPEG_2, 0, + V4L2_MPEG_VIDEO_ENCODING_MPEG_2); + cx2341x_ctrl_new_menu(hdl, + V4L2_CID_MPEG_VIDEO_ASPECT, + V4L2_MPEG_VIDEO_ASPECT_221x100, 0, + V4L2_MPEG_VIDEO_ASPECT_4x3); + cxhdl->video_b_frames = cx2341x_ctrl_new_std(hdl, + V4L2_CID_MPEG_VIDEO_B_FRAMES, 0, 33, 1, 2); + cxhdl->video_gop_size = cx2341x_ctrl_new_std(hdl, + V4L2_CID_MPEG_VIDEO_GOP_SIZE, + 1, 34, 1, cxhdl->is_50hz ? 
12 : 15); + cx2341x_ctrl_new_std(hdl, V4L2_CID_MPEG_VIDEO_GOP_CLOSURE, 0, 1, 1, 1); + cxhdl->video_bitrate_mode = cx2341x_ctrl_new_menu(hdl, + V4L2_CID_MPEG_VIDEO_BITRATE_MODE, + V4L2_MPEG_VIDEO_BITRATE_MODE_CBR, 0, + V4L2_MPEG_VIDEO_BITRATE_MODE_VBR); + cxhdl->video_bitrate = cx2341x_ctrl_new_std(hdl, + V4L2_CID_MPEG_VIDEO_BITRATE, + 0, 27000000, 1, 6000000); + cxhdl->video_bitrate_peak = cx2341x_ctrl_new_std(hdl, + V4L2_CID_MPEG_VIDEO_BITRATE_PEAK, + 0, 27000000, 1, 8000000); + cx2341x_ctrl_new_std(hdl, + V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION, 0, 255, 1, 0); + cxhdl->video_mute = cx2341x_ctrl_new_std(hdl, + V4L2_CID_MPEG_VIDEO_MUTE, 0, 1, 1, 0); + cxhdl->video_mute_yuv = cx2341x_ctrl_new_std(hdl, + V4L2_CID_MPEG_VIDEO_MUTE_YUV, 0, 0xffffff, 1, 0x008080); + + /* CX23415/6 specific */ + cxhdl->video_spatial_filter_mode = cx2341x_ctrl_new_custom(hdl, + V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE, + V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL, + V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO, 0, + V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL); + cxhdl->video_spatial_filter = cx2341x_ctrl_new_custom(hdl, + V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER, + 0, 15, 1, 0); + cxhdl->video_luma_spatial_filter_type = cx2341x_ctrl_new_custom(hdl, + V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE, + V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_OFF, + V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_SYM_NON_SEPARABLE, + 0, + V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_HOR); + cxhdl->video_chroma_spatial_filter_type = cx2341x_ctrl_new_custom(hdl, + V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE, + V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_OFF, + V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR, + 0, + V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR); + cxhdl->video_temporal_filter_mode = cx2341x_ctrl_new_custom(hdl, + V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE, + V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL, + V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO, + 0, + V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL); + cxhdl->video_temporal_filter = cx2341x_ctrl_new_custom(hdl, + V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER, + 0, 31, 1, 8); + cxhdl->video_median_filter_type = cx2341x_ctrl_new_custom(hdl, + V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE, + V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF, + V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_DIAG, + 0, + V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF); + cxhdl->video_luma_median_filter_bottom = cx2341x_ctrl_new_custom(hdl, + V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM, + 0, 255, 1, 0); + cxhdl->video_luma_median_filter_top = cx2341x_ctrl_new_custom(hdl, + V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP, + 0, 255, 1, 255); + cxhdl->video_chroma_median_filter_bottom = cx2341x_ctrl_new_custom(hdl, + V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM, + 0, 255, 1, 0); + cxhdl->video_chroma_median_filter_top = cx2341x_ctrl_new_custom(hdl, + V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP, + 0, 255, 1, 255); + cx2341x_ctrl_new_custom(hdl, V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS, + 0, 1, 1, 0); + + if (hdl->error) { + int err = hdl->error; + + v4l2_ctrl_handler_free(hdl); + return err; + } + + v4l2_ctrl_cluster(8, &cxhdl->audio_sampling_freq); + v4l2_ctrl_cluster(2, &cxhdl->video_b_frames); + v4l2_ctrl_cluster(5, &cxhdl->stream_type); + v4l2_ctrl_cluster(2, &cxhdl->video_mute); + v4l2_ctrl_cluster(3, &cxhdl->video_spatial_filter_mode); + 
v4l2_ctrl_cluster(2, &cxhdl->video_luma_spatial_filter_type); + v4l2_ctrl_cluster(2, &cxhdl->video_spatial_filter); + v4l2_ctrl_cluster(4, &cxhdl->video_luma_median_filter_top); + + return 0; +} +EXPORT_SYMBOL(cx2341x_handler_init); + +void cx2341x_handler_set_50hz(struct cx2341x_handler *cxhdl, int is_50hz) +{ + cxhdl->is_50hz = is_50hz; + cxhdl->video_gop_size->default_value = cxhdl->is_50hz ? 12 : 15; +} +EXPORT_SYMBOL(cx2341x_handler_set_50hz); + +int cx2341x_handler_setup(struct cx2341x_handler *cxhdl) +{ + int h = cxhdl->height; + int w = cxhdl->width; + int err; + + err = cx2341x_hdl_api(cxhdl, CX2341X_ENC_SET_OUTPUT_PORT, 2, cxhdl->port, 0); + if (err) + return err; + err = cx2341x_hdl_api(cxhdl, CX2341X_ENC_SET_FRAME_RATE, 1, cxhdl->is_50hz); + if (err) + return err; + + if (v4l2_ctrl_g_ctrl(cxhdl->video_encoding) == V4L2_MPEG_VIDEO_ENCODING_MPEG_1) { + w /= 2; + h /= 2; + } + err = cx2341x_hdl_api(cxhdl, CX2341X_ENC_SET_FRAME_SIZE, 2, h, w); + if (err) + return err; + return v4l2_ctrl_handler_setup(&cxhdl->hdl); +} +EXPORT_SYMBOL(cx2341x_handler_setup); + +void cx2341x_handler_set_busy(struct cx2341x_handler *cxhdl, int busy) +{ + v4l2_ctrl_grab(cxhdl->audio_sampling_freq, busy); + v4l2_ctrl_grab(cxhdl->audio_encoding, busy); + v4l2_ctrl_grab(cxhdl->audio_l2_bitrate, busy); + v4l2_ctrl_grab(cxhdl->audio_ac3_bitrate, busy); + v4l2_ctrl_grab(cxhdl->stream_vbi_fmt, busy); + v4l2_ctrl_grab(cxhdl->stream_type, busy); + v4l2_ctrl_grab(cxhdl->video_bitrate_mode, busy); + v4l2_ctrl_grab(cxhdl->video_bitrate, busy); + v4l2_ctrl_grab(cxhdl->video_bitrate_peak, busy); +} +EXPORT_SYMBOL(cx2341x_handler_set_busy); diff --git a/drivers/media/common/cypress_firmware.c b/drivers/media/common/cypress_firmware.c new file mode 100644 index 0000000000..cdc7050ed3 --- /dev/null +++ b/drivers/media/common/cypress_firmware.c @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* cypress_firmware.c is part of the DVB USB library. + * + * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de) + * see dvb-usb-init.c for copyright information. + * + * This file contains functions for downloading the firmware to Cypress FX 1 + * and 2 based devices. 
+ * + */ + +#include +#include +#include +#include +#include "cypress_firmware.h" + +struct usb_cypress_controller { + u8 id; + const char *name; /* name of the usb controller */ + u16 cs_reg; /* needs to be restarted, + * when the firmware has been downloaded */ +}; + +static const struct usb_cypress_controller cypress[] = { + { .id = CYPRESS_AN2135, .name = "Cypress AN2135", .cs_reg = 0x7f92 }, + { .id = CYPRESS_AN2235, .name = "Cypress AN2235", .cs_reg = 0x7f92 }, + { .id = CYPRESS_FX2, .name = "Cypress FX2", .cs_reg = 0xe600 }, +}; + +/* + * load a firmware packet to the device + */ +static int usb_cypress_writemem(struct usb_device *udev, u16 addr, u8 *data, + u8 len) +{ + return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), + 0xa0, USB_TYPE_VENDOR, addr, 0x00, data, len, 5000); +} + +static int cypress_get_hexline(const struct firmware *fw, + struct hexline *hx, int *pos) +{ + u8 *b = (u8 *) &fw->data[*pos]; + int data_offs = 4; + + if (*pos >= fw->size) + return 0; + + memset(hx, 0, sizeof(struct hexline)); + hx->len = b[0]; + + if ((*pos + hx->len + 4) >= fw->size) + return -EINVAL; + + hx->addr = b[1] | (b[2] << 8); + hx->type = b[3]; + + if (hx->type == 0x04) { + /* b[4] and b[5] are the Extended linear address record data + * field */ + hx->addr |= (b[4] << 24) | (b[5] << 16); + } + + memcpy(hx->data, &b[data_offs], hx->len); + hx->chk = b[hx->len + data_offs]; + *pos += hx->len + 5; + + return *pos; +} + +int cypress_load_firmware(struct usb_device *udev, + const struct firmware *fw, int type) +{ + struct hexline *hx; + int ret, pos = 0; + + hx = kmalloc(sizeof(*hx), GFP_KERNEL); + if (!hx) + return -ENOMEM; + + /* stop the CPU */ + hx->data[0] = 1; + ret = usb_cypress_writemem(udev, cypress[type].cs_reg, hx->data, 1); + if (ret != 1) { + dev_err(&udev->dev, "%s: CPU stop failed=%d\n", + KBUILD_MODNAME, ret); + ret = -EIO; + goto err_kfree; + } + + /* write firmware to memory */ + for (;;) { + ret = cypress_get_hexline(fw, hx, &pos); + if (ret < 0) + goto err_kfree; + else if (ret == 0) + break; + + ret = usb_cypress_writemem(udev, hx->addr, hx->data, hx->len); + if (ret < 0) { + goto err_kfree; + } else if (ret != hx->len) { + dev_err(&udev->dev, + "%s: error while transferring firmware (transferred size=%d, block size=%d)\n", + KBUILD_MODNAME, ret, hx->len); + ret = -EIO; + goto err_kfree; + } + } + + /* start the CPU */ + hx->data[0] = 0; + ret = usb_cypress_writemem(udev, cypress[type].cs_reg, hx->data, 1); + if (ret != 1) { + dev_err(&udev->dev, "%s: CPU start failed=%d\n", + KBUILD_MODNAME, ret); + ret = -EIO; + goto err_kfree; + } + + ret = 0; +err_kfree: + kfree(hx); + return ret; +} +EXPORT_SYMBOL(cypress_load_firmware); + +MODULE_AUTHOR("Antti Palosaari "); +MODULE_DESCRIPTION("Cypress firmware download"); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/common/cypress_firmware.h b/drivers/media/common/cypress_firmware.h new file mode 100644 index 0000000000..0aa46e602b --- /dev/null +++ b/drivers/media/common/cypress_firmware.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de) + * see dvb-usb-init.c for copyright information. + * + * This file contains functions for downloading the firmware to Cypress FX 1 + * and 2 based devices. 
+ * + */ + +#ifndef CYPRESS_FIRMWARE_H +#define CYPRESS_FIRMWARE_H + +#define CYPRESS_AN2135 0 +#define CYPRESS_AN2235 1 +#define CYPRESS_FX2 2 + +/* commonly used firmware download types and function */ +struct hexline { + u8 len; + u32 addr; + u8 type; + u8 data[255]; + u8 chk; +}; + +int cypress_load_firmware(struct usb_device *, const struct firmware *, int); + +#endif diff --git a/drivers/media/common/saa7146/Kconfig b/drivers/media/common/saa7146/Kconfig new file mode 100644 index 0000000000..dfec86e50d --- /dev/null +++ b/drivers/media/common/saa7146/Kconfig @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0-only +config VIDEO_SAA7146 + tristate + depends on I2C && PCI + +config VIDEO_SAA7146_VV + tristate + depends on VIDEO_DEV + select VIDEOBUF2_DMA_SG + select VIDEO_SAA7146 diff --git a/drivers/media/common/saa7146/Makefile b/drivers/media/common/saa7146/Makefile new file mode 100644 index 0000000000..2a6337feae --- /dev/null +++ b/drivers/media/common/saa7146/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-only +saa7146-objs := saa7146_i2c.o saa7146_core.o +saa7146_vv-objs := saa7146_fops.o saa7146_video.o saa7146_hlp.o saa7146_vbi.o + +obj-$(CONFIG_VIDEO_SAA7146) += saa7146.o +obj-$(CONFIG_VIDEO_SAA7146_VV) += saa7146_vv.o diff --git a/drivers/media/common/saa7146/saa7146_core.c b/drivers/media/common/saa7146/saa7146_core.c new file mode 100644 index 0000000000..27c53eed8f --- /dev/null +++ b/drivers/media/common/saa7146/saa7146_core.c @@ -0,0 +1,566 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + saa7146.o - driver for generic saa7146-based hardware + + Copyright (C) 1998-2003 Michael Hunold + +*/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include + +static int saa7146_num; + +unsigned int saa7146_debug; + +module_param(saa7146_debug, uint, 0644); +MODULE_PARM_DESC(saa7146_debug, "debug level (default: 0)"); + +#if 0 +static void dump_registers(struct saa7146_dev* dev) +{ + int i = 0; + + pr_info(" @ %li jiffies:\n", jiffies); + for (i = 0; i <= 0x148; i += 4) + pr_info("0x%03x: 0x%08x\n", i, saa7146_read(dev, i)); +} +#endif + +/**************************************************************************** + * gpio and debi helper functions + ****************************************************************************/ + +void saa7146_setgpio(struct saa7146_dev *dev, int port, u32 data) +{ + u32 value = 0; + + if (WARN_ON(port > 3)) + return; + + value = saa7146_read(dev, GPIO_CTRL); + value &= ~(0xff << (8*port)); + value |= (data << (8*port)); + saa7146_write(dev, GPIO_CTRL, value); +} + +/* This DEBI code is based on the saa7146 Stradis driver by Nathan Laredo */ +static inline int saa7146_wait_for_debi_done_sleep(struct saa7146_dev *dev, + unsigned long us1, unsigned long us2) +{ + unsigned long timeout; + int err; + + /* wait for registers to be programmed */ + timeout = jiffies + usecs_to_jiffies(us1); + while (1) { + err = time_after(jiffies, timeout); + if (saa7146_read(dev, MC2) & 2) + break; + if (err) { + pr_debug("%s: %s timed out while waiting for registers getting programmed\n", + dev->name, __func__); + return -ETIMEDOUT; + } + msleep(1); + } + + /* wait for transfer to complete */ + timeout = jiffies + usecs_to_jiffies(us2); + while (1) { + err = time_after(jiffies, timeout); + if (!(saa7146_read(dev, PSR) & SPCI_DEBI_S)) + break; + saa7146_read(dev, MC2); + if (err) { + DEB_S("%s: %s timed out while waiting for transfer completion\n", + dev->name, __func__); + return -ETIMEDOUT; + } + msleep(1); + } + + return 0; +} + 
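+/*
+ * Busy-waiting variant of the DEBI wait above: it performs the same two
+ * checks (MC2 for "registers programmed", then PSR SPCI_DEBI_S for transfer
+ * completion), but polls with udelay() instead of msleep(), so it is usable
+ * from contexts that must not sleep. saa7146_wait_for_debi_done() below
+ * selects between the two variants via its 'nobusyloop' argument.
+ */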
+static inline int saa7146_wait_for_debi_done_busyloop(struct saa7146_dev *dev, + unsigned long us1, unsigned long us2) +{ + unsigned long loops; + + /* wait for registers to be programmed */ + loops = us1; + while (1) { + if (saa7146_read(dev, MC2) & 2) + break; + if (!loops--) { + pr_err("%s: %s timed out while waiting for registers getting programmed\n", + dev->name, __func__); + return -ETIMEDOUT; + } + udelay(1); + } + + /* wait for transfer to complete */ + loops = us2 / 5; + while (1) { + if (!(saa7146_read(dev, PSR) & SPCI_DEBI_S)) + break; + saa7146_read(dev, MC2); + if (!loops--) { + DEB_S("%s: %s timed out while waiting for transfer completion\n", + dev->name, __func__); + return -ETIMEDOUT; + } + udelay(5); + } + + return 0; +} + +int saa7146_wait_for_debi_done(struct saa7146_dev *dev, int nobusyloop) +{ + if (nobusyloop) + return saa7146_wait_for_debi_done_sleep(dev, 50000, 250000); + else + return saa7146_wait_for_debi_done_busyloop(dev, 50000, 250000); +} + +/**************************************************************************** + * general helper functions + ****************************************************************************/ + +/* this is videobuf_vmalloc_to_sg() from videobuf-dma-sg.c + make sure virt has been allocated with vmalloc_32(), otherwise return NULL + on highmem machines */ +static struct scatterlist* vmalloc_to_sg(unsigned char *virt, int nr_pages) +{ + struct scatterlist *sglist; + struct page *pg; + int i; + + sglist = kmalloc_array(nr_pages, sizeof(struct scatterlist), GFP_KERNEL); + if (NULL == sglist) + return NULL; + sg_init_table(sglist, nr_pages); + for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) { + pg = vmalloc_to_page(virt); + if (NULL == pg) + goto err; + if (WARN_ON(PageHighMem(pg))) + goto err; + sg_set_page(&sglist[i], pg, PAGE_SIZE, 0); + } + return sglist; + + err: + kfree(sglist); + return NULL; +} + +/********************************************************************************/ +/* common page table functions */ + +void *saa7146_vmalloc_build_pgtable(struct pci_dev *pci, long length, struct saa7146_pgtable *pt) +{ + int pages = (length+PAGE_SIZE-1)/PAGE_SIZE; + void *mem = vmalloc_32(length); + int slen = 0; + + if (NULL == mem) + goto err_null; + + if (!(pt->slist = vmalloc_to_sg(mem, pages))) + goto err_free_mem; + + if (saa7146_pgtable_alloc(pci, pt)) + goto err_free_slist; + + pt->nents = pages; + slen = dma_map_sg(&pci->dev, pt->slist, pt->nents, DMA_FROM_DEVICE); + if (0 == slen) + goto err_free_pgtable; + + if (0 != saa7146_pgtable_build_single(pci, pt, pt->slist, slen)) + goto err_unmap_sg; + + return mem; + +err_unmap_sg: + dma_unmap_sg(&pci->dev, pt->slist, pt->nents, DMA_FROM_DEVICE); +err_free_pgtable: + saa7146_pgtable_free(pci, pt); +err_free_slist: + kfree(pt->slist); + pt->slist = NULL; +err_free_mem: + vfree(mem); +err_null: + return NULL; +} + +void saa7146_vfree_destroy_pgtable(struct pci_dev *pci, void *mem, struct saa7146_pgtable *pt) +{ + dma_unmap_sg(&pci->dev, pt->slist, pt->nents, DMA_FROM_DEVICE); + saa7146_pgtable_free(pci, pt); + kfree(pt->slist); + pt->slist = NULL; + vfree(mem); +} + +void saa7146_pgtable_free(struct pci_dev *pci, struct saa7146_pgtable *pt) +{ + if (NULL == pt->cpu) + return; + dma_free_coherent(&pci->dev, pt->size, pt->cpu, pt->dma); + pt->cpu = NULL; +} + +int saa7146_pgtable_alloc(struct pci_dev *pci, struct saa7146_pgtable *pt) +{ + __le32 *cpu; + dma_addr_t dma_addr = 0; + + cpu = dma_alloc_coherent(&pci->dev, PAGE_SIZE, &dma_addr, GFP_KERNEL); + if (NULL == cpu) { + 
return -ENOMEM; + } + pt->size = PAGE_SIZE; + pt->cpu = cpu; + pt->dma = dma_addr; + + return 0; +} + +int saa7146_pgtable_build_single(struct pci_dev *pci, struct saa7146_pgtable *pt, + struct scatterlist *list, int sglen) +{ + struct sg_dma_page_iter dma_iter; + __le32 *ptr, fill; + int nr_pages = 0; + int i; + + if (WARN_ON(!sglen) || + WARN_ON(list->offset > PAGE_SIZE)) + return -EIO; + + /* if we have a user buffer, the first page may not be + aligned to a page boundary. */ + pt->offset = list->offset; + + ptr = pt->cpu; + for_each_sg_dma_page(list, &dma_iter, sglen, 0) { + *ptr++ = cpu_to_le32(sg_page_iter_dma_address(&dma_iter)); + nr_pages++; + } + + + /* safety; fill the page table up with the last valid page */ + fill = *(ptr-1); + for (i = nr_pages; i < 1024; i++) + *ptr++ = fill; + return 0; +} + +/********************************************************************************/ +/* interrupt handler */ +static irqreturn_t interrupt_hw(int irq, void *dev_id) +{ + struct saa7146_dev *dev = dev_id; + u32 isr; + u32 ack_isr; + + /* read out the interrupt status register */ + ack_isr = isr = saa7146_read(dev, ISR); + + /* is this our interrupt? */ + if ( 0 == isr ) { + /* nope, some other device */ + return IRQ_NONE; + } + + if (dev->ext) { + if (dev->ext->irq_mask & isr) { + if (dev->ext->irq_func) + dev->ext->irq_func(dev, &isr); + isr &= ~dev->ext->irq_mask; + } + } + if (0 != (isr & (MASK_27))) { + DEB_INT("irq: RPS0 (0x%08x)\n", isr); + if (dev->vv_data && dev->vv_callback) + dev->vv_callback(dev,isr); + isr &= ~MASK_27; + } + if (0 != (isr & (MASK_28))) { + if (dev->vv_data && dev->vv_callback) + dev->vv_callback(dev,isr); + isr &= ~MASK_28; + } + if (0 != (isr & (MASK_16|MASK_17))) { + SAA7146_IER_DISABLE(dev, MASK_16|MASK_17); + /* only wake up if we expect something */ + if (0 != dev->i2c_op) { + dev->i2c_op = 0; + wake_up(&dev->i2c_wq); + } else { + u32 psr = saa7146_read(dev, PSR); + u32 ssr = saa7146_read(dev, SSR); + pr_warn("%s: unexpected i2c irq: isr %08x psr %08x ssr %08x\n", + dev->name, isr, psr, ssr); + } + isr &= ~(MASK_16|MASK_17); + } + if( 0 != isr ) { + ERR("warning: interrupt enabled, but not handled properly.(0x%08x)\n", + isr); + ERR("disabling interrupt source(s)!\n"); + SAA7146_IER_DISABLE(dev,isr); + } + saa7146_write(dev, ISR, ack_isr); + return IRQ_HANDLED; +} + +/*********************************************************************************/ +/* configuration-functions */ + +static int saa7146_init_one(struct pci_dev *pci, const struct pci_device_id *ent) +{ + struct saa7146_pci_extension_data *pci_ext = (struct saa7146_pci_extension_data *)ent->driver_data; + struct saa7146_extension *ext = pci_ext->ext; + struct saa7146_dev *dev; + int err = -ENOMEM; + + /* clear out mem for sure */ + dev = kzalloc(sizeof(struct saa7146_dev), GFP_KERNEL); + if (!dev) { + ERR("out of memory\n"); + goto out; + } + + /* create a nice device name */ + sprintf(dev->name, "saa7146 (%d)", saa7146_num); + + DEB_EE("pci:%p\n", pci); + + err = pci_enable_device(pci); + if (err < 0) { + ERR("pci_enable_device() failed\n"); + goto err_free; + } + + /* enable bus-mastering */ + pci_set_master(pci); + + dev->pci = pci; + + /* get chip-revision; this is needed to enable bug-fixes */ + dev->revision = pci->revision; + + /* remap the memory from virtual to physical address */ + + err = pci_request_region(pci, 0, "saa7146"); + if (err < 0) + goto err_disable; + + dev->mem = ioremap(pci_resource_start(pci, 0), + pci_resource_len(pci, 0)); + if (!dev->mem) { + ERR("ioremap() 
failed\n"); + err = -ENODEV; + goto err_release; + } + + /* we don't do a master reset here anymore, it screws up + some boards that don't have an i2c-eeprom for configuration + values */ +/* + saa7146_write(dev, MC1, MASK_31); +*/ + + /* disable all irqs */ + saa7146_write(dev, IER, 0); + + /* shut down all dma transfers and rps tasks */ + saa7146_write(dev, MC1, 0x30ff0000); + + /* clear out any rps-signals pending */ + saa7146_write(dev, MC2, 0xf8000000); + + /* request an interrupt for the saa7146 */ + err = request_irq(pci->irq, interrupt_hw, IRQF_SHARED, + dev->name, dev); + if (err < 0) { + ERR("request_irq() failed\n"); + goto err_unmap; + } + + err = -ENOMEM; + + /* get memory for various stuff */ + dev->d_rps0.cpu_addr = dma_alloc_coherent(&pci->dev, SAA7146_RPS_MEM, + &dev->d_rps0.dma_handle, + GFP_KERNEL); + if (!dev->d_rps0.cpu_addr) + goto err_free_irq; + + dev->d_rps1.cpu_addr = dma_alloc_coherent(&pci->dev, SAA7146_RPS_MEM, + &dev->d_rps1.dma_handle, + GFP_KERNEL); + if (!dev->d_rps1.cpu_addr) + goto err_free_rps0; + + dev->d_i2c.cpu_addr = dma_alloc_coherent(&pci->dev, SAA7146_RPS_MEM, + &dev->d_i2c.dma_handle, GFP_KERNEL); + if (!dev->d_i2c.cpu_addr) + goto err_free_rps1; + + /* the rest + print status message */ + + pr_info("found saa7146 @ mem %p (revision %d, irq %d) (0x%04x,0x%04x)\n", + dev->mem, dev->revision, pci->irq, + pci->subsystem_vendor, pci->subsystem_device); + dev->ext = ext; + + mutex_init(&dev->v4l2_lock); + spin_lock_init(&dev->int_slock); + spin_lock_init(&dev->slock); + + mutex_init(&dev->i2c_lock); + + dev->module = THIS_MODULE; + init_waitqueue_head(&dev->i2c_wq); + + /* set some sane pci arbitrition values */ + saa7146_write(dev, PCI_BT_V1, 0x1c00101f); + + /* TODO: use the status code of the callback */ + + err = -ENODEV; + + if (ext->probe && ext->probe(dev)) { + DEB_D("ext->probe() failed for %p. skipping device.\n", dev); + goto err_free_i2c; + } + + if (ext->attach(dev, pci_ext)) { + DEB_D("ext->attach() failed for %p. skipping device.\n", dev); + goto err_free_i2c; + } + /* V4L extensions will set the pci drvdata to the v4l2_device in the + attach() above. So for those cards that do not use V4L we have to + set it explicitly. 
*/ + pci_set_drvdata(pci, &dev->v4l2_dev); + + saa7146_num++; + + err = 0; +out: + return err; + +err_free_i2c: + dma_free_coherent(&pci->dev, SAA7146_RPS_MEM, dev->d_i2c.cpu_addr, + dev->d_i2c.dma_handle); +err_free_rps1: + dma_free_coherent(&pci->dev, SAA7146_RPS_MEM, dev->d_rps1.cpu_addr, + dev->d_rps1.dma_handle); +err_free_rps0: + dma_free_coherent(&pci->dev, SAA7146_RPS_MEM, dev->d_rps0.cpu_addr, + dev->d_rps0.dma_handle); +err_free_irq: + free_irq(pci->irq, (void *)dev); +err_unmap: + iounmap(dev->mem); +err_release: + pci_release_region(pci, 0); +err_disable: + pci_disable_device(pci); +err_free: + kfree(dev); + goto out; +} + +static void saa7146_remove_one(struct pci_dev *pdev) +{ + struct v4l2_device *v4l2_dev = pci_get_drvdata(pdev); + struct saa7146_dev *dev = to_saa7146_dev(v4l2_dev); + struct { + void *addr; + dma_addr_t dma; + } dev_map[] = { + { dev->d_i2c.cpu_addr, dev->d_i2c.dma_handle }, + { dev->d_rps1.cpu_addr, dev->d_rps1.dma_handle }, + { dev->d_rps0.cpu_addr, dev->d_rps0.dma_handle }, + { NULL, 0 } + }, *p; + + DEB_EE("dev:%p\n", dev); + + dev->ext->detach(dev); + + /* shut down all video dma transfers */ + saa7146_write(dev, MC1, 0x00ff0000); + + /* disable all irqs, release irq-routine */ + saa7146_write(dev, IER, 0); + + free_irq(pdev->irq, dev); + + for (p = dev_map; p->addr; p++) + dma_free_coherent(&pdev->dev, SAA7146_RPS_MEM, p->addr, + p->dma); + + iounmap(dev->mem); + pci_release_region(pdev, 0); + pci_disable_device(pdev); + kfree(dev); + + saa7146_num--; +} + +/*********************************************************************************/ +/* extension handling functions */ + +int saa7146_register_extension(struct saa7146_extension* ext) +{ + DEB_EE("ext:%p\n", ext); + + ext->driver.name = ext->name; + ext->driver.id_table = ext->pci_tbl; + ext->driver.probe = saa7146_init_one; + ext->driver.remove = saa7146_remove_one; + + pr_info("register extension '%s'\n", ext->name); + return pci_register_driver(&ext->driver); +} + +int saa7146_unregister_extension(struct saa7146_extension* ext) +{ + DEB_EE("ext:%p\n", ext); + pr_info("unregister extension '%s'\n", ext->name); + pci_unregister_driver(&ext->driver); + return 0; +} + +EXPORT_SYMBOL_GPL(saa7146_register_extension); +EXPORT_SYMBOL_GPL(saa7146_unregister_extension); + +/* misc functions used by extension modules */ +EXPORT_SYMBOL_GPL(saa7146_pgtable_alloc); +EXPORT_SYMBOL_GPL(saa7146_pgtable_free); +EXPORT_SYMBOL_GPL(saa7146_pgtable_build_single); +EXPORT_SYMBOL_GPL(saa7146_vmalloc_build_pgtable); +EXPORT_SYMBOL_GPL(saa7146_vfree_destroy_pgtable); +EXPORT_SYMBOL_GPL(saa7146_wait_for_debi_done); + +EXPORT_SYMBOL_GPL(saa7146_setgpio); + +EXPORT_SYMBOL_GPL(saa7146_i2c_adapter_prepare); + +EXPORT_SYMBOL_GPL(saa7146_debug); + +MODULE_AUTHOR("Michael Hunold "); +MODULE_DESCRIPTION("driver for generic saa7146-based hardware"); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/common/saa7146/saa7146_fops.c b/drivers/media/common/saa7146/saa7146_fops.c new file mode 100644 index 0000000000..7921445938 --- /dev/null +++ b/drivers/media/common/saa7146/saa7146_fops.c @@ -0,0 +1,435 @@ +// SPDX-License-Identifier: GPL-2.0-only +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include + +/****************************************************************************/ +/* resource management functions, shamelessly stolen from saa7134 driver */ + +int saa7146_res_get(struct saa7146_dev *dev, unsigned int bit) +{ + struct saa7146_vv *vv = dev->vv_data; + + if (vv->resources & bit) { + DEB_D("already 
allocated! want: 0x%02x, cur:0x%02x\n", + bit, vv->resources); + /* have it already allocated */ + return 1; + } + + /* is it free? */ + if (vv->resources & bit) { + DEB_D("locked! vv->resources:0x%02x, we want:0x%02x\n", + vv->resources, bit); + /* no, someone else uses it */ + return 0; + } + /* it's free, grab it */ + vv->resources |= bit; + DEB_D("res: get 0x%02x, cur:0x%02x\n", bit, vv->resources); + return 1; +} + +void saa7146_res_free(struct saa7146_dev *dev, unsigned int bits) +{ + struct saa7146_vv *vv = dev->vv_data; + + WARN_ON((vv->resources & bits) != bits); + + vv->resources &= ~bits; + DEB_D("res: put 0x%02x, cur:0x%02x\n", bits, vv->resources); +} + + +/********************************************************************************/ +/* common buffer functions */ + +int saa7146_buffer_queue(struct saa7146_dev *dev, + struct saa7146_dmaqueue *q, + struct saa7146_buf *buf) +{ + assert_spin_locked(&dev->slock); + DEB_EE("dev:%p, dmaq:%p, buf:%p\n", dev, q, buf); + + if (WARN_ON(!q)) + return -EIO; + + if (NULL == q->curr) { + q->curr = buf; + DEB_D("immediately activating buffer %p\n", buf); + buf->activate(dev,buf,NULL); + } else { + list_add_tail(&buf->list, &q->queue); + DEB_D("adding buffer %p to queue. (active buffer present)\n", + buf); + } + return 0; +} + +void saa7146_buffer_finish(struct saa7146_dev *dev, + struct saa7146_dmaqueue *q, + int state) +{ + struct saa7146_vv *vv = dev->vv_data; + struct saa7146_buf *buf = q->curr; + + assert_spin_locked(&dev->slock); + DEB_EE("dev:%p, dmaq:%p, state:%d\n", dev, q, state); + DEB_EE("q->curr:%p\n", q->curr); + + /* finish current buffer */ + if (!buf) { + DEB_D("aiii. no current buffer\n"); + return; + } + + q->curr = NULL; + buf->vb.vb2_buf.timestamp = ktime_get_ns(); + if (vv->video_fmt.field == V4L2_FIELD_ALTERNATE) + buf->vb.field = vv->last_field; + else if (vv->video_fmt.field == V4L2_FIELD_ANY) + buf->vb.field = (vv->video_fmt.height > vv->standard->v_max_out / 2) + ? V4L2_FIELD_INTERLACED + : V4L2_FIELD_BOTTOM; + else + buf->vb.field = vv->video_fmt.field; + buf->vb.sequence = vv->seqnr++; + vb2_buffer_done(&buf->vb.vb2_buf, state); +} + +void saa7146_buffer_next(struct saa7146_dev *dev, + struct saa7146_dmaqueue *q, int vbi) +{ + struct saa7146_buf *buf,*next = NULL; + + if (WARN_ON(!q)) + return; + + DEB_INT("dev:%p, dmaq:%p, vbi:%d\n", dev, q, vbi); + + assert_spin_locked(&dev->slock); + if (!list_empty(&q->queue)) { + /* activate next one from queue */ + buf = list_entry(q->queue.next, struct saa7146_buf, list); + list_del(&buf->list); + if (!list_empty(&q->queue)) + next = list_entry(q->queue.next, struct saa7146_buf, list); + q->curr = buf; + DEB_INT("next buffer: buf:%p, prev:%p, next:%p\n", + buf, q->queue.prev, q->queue.next); + buf->activate(dev,buf,next); + } else { + DEB_INT("no next buffer. 
stopping.\n"); + if( 0 != vbi ) { + /* turn off video-dma3 */ + saa7146_write(dev,MC1, MASK_20); + } else { + /* nothing to do -- just prevent next video-dma1 transfer + by lowering the protection address */ + + // fixme: fix this for vflip != 0 + + saa7146_write(dev, PROT_ADDR1, 0); + saa7146_write(dev, MC2, (MASK_02|MASK_18)); + + /* write the address of the rps-program */ + saa7146_write(dev, RPS_ADDR0, dev->d_rps0.dma_handle); + /* turn on rps */ + saa7146_write(dev, MC1, (MASK_12 | MASK_28)); + +/* + printk("vdma%d.base_even: 0x%08x\n", 1,saa7146_read(dev,BASE_EVEN1)); + printk("vdma%d.base_odd: 0x%08x\n", 1,saa7146_read(dev,BASE_ODD1)); + printk("vdma%d.prot_addr: 0x%08x\n", 1,saa7146_read(dev,PROT_ADDR1)); + printk("vdma%d.base_page: 0x%08x\n", 1,saa7146_read(dev,BASE_PAGE1)); + printk("vdma%d.pitch: 0x%08x\n", 1,saa7146_read(dev,PITCH1)); + printk("vdma%d.num_line_byte: 0x%08x\n", 1,saa7146_read(dev,NUM_LINE_BYTE1)); +*/ + } + del_timer(&q->timeout); + } +} + +void saa7146_buffer_timeout(struct timer_list *t) +{ + struct saa7146_dmaqueue *q = from_timer(q, t, timeout); + struct saa7146_dev *dev = q->dev; + unsigned long flags; + + DEB_EE("dev:%p, dmaq:%p\n", dev, q); + + spin_lock_irqsave(&dev->slock,flags); + if (q->curr) { + DEB_D("timeout on %p\n", q->curr); + saa7146_buffer_finish(dev, q, VB2_BUF_STATE_ERROR); + } + + /* we don't restart the transfer here like other drivers do. when + a streaming capture is disabled, the timeout function will be + called for the current buffer. if we activate the next buffer now, + we mess up our capture logic. if a timeout occurs on another buffer, + then something is seriously broken before, so no need to buffer the + next capture IMHO... */ + + saa7146_buffer_next(dev, q, 0); + + spin_unlock_irqrestore(&dev->slock,flags); +} + +/********************************************************************************/ +/* file operations */ + +static ssize_t fops_write(struct file *file, const char __user *data, size_t count, loff_t *ppos) +{ + struct video_device *vdev = video_devdata(file); + struct saa7146_dev *dev = video_drvdata(file); + int ret; + + if (vdev->vfl_type != VFL_TYPE_VBI || !dev->ext_vv_data->vbi_fops.write) + return -EINVAL; + if (mutex_lock_interruptible(vdev->lock)) + return -ERESTARTSYS; + ret = dev->ext_vv_data->vbi_fops.write(file, data, count, ppos); + mutex_unlock(vdev->lock); + return ret; +} + +static const struct v4l2_file_operations video_fops = +{ + .owner = THIS_MODULE, + .open = v4l2_fh_open, + .release = vb2_fop_release, + .read = vb2_fop_read, + .write = fops_write, + .poll = vb2_fop_poll, + .mmap = vb2_fop_mmap, + .unlocked_ioctl = video_ioctl2, +}; + +static void vv_callback(struct saa7146_dev *dev, unsigned long status) +{ + u32 isr = status; + + DEB_INT("dev:%p, isr:0x%08x\n", dev, (u32)status); + + if (0 != (isr & (MASK_27))) { + DEB_INT("irq: RPS0 (0x%08x)\n", isr); + saa7146_video_uops.irq_done(dev,isr); + } + + if (0 != (isr & (MASK_28))) { + u32 mc2 = saa7146_read(dev, MC2); + if( 0 != (mc2 & MASK_15)) { + DEB_INT("irq: RPS1 vbi workaround (0x%08x)\n", isr); + wake_up(&dev->vv_data->vbi_wq); + saa7146_write(dev,MC2, MASK_31); + return; + } + DEB_INT("irq: RPS1 (0x%08x)\n", isr); + saa7146_vbi_uops.irq_done(dev,isr); + } +} + +static const struct v4l2_ctrl_ops saa7146_ctrl_ops = { + .s_ctrl = saa7146_s_ctrl, +}; + +int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv) +{ + struct v4l2_ctrl_handler *hdl = &dev->ctrl_handler; + struct v4l2_pix_format *fmt; + struct v4l2_vbi_format *vbi; 
+ struct saa7146_vv *vv; + int err; + + err = v4l2_device_register(&dev->pci->dev, &dev->v4l2_dev); + if (err) + return err; + + v4l2_ctrl_handler_init(hdl, 6); + v4l2_ctrl_new_std(hdl, &saa7146_ctrl_ops, + V4L2_CID_BRIGHTNESS, 0, 255, 1, 128); + v4l2_ctrl_new_std(hdl, &saa7146_ctrl_ops, + V4L2_CID_CONTRAST, 0, 127, 1, 64); + v4l2_ctrl_new_std(hdl, &saa7146_ctrl_ops, + V4L2_CID_SATURATION, 0, 127, 1, 64); + v4l2_ctrl_new_std(hdl, &saa7146_ctrl_ops, + V4L2_CID_VFLIP, 0, 1, 1, 0); + v4l2_ctrl_new_std(hdl, &saa7146_ctrl_ops, + V4L2_CID_HFLIP, 0, 1, 1, 0); + if (hdl->error) { + err = hdl->error; + v4l2_ctrl_handler_free(hdl); + v4l2_device_unregister(&dev->v4l2_dev); + return err; + } + dev->v4l2_dev.ctrl_handler = hdl; + + vv = kzalloc(sizeof(struct saa7146_vv), GFP_KERNEL); + if (vv == NULL) { + ERR("out of memory. aborting.\n"); + v4l2_ctrl_handler_free(hdl); + v4l2_device_unregister(&dev->v4l2_dev); + return -ENOMEM; + } + ext_vv->vid_ops = saa7146_video_ioctl_ops; + ext_vv->vbi_ops = saa7146_vbi_ioctl_ops; + ext_vv->core_ops = &saa7146_video_ioctl_ops; + + DEB_EE("dev:%p\n", dev); + + /* set default values for video parts of the saa7146 */ + saa7146_write(dev, BCS_CTRL, 0x80400040); + + /* enable video-port pins */ + saa7146_write(dev, MC1, (MASK_10 | MASK_26)); + + /* save per-device extension data (one extension can + handle different devices that might need different + configuration data) */ + dev->ext_vv_data = ext_vv; + + saa7146_video_uops.init(dev,vv); + if (dev->ext_vv_data->capabilities & V4L2_CAP_VBI_CAPTURE) + saa7146_vbi_uops.init(dev,vv); + + fmt = &vv->video_fmt; + fmt->width = 384; + fmt->height = 288; + fmt->pixelformat = V4L2_PIX_FMT_BGR24; + fmt->field = V4L2_FIELD_INTERLACED; + fmt->colorspace = V4L2_COLORSPACE_SMPTE170M; + fmt->bytesperline = 3 * fmt->width; + fmt->sizeimage = fmt->bytesperline * fmt->height; + + vbi = &vv->vbi_fmt; + vbi->sampling_rate = 27000000; + vbi->offset = 248; /* todo */ + vbi->samples_per_line = 720 * 2; + vbi->sample_format = V4L2_PIX_FMT_GREY; + + /* fixme: this only works for PAL */ + vbi->start[0] = 5; + vbi->count[0] = 16; + vbi->start[1] = 312; + vbi->count[1] = 16; + + timer_setup(&vv->vbi_read_timeout, NULL, 0); + + dev->vv_data = vv; + dev->vv_callback = &vv_callback; + + return 0; +} +EXPORT_SYMBOL_GPL(saa7146_vv_init); + +int saa7146_vv_release(struct saa7146_dev* dev) +{ + struct saa7146_vv *vv = dev->vv_data; + + DEB_EE("dev:%p\n", dev); + + v4l2_device_unregister(&dev->v4l2_dev); + v4l2_ctrl_handler_free(&dev->ctrl_handler); + kfree(vv); + dev->vv_data = NULL; + dev->vv_callback = NULL; + + return 0; +} +EXPORT_SYMBOL_GPL(saa7146_vv_release); + +int saa7146_register_device(struct video_device *vfd, struct saa7146_dev *dev, + char *name, int type) +{ + struct vb2_queue *q; + int err; + int i; + + DEB_EE("dev:%p, name:'%s', type:%d\n", dev, name, type); + + vfd->fops = &video_fops; + if (type == VFL_TYPE_VIDEO) { + vfd->ioctl_ops = &dev->ext_vv_data->vid_ops; + q = &dev->vv_data->video_dmaq.q; + } else { + vfd->ioctl_ops = &dev->ext_vv_data->vbi_ops; + q = &dev->vv_data->vbi_dmaq.q; + } + vfd->release = video_device_release_empty; + vfd->lock = &dev->v4l2_lock; + vfd->v4l2_dev = &dev->v4l2_dev; + vfd->tvnorms = 0; + for (i = 0; i < dev->ext_vv_data->num_stds; i++) + vfd->tvnorms |= dev->ext_vv_data->stds[i].id; + strscpy(vfd->name, name, sizeof(vfd->name)); + vfd->device_caps = V4L2_CAP_VIDEO_CAPTURE | + V4L2_CAP_READWRITE | V4L2_CAP_STREAMING; + vfd->device_caps |= dev->ext_vv_data->capabilities; + if (type == VFL_TYPE_VIDEO) { + 
vfd->device_caps &= + ~(V4L2_CAP_VBI_CAPTURE | V4L2_CAP_SLICED_VBI_OUTPUT); + } else if (vfd->device_caps & V4L2_CAP_SLICED_VBI_OUTPUT) { + vfd->vfl_dir = VFL_DIR_TX; + vfd->device_caps &= ~(V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING | + V4L2_CAP_AUDIO | V4L2_CAP_TUNER); + } else { + vfd->device_caps &= ~V4L2_CAP_VIDEO_CAPTURE; + } + + q->type = type == VFL_TYPE_VIDEO ? V4L2_BUF_TYPE_VIDEO_CAPTURE : V4L2_BUF_TYPE_VBI_CAPTURE; + q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; + q->io_modes = VB2_MMAP | VB2_READ | VB2_DMABUF; + q->ops = type == VFL_TYPE_VIDEO ? &video_qops : &vbi_qops; + q->mem_ops = &vb2_dma_sg_memops; + q->drv_priv = dev; + q->gfp_flags = __GFP_DMA32; + q->buf_struct_size = sizeof(struct saa7146_buf); + q->lock = &dev->v4l2_lock; + q->min_buffers_needed = 2; + q->dev = &dev->pci->dev; + err = vb2_queue_init(q); + if (err) + return err; + vfd->queue = q; + + video_set_drvdata(vfd, dev); + + err = video_register_device(vfd, type, -1); + if (err < 0) { + ERR("cannot register v4l2 device. skipping.\n"); + return err; + } + + pr_info("%s: registered device %s [v4l2]\n", + dev->name, video_device_node_name(vfd)); + return 0; +} +EXPORT_SYMBOL_GPL(saa7146_register_device); + +int saa7146_unregister_device(struct video_device *vfd, struct saa7146_dev *dev) +{ + DEB_EE("dev:%p\n", dev); + + video_unregister_device(vfd); + return 0; +} +EXPORT_SYMBOL_GPL(saa7146_unregister_device); + +static int __init saa7146_vv_init_module(void) +{ + return 0; +} + + +static void __exit saa7146_vv_cleanup_module(void) +{ +} + +module_init(saa7146_vv_init_module); +module_exit(saa7146_vv_cleanup_module); + +MODULE_AUTHOR("Michael Hunold "); +MODULE_DESCRIPTION("video4linux driver for saa7146-based hardware"); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/common/saa7146/saa7146_hlp.c b/drivers/media/common/saa7146/saa7146_hlp.c new file mode 100644 index 0000000000..7569d8cdd4 --- /dev/null +++ b/drivers/media/common/saa7146/saa7146_hlp.c @@ -0,0 +1,771 @@ +// SPDX-License-Identifier: GPL-2.0-only +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include + +static void calculate_output_format_register(struct saa7146_dev* saa, u32 palette, u32* clip_format) +{ + /* clear out the necessary bits */ + *clip_format &= 0x0000ffff; + /* set these bits new */ + *clip_format |= (( ((palette&0xf00)>>8) << 30) | ((palette&0x00f) << 24) | (((palette&0x0f0)>>4) << 16)); +} + +static void calculate_hps_source_and_sync(struct saa7146_dev *dev, int source, int sync, u32* hps_ctrl) +{ + *hps_ctrl &= ~(MASK_30 | MASK_31 | MASK_28); + *hps_ctrl |= (source << 30) | (sync << 28); +} + +static void calculate_hxo_and_hyo(struct saa7146_vv *vv, u32* hps_h_scale, u32* hps_ctrl) +{ + int hyo = 0, hxo = 0; + + hyo = vv->standard->v_offset; + hxo = vv->standard->h_offset; + + *hps_h_scale &= ~(MASK_B0 | 0xf00); + *hps_h_scale |= (hxo << 0); + + *hps_ctrl &= ~(MASK_W0 | MASK_B2); + *hps_ctrl |= (hyo << 12); +} + +/* helper functions for the calculation of the horizontal- and vertical + scaling registers, clip-format-register etc ... + these functions take pointers to the (most-likely read-out + original-values) and manipulate them according to the requested + changes. 
+*/ + +/* hps_coeff used for CXY and CXUV; scale 1/1 -> scale 1/64 */ +static struct { + u16 hps_coeff; + u16 weight_sum; +} hps_h_coeff_tab [] = { + {0x00, 2}, {0x02, 4}, {0x00, 4}, {0x06, 8}, {0x02, 8}, + {0x08, 8}, {0x00, 8}, {0x1E, 16}, {0x0E, 8}, {0x26, 8}, + {0x06, 8}, {0x42, 8}, {0x02, 8}, {0x80, 8}, {0x00, 8}, + {0xFE, 16}, {0xFE, 8}, {0x7E, 8}, {0x7E, 8}, {0x3E, 8}, + {0x3E, 8}, {0x1E, 8}, {0x1E, 8}, {0x0E, 8}, {0x0E, 8}, + {0x06, 8}, {0x06, 8}, {0x02, 8}, {0x02, 8}, {0x00, 8}, + {0x00, 8}, {0xFE, 16}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, + {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, + {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, + {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0xFE, 8}, {0x7E, 8}, + {0x7E, 8}, {0x3E, 8}, {0x3E, 8}, {0x1E, 8}, {0x1E, 8}, + {0x0E, 8}, {0x0E, 8}, {0x06, 8}, {0x06, 8}, {0x02, 8}, + {0x02, 8}, {0x00, 8}, {0x00, 8}, {0xFE, 16} +}; + +/* table of attenuation values for horizontal scaling */ +static u8 h_attenuation[] = { 1, 2, 4, 8, 2, 4, 8, 16, 0}; + +/* calculate horizontal scale registers */ +static int calculate_h_scale_registers(struct saa7146_dev *dev, + int in_x, int out_x, int flip_lr, + u32* hps_ctrl, u32* hps_v_gain, u32* hps_h_prescale, u32* hps_h_scale) +{ + /* horizontal prescaler */ + u32 dcgx = 0, xpsc = 0, xacm = 0, cxy = 0, cxuv = 0; + /* horizontal scaler */ + u32 xim = 0, xp = 0, xsci =0; + /* vertical scale & gain */ + u32 pfuv = 0; + + /* helper variables */ + u32 h_atten = 0, i = 0; + + if ( 0 == out_x ) { + return -EINVAL; + } + + /* mask out vanity-bit */ + *hps_ctrl &= ~MASK_29; + + /* calculate prescale-(xspc)-value: [n .. 1/2) : 1 + [1/2 .. 1/3) : 2 + [1/3 .. 1/4) : 3 + ... */ + if (in_x > out_x) { + xpsc = in_x / out_x; + } + else { + /* zooming */ + xpsc = 1; + } + + /* if flip_lr-bit is set, number of pixels after + horizontal prescaling must be < 384 */ + if ( 0 != flip_lr ) { + + /* set vanity bit */ + *hps_ctrl |= MASK_29; + + while (in_x / xpsc >= 384 ) + xpsc++; + } + /* if zooming is wanted, number of pixels after + horizontal prescaling must be < 768 */ + else { + while ( in_x / xpsc >= 768 ) + xpsc++; + } + + /* maximum prescale is 64 (p.69) */ + if ( xpsc > 64 ) + xpsc = 64; + + /* keep xacm clear*/ + xacm = 0; + + /* set horizontal filter parameters (CXY = CXUV) */ + cxy = hps_h_coeff_tab[( (xpsc - 1) < 63 ? (xpsc - 1) : 63 )].hps_coeff; + cxuv = cxy; + + /* calculate and set horizontal fine scale (xsci) */ + + /* bypass the horizontal scaler ? */ + if ( (in_x == out_x) && ( 1 == xpsc ) ) + xsci = 0x400; + else + xsci = ( (1024 * in_x) / (out_x * xpsc) ) + xpsc; + + /* set start phase for horizontal fine scale (xp) to 0 */ + xp = 0; + + /* set xim, if we bypass the horizontal scaler */ + if ( 0x400 == xsci ) + xim = 1; + else + xim = 0; + + /* if the prescaler is bypassed, enable horizontal + accumulation mode (xacm) and clear dcgx */ + if( 1 == xpsc ) { + xacm = 1; + dcgx = 0; + } else { + xacm = 0; + /* get best match in the table of attenuations + for horizontal scaling */ + h_atten = hps_h_coeff_tab[( (xpsc - 1) < 63 ? (xpsc - 1) : 63 )].weight_sum; + + for (i = 0; h_attenuation[i] != 0; i++) { + if (h_attenuation[i] >= h_atten) + break; + } + + dcgx = i; + } + + /* the horizontal scaling increment controls the UV filter + to reduce the bandwidth to improve the display quality, + so set it ... 
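+ (purely illustrative numbers, not taken from the datasheet: scaling a 720-pixel
+ input line down to 360 output pixels gives xpsc = 2 and
+ xsci = (1024*720)/(360*2) + 2 = 1026, which is below 0x600, so the mapping
+ below selects pfuv = 0x01)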
*/ + if ( xsci == 0x400) + pfuv = 0x00; + else if ( xsci < 0x600) + pfuv = 0x01; + else if ( xsci < 0x680) + pfuv = 0x11; + else if ( xsci < 0x700) + pfuv = 0x22; + else + pfuv = 0x33; + + + *hps_v_gain &= MASK_W0|MASK_B2; + *hps_v_gain |= (pfuv << 24); + + *hps_h_scale &= ~(MASK_W1 | 0xf000); + *hps_h_scale |= (xim << 31) | (xp << 24) | (xsci << 12); + + *hps_h_prescale |= (dcgx << 27) | ((xpsc-1) << 18) | (xacm << 17) | (cxy << 8) | (cxuv << 0); + + return 0; +} + +static struct { + u16 hps_coeff; + u16 weight_sum; +} hps_v_coeff_tab [] = { + {0x0100, 2}, {0x0102, 4}, {0x0300, 4}, {0x0106, 8}, {0x0502, 8}, + {0x0708, 8}, {0x0F00, 8}, {0x011E, 16}, {0x110E, 16}, {0x1926, 16}, + {0x3906, 16}, {0x3D42, 16}, {0x7D02, 16}, {0x7F80, 16}, {0xFF00, 16}, + {0x01FE, 32}, {0x01FE, 32}, {0x817E, 32}, {0x817E, 32}, {0xC13E, 32}, + {0xC13E, 32}, {0xE11E, 32}, {0xE11E, 32}, {0xF10E, 32}, {0xF10E, 32}, + {0xF906, 32}, {0xF906, 32}, {0xFD02, 32}, {0xFD02, 32}, {0xFF00, 32}, + {0xFF00, 32}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, + {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, + {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, + {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x01FE, 64}, {0x817E, 64}, + {0x817E, 64}, {0xC13E, 64}, {0xC13E, 64}, {0xE11E, 64}, {0xE11E, 64}, + {0xF10E, 64}, {0xF10E, 64}, {0xF906, 64}, {0xF906, 64}, {0xFD02, 64}, + {0xFD02, 64}, {0xFF00, 64}, {0xFF00, 64}, {0x01FE, 128} +}; + +/* table of attenuation values for vertical scaling */ +static u16 v_attenuation[] = { 2, 4, 8, 16, 32, 64, 128, 256, 0}; + +/* calculate vertical scale registers */ +static int calculate_v_scale_registers(struct saa7146_dev *dev, enum v4l2_field field, + int in_y, int out_y, u32* hps_v_scale, u32* hps_v_gain) +{ + int lpi = 0; + + /* vertical scaling */ + u32 yacm = 0, ysci = 0, yacl = 0, ypo = 0, ype = 0; + /* vertical scale & gain */ + u32 dcgy = 0, cya_cyb = 0; + + /* helper variables */ + u32 v_atten = 0, i = 0; + + /* error, if vertical zooming */ + if ( in_y < out_y ) { + return -EINVAL; + } + + /* linear phase interpolation may be used + if scaling is between 1 and 1/2 (both fields used) + or scaling is between 1/2 and 1/4 (if only one field is used) */ + + if (V4L2_FIELD_HAS_BOTH(field)) { + if( 2*out_y >= in_y) { + lpi = 1; + } + } else if (field == V4L2_FIELD_TOP + || field == V4L2_FIELD_ALTERNATE + || field == V4L2_FIELD_BOTTOM) { + if( 4*out_y >= in_y ) { + lpi = 1; + } + out_y *= 2; + } + if( 0 != lpi ) { + + yacm = 0; + yacl = 0; + cya_cyb = 0x00ff; + + /* calculate scaling increment */ + if ( in_y > out_y ) + ysci = ((1024 * in_y) / (out_y + 1)) - 1024; + else + ysci = 0; + + dcgy = 0; + + /* calculate ype and ypo */ + ype = ysci / 16; + ypo = ype + (ysci / 64); + + } else { + yacm = 1; + + /* calculate scaling increment */ + ysci = (((10 * 1024 * (in_y - out_y - 1)) / in_y) + 9) / 10; + + /* calculate ype and ypo */ + ypo = ype = ((ysci + 15) / 16); + + /* the sequence length interval (yacl) has to be set according + to the prescale value, e.g. [n .. 1/2) : 0 + [1/2 .. 1/3) : 1 + [1/3 .. 1/4) : 2 + ... */ + if ( ysci < 512) { + yacl = 0; + } else { + yacl = ( ysci / (1024 - ysci) ); + } + + /* get filter coefficients for cya, cyb from table hps_v_coeff_tab */ + cya_cyb = hps_v_coeff_tab[ (yacl < 63 ? yacl : 63 ) ].hps_coeff; + + /* get best match in the table of attenuations for vertical scaling */ + v_atten = hps_v_coeff_tab[ (yacl < 63 ? 
yacl : 63 ) ].weight_sum; + + for (i = 0; v_attenuation[i] != 0; i++) { + if (v_attenuation[i] >= v_atten) + break; + } + + dcgy = i; + } + + /* ypo and ype swapped in spec ? */ + *hps_v_scale |= (yacm << 31) | (ysci << 21) | (yacl << 15) | (ypo << 8 ) | (ype << 1); + + *hps_v_gain &= ~(MASK_W0|MASK_B2); + *hps_v_gain |= (dcgy << 16) | (cya_cyb << 0); + + return 0; +} + +/* simple bubble-sort algorithm with duplicate elimination */ +static void saa7146_set_window(struct saa7146_dev *dev, int width, int height, enum v4l2_field field) +{ + struct saa7146_vv *vv = dev->vv_data; + + int source = vv->current_hps_source; + int sync = vv->current_hps_sync; + + u32 hps_v_scale = 0, hps_v_gain = 0, hps_ctrl = 0, hps_h_prescale = 0, hps_h_scale = 0; + + /* set vertical scale */ + hps_v_scale = 0; /* all bits get set by the function-call */ + hps_v_gain = 0; /* fixme: saa7146_read(dev, HPS_V_GAIN);*/ + calculate_v_scale_registers(dev, field, vv->standard->v_field*2, height, &hps_v_scale, &hps_v_gain); + + /* set horizontal scale */ + hps_ctrl = 0; + hps_h_prescale = 0; /* all bits get set in the function */ + hps_h_scale = 0; + calculate_h_scale_registers(dev, vv->standard->h_pixels, width, vv->hflip, &hps_ctrl, &hps_v_gain, &hps_h_prescale, &hps_h_scale); + + /* set hyo and hxo */ + calculate_hxo_and_hyo(vv, &hps_h_scale, &hps_ctrl); + calculate_hps_source_and_sync(dev, source, sync, &hps_ctrl); + + /* write out new register contents */ + saa7146_write(dev, HPS_V_SCALE, hps_v_scale); + saa7146_write(dev, HPS_V_GAIN, hps_v_gain); + saa7146_write(dev, HPS_CTRL, hps_ctrl); + saa7146_write(dev, HPS_H_PRESCALE,hps_h_prescale); + saa7146_write(dev, HPS_H_SCALE, hps_h_scale); + + /* upload shadow-ram registers */ + saa7146_write(dev, MC2, (MASK_05 | MASK_06 | MASK_21 | MASK_22) ); +} + +static void saa7146_set_output_format(struct saa7146_dev *dev, unsigned long palette) +{ + u32 clip_format = saa7146_read(dev, CLIP_FORMAT_CTRL); + + /* call helper function */ + calculate_output_format_register(dev,palette,&clip_format); + + /* update the hps registers */ + saa7146_write(dev, CLIP_FORMAT_CTRL, clip_format); + saa7146_write(dev, MC2, (MASK_05 | MASK_21)); +} + +/* select input-source */ +void saa7146_set_hps_source_and_sync(struct saa7146_dev *dev, int source, int sync) +{ + struct saa7146_vv *vv = dev->vv_data; + u32 hps_ctrl = 0; + + /* read old state */ + hps_ctrl = saa7146_read(dev, HPS_CTRL); + + hps_ctrl &= ~( MASK_31 | MASK_30 | MASK_28 ); + hps_ctrl |= (source << 30) | (sync << 28); + + /* write back & upload register */ + saa7146_write(dev, HPS_CTRL, hps_ctrl); + saa7146_write(dev, MC2, (MASK_05 | MASK_21)); + + vv->current_hps_source = source; + vv->current_hps_sync = sync; +} +EXPORT_SYMBOL_GPL(saa7146_set_hps_source_and_sync); + +void saa7146_write_out_dma(struct saa7146_dev* dev, int which, struct saa7146_video_dma* vdma) +{ + int where = 0; + + if( which < 1 || which > 3) { + return; + } + + /* calculate starting address */ + where = (which-1)*0x18; + + saa7146_write(dev, where, vdma->base_odd); + saa7146_write(dev, where+0x04, vdma->base_even); + saa7146_write(dev, where+0x08, vdma->prot_addr); + saa7146_write(dev, where+0x0c, vdma->pitch); + saa7146_write(dev, where+0x10, vdma->base_page); + saa7146_write(dev, where+0x14, vdma->num_line_byte); + + /* upload */ + saa7146_write(dev, MC2, (MASK_02<<(which-1))|(MASK_18<<(which-1))); +/* + printk("vdma%d.base_even: 0x%08x\n", which,vdma->base_even); + printk("vdma%d.base_odd: 0x%08x\n", which,vdma->base_odd); + printk("vdma%d.prot_addr: 
0x%08x\n", which,vdma->prot_addr); + printk("vdma%d.base_page: 0x%08x\n", which,vdma->base_page); + printk("vdma%d.pitch: 0x%08x\n", which,vdma->pitch); + printk("vdma%d.num_line_byte: 0x%08x\n", which,vdma->num_line_byte); +*/ +} + +static int calculate_video_dma_grab_packed(struct saa7146_dev* dev, struct saa7146_buf *buf) +{ + struct saa7146_vv *vv = dev->vv_data; + struct v4l2_pix_format *pix = &vv->video_fmt; + struct saa7146_video_dma vdma1; + struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev, pix->pixelformat); + + int width = pix->width; + int height = pix->height; + int bytesperline = pix->bytesperline; + enum v4l2_field field = pix->field; + + int depth = sfmt->depth; + + DEB_CAP("[size=%dx%d,fields=%s]\n", + width, height, v4l2_field_names[field]); + + if( bytesperline != 0) { + vdma1.pitch = bytesperline*2; + } else { + vdma1.pitch = (width*depth*2)/8; + } + vdma1.num_line_byte = ((vv->standard->v_field<<16) + vv->standard->h_pixels); + vdma1.base_page = buf->pt[0].dma | ME1 | sfmt->swap; + + if( 0 != vv->vflip ) { + vdma1.prot_addr = buf->pt[0].offset; + vdma1.base_even = buf->pt[0].offset+(vdma1.pitch/2)*height; + vdma1.base_odd = vdma1.base_even - (vdma1.pitch/2); + } else { + vdma1.base_even = buf->pt[0].offset; + vdma1.base_odd = vdma1.base_even + (vdma1.pitch/2); + vdma1.prot_addr = buf->pt[0].offset+(vdma1.pitch/2)*height; + } + + if (V4L2_FIELD_HAS_BOTH(field)) { + } else if (field == V4L2_FIELD_ALTERNATE) { + /* fixme */ + if ( vv->last_field == V4L2_FIELD_TOP ) { + vdma1.base_odd = vdma1.prot_addr; + vdma1.pitch /= 2; + } else if ( vv->last_field == V4L2_FIELD_BOTTOM ) { + vdma1.base_odd = vdma1.base_even; + vdma1.base_even = vdma1.prot_addr; + vdma1.pitch /= 2; + } + } else if (field == V4L2_FIELD_TOP) { + vdma1.base_odd = vdma1.prot_addr; + vdma1.pitch /= 2; + } else if (field == V4L2_FIELD_BOTTOM) { + vdma1.base_odd = vdma1.base_even; + vdma1.base_even = vdma1.prot_addr; + vdma1.pitch /= 2; + } + + if( 0 != vv->vflip ) { + vdma1.pitch *= -1; + } + + saa7146_write_out_dma(dev, 1, &vdma1); + return 0; +} + +static int calc_planar_422(struct saa7146_vv *vv, struct saa7146_buf *buf, struct saa7146_video_dma *vdma2, struct saa7146_video_dma *vdma3) +{ + struct v4l2_pix_format *pix = &vv->video_fmt; + int height = pix->height; + int width = pix->width; + + vdma2->pitch = width; + vdma3->pitch = width; + + /* fixme: look at bytesperline! 
*/ + + if( 0 != vv->vflip ) { + vdma2->prot_addr = buf->pt[1].offset; + vdma2->base_even = ((vdma2->pitch/2)*height)+buf->pt[1].offset; + vdma2->base_odd = vdma2->base_even - (vdma2->pitch/2); + + vdma3->prot_addr = buf->pt[2].offset; + vdma3->base_even = ((vdma3->pitch/2)*height)+buf->pt[2].offset; + vdma3->base_odd = vdma3->base_even - (vdma3->pitch/2); + } else { + vdma3->base_even = buf->pt[2].offset; + vdma3->base_odd = vdma3->base_even + (vdma3->pitch/2); + vdma3->prot_addr = (vdma3->pitch/2)*height+buf->pt[2].offset; + + vdma2->base_even = buf->pt[1].offset; + vdma2->base_odd = vdma2->base_even + (vdma2->pitch/2); + vdma2->prot_addr = (vdma2->pitch/2)*height+buf->pt[1].offset; + } + + return 0; +} + +static int calc_planar_420(struct saa7146_vv *vv, struct saa7146_buf *buf, struct saa7146_video_dma *vdma2, struct saa7146_video_dma *vdma3) +{ + struct v4l2_pix_format *pix = &vv->video_fmt; + int height = pix->height; + int width = pix->width; + + vdma2->pitch = width/2; + vdma3->pitch = width/2; + + if( 0 != vv->vflip ) { + vdma2->prot_addr = buf->pt[2].offset; + vdma2->base_even = ((vdma2->pitch/2)*height)+buf->pt[2].offset; + vdma2->base_odd = vdma2->base_even - (vdma2->pitch/2); + + vdma3->prot_addr = buf->pt[1].offset; + vdma3->base_even = ((vdma3->pitch/2)*height)+buf->pt[1].offset; + vdma3->base_odd = vdma3->base_even - (vdma3->pitch/2); + + } else { + vdma3->base_even = buf->pt[2].offset; + vdma3->base_odd = vdma3->base_even + (vdma3->pitch); + vdma3->prot_addr = (vdma3->pitch/2)*height+buf->pt[2].offset; + + vdma2->base_even = buf->pt[1].offset; + vdma2->base_odd = vdma2->base_even + (vdma2->pitch); + vdma2->prot_addr = (vdma2->pitch/2)*height+buf->pt[1].offset; + } + return 0; +} + +static int calculate_video_dma_grab_planar(struct saa7146_dev* dev, struct saa7146_buf *buf) +{ + struct saa7146_vv *vv = dev->vv_data; + struct v4l2_pix_format *pix = &vv->video_fmt; + struct saa7146_video_dma vdma1; + struct saa7146_video_dma vdma2; + struct saa7146_video_dma vdma3; + struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev, pix->pixelformat); + + int width = pix->width; + int height = pix->height; + enum v4l2_field field = pix->field; + + if (WARN_ON(!buf->pt[0].dma) || + WARN_ON(!buf->pt[1].dma) || + WARN_ON(!buf->pt[2].dma)) + return -1; + + DEB_CAP("[size=%dx%d,fields=%s]\n", + width, height, v4l2_field_names[field]); + + /* fixme: look at bytesperline! */ + + /* fixme: what happens for user space buffers here?. 
The offsets are + most likely wrong, this version here only works for page-aligned + buffers, modifications to the pagetable-functions are necessary...*/ + + vdma1.pitch = width*2; + vdma1.num_line_byte = ((vv->standard->v_field<<16) + vv->standard->h_pixels); + vdma1.base_page = buf->pt[0].dma | ME1; + + if( 0 != vv->vflip ) { + vdma1.prot_addr = buf->pt[0].offset; + vdma1.base_even = ((vdma1.pitch/2)*height)+buf->pt[0].offset; + vdma1.base_odd = vdma1.base_even - (vdma1.pitch/2); + } else { + vdma1.base_even = buf->pt[0].offset; + vdma1.base_odd = vdma1.base_even + (vdma1.pitch/2); + vdma1.prot_addr = (vdma1.pitch/2)*height+buf->pt[0].offset; + } + + vdma2.num_line_byte = 0; /* unused */ + vdma2.base_page = buf->pt[1].dma | ME1; + + vdma3.num_line_byte = 0; /* unused */ + vdma3.base_page = buf->pt[2].dma | ME1; + + switch( sfmt->depth ) { + case 12: { + calc_planar_420(vv,buf,&vdma2,&vdma3); + break; + } + case 16: { + calc_planar_422(vv,buf,&vdma2,&vdma3); + break; + } + default: { + return -1; + } + } + + if (V4L2_FIELD_HAS_BOTH(field)) { + } else if (field == V4L2_FIELD_ALTERNATE) { + /* fixme */ + vdma1.base_odd = vdma1.prot_addr; + vdma1.pitch /= 2; + vdma2.base_odd = vdma2.prot_addr; + vdma2.pitch /= 2; + vdma3.base_odd = vdma3.prot_addr; + vdma3.pitch /= 2; + } else if (field == V4L2_FIELD_TOP) { + vdma1.base_odd = vdma1.prot_addr; + vdma1.pitch /= 2; + vdma2.base_odd = vdma2.prot_addr; + vdma2.pitch /= 2; + vdma3.base_odd = vdma3.prot_addr; + vdma3.pitch /= 2; + } else if (field == V4L2_FIELD_BOTTOM) { + vdma1.base_odd = vdma1.base_even; + vdma1.base_even = vdma1.prot_addr; + vdma1.pitch /= 2; + vdma2.base_odd = vdma2.base_even; + vdma2.base_even = vdma2.prot_addr; + vdma2.pitch /= 2; + vdma3.base_odd = vdma3.base_even; + vdma3.base_even = vdma3.prot_addr; + vdma3.pitch /= 2; + } + + if( 0 != vv->vflip ) { + vdma1.pitch *= -1; + vdma2.pitch *= -1; + vdma3.pitch *= -1; + } + + saa7146_write_out_dma(dev, 1, &vdma1); + if( (sfmt->flags & FORMAT_BYTE_SWAP) != 0 ) { + saa7146_write_out_dma(dev, 3, &vdma2); + saa7146_write_out_dma(dev, 2, &vdma3); + } else { + saa7146_write_out_dma(dev, 2, &vdma2); + saa7146_write_out_dma(dev, 3, &vdma3); + } + return 0; +} + +static void program_capture_engine(struct saa7146_dev *dev, int planar) +{ + struct saa7146_vv *vv = dev->vv_data; + int count = 0; + + unsigned long e_wait = vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? CMD_E_FID_A : CMD_E_FID_B; + unsigned long o_wait = vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? 
CMD_O_FID_A : CMD_O_FID_B; + + /* wait for o_fid_a/b / e_fid_a/b toggle only if rps register 0 is not set*/ + WRITE_RPS0(CMD_PAUSE | CMD_OAN | CMD_SIG0 | o_wait); + WRITE_RPS0(CMD_PAUSE | CMD_OAN | CMD_SIG0 | e_wait); + + /* set rps register 0 */ + WRITE_RPS0(CMD_WR_REG | (1 << 8) | (MC2/4)); + WRITE_RPS0(MASK_27 | MASK_11); + + /* turn on video-dma1 */ + WRITE_RPS0(CMD_WR_REG_MASK | (MC1/4)); + WRITE_RPS0(MASK_06 | MASK_22); /* => mask */ + WRITE_RPS0(MASK_06 | MASK_22); /* => values */ + if( 0 != planar ) { + /* turn on video-dma2 */ + WRITE_RPS0(CMD_WR_REG_MASK | (MC1/4)); + WRITE_RPS0(MASK_05 | MASK_21); /* => mask */ + WRITE_RPS0(MASK_05 | MASK_21); /* => values */ + + /* turn on video-dma3 */ + WRITE_RPS0(CMD_WR_REG_MASK | (MC1/4)); + WRITE_RPS0(MASK_04 | MASK_20); /* => mask */ + WRITE_RPS0(MASK_04 | MASK_20); /* => values */ + } + + /* wait for o_fid_a/b / e_fid_a/b toggle */ + if ( vv->last_field == V4L2_FIELD_INTERLACED ) { + WRITE_RPS0(CMD_PAUSE | o_wait); + WRITE_RPS0(CMD_PAUSE | e_wait); + } else if ( vv->last_field == V4L2_FIELD_TOP ) { + WRITE_RPS0(CMD_PAUSE | (vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? MASK_10 : MASK_09)); + WRITE_RPS0(CMD_PAUSE | o_wait); + } else if ( vv->last_field == V4L2_FIELD_BOTTOM ) { + WRITE_RPS0(CMD_PAUSE | (vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? MASK_10 : MASK_09)); + WRITE_RPS0(CMD_PAUSE | e_wait); + } + + /* turn off video-dma1 */ + WRITE_RPS0(CMD_WR_REG_MASK | (MC1/4)); + WRITE_RPS0(MASK_22 | MASK_06); /* => mask */ + WRITE_RPS0(MASK_22); /* => values */ + if( 0 != planar ) { + /* turn off video-dma2 */ + WRITE_RPS0(CMD_WR_REG_MASK | (MC1/4)); + WRITE_RPS0(MASK_05 | MASK_21); /* => mask */ + WRITE_RPS0(MASK_21); /* => values */ + + /* turn off video-dma3 */ + WRITE_RPS0(CMD_WR_REG_MASK | (MC1/4)); + WRITE_RPS0(MASK_04 | MASK_20); /* => mask */ + WRITE_RPS0(MASK_20); /* => values */ + } + + /* generate interrupt */ + WRITE_RPS0(CMD_INTERRUPT); + + /* stop */ + WRITE_RPS0(CMD_STOP); +} + +/* disable clipping */ +static void saa7146_disable_clipping(struct saa7146_dev *dev) +{ + u32 clip_format = saa7146_read(dev, CLIP_FORMAT_CTRL); + + /* mask out relevant bits (=lower word)*/ + clip_format &= MASK_W1; + + /* upload clipping-registers*/ + saa7146_write(dev, CLIP_FORMAT_CTRL, clip_format); + saa7146_write(dev, MC2, (MASK_05 | MASK_21)); + + /* disable video dma2 */ + saa7146_write(dev, MC1, MASK_21); +} + +void saa7146_set_capture(struct saa7146_dev *dev, struct saa7146_buf *buf, struct saa7146_buf *next) +{ + struct saa7146_vv *vv = dev->vv_data; + struct v4l2_pix_format *pix = &vv->video_fmt; + struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev, pix->pixelformat); + u32 vdma1_prot_addr; + + DEB_CAP("buf:%p, next:%p\n", buf, next); + + vdma1_prot_addr = saa7146_read(dev, PROT_ADDR1); + if( 0 == vdma1_prot_addr ) { + /* clear out beginning of streaming bit (rps register 0)*/ + DEB_CAP("forcing sync to new frame\n"); + saa7146_write(dev, MC2, MASK_27 ); + } + + saa7146_set_window(dev, pix->width, pix->height, pix->field); + saa7146_set_output_format(dev, sfmt->trans); + saa7146_disable_clipping(dev); + + if ( vv->last_field == V4L2_FIELD_INTERLACED ) { + } else if ( vv->last_field == V4L2_FIELD_TOP ) { + vv->last_field = V4L2_FIELD_BOTTOM; + } else if ( vv->last_field == V4L2_FIELD_BOTTOM ) { + vv->last_field = V4L2_FIELD_TOP; + } + + if( 0 != IS_PLANAR(sfmt->trans)) { + calculate_video_dma_grab_planar(dev, buf); + program_capture_engine(dev,1); + } else { + calculate_video_dma_grab_packed(dev, buf); + 
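/* packed formats use only video-dma1, so the RPS program below is built with
+ planar == 0 and skips the dma2/dma3 enable/disable steps */ +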
program_capture_engine(dev,0); + } + +/* + printk("vdma%d.base_even: 0x%08x\n", 1,saa7146_read(dev,BASE_EVEN1)); + printk("vdma%d.base_odd: 0x%08x\n", 1,saa7146_read(dev,BASE_ODD1)); + printk("vdma%d.prot_addr: 0x%08x\n", 1,saa7146_read(dev,PROT_ADDR1)); + printk("vdma%d.base_page: 0x%08x\n", 1,saa7146_read(dev,BASE_PAGE1)); + printk("vdma%d.pitch: 0x%08x\n", 1,saa7146_read(dev,PITCH1)); + printk("vdma%d.num_line_byte: 0x%08x\n", 1,saa7146_read(dev,NUM_LINE_BYTE1)); + printk("vdma%d => vptr : 0x%08x\n", 1,saa7146_read(dev,PCI_VDP1)); +*/ + + /* write the address of the rps-program */ + saa7146_write(dev, RPS_ADDR0, dev->d_rps0.dma_handle); + + /* turn on rps */ + saa7146_write(dev, MC1, (MASK_12 | MASK_28)); +} diff --git a/drivers/media/common/saa7146/saa7146_i2c.c b/drivers/media/common/saa7146/saa7146_i2c.c new file mode 100644 index 0000000000..df9ebe2a16 --- /dev/null +++ b/drivers/media/common/saa7146/saa7146_i2c.c @@ -0,0 +1,421 @@ +// SPDX-License-Identifier: GPL-2.0 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include + +static u32 saa7146_i2c_func(struct i2c_adapter *adapter) +{ + /* DEB_I2C("'%s'\n", adapter->name); */ + + return I2C_FUNC_I2C + | I2C_FUNC_SMBUS_QUICK + | I2C_FUNC_SMBUS_READ_BYTE | I2C_FUNC_SMBUS_WRITE_BYTE + | I2C_FUNC_SMBUS_READ_BYTE_DATA | I2C_FUNC_SMBUS_WRITE_BYTE_DATA; +} + +/* this function returns the status-register of our i2c-device */ +static inline u32 saa7146_i2c_status(struct saa7146_dev *dev) +{ + u32 iicsta = saa7146_read(dev, I2C_STATUS); + /* DEB_I2C("status: 0x%08x\n", iicsta); */ + return iicsta; +} + +/* this function runs through the i2c-messages and prepares the data to be + sent through the saa7146. have a look at the specifications p. 122 ff + to understand this. it returns the number of u32s to send, or -1 + in case of an error. */ +static int saa7146_i2c_msg_prepare(const struct i2c_msg *m, int num, __le32 *op) +{ + int h1, h2; + int i, j, addr; + int mem = 0, op_count = 0; + + /* first determine size of needed memory */ + for(i = 0; i < num; i++) { + mem += m[i].len + 1; + } + + /* worst case: we need one u32 for three bytes to be send + plus one extra byte to address the device */ + mem = 1 + ((mem-1) / 3); + + /* we assume that op points to a memory of at least + * SAA7146_I2C_MEM bytes size. if we exceed this limit... + */ + if ((4 * mem) > SAA7146_I2C_MEM) { + /* DEB_I2C("cannot prepare i2c-message\n"); */ + return -ENOMEM; + } + + /* be careful: clear out the i2c-mem first */ + memset(op,0,sizeof(__le32)*mem); + + /* loop through all messages */ + for(i = 0; i < num; i++) { + + addr = i2c_8bit_addr_from_msg(&m[i]); + h1 = op_count/3; h2 = op_count%3; + op[h1] |= cpu_to_le32( (u8)addr << ((3-h2)*8)); + op[h1] |= cpu_to_le32(SAA7146_I2C_START << ((3-h2)*2)); + op_count++; + + /* loop through all bytes of message i */ + for(j = 0; j < m[i].len; j++) { + /* insert the data bytes */ + h1 = op_count/3; h2 = op_count%3; + op[h1] |= cpu_to_le32( (u32)((u8)m[i].buf[j]) << ((3-h2)*8)); + op[h1] |= cpu_to_le32( SAA7146_I2C_CONT << ((3-h2)*2)); + op_count++; + } + + } + + /* have a look at the last byte inserted: + if it was: ...CONT change it to ...STOP */ + h1 = (op_count-1)/3; h2 = (op_count-1)%3; + if ( SAA7146_I2C_CONT == (0x3 & (le32_to_cpu(op[h1]) >> ((3-h2)*2))) ) { + op[h1] &= ~cpu_to_le32(0x2 << ((3-h2)*2)); + op[h1] |= cpu_to_le32(SAA7146_I2C_STOP << ((3-h2)*2)); + } + + /* return the number of u32s to send */ + return mem; +} + +/* this functions loops through all i2c-messages. 
normally, it should determine + which bytes were read through the adapter and write them back to the corresponding + i2c-message. but instead, we simply write back all bytes. + fixme: this could be improved. */ +static int saa7146_i2c_msg_cleanup(const struct i2c_msg *m, int num, __le32 *op) +{ + int i, j; + int op_count = 0; + + /* loop through all messages */ + for(i = 0; i < num; i++) { + + op_count++; + + /* loop through all bytes of message i */ + for(j = 0; j < m[i].len; j++) { + /* write back all bytes that could have been read */ + m[i].buf[j] = (le32_to_cpu(op[op_count/3]) >> ((3-(op_count%3))*8)); + op_count++; + } + } + + return 0; +} + +/* this functions resets the i2c-device and returns 0 if everything was fine, otherwise -1 */ +static int saa7146_i2c_reset(struct saa7146_dev *dev) +{ + /* get current status */ + u32 status = saa7146_i2c_status(dev); + + /* clear registers for sure */ + saa7146_write(dev, I2C_STATUS, dev->i2c_bitrate); + saa7146_write(dev, I2C_TRANSFER, 0); + + /* check if any operation is still in progress */ + if ( 0 != ( status & SAA7146_I2C_BUSY) ) { + + /* yes, kill ongoing operation */ + DEB_I2C("busy_state detected\n"); + + /* set "ABORT-OPERATION"-bit (bit 7)*/ + saa7146_write(dev, I2C_STATUS, (dev->i2c_bitrate | MASK_07)); + saa7146_write(dev, MC2, (MASK_00 | MASK_16)); + msleep(SAA7146_I2C_DELAY); + + /* clear all error-bits pending; this is needed because p.123, note 1 */ + saa7146_write(dev, I2C_STATUS, dev->i2c_bitrate); + saa7146_write(dev, MC2, (MASK_00 | MASK_16)); + msleep(SAA7146_I2C_DELAY); + } + + /* check if any error is (still) present. (this can be necessary because p.123, note 1) */ + status = saa7146_i2c_status(dev); + + if ( dev->i2c_bitrate != status ) { + + DEB_I2C("error_state detected. status:0x%08x\n", status); + + /* Repeat the abort operation. This seems to be necessary + after serious protocol errors caused by e.g. the SAA7740 */ + saa7146_write(dev, I2C_STATUS, (dev->i2c_bitrate | MASK_07)); + saa7146_write(dev, MC2, (MASK_00 | MASK_16)); + msleep(SAA7146_I2C_DELAY); + + /* clear all error-bits pending */ + saa7146_write(dev, I2C_STATUS, dev->i2c_bitrate); + saa7146_write(dev, MC2, (MASK_00 | MASK_16)); + msleep(SAA7146_I2C_DELAY); + + /* the data sheet says it might be necessary to clear the status + twice after an abort */ + saa7146_write(dev, I2C_STATUS, dev->i2c_bitrate); + saa7146_write(dev, MC2, (MASK_00 | MASK_16)); + msleep(SAA7146_I2C_DELAY); + } + + /* if any error is still present, a fatal error has occurred ... */ + status = saa7146_i2c_status(dev); + if ( dev->i2c_bitrate != status ) { + DEB_I2C("fatal error. status:0x%08x\n", status); + return -1; + } + + return 0; +} + +/* this functions writes out the data-byte 'dword' to the i2c-device. + it returns 0 if ok, -1 if the transfer failed, -2 if the transfer + failed badly (e.g. 
address error) */ +static int saa7146_i2c_writeout(struct saa7146_dev *dev, __le32 *dword, int short_delay) +{ + u32 status = 0, mc2 = 0; + int trial = 0; + unsigned long timeout; + + /* write out i2c-command */ + DEB_I2C("before: 0x%08x (status: 0x%08x), %d\n", + *dword, saa7146_read(dev, I2C_STATUS), dev->i2c_op); + + if( 0 != (SAA7146_USE_I2C_IRQ & dev->ext->flags)) { + + saa7146_write(dev, I2C_STATUS, dev->i2c_bitrate); + saa7146_write(dev, I2C_TRANSFER, le32_to_cpu(*dword)); + + dev->i2c_op = 1; + SAA7146_ISR_CLEAR(dev, MASK_16|MASK_17); + SAA7146_IER_ENABLE(dev, MASK_16|MASK_17); + saa7146_write(dev, MC2, (MASK_00 | MASK_16)); + + timeout = HZ/100 + 1; /* 10ms */ + timeout = wait_event_interruptible_timeout(dev->i2c_wq, dev->i2c_op == 0, timeout); + if (timeout == -ERESTARTSYS || dev->i2c_op) { + SAA7146_IER_DISABLE(dev, MASK_16|MASK_17); + SAA7146_ISR_CLEAR(dev, MASK_16|MASK_17); + if (timeout == -ERESTARTSYS) + /* a signal arrived */ + return -ERESTARTSYS; + + pr_warn("%s %s [irq]: timed out waiting for end of xfer\n", + dev->name, __func__); + return -EIO; + } + status = saa7146_read(dev, I2C_STATUS); + } else { + saa7146_write(dev, I2C_STATUS, dev->i2c_bitrate); + saa7146_write(dev, I2C_TRANSFER, le32_to_cpu(*dword)); + saa7146_write(dev, MC2, (MASK_00 | MASK_16)); + + /* do not poll for i2c-status before upload is complete */ + timeout = jiffies + HZ/100 + 1; /* 10ms */ + while(1) { + mc2 = (saa7146_read(dev, MC2) & 0x1); + if( 0 != mc2 ) { + break; + } + if (time_after(jiffies,timeout)) { + pr_warn("%s %s: timed out waiting for MC2\n", + dev->name, __func__); + return -EIO; + } + } + /* wait until we get a transfer done or error */ + timeout = jiffies + HZ/100 + 1; /* 10ms */ + /* first read usually delivers bogus results... */ + saa7146_i2c_status(dev); + while(1) { + status = saa7146_i2c_status(dev); + if ((status & 0x3) != 1) + break; + if (time_after(jiffies,timeout)) { + /* this is normal when probing the bus + * (no answer from nonexisistant device...) + */ + pr_warn("%s %s [poll]: timed out waiting for end of xfer\n", + dev->name, __func__); + return -EIO; + } + if (++trial < 50 && short_delay) + udelay(10); + else + msleep(1); + } + } + + /* give a detailed status report */ + if ( 0 != (status & (SAA7146_I2C_SPERR | SAA7146_I2C_APERR | + SAA7146_I2C_DTERR | SAA7146_I2C_DRERR | + SAA7146_I2C_AL | SAA7146_I2C_ERR | + SAA7146_I2C_BUSY)) ) { + + if ( 0 == (status & SAA7146_I2C_ERR) || + 0 == (status & SAA7146_I2C_BUSY) ) { + /* it may take some time until ERR goes high - ignore */ + DEB_I2C("unexpected i2c status %04x\n", status); + } + if( 0 != (status & SAA7146_I2C_SPERR) ) { + DEB_I2C("error due to invalid start/stop condition\n"); + } + if( 0 != (status & SAA7146_I2C_DTERR) ) { + DEB_I2C("error in data transmission\n"); + } + if( 0 != (status & SAA7146_I2C_DRERR) ) { + DEB_I2C("error when receiving data\n"); + } + if( 0 != (status & SAA7146_I2C_AL) ) { + DEB_I2C("error because arbitration lost\n"); + } + + /* we handle address-errors here */ + if( 0 != (status & SAA7146_I2C_APERR) ) { + DEB_I2C("error in address phase\n"); + return -EREMOTEIO; + } + + return -EIO; + } + + /* read back data, just in case we were reading ... 
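+ (after a read transfer the I2C_TRANSFER register holds the bytes returned by
+ the slave, and saa7146_i2c_msg_cleanup() later copies them back into the
+ i2c_msg buffers)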
*/ + *dword = cpu_to_le32(saa7146_read(dev, I2C_TRANSFER)); + + DEB_I2C("after: 0x%08x\n", *dword); + return 0; +} + +static int saa7146_i2c_transfer(struct saa7146_dev *dev, const struct i2c_msg *msgs, int num, int retries) +{ + int i = 0, count = 0; + __le32 *buffer = dev->d_i2c.cpu_addr; + int err = 0; + int short_delay = 0; + + if (mutex_lock_interruptible(&dev->i2c_lock)) + return -ERESTARTSYS; + + for(i=0;i<num;i++) { + DEB_I2C("msg:%d/%d\n", i+1, num); + } + + /* prepare the message(s), get number of u32s to transfer */ + count = saa7146_i2c_msg_prepare(msgs, num, buffer); + if ( 0 > count ) { + err = -EIO; + goto out; + } + + if ( count > 3 || 0 != (SAA7146_I2C_SHORT_DELAY & dev->ext->flags) ) + short_delay = 1; + + do { + /* reset the i2c-device if necessary */ + err = saa7146_i2c_reset(dev); + if ( 0 > err ) { + DEB_I2C("could not reset i2c-device\n"); + goto out; + } + + /* write out the u32s one after another */ + for(i = 0; i < count; i++) { + err = saa7146_i2c_writeout(dev, &buffer[i], short_delay); + if ( 0 != err) { + /* this one is unsatisfying: some i2c slaves on some + dvb cards don't acknowledge correctly, so the saa7146 + thinks that an address error occurred. in that case, the + transaction should be retrying, even if an address error + occurred. analog saa7146 based cards extensively rely on + i2c address probing, however, and address errors indicate that a + device is really *not* there. retrying in that case + increases the time the device needs to probe greatly, so + it should be avoided. So we bail out in irq mode after an + address error and trust the saa7146 address error detection. */ + if (-EREMOTEIO == err && 0 != (SAA7146_USE_I2C_IRQ & dev->ext->flags)) + goto out; + DEB_I2C("error while sending message(s). starting again\n"); + break; + } + } + if( 0 == err ) { + err = num; + break; + } + + /* delay a bit before retrying */ + msleep(10); + + } while (err != num && retries--); + + /* quit if any error occurred */ + if (err != num) + goto out; + + /* if any things had to be read, get the results */ + if ( 0 != saa7146_i2c_msg_cleanup(msgs, num, buffer)) { + DEB_I2C("could not cleanup i2c-message\n"); + err = -EIO; + goto out; + } + + /* return the number of delivered messages */ + DEB_I2C("transmission successful. (msg:%d)\n", err); +out: + /* another bug in revision 0: the i2c-registers get uploaded randomly by other + uploads, so we better clear them out before continuing */ + if( 0 == dev->revision ) { + __le32 zero = 0; + saa7146_i2c_reset(dev); + if( 0 != saa7146_i2c_writeout(dev, &zero, short_delay)) { + pr_info("revision 0 error. 
this should never happen\n"); + } + } + + mutex_unlock(&dev->i2c_lock); + return err; +} + +/* utility functions */ +static int saa7146_i2c_xfer(struct i2c_adapter* adapter, struct i2c_msg *msg, int num) +{ + struct v4l2_device *v4l2_dev = i2c_get_adapdata(adapter); + struct saa7146_dev *dev = to_saa7146_dev(v4l2_dev); + + /* use helper function to transfer data */ + return saa7146_i2c_transfer(dev, msg, num, adapter->retries); +} + + +/*****************************************************************************/ +/* i2c-adapter helper functions */ + +/* exported algorithm data */ +static const struct i2c_algorithm saa7146_algo = { + .master_xfer = saa7146_i2c_xfer, + .functionality = saa7146_i2c_func, +}; + +int saa7146_i2c_adapter_prepare(struct saa7146_dev *dev, struct i2c_adapter *i2c_adapter, u32 bitrate) +{ + DEB_EE("bitrate: 0x%08x\n", bitrate); + + /* enable i2c-port pins */ + saa7146_write(dev, MC1, (MASK_08 | MASK_24)); + + dev->i2c_bitrate = bitrate; + saa7146_i2c_reset(dev); + + if (i2c_adapter) { + i2c_set_adapdata(i2c_adapter, &dev->v4l2_dev); + i2c_adapter->dev.parent = &dev->pci->dev; + i2c_adapter->algo = &saa7146_algo; + i2c_adapter->algo_data = NULL; + i2c_adapter->timeout = SAA7146_I2C_TIMEOUT; + i2c_adapter->retries = SAA7146_I2C_RETRIES; + } + + return 0; +} diff --git a/drivers/media/common/saa7146/saa7146_vbi.c b/drivers/media/common/saa7146/saa7146_vbi.c new file mode 100644 index 0000000000..bb7d81f7eb --- /dev/null +++ b/drivers/media/common/saa7146/saa7146_vbi.c @@ -0,0 +1,447 @@ +// SPDX-License-Identifier: GPL-2.0 +#include + +static int vbi_pixel_to_capture = 720 * 2; + +static int vbi_workaround(struct saa7146_dev *dev) +{ + struct saa7146_vv *vv = dev->vv_data; + + u32 *cpu; + dma_addr_t dma_addr; + + int count = 0; + int i; + + DECLARE_WAITQUEUE(wait, current); + + DEB_VBI("dev:%p\n", dev); + + /* once again, a bug in the saa7146: the brs acquisition + is buggy and especially the BXO-counter does not work + as specified. there is this workaround, but please + don't let me explain it. 
;-) */ + + cpu = dma_alloc_coherent(&dev->pci->dev, 4096, &dma_addr, GFP_KERNEL); + if (NULL == cpu) + return -ENOMEM; + + /* setup some basic programming, just for the workaround */ + saa7146_write(dev, BASE_EVEN3, dma_addr); + saa7146_write(dev, BASE_ODD3, dma_addr+vbi_pixel_to_capture); + saa7146_write(dev, PROT_ADDR3, dma_addr+4096); + saa7146_write(dev, PITCH3, vbi_pixel_to_capture); + saa7146_write(dev, BASE_PAGE3, 0x0); + saa7146_write(dev, NUM_LINE_BYTE3, (2<<16)|((vbi_pixel_to_capture)<<0)); + saa7146_write(dev, MC2, MASK_04|MASK_20); + + /* load brs-control register */ + WRITE_RPS1(CMD_WR_REG | (1 << 8) | (BRS_CTRL/4)); + /* BXO = 1h, BRS to outbound */ + WRITE_RPS1(0xc000008c); + /* wait for vbi_a or vbi_b*/ + if ( 0 != (SAA7146_USE_PORT_B_FOR_VBI & dev->ext_vv_data->flags)) { + DEB_D("...using port b\n"); + WRITE_RPS1(CMD_PAUSE | CMD_OAN | CMD_SIG1 | CMD_E_FID_B); + WRITE_RPS1(CMD_PAUSE | CMD_OAN | CMD_SIG1 | CMD_O_FID_B); +/* + WRITE_RPS1(CMD_PAUSE | MASK_09); +*/ + } else { + DEB_D("...using port a\n"); + WRITE_RPS1(CMD_PAUSE | MASK_10); + } + /* upload brs */ + WRITE_RPS1(CMD_UPLOAD | MASK_08); + /* load brs-control register */ + WRITE_RPS1(CMD_WR_REG | (1 << 8) | (BRS_CTRL/4)); + /* BYO = 1, BXO = NQBIL (=1728 for PAL, for NTSC this is 858*2) - NumByte3 (=1440) = 288 */ + WRITE_RPS1(((1728-(vbi_pixel_to_capture)) << 7) | MASK_19); + /* wait for brs_done */ + WRITE_RPS1(CMD_PAUSE | MASK_08); + /* upload brs */ + WRITE_RPS1(CMD_UPLOAD | MASK_08); + /* load video-dma3 NumLines3 and NumBytes3 */ + WRITE_RPS1(CMD_WR_REG | (1 << 8) | (NUM_LINE_BYTE3/4)); + /* dev->vbi_count*2 lines, 720 pixel (= 1440 Bytes) */ + WRITE_RPS1((2 << 16) | (vbi_pixel_to_capture)); + /* load brs-control register */ + WRITE_RPS1(CMD_WR_REG | (1 << 8) | (BRS_CTRL/4)); + /* Set BRS right: note: this is an experimental value for BXO (=> PAL!) 
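+ (using the same bit layout as the BRS_CTRL write further up, the value below
+ encodes BXO = 540 starting at bit 7 and BYO = 5 starting at bit 19)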
*/ + WRITE_RPS1((540 << 7) | (5 << 19)); // 5 == vbi_start + /* wait for brs_done */ + WRITE_RPS1(CMD_PAUSE | MASK_08); + /* upload brs and video-dma3*/ + WRITE_RPS1(CMD_UPLOAD | MASK_08 | MASK_04); + /* load mc2 register: enable dma3 */ + WRITE_RPS1(CMD_WR_REG | (1 << 8) | (MC1/4)); + WRITE_RPS1(MASK_20 | MASK_04); + /* generate interrupt */ + WRITE_RPS1(CMD_INTERRUPT); + /* stop rps1 */ + WRITE_RPS1(CMD_STOP); + + /* we have to do the workaround twice to be sure that + everything is ok */ + for(i = 0; i < 2; i++) { + + /* indicate to the irq handler that we do the workaround */ + saa7146_write(dev, MC2, MASK_31|MASK_15); + + saa7146_write(dev, NUM_LINE_BYTE3, (1<<16)|(2<<0)); + saa7146_write(dev, MC2, MASK_04|MASK_20); + + /* enable rps1 irqs */ + SAA7146_IER_ENABLE(dev,MASK_28); + + /* prepare to wait to be woken up by the irq-handler */ + add_wait_queue(&vv->vbi_wq, &wait); + set_current_state(TASK_INTERRUPTIBLE); + + /* start rps1 to enable workaround */ + saa7146_write(dev, RPS_ADDR1, dev->d_rps1.dma_handle); + saa7146_write(dev, MC1, (MASK_13 | MASK_29)); + + schedule(); + + DEB_VBI("brs bug workaround %d/1\n", i); + + remove_wait_queue(&vv->vbi_wq, &wait); + __set_current_state(TASK_RUNNING); + + /* disable rps1 irqs */ + SAA7146_IER_DISABLE(dev,MASK_28); + + /* stop video-dma3 */ + saa7146_write(dev, MC1, MASK_20); + + if(signal_pending(current)) { + + DEB_VBI("aborted (rps:0x%08x)\n", + saa7146_read(dev, RPS_ADDR1)); + + /* stop rps1 for sure */ + saa7146_write(dev, MC1, MASK_29); + + dma_free_coherent(&dev->pci->dev, 4096, cpu, dma_addr); + return -EINTR; + } + } + + dma_free_coherent(&dev->pci->dev, 4096, cpu, dma_addr); + return 0; +} + +static void saa7146_set_vbi_capture(struct saa7146_dev *dev, struct saa7146_buf *buf, struct saa7146_buf *next) +{ + struct saa7146_vv *vv = dev->vv_data; + + struct saa7146_video_dma vdma3; + + int count = 0; + unsigned long e_wait = vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? CMD_E_FID_A : CMD_E_FID_B; + unsigned long o_wait = vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? CMD_O_FID_A : CMD_O_FID_B; + +/* + vdma3.base_even = 0xc8000000+2560*70; + vdma3.base_odd = 0xc8000000; + vdma3.prot_addr = 0xc8000000+2560*164; + vdma3.pitch = 2560; + vdma3.base_page = 0; + vdma3.num_line_byte = (64<<16)|((vbi_pixel_to_capture)<<0); // set above! +*/ + vdma3.base_even = buf->pt[2].offset; + vdma3.base_odd = buf->pt[2].offset + 16 * vbi_pixel_to_capture; + vdma3.prot_addr = buf->pt[2].offset + 16 * 2 * vbi_pixel_to_capture; + vdma3.pitch = vbi_pixel_to_capture; + vdma3.base_page = buf->pt[2].dma | ME1; + vdma3.num_line_byte = (16 << 16) | vbi_pixel_to_capture; + + saa7146_write_out_dma(dev, 3, &vdma3); + + /* write beginning of rps-program */ + count = 0; + + /* wait for o_fid_a/b / e_fid_a/b toggle only if bit 1 is not set */ + + /* we don't wait here for the first field anymore. this is different from the video + capture and might cause that the first buffer is only half filled (with only + one field). but since this is some sort of streaming data, this is not that negative. + but by doing this, we can use the whole engine from videobuf-dma-sg.c... 
*/ + +/* + WRITE_RPS1(CMD_PAUSE | CMD_OAN | CMD_SIG1 | e_wait); + WRITE_RPS1(CMD_PAUSE | CMD_OAN | CMD_SIG1 | o_wait); +*/ + /* set bit 1 */ + WRITE_RPS1(CMD_WR_REG | (1 << 8) | (MC2/4)); + WRITE_RPS1(MASK_28 | MASK_12); + + /* turn on video-dma3 */ + WRITE_RPS1(CMD_WR_REG_MASK | (MC1/4)); + WRITE_RPS1(MASK_04 | MASK_20); /* => mask */ + WRITE_RPS1(MASK_04 | MASK_20); /* => values */ + + /* wait for o_fid_a/b / e_fid_a/b toggle */ + WRITE_RPS1(CMD_PAUSE | o_wait); + WRITE_RPS1(CMD_PAUSE | e_wait); + + /* generate interrupt */ + WRITE_RPS1(CMD_INTERRUPT); + + /* stop */ + WRITE_RPS1(CMD_STOP); + + /* enable rps1 irqs */ + SAA7146_IER_ENABLE(dev, MASK_28); + + /* write the address of the rps-program */ + saa7146_write(dev, RPS_ADDR1, dev->d_rps1.dma_handle); + + /* turn on rps */ + saa7146_write(dev, MC1, (MASK_13 | MASK_29)); +} + +static int buffer_activate(struct saa7146_dev *dev, + struct saa7146_buf *buf, + struct saa7146_buf *next) +{ + struct saa7146_vv *vv = dev->vv_data; + + DEB_VBI("dev:%p, buf:%p, next:%p\n", dev, buf, next); + saa7146_set_vbi_capture(dev,buf,next); + + mod_timer(&vv->vbi_dmaq.timeout, jiffies+BUFFER_TIMEOUT); + return 0; +} + +/* ------------------------------------------------------------------ */ + +static int queue_setup(struct vb2_queue *q, + unsigned int *num_buffers, unsigned int *num_planes, + unsigned int sizes[], struct device *alloc_devs[]) +{ + unsigned int size = 16 * 2 * vbi_pixel_to_capture; + + if (*num_planes) + return sizes[0] < size ? -EINVAL : 0; + *num_planes = 1; + sizes[0] = size; + + return 0; +} + +static void buf_queue(struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct vb2_queue *vq = vb->vb2_queue; + struct saa7146_dev *dev = vb2_get_drv_priv(vq); + struct saa7146_buf *buf = container_of(vbuf, struct saa7146_buf, vb); + unsigned long flags; + + spin_lock_irqsave(&dev->slock, flags); + + saa7146_buffer_queue(dev, &dev->vv_data->vbi_dmaq, buf); + spin_unlock_irqrestore(&dev->slock, flags); +} + +static int buf_init(struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct saa7146_buf *buf = container_of(vbuf, struct saa7146_buf, vb); + struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0); + struct scatterlist *list = sgt->sgl; + int length = sgt->nents; + struct vb2_queue *vq = vb->vb2_queue; + struct saa7146_dev *dev = vb2_get_drv_priv(vq); + int ret; + + buf->activate = buffer_activate; + + saa7146_pgtable_alloc(dev->pci, &buf->pt[2]); + + ret = saa7146_pgtable_build_single(dev->pci, &buf->pt[2], + list, length); + if (ret) + saa7146_pgtable_free(dev->pci, &buf->pt[2]); + return ret; +} + +static int buf_prepare(struct vb2_buffer *vb) +{ + unsigned int size = 16 * 2 * vbi_pixel_to_capture; + + if (vb2_plane_size(vb, 0) < size) + return -EINVAL; + vb2_set_plane_payload(vb, 0, size); + return 0; +} + +static void buf_cleanup(struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct saa7146_buf *buf = container_of(vbuf, struct saa7146_buf, vb); + struct vb2_queue *vq = vb->vb2_queue; + struct saa7146_dev *dev = vb2_get_drv_priv(vq); + + saa7146_pgtable_free(dev->pci, &buf->pt[2]); +} + +static void return_buffers(struct vb2_queue *q, int state) +{ + struct saa7146_dev *dev = vb2_get_drv_priv(q); + struct saa7146_dmaqueue *dq = &dev->vv_data->vbi_dmaq; + struct saa7146_buf *buf; + + if (dq->curr) { + buf = dq->curr; + dq->curr = NULL; + vb2_buffer_done(&buf->vb.vb2_buf, state); + } + while (!list_empty(&dq->queue)) { + 
buf = list_entry(dq->queue.next, struct saa7146_buf, list); + list_del(&buf->list); + vb2_buffer_done(&buf->vb.vb2_buf, state); + } +} + +static void vbi_stop(struct saa7146_dev *dev) +{ + struct saa7146_vv *vv = dev->vv_data; + unsigned long flags; + DEB_VBI("dev:%p\n", dev); + + spin_lock_irqsave(&dev->slock,flags); + + /* disable rps1 */ + saa7146_write(dev, MC1, MASK_29); + + /* disable rps1 irqs */ + SAA7146_IER_DISABLE(dev, MASK_28); + + /* shut down dma 3 transfers */ + saa7146_write(dev, MC1, MASK_20); + + del_timer(&vv->vbi_dmaq.timeout); + del_timer(&vv->vbi_read_timeout); + + spin_unlock_irqrestore(&dev->slock, flags); +} + +static void vbi_read_timeout(struct timer_list *t) +{ + struct saa7146_vv *vv = from_timer(vv, t, vbi_read_timeout); + struct saa7146_dev *dev = vv->vbi_dmaq.dev; + + DEB_VBI("dev:%p\n", dev); + + vbi_stop(dev); +} + +static int vbi_begin(struct saa7146_dev *dev) +{ + struct saa7146_vv *vv = dev->vv_data; + u32 arbtr_ctrl = saa7146_read(dev, PCI_BT_V1); + int ret = 0; + + DEB_VBI("dev:%p\n", dev); + + ret = saa7146_res_get(dev, RESOURCE_DMA3_BRS); + if (0 == ret) { + DEB_S("cannot get vbi RESOURCE_DMA3_BRS resource\n"); + return -EBUSY; + } + + /* adjust arbitrition control for video dma 3 */ + arbtr_ctrl &= ~0x1f0000; + arbtr_ctrl |= 0x1d0000; + saa7146_write(dev, PCI_BT_V1, arbtr_ctrl); + saa7146_write(dev, MC2, (MASK_04|MASK_20)); + + vv->vbi_read_timeout.function = vbi_read_timeout; + + /* initialize the brs */ + if ( 0 != (SAA7146_USE_PORT_B_FOR_VBI & dev->ext_vv_data->flags)) { + saa7146_write(dev, BRS_CTRL, MASK_30|MASK_29 | (7 << 19)); + } else { + saa7146_write(dev, BRS_CTRL, 0x00000001); + + if (0 != (ret = vbi_workaround(dev))) { + DEB_VBI("vbi workaround failed!\n"); + /* return ret;*/ + } + } + + /* upload brs register */ + saa7146_write(dev, MC2, (MASK_08|MASK_24)); + return 0; +} + +static int start_streaming(struct vb2_queue *q, unsigned int count) +{ + struct saa7146_dev *dev = vb2_get_drv_priv(q); + int ret; + + if (!vb2_is_streaming(&dev->vv_data->vbi_dmaq.q)) + dev->vv_data->seqnr = 0; + ret = vbi_begin(dev); + if (ret) + return_buffers(q, VB2_BUF_STATE_QUEUED); + return ret; +} + +static void stop_streaming(struct vb2_queue *q) +{ + struct saa7146_dev *dev = vb2_get_drv_priv(q); + + vbi_stop(dev); + return_buffers(q, VB2_BUF_STATE_ERROR); + saa7146_res_free(dev, RESOURCE_DMA3_BRS); +} + +const struct vb2_ops vbi_qops = { + .queue_setup = queue_setup, + .buf_queue = buf_queue, + .buf_init = buf_init, + .buf_prepare = buf_prepare, + .buf_cleanup = buf_cleanup, + .start_streaming = start_streaming, + .stop_streaming = stop_streaming, + .wait_prepare = vb2_ops_wait_prepare, + .wait_finish = vb2_ops_wait_finish, +}; + +/* ------------------------------------------------------------------ */ + +static void vbi_init(struct saa7146_dev *dev, struct saa7146_vv *vv) +{ + DEB_VBI("dev:%p\n", dev); + + INIT_LIST_HEAD(&vv->vbi_dmaq.queue); + + timer_setup(&vv->vbi_dmaq.timeout, saa7146_buffer_timeout, 0); + vv->vbi_dmaq.dev = dev; + + init_waitqueue_head(&vv->vbi_wq); +} + +static void vbi_irq_done(struct saa7146_dev *dev, unsigned long status) +{ + struct saa7146_vv *vv = dev->vv_data; + spin_lock(&dev->slock); + + if (vv->vbi_dmaq.curr) { + DEB_VBI("dev:%p, curr:%p\n", dev, vv->vbi_dmaq.curr); + saa7146_buffer_finish(dev, &vv->vbi_dmaq, VB2_BUF_STATE_DONE); + } else { + DEB_VBI("dev:%p\n", dev); + } + saa7146_buffer_next(dev, &vv->vbi_dmaq, 1); + + spin_unlock(&dev->slock); +} + +const struct saa7146_use_ops saa7146_vbi_uops = { + .init = vbi_init, 
+ .irq_done = vbi_irq_done, +}; diff --git a/drivers/media/common/saa7146/saa7146_video.c b/drivers/media/common/saa7146/saa7146_video.c new file mode 100644 index 0000000000..040489e15e --- /dev/null +++ b/drivers/media/common/saa7146/saa7146_video.c @@ -0,0 +1,725 @@ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include + +/* format descriptions for capture and preview */ +static struct saa7146_format formats[] = { + { + .pixelformat = V4L2_PIX_FMT_RGB332, + .trans = RGB08_COMPOSED, + .depth = 8, + .flags = 0, + }, { + .pixelformat = V4L2_PIX_FMT_RGB565, + .trans = RGB16_COMPOSED, + .depth = 16, + .flags = 0, + }, { + .pixelformat = V4L2_PIX_FMT_BGR24, + .trans = RGB24_COMPOSED, + .depth = 24, + .flags = 0, + }, { + .pixelformat = V4L2_PIX_FMT_BGR32, + .trans = RGB32_COMPOSED, + .depth = 32, + .flags = 0, + }, { + .pixelformat = V4L2_PIX_FMT_RGB32, + .trans = RGB32_COMPOSED, + .depth = 32, + .flags = 0, + .swap = 0x2, + }, { + .pixelformat = V4L2_PIX_FMT_GREY, + .trans = Y8, + .depth = 8, + .flags = 0, + }, { + .pixelformat = V4L2_PIX_FMT_YUV422P, + .trans = YUV422_DECOMPOSED, + .depth = 16, + .flags = FORMAT_IS_PLANAR, + }, { + .pixelformat = V4L2_PIX_FMT_YVU420, + .trans = YUV420_DECOMPOSED, + .depth = 12, + .flags = FORMAT_BYTE_SWAP|FORMAT_IS_PLANAR, + }, { + .pixelformat = V4L2_PIX_FMT_YUV420, + .trans = YUV420_DECOMPOSED, + .depth = 12, + .flags = FORMAT_IS_PLANAR, + }, { + .pixelformat = V4L2_PIX_FMT_UYVY, + .trans = YUV422_COMPOSED, + .depth = 16, + .flags = 0, + } +}; + +/* unfortunately, the saa7146 contains a bug which prevents it from doing on-the-fly byte swaps. + due to this, it's impossible to provide additional *packed* formats, which are simply byte swapped + (like V4L2_PIX_FMT_YUYV) ... 8-( */ + +struct saa7146_format* saa7146_format_by_fourcc(struct saa7146_dev *dev, int fourcc) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(formats); i++) { + if (formats[i].pixelformat == fourcc) { + return formats+i; + } + } + + DEB_D("unknown pixelformat:'%4.4s'\n", (char *)&fourcc); + return NULL; +} + +/********************************************************************************/ +/* common pagetable functions */ + +static int saa7146_pgtable_build(struct saa7146_dev *dev, struct saa7146_buf *buf) +{ + struct saa7146_vv *vv = dev->vv_data; + struct pci_dev *pci = dev->pci; + struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0); + struct scatterlist *list = sgt->sgl; + int length = sgt->nents; + struct v4l2_pix_format *pix = &vv->video_fmt; + struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev, pix->pixelformat); + + DEB_EE("dev:%p, buf:%p, sg_len:%d\n", dev, buf, length); + + if( 0 != IS_PLANAR(sfmt->trans)) { + struct saa7146_pgtable *pt1 = &buf->pt[0]; + struct saa7146_pgtable *pt2 = &buf->pt[1]; + struct saa7146_pgtable *pt3 = &buf->pt[2]; + struct sg_dma_page_iter dma_iter; + __le32 *ptr1, *ptr2, *ptr3; + __le32 fill; + + int size = pix->width * pix->height; + int i, m1, m2, m3, o1, o2; + + switch( sfmt->depth ) { + case 12: { + /* create some offsets inside the page table */ + m1 = ((size + PAGE_SIZE) / PAGE_SIZE) - 1; + m2 = ((size + (size / 4) + PAGE_SIZE) / PAGE_SIZE) - 1; + m3 = ((size + (size / 2) + PAGE_SIZE) / PAGE_SIZE) - 1; + o1 = size % PAGE_SIZE; + o2 = (size + (size / 4)) % PAGE_SIZE; + DEB_CAP("size:%d, m1:%d, m2:%d, m3:%d, o1:%d, o2:%d\n", + size, m1, m2, m3, o1, o2); + break; + } + case 16: { + /* create some offsets inside the page table */ + m1 = ((size + PAGE_SIZE) / PAGE_SIZE) - 1; + m2 = ((size 
+ (size / 2) + PAGE_SIZE) / PAGE_SIZE) - 1; + m3 = ((2 * size + PAGE_SIZE) / PAGE_SIZE) - 1; + o1 = size % PAGE_SIZE; + o2 = (size + (size / 2)) % PAGE_SIZE; + DEB_CAP("size:%d, m1:%d, m2:%d, m3:%d, o1:%d, o2:%d\n", + size, m1, m2, m3, o1, o2); + break; + } + default: { + return -1; + } + } + + ptr1 = pt1->cpu; + ptr2 = pt2->cpu; + ptr3 = pt3->cpu; + + for_each_sg_dma_page(list, &dma_iter, length, 0) + *ptr1++ = cpu_to_le32(sg_page_iter_dma_address(&dma_iter) - list->offset); + + /* if we have a user buffer, the first page may not be + aligned to a page boundary. */ + pt1->offset = sgt->sgl->offset; + pt2->offset = pt1->offset + o1; + pt3->offset = pt1->offset + o2; + + /* create video-dma2 page table */ + ptr1 = pt1->cpu; + for (i = m1; i <= m2; i++, ptr2++) + *ptr2 = ptr1[i]; + fill = *(ptr2 - 1); + for (; i < 1024; i++, ptr2++) + *ptr2 = fill; + /* create video-dma3 page table */ + ptr1 = pt1->cpu; + for (i = m2; i <= m3; i++, ptr3++) + *ptr3 = ptr1[i]; + fill = *(ptr3 - 1); + for (; i < 1024; i++, ptr3++) + *ptr3 = fill; + /* finally: finish up video-dma1 page table */ + ptr1 = pt1->cpu + m1; + fill = pt1->cpu[m1]; + for (i = m1; i < 1024; i++, ptr1++) + *ptr1 = fill; + } else { + struct saa7146_pgtable *pt = &buf->pt[0]; + + return saa7146_pgtable_build_single(pci, pt, list, length); + } + + return 0; +} + + +/********************************************************************************/ +/* file operations */ + +static int video_begin(struct saa7146_dev *dev) +{ + struct saa7146_vv *vv = dev->vv_data; + struct saa7146_format *fmt = NULL; + unsigned int resource; + int ret = 0; + + DEB_EE("dev:%p\n", dev); + + fmt = saa7146_format_by_fourcc(dev, vv->video_fmt.pixelformat); + /* we need to have a valid format set here */ + if (!fmt) + return -EINVAL; + + if (0 != (fmt->flags & FORMAT_IS_PLANAR)) { + resource = RESOURCE_DMA1_HPS|RESOURCE_DMA2_CLP|RESOURCE_DMA3_BRS; + } else { + resource = RESOURCE_DMA1_HPS; + } + + ret = saa7146_res_get(dev, resource); + if (0 == ret) { + DEB_S("cannot get capture resource %d\n", resource); + return -EBUSY; + } + + /* clear out beginning of streaming bit (rps register 0)*/ + saa7146_write(dev, MC2, MASK_27 ); + + /* enable rps0 irqs */ + SAA7146_IER_ENABLE(dev, MASK_27); + + return 0; +} + +static void video_end(struct saa7146_dev *dev) +{ + struct saa7146_vv *vv = dev->vv_data; + struct saa7146_format *fmt = NULL; + unsigned long flags; + unsigned int resource; + u32 dmas = 0; + DEB_EE("dev:%p\n", dev); + + fmt = saa7146_format_by_fourcc(dev, vv->video_fmt.pixelformat); + /* we need to have a valid format set here */ + if (!fmt) + return; + + if (0 != (fmt->flags & FORMAT_IS_PLANAR)) { + resource = RESOURCE_DMA1_HPS|RESOURCE_DMA2_CLP|RESOURCE_DMA3_BRS; + dmas = MASK_22 | MASK_21 | MASK_20; + } else { + resource = RESOURCE_DMA1_HPS; + dmas = MASK_22; + } + spin_lock_irqsave(&dev->slock,flags); + + /* disable rps0 */ + saa7146_write(dev, MC1, MASK_28); + + /* disable rps0 irqs */ + SAA7146_IER_DISABLE(dev, MASK_27); + + /* shut down all used video dma transfers */ + saa7146_write(dev, MC1, dmas); + + spin_unlock_irqrestore(&dev->slock, flags); + + saa7146_res_free(dev, resource); +} + +static int vidioc_querycap(struct file *file, void *fh, struct v4l2_capability *cap) +{ + struct saa7146_dev *dev = video_drvdata(file); + + strscpy((char *)cap->driver, "saa7146 v4l2", sizeof(cap->driver)); + strscpy((char *)cap->card, dev->ext->name, sizeof(cap->card)); + cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | + V4L2_CAP_READWRITE | V4L2_CAP_STREAMING | + 
V4L2_CAP_DEVICE_CAPS; + cap->capabilities |= dev->ext_vv_data->capabilities; + return 0; +} + +static int vidioc_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdesc *f) +{ + if (f->index >= ARRAY_SIZE(formats)) + return -EINVAL; + f->pixelformat = formats[f->index].pixelformat; + return 0; +} + +int saa7146_s_ctrl(struct v4l2_ctrl *ctrl) +{ + struct saa7146_dev *dev = container_of(ctrl->handler, + struct saa7146_dev, ctrl_handler); + struct saa7146_vv *vv = dev->vv_data; + u32 val; + + switch (ctrl->id) { + case V4L2_CID_BRIGHTNESS: + val = saa7146_read(dev, BCS_CTRL); + val &= 0x00ffffff; + val |= (ctrl->val << 24); + saa7146_write(dev, BCS_CTRL, val); + saa7146_write(dev, MC2, MASK_22 | MASK_06); + break; + + case V4L2_CID_CONTRAST: + val = saa7146_read(dev, BCS_CTRL); + val &= 0xff00ffff; + val |= (ctrl->val << 16); + saa7146_write(dev, BCS_CTRL, val); + saa7146_write(dev, MC2, MASK_22 | MASK_06); + break; + + case V4L2_CID_SATURATION: + val = saa7146_read(dev, BCS_CTRL); + val &= 0xffffff00; + val |= (ctrl->val << 0); + saa7146_write(dev, BCS_CTRL, val); + saa7146_write(dev, MC2, MASK_22 | MASK_06); + break; + + case V4L2_CID_HFLIP: + /* fixme: we can support changing VFLIP and HFLIP here... */ + if (vb2_is_busy(&vv->video_dmaq.q)) + return -EBUSY; + vv->hflip = ctrl->val; + break; + + case V4L2_CID_VFLIP: + if (vb2_is_busy(&vv->video_dmaq.q)) + return -EBUSY; + vv->vflip = ctrl->val; + break; + + default: + return -EINVAL; + } + return 0; +} + +static int vidioc_g_parm(struct file *file, void *fh, + struct v4l2_streamparm *parm) +{ + struct saa7146_dev *dev = video_drvdata(file); + struct saa7146_vv *vv = dev->vv_data; + + if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) + return -EINVAL; + parm->parm.capture.readbuffers = 1; + v4l2_video_std_frame_period(vv->standard->id, + &parm->parm.capture.timeperframe); + return 0; +} + +static int vidioc_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f) +{ + struct saa7146_dev *dev = video_drvdata(file); + struct saa7146_vv *vv = dev->vv_data; + + f->fmt.pix = vv->video_fmt; + return 0; +} + +static int vidioc_g_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *f) +{ + struct saa7146_dev *dev = video_drvdata(file); + struct saa7146_vv *vv = dev->vv_data; + + f->fmt.vbi = vv->vbi_fmt; + return 0; +} + +static int vidioc_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f) +{ + struct saa7146_dev *dev = video_drvdata(file); + struct saa7146_vv *vv = dev->vv_data; + struct saa7146_format *fmt; + enum v4l2_field field; + int maxw, maxh; + int calc_bpl; + + DEB_EE("V4L2_BUF_TYPE_VIDEO_CAPTURE: dev:%p, fh:%p\n", dev, fh); + + fmt = saa7146_format_by_fourcc(dev, f->fmt.pix.pixelformat); + if (NULL == fmt) + return -EINVAL; + + field = f->fmt.pix.field; + maxw = vv->standard->h_max_out; + maxh = vv->standard->v_max_out; + + if (V4L2_FIELD_ANY == field) { + field = (f->fmt.pix.height > maxh / 2) + ? 
V4L2_FIELD_INTERLACED + : V4L2_FIELD_BOTTOM; + } + switch (field) { + case V4L2_FIELD_ALTERNATE: + case V4L2_FIELD_TOP: + case V4L2_FIELD_BOTTOM: + maxh = maxh / 2; + break; + default: + field = V4L2_FIELD_INTERLACED; + break; + } + + f->fmt.pix.field = field; + f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; + if (f->fmt.pix.width < 48) + f->fmt.pix.width = 48; + if (f->fmt.pix.height < 32) + f->fmt.pix.height = 32; + if (f->fmt.pix.width > maxw) + f->fmt.pix.width = maxw; + if (f->fmt.pix.height > maxh) + f->fmt.pix.height = maxh; + + calc_bpl = (f->fmt.pix.width * fmt->depth) / 8; + + if (f->fmt.pix.bytesperline < calc_bpl) + f->fmt.pix.bytesperline = calc_bpl; + + if (f->fmt.pix.bytesperline > (2 * PAGE_SIZE * fmt->depth) / 8) /* arbitrary constraint */ + f->fmt.pix.bytesperline = calc_bpl; + + f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height; + DEB_D("w:%d, h:%d, bytesperline:%d, sizeimage:%d\n", + f->fmt.pix.width, f->fmt.pix.height, + f->fmt.pix.bytesperline, f->fmt.pix.sizeimage); + + return 0; +} + +static int vidioc_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f) +{ + struct saa7146_dev *dev = video_drvdata(file); + struct saa7146_vv *vv = dev->vv_data; + int err; + + DEB_EE("V4L2_BUF_TYPE_VIDEO_CAPTURE: dev:%p\n", dev); + if (vb2_is_busy(&vv->video_dmaq.q)) { + DEB_EE("streaming capture is active\n"); + return -EBUSY; + } + err = vidioc_try_fmt_vid_cap(file, fh, f); + if (0 != err) + return err; + switch (f->fmt.pix.field) { + case V4L2_FIELD_ALTERNATE: + vv->last_field = V4L2_FIELD_TOP; + break; + default: + vv->last_field = V4L2_FIELD_INTERLACED; + break; + } + vv->video_fmt = f->fmt.pix; + DEB_EE("set to pixelformat '%4.4s'\n", + (char *)&vv->video_fmt.pixelformat); + return 0; +} + +static int vidioc_g_std(struct file *file, void *fh, v4l2_std_id *norm) +{ + struct saa7146_dev *dev = video_drvdata(file); + struct saa7146_vv *vv = dev->vv_data; + + *norm = vv->standard->id; + return 0; +} + +static int vidioc_s_std(struct file *file, void *fh, v4l2_std_id id) +{ + struct saa7146_dev *dev = video_drvdata(file); + struct saa7146_vv *vv = dev->vv_data; + int found = 0; + int i; + + DEB_EE("VIDIOC_S_STD\n"); + + for (i = 0; i < dev->ext_vv_data->num_stds; i++) + if (id & dev->ext_vv_data->stds[i].id) + break; + + if (i != dev->ext_vv_data->num_stds && + vv->standard == &dev->ext_vv_data->stds[i]) + return 0; + + if (vb2_is_busy(&vv->video_dmaq.q) || vb2_is_busy(&vv->vbi_dmaq.q)) { + DEB_D("cannot change video standard while streaming capture is active\n"); + return -EBUSY; + } + + if (i != dev->ext_vv_data->num_stds) { + vv->standard = &dev->ext_vv_data->stds[i]; + if (NULL != dev->ext_vv_data->std_callback) + dev->ext_vv_data->std_callback(dev, vv->standard); + found = 1; + } + + if (!found) { + DEB_EE("VIDIOC_S_STD: standard not found\n"); + return -EINVAL; + } + + DEB_EE("VIDIOC_S_STD: set to standard to '%s'\n", vv->standard->name); + return 0; +} + +const struct v4l2_ioctl_ops saa7146_video_ioctl_ops = { + .vidioc_querycap = vidioc_querycap, + .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap, + .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap, + .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap, + .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap, + .vidioc_g_std = vidioc_g_std, + .vidioc_s_std = vidioc_s_std, + .vidioc_g_parm = vidioc_g_parm, + .vidioc_reqbufs = vb2_ioctl_reqbufs, + .vidioc_create_bufs = vb2_ioctl_create_bufs, + .vidioc_querybuf = vb2_ioctl_querybuf, + .vidioc_qbuf = vb2_ioctl_qbuf, + .vidioc_dqbuf = vb2_ioctl_dqbuf, + 
.vidioc_streamon = vb2_ioctl_streamon, + .vidioc_streamoff = vb2_ioctl_streamoff, + .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, + .vidioc_unsubscribe_event = v4l2_event_unsubscribe, +}; + +const struct v4l2_ioctl_ops saa7146_vbi_ioctl_ops = { + .vidioc_querycap = vidioc_querycap, + .vidioc_g_fmt_vbi_cap = vidioc_g_fmt_vbi_cap, + .vidioc_try_fmt_vbi_cap = vidioc_g_fmt_vbi_cap, + .vidioc_s_fmt_vbi_cap = vidioc_g_fmt_vbi_cap, + .vidioc_g_std = vidioc_g_std, + .vidioc_s_std = vidioc_s_std, + .vidioc_g_parm = vidioc_g_parm, + .vidioc_reqbufs = vb2_ioctl_reqbufs, + .vidioc_create_bufs = vb2_ioctl_create_bufs, + .vidioc_querybuf = vb2_ioctl_querybuf, + .vidioc_qbuf = vb2_ioctl_qbuf, + .vidioc_dqbuf = vb2_ioctl_dqbuf, + .vidioc_streamon = vb2_ioctl_streamon, + .vidioc_streamoff = vb2_ioctl_streamoff, + .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, + .vidioc_unsubscribe_event = v4l2_event_unsubscribe, +}; + +/*********************************************************************************/ +/* buffer handling functions */ + +static int buffer_activate (struct saa7146_dev *dev, + struct saa7146_buf *buf, + struct saa7146_buf *next) +{ + struct saa7146_vv *vv = dev->vv_data; + + saa7146_set_capture(dev,buf,next); + + mod_timer(&vv->video_dmaq.timeout, jiffies+BUFFER_TIMEOUT); + return 0; +} + +static void release_all_pagetables(struct saa7146_dev *dev, struct saa7146_buf *buf) +{ + saa7146_pgtable_free(dev->pci, &buf->pt[0]); + saa7146_pgtable_free(dev->pci, &buf->pt[1]); + saa7146_pgtable_free(dev->pci, &buf->pt[2]); +} + +static int queue_setup(struct vb2_queue *q, + unsigned int *num_buffers, unsigned int *num_planes, + unsigned int sizes[], struct device *alloc_devs[]) +{ + struct saa7146_dev *dev = vb2_get_drv_priv(q); + unsigned int size = dev->vv_data->video_fmt.sizeimage; + + if (*num_planes) + return sizes[0] < size ? 
-EINVAL : 0; + *num_planes = 1; + sizes[0] = size; + + return 0; +} + +static void buf_queue(struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct vb2_queue *vq = vb->vb2_queue; + struct saa7146_dev *dev = vb2_get_drv_priv(vq); + struct saa7146_buf *buf = container_of(vbuf, struct saa7146_buf, vb); + unsigned long flags; + + spin_lock_irqsave(&dev->slock, flags); + + saa7146_buffer_queue(dev, &dev->vv_data->video_dmaq, buf); + spin_unlock_irqrestore(&dev->slock, flags); +} + +static int buf_init(struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct saa7146_buf *buf = container_of(vbuf, struct saa7146_buf, vb); + struct vb2_queue *vq = vb->vb2_queue; + struct saa7146_dev *dev = vb2_get_drv_priv(vq); + struct saa7146_vv *vv = dev->vv_data; + struct saa7146_format *sfmt; + int ret; + + buf->activate = buffer_activate; + sfmt = saa7146_format_by_fourcc(dev, vv->video_fmt.pixelformat); + + if (IS_PLANAR(sfmt->trans)) { + saa7146_pgtable_alloc(dev->pci, &buf->pt[0]); + saa7146_pgtable_alloc(dev->pci, &buf->pt[1]); + saa7146_pgtable_alloc(dev->pci, &buf->pt[2]); + } else { + saa7146_pgtable_alloc(dev->pci, &buf->pt[0]); + } + + ret = saa7146_pgtable_build(dev, buf); + if (ret) + release_all_pagetables(dev, buf); + return ret; +} + +static int buf_prepare(struct vb2_buffer *vb) +{ + struct vb2_queue *vq = vb->vb2_queue; + struct saa7146_dev *dev = vb2_get_drv_priv(vq); + struct saa7146_vv *vv = dev->vv_data; + unsigned int size = vv->video_fmt.sizeimage; + + if (vb2_plane_size(vb, 0) < size) + return -EINVAL; + vb2_set_plane_payload(vb, 0, size); + return 0; +} + +static void buf_cleanup(struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct saa7146_buf *buf = container_of(vbuf, struct saa7146_buf, vb); + struct vb2_queue *vq = vb->vb2_queue; + struct saa7146_dev *dev = vb2_get_drv_priv(vq); + + release_all_pagetables(dev, buf); +} + +static void return_buffers(struct vb2_queue *q, int state) +{ + struct saa7146_dev *dev = vb2_get_drv_priv(q); + struct saa7146_dmaqueue *dq = &dev->vv_data->video_dmaq; + struct saa7146_buf *buf; + + if (dq->curr) { + buf = dq->curr; + dq->curr = NULL; + vb2_buffer_done(&buf->vb.vb2_buf, state); + } + while (!list_empty(&dq->queue)) { + buf = list_entry(dq->queue.next, struct saa7146_buf, list); + list_del(&buf->list); + vb2_buffer_done(&buf->vb.vb2_buf, state); + } +} + +static int start_streaming(struct vb2_queue *q, unsigned int count) +{ + struct saa7146_dev *dev = vb2_get_drv_priv(q); + int ret; + + if (!vb2_is_streaming(&dev->vv_data->video_dmaq.q)) + dev->vv_data->seqnr = 0; + ret = video_begin(dev); + if (ret) + return_buffers(q, VB2_BUF_STATE_QUEUED); + return ret; +} + +static void stop_streaming(struct vb2_queue *q) +{ + struct saa7146_dev *dev = vb2_get_drv_priv(q); + struct saa7146_dmaqueue *dq = &dev->vv_data->video_dmaq; + + del_timer(&dq->timeout); + video_end(dev); + return_buffers(q, VB2_BUF_STATE_ERROR); +} + +const struct vb2_ops video_qops = { + .queue_setup = queue_setup, + .buf_queue = buf_queue, + .buf_init = buf_init, + .buf_prepare = buf_prepare, + .buf_cleanup = buf_cleanup, + .start_streaming = start_streaming, + .stop_streaming = stop_streaming, + .wait_prepare = vb2_ops_wait_prepare, + .wait_finish = vb2_ops_wait_finish, +}; + +/********************************************************************************/ +/* file operations */ + +static void video_init(struct saa7146_dev *dev, struct saa7146_vv *vv) +{ + 
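	/* a cross-reference for the defaults set up below: vv->standard is
	 * only changed afterwards through vidioc_s_std() above, and the
	 * video_dmaq timeout timer registered here (saa7146_buffer_timeout)
	 * is the one armed by buffer_activate() with
	 * mod_timer(..., jiffies + BUFFER_TIMEOUT). */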
INIT_LIST_HEAD(&vv->video_dmaq.queue); + + timer_setup(&vv->video_dmaq.timeout, saa7146_buffer_timeout, 0); + vv->video_dmaq.dev = dev; + + /* set some default values */ + vv->standard = &dev->ext_vv_data->stds[0]; + + /* FIXME: what's this? */ + vv->current_hps_source = SAA7146_HPS_SOURCE_PORT_A; + vv->current_hps_sync = SAA7146_HPS_SYNC_PORT_A; +} + +static void video_irq_done(struct saa7146_dev *dev, unsigned long st) +{ + struct saa7146_vv *vv = dev->vv_data; + struct saa7146_dmaqueue *q = &vv->video_dmaq; + + spin_lock(&dev->slock); + DEB_CAP("called\n"); + + /* only finish the buffer if we have one... */ + if (q->curr) + saa7146_buffer_finish(dev, q, VB2_BUF_STATE_DONE); + saa7146_buffer_next(dev,q,0); + + spin_unlock(&dev->slock); +} + +const struct saa7146_use_ops saa7146_video_uops = { + .init = video_init, + .irq_done = video_irq_done, +}; diff --git a/drivers/media/common/siano/Kconfig b/drivers/media/common/siano/Kconfig new file mode 100644 index 0000000000..37fa6597b4 --- /dev/null +++ b/drivers/media/common/siano/Kconfig @@ -0,0 +1,33 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Siano Mobile Silicon Digital TV device configuration +# + +config SMS_SIANO_MDTV + tristate + depends on DVB_CORE && HAS_DMA + depends on !RC_CORE || RC_CORE + depends on SMS_USB_DRV || SMS_SDIO_DRV + default y + +config SMS_SIANO_RC + bool "Enable Remote Controller support for Siano devices" + depends on SMS_SIANO_MDTV && RC_CORE + depends on SMS_USB_DRV || SMS_SDIO_DRV + depends on MEDIA_COMMON_OPTIONS + default y + help + Choose Y to select Remote Controller support for Siano driver. + +config SMS_SIANO_DEBUGFS + bool "Enable debugfs for smsdvb" + depends on SMS_SIANO_MDTV + depends on DEBUG_FS + depends on SMS_USB_DRV = SMS_SDIO_DRV + + help + Choose Y to enable visualizing a dump of the frontend + statistics response packets via debugfs. Currently, works + only with Siano USB devices. + + Useful only for developers. In doubt, say N. 
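# A short note on the dependency expressions above (the Kconfig semantics
# are standard; the sample values below are an assumption, not part of this
# patch): "!RC_CORE || RC_CORE" caps SMS_SIANO_MDTV at =m whenever
# RC_CORE=m, and "SMS_USB_DRV = SMS_SDIO_DRV" only allows
# SMS_SIANO_DEBUGFS when the USB and SDIO front ends are configured to the
# same value. A .config fragment satisfying every option in this file
# could look like:
#
#   CONFIG_DVB_CORE=m
#   CONFIG_RC_CORE=y
#   CONFIG_DEBUG_FS=y
#   CONFIG_MEDIA_COMMON_OPTIONS=y
#   CONFIG_SMS_USB_DRV=m
#   CONFIG_SMS_SDIO_DRV=m
#   CONFIG_SMS_SIANO_MDTV=m
#   CONFIG_SMS_SIANO_RC=y
#   CONFIG_SMS_SIANO_DEBUGFS=y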
diff --git a/drivers/media/common/siano/Makefile b/drivers/media/common/siano/Makefile new file mode 100644 index 0000000000..b33022e0be --- /dev/null +++ b/drivers/media/common/siano/Makefile @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0 +smsmdtv-objs := smscoreapi.o sms-cards.o smsendian.o +smsdvb-objs := smsdvb-main.o + +obj-$(CONFIG_SMS_SIANO_MDTV) += smsmdtv.o smsdvb.o + +ifeq ($(CONFIG_SMS_SIANO_RC),y) + smsmdtv-objs += smsir.o +endif + +ifeq ($(CONFIG_SMS_SIANO_DEBUGFS),y) + smsdvb-objs += smsdvb-debugfs.o +endif diff --git a/drivers/media/common/siano/sms-cards.c b/drivers/media/common/siano/sms-cards.c new file mode 100644 index 0000000000..d4a116ab6c --- /dev/null +++ b/drivers/media/common/siano/sms-cards.c @@ -0,0 +1,348 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Card-specific functions for the Siano SMS1xxx USB dongle + * + * Copyright (c) 2008 Michael Krufky + */ + +#include "sms-cards.h" +#include "smsir.h" +#include + +static struct sms_board sms_boards[] = { + [SMS_BOARD_UNKNOWN] = { + .name = "Unknown board", + .type = SMS_UNKNOWN_TYPE, + .default_mode = DEVICE_MODE_NONE, + }, + [SMS1XXX_BOARD_SIANO_STELLAR] = { + .name = "Siano Stellar Digital Receiver", + .type = SMS_STELLAR, + .default_mode = DEVICE_MODE_DVBT_BDA, + }, + [SMS1XXX_BOARD_SIANO_NOVA_A] = { + .name = "Siano Nova A Digital Receiver", + .type = SMS_NOVA_A0, + .default_mode = DEVICE_MODE_DVBT_BDA, + }, + [SMS1XXX_BOARD_SIANO_NOVA_B] = { + .name = "Siano Nova B Digital Receiver", + .type = SMS_NOVA_B0, + .default_mode = DEVICE_MODE_DVBT_BDA, + }, + [SMS1XXX_BOARD_SIANO_VEGA] = { + .name = "Siano Vega Digital Receiver", + .type = SMS_VEGA, + .default_mode = DEVICE_MODE_CMMB, + }, + [SMS1XXX_BOARD_HAUPPAUGE_CATAMOUNT] = { + .name = "Hauppauge Catamount", + .type = SMS_STELLAR, + .fw[DEVICE_MODE_DVBT_BDA] = SMS_FW_DVBT_STELLAR, + .default_mode = DEVICE_MODE_DVBT_BDA, + }, + [SMS1XXX_BOARD_HAUPPAUGE_OKEMO_A] = { + .name = "Hauppauge Okemo-A", + .type = SMS_NOVA_A0, + .fw[DEVICE_MODE_DVBT_BDA] = SMS_FW_DVBT_NOVA_A, + .default_mode = DEVICE_MODE_DVBT_BDA, + }, + [SMS1XXX_BOARD_HAUPPAUGE_OKEMO_B] = { + .name = "Hauppauge Okemo-B", + .type = SMS_NOVA_B0, + .fw[DEVICE_MODE_DVBT_BDA] = SMS_FW_DVBT_NOVA_B, + .default_mode = DEVICE_MODE_DVBT_BDA, + }, + [SMS1XXX_BOARD_HAUPPAUGE_WINDHAM] = { + .name = "Hauppauge WinTV MiniStick", + .type = SMS_NOVA_B0, + .fw[DEVICE_MODE_ISDBT_BDA] = SMS_FW_ISDBT_HCW_55XXX, + .fw[DEVICE_MODE_DVBT_BDA] = SMS_FW_DVBT_HCW_55XXX, + .default_mode = DEVICE_MODE_DVBT_BDA, + .rc_codes = RC_MAP_HAUPPAUGE, + .board_cfg.leds_power = 26, + .board_cfg.led0 = 27, + .board_cfg.led1 = 28, + .board_cfg.ir = 9, + .led_power = 26, + .led_lo = 27, + .led_hi = 28, + }, + [SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD] = { + .name = "Hauppauge WinTV MiniCard", + .type = SMS_NOVA_B0, + .fw[DEVICE_MODE_DVBT_BDA] = SMS_FW_DVBT_HCW_55XXX, + .default_mode = DEVICE_MODE_DVBT_BDA, + .lna_ctrl = 29, + .board_cfg.foreign_lna0_ctrl = 29, + .rf_switch = 17, + .board_cfg.rf_switch_uhf = 17, + }, + [SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2] = { + .name = "Hauppauge WinTV MiniCard Rev 2", + .type = SMS_NOVA_B0, + .fw[DEVICE_MODE_DVBT_BDA] = SMS_FW_DVBT_HCW_55XXX, + .default_mode = DEVICE_MODE_DVBT_BDA, + .lna_ctrl = -1, + }, + [SMS1XXX_BOARD_SIANO_NICE] = { + .name = "Siano Nice Digital Receiver", + .type = SMS_NOVA_B0, + .default_mode = DEVICE_MODE_DVBT_BDA, + }, + [SMS1XXX_BOARD_SIANO_VENICE] = { + .name = "Siano Venice Digital Receiver", + .type = SMS_VEGA, + .default_mode = DEVICE_MODE_CMMB, + }, + 
[SMS1XXX_BOARD_SIANO_STELLAR_ROM] = { + .name = "Siano Stellar Digital Receiver ROM", + .type = SMS_STELLAR, + .default_mode = DEVICE_MODE_DVBT_BDA, + .intf_num = 1, + }, + [SMS1XXX_BOARD_ZTE_DVB_DATA_CARD] = { + .name = "ZTE Data Card Digital Receiver", + .type = SMS_NOVA_B0, + .default_mode = DEVICE_MODE_DVBT_BDA, + .intf_num = 5, + .mtu = 15792, + }, + [SMS1XXX_BOARD_ONDA_MDTV_DATA_CARD] = { + .name = "ONDA Data Card Digital Receiver", + .type = SMS_NOVA_B0, + .default_mode = DEVICE_MODE_DVBT_BDA, + .intf_num = 6, + .mtu = 15792, + }, + [SMS1XXX_BOARD_SIANO_MING] = { + .name = "Siano Ming Digital Receiver", + .type = SMS_MING, + .default_mode = DEVICE_MODE_CMMB, + }, + [SMS1XXX_BOARD_SIANO_PELE] = { + .name = "Siano Pele Digital Receiver", + .type = SMS_PELE, + .default_mode = DEVICE_MODE_ISDBT_BDA, + }, + [SMS1XXX_BOARD_SIANO_RIO] = { + .name = "Siano Rio Digital Receiver", + .type = SMS_RIO, + .default_mode = DEVICE_MODE_ISDBT_BDA, + }, + [SMS1XXX_BOARD_SIANO_DENVER_1530] = { + .name = "Siano Denver (ATSC-M/H) Digital Receiver", + .type = SMS_DENVER_1530, + .default_mode = DEVICE_MODE_ATSC, + .crystal = 2400, + }, + [SMS1XXX_BOARD_SIANO_DENVER_2160] = { + .name = "Siano Denver (TDMB) Digital Receiver", + .type = SMS_DENVER_2160, + .default_mode = DEVICE_MODE_DAB_TDMB, + }, + [SMS1XXX_BOARD_PCTV_77E] = { + .name = "Hauppauge microStick 77e", + .type = SMS_NOVA_B0, + .fw[DEVICE_MODE_DVBT_BDA] = SMS_FW_DVB_NOVA_12MHZ_B0, + .default_mode = DEVICE_MODE_DVBT_BDA, + }, +}; + +struct sms_board *sms_get_board(unsigned id) +{ + BUG_ON(id >= ARRAY_SIZE(sms_boards)); + + return &sms_boards[id]; +} +EXPORT_SYMBOL_GPL(sms_get_board); +static inline void sms_gpio_assign_11xx_default_led_config( + struct smscore_config_gpio *p_gpio_config) { + p_gpio_config->direction = SMS_GPIO_DIRECTION_OUTPUT; + p_gpio_config->inputcharacteristics = + SMS_GPIO_INPUTCHARACTERISTICS_NORMAL; + p_gpio_config->outputdriving = SMS_GPIO_OUTPUTDRIVING_4mA; + p_gpio_config->outputslewrate = SMS_GPIO_OUTPUT_SLEW_RATE_0_45_V_NS; + p_gpio_config->pullupdown = SMS_GPIO_PULLUPDOWN_NONE; +} + +int sms_board_event(struct smscore_device_t *coredev, + enum SMS_BOARD_EVENTS gevent) +{ + struct smscore_config_gpio my_gpio_config; + + sms_gpio_assign_11xx_default_led_config(&my_gpio_config); + + switch (gevent) { + case BOARD_EVENT_POWER_INIT: /* including hotplug */ + break; /* BOARD_EVENT_BIND */ + + case BOARD_EVENT_POWER_SUSPEND: + break; /* BOARD_EVENT_POWER_SUSPEND */ + + case BOARD_EVENT_POWER_RESUME: + break; /* BOARD_EVENT_POWER_RESUME */ + + case BOARD_EVENT_BIND: + break; /* BOARD_EVENT_BIND */ + + case BOARD_EVENT_SCAN_PROG: + break; /* BOARD_EVENT_SCAN_PROG */ + case BOARD_EVENT_SCAN_COMP: + break; /* BOARD_EVENT_SCAN_COMP */ + case BOARD_EVENT_EMERGENCY_WARNING_SIGNAL: + break; /* BOARD_EVENT_EMERGENCY_WARNING_SIGNAL */ + case BOARD_EVENT_FE_LOCK: + break; /* BOARD_EVENT_FE_LOCK */ + case BOARD_EVENT_FE_UNLOCK: + break; /* BOARD_EVENT_FE_UNLOCK */ + case BOARD_EVENT_DEMOD_LOCK: + break; /* BOARD_EVENT_DEMOD_LOCK */ + case BOARD_EVENT_DEMOD_UNLOCK: + break; /* BOARD_EVENT_DEMOD_UNLOCK */ + case BOARD_EVENT_RECEPTION_MAX_4: + break; /* BOARD_EVENT_RECEPTION_MAX_4 */ + case BOARD_EVENT_RECEPTION_3: + break; /* BOARD_EVENT_RECEPTION_3 */ + case BOARD_EVENT_RECEPTION_2: + break; /* BOARD_EVENT_RECEPTION_2 */ + case BOARD_EVENT_RECEPTION_1: + break; /* BOARD_EVENT_RECEPTION_1 */ + case BOARD_EVENT_RECEPTION_LOST_0: + break; /* BOARD_EVENT_RECEPTION_LOST_0 */ + case BOARD_EVENT_MULTIPLEX_OK: + break; /* BOARD_EVENT_MULTIPLEX_OK 
*/ + case BOARD_EVENT_MULTIPLEX_ERRORS: + break; /* BOARD_EVENT_MULTIPLEX_ERRORS */ + + default: + pr_err("Unknown SMS board event\n"); + break; + } + return 0; +} +EXPORT_SYMBOL_GPL(sms_board_event); + +static int sms_set_gpio(struct smscore_device_t *coredev, int pin, int enable) +{ + int lvl, ret; + u32 gpio; + struct smscore_config_gpio gpioconfig = { + .direction = SMS_GPIO_DIRECTION_OUTPUT, + .pullupdown = SMS_GPIO_PULLUPDOWN_NONE, + .inputcharacteristics = SMS_GPIO_INPUTCHARACTERISTICS_NORMAL, + .outputslewrate = SMS_GPIO_OUTPUT_SLEW_RATE_FAST, + .outputdriving = SMS_GPIO_OUTPUTDRIVING_S_4mA, + }; + + if (pin == 0) + return -EINVAL; + + if (pin < 0) { + /* inverted gpio */ + gpio = pin * -1; + lvl = enable ? 0 : 1; + } else { + gpio = pin; + lvl = enable ? 1 : 0; + } + + ret = smscore_configure_gpio(coredev, gpio, &gpioconfig); + if (ret < 0) + return ret; + + return smscore_set_gpio(coredev, gpio, lvl); +} + +int sms_board_setup(struct smscore_device_t *coredev) +{ + int board_id = smscore_get_board_id(coredev); + struct sms_board *board = sms_get_board(board_id); + + switch (board_id) { + case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM: + /* turn off all LEDs */ + sms_set_gpio(coredev, board->led_power, 0); + sms_set_gpio(coredev, board->led_hi, 0); + sms_set_gpio(coredev, board->led_lo, 0); + break; + case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2: + case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD: + /* turn off LNA */ + sms_set_gpio(coredev, board->lna_ctrl, 0); + break; + } + return 0; +} +EXPORT_SYMBOL_GPL(sms_board_setup); + +int sms_board_power(struct smscore_device_t *coredev, int onoff) +{ + int board_id = smscore_get_board_id(coredev); + struct sms_board *board = sms_get_board(board_id); + + switch (board_id) { + case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM: + /* power LED */ + sms_set_gpio(coredev, + board->led_power, onoff ? 1 : 0); + break; + case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2: + case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD: + /* LNA */ + if (!onoff) + sms_set_gpio(coredev, board->lna_ctrl, 0); + break; + } + return 0; +} +EXPORT_SYMBOL_GPL(sms_board_power); + +int sms_board_led_feedback(struct smscore_device_t *coredev, int led) +{ + int board_id = smscore_get_board_id(coredev); + struct sms_board *board = sms_get_board(board_id); + + /* don't touch GPIO if LEDs are already set */ + if (smscore_led_state(coredev, -1) == led) + return 0; + + switch (board_id) { + case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM: + sms_set_gpio(coredev, + board->led_lo, (led & SMS_LED_LO) ? 1 : 0); + sms_set_gpio(coredev, + board->led_hi, (led & SMS_LED_HI) ? 1 : 0); + + smscore_led_state(coredev, led); + break; + } + return 0; +} +EXPORT_SYMBOL_GPL(sms_board_led_feedback); + +int sms_board_lna_control(struct smscore_device_t *coredev, int onoff) +{ + int board_id = smscore_get_board_id(coredev); + struct sms_board *board = sms_get_board(board_id); + + pr_debug("%s: LNA %s\n", __func__, onoff ? "enabled" : "disabled"); + + switch (board_id) { + case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2: + case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD: + sms_set_gpio(coredev, + board->rf_switch, onoff ? 1 : 0); + return sms_set_gpio(coredev, + board->lna_ctrl, onoff ? 
1 : 0); + } + return -EINVAL; +} +EXPORT_SYMBOL_GPL(sms_board_lna_control); + +int sms_board_load_modules(int id) +{ + request_module("smsdvb"); + return 0; +} +EXPORT_SYMBOL_GPL(sms_board_load_modules); diff --git a/drivers/media/common/siano/sms-cards.h b/drivers/media/common/siano/sms-cards.h new file mode 100644 index 0000000000..028c5cb435 --- /dev/null +++ b/drivers/media/common/siano/sms-cards.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Card-specific functions for the Siano SMS1xxx USB dongle + * + * Copyright (c) 2008 Michael Krufky + */ + +#ifndef __SMS_CARDS_H__ +#define __SMS_CARDS_H__ + +#include "smscoreapi.h" + +#include +#include "smsir.h" + +#define SMS_BOARD_UNKNOWN 0 +#define SMS1XXX_BOARD_SIANO_STELLAR 1 +#define SMS1XXX_BOARD_SIANO_NOVA_A 2 +#define SMS1XXX_BOARD_SIANO_NOVA_B 3 +#define SMS1XXX_BOARD_SIANO_VEGA 4 +#define SMS1XXX_BOARD_HAUPPAUGE_CATAMOUNT 5 +#define SMS1XXX_BOARD_HAUPPAUGE_OKEMO_A 6 +#define SMS1XXX_BOARD_HAUPPAUGE_OKEMO_B 7 +#define SMS1XXX_BOARD_HAUPPAUGE_WINDHAM 8 +#define SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD 9 +#define SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2 10 +#define SMS1XXX_BOARD_SIANO_NICE 11 +#define SMS1XXX_BOARD_SIANO_VENICE 12 +#define SMS1XXX_BOARD_SIANO_STELLAR_ROM 13 +#define SMS1XXX_BOARD_ZTE_DVB_DATA_CARD 14 +#define SMS1XXX_BOARD_ONDA_MDTV_DATA_CARD 15 +#define SMS1XXX_BOARD_SIANO_MING 16 +#define SMS1XXX_BOARD_SIANO_PELE 17 +#define SMS1XXX_BOARD_SIANO_RIO 18 +#define SMS1XXX_BOARD_SIANO_DENVER_1530 19 +#define SMS1XXX_BOARD_SIANO_DENVER_2160 20 +#define SMS1XXX_BOARD_PCTV_77E 21 + +struct sms_board_gpio_cfg { + int lna_vhf_exist; + int lna_vhf_ctrl; + int lna_uhf_exist; + int lna_uhf_ctrl; + int lna_uhf_d_ctrl; + int lna_sband_exist; + int lna_sband_ctrl; + int lna_sband_d_ctrl; + int foreign_lna0_ctrl; + int foreign_lna1_ctrl; + int foreign_lna2_ctrl; + int rf_switch_vhf; + int rf_switch_uhf; + int rf_switch_sband; + int leds_power; + int led0; + int led1; + int led2; + int led3; + int led4; + int ir; + int eeprom_wp; + int mrc_sense; + int mrc_pdn_resetn; + int mrc_gp0; /* mrcs spi int */ + int mrc_gp1; + int mrc_gp2; + int mrc_gp3; + int mrc_gp4; + int host_spi_gsp_ts_int; +}; + +struct sms_board { + enum sms_device_type_st type; + char *name, *fw[DEVICE_MODE_MAX]; + struct sms_board_gpio_cfg board_cfg; + char *rc_codes; /* Name of IR codes table */ + + /* gpios */ + int led_power, led_hi, led_lo, lna_ctrl, rf_switch; + + char intf_num; + int default_mode; + unsigned int mtu; + unsigned int crystal; + struct sms_antenna_config_ST *antenna_config; +}; + +struct sms_board *sms_get_board(unsigned id); + +extern struct smscore_device_t *coredev; + +enum SMS_BOARD_EVENTS { + BOARD_EVENT_POWER_INIT, + BOARD_EVENT_POWER_SUSPEND, + BOARD_EVENT_POWER_RESUME, + BOARD_EVENT_BIND, + BOARD_EVENT_SCAN_PROG, + BOARD_EVENT_SCAN_COMP, + BOARD_EVENT_EMERGENCY_WARNING_SIGNAL, + BOARD_EVENT_FE_LOCK, + BOARD_EVENT_FE_UNLOCK, + BOARD_EVENT_DEMOD_LOCK, + BOARD_EVENT_DEMOD_UNLOCK, + BOARD_EVENT_RECEPTION_MAX_4, + BOARD_EVENT_RECEPTION_3, + BOARD_EVENT_RECEPTION_2, + BOARD_EVENT_RECEPTION_1, + BOARD_EVENT_RECEPTION_LOST_0, + BOARD_EVENT_MULTIPLEX_OK, + BOARD_EVENT_MULTIPLEX_ERRORS +}; + +int sms_board_event(struct smscore_device_t *coredev, + enum SMS_BOARD_EVENTS gevent); + +int sms_board_setup(struct smscore_device_t *coredev); + +#define SMS_LED_OFF 0 +#define SMS_LED_LO 1 +#define SMS_LED_HI 2 +int sms_board_led_feedback(struct smscore_device_t *coredev, int led); +int sms_board_power(struct smscore_device_t *coredev, 
int onoff); +int sms_board_lna_control(struct smscore_device_t *coredev, int onoff); + +extern int sms_board_load_modules(int id); + +#endif /* __SMS_CARDS_H__ */ diff --git a/drivers/media/common/siano/smscoreapi.c b/drivers/media/common/siano/smscoreapi.c new file mode 100644 index 0000000000..7d4bc2733f --- /dev/null +++ b/drivers/media/common/siano/smscoreapi.c @@ -0,0 +1,2183 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Siano core API module + * + * This file contains implementation for the interface to sms core component + * + * author: Uri Shkolnik + * + * Copyright (c), 2005-2008 Siano Mobile Silicon, Inc. + */ + +#include "smscoreapi.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "sms-cards.h" +#include "smsir.h" + +struct smscore_device_notifyee_t { + struct list_head entry; + hotplug_t hotplug; +}; + +struct smscore_idlist_t { + struct list_head entry; + int id; + int data_type; +}; + +struct smscore_client_t { + struct list_head entry; + struct smscore_device_t *coredev; + void *context; + struct list_head idlist; + onresponse_t onresponse_handler; + onremove_t onremove_handler; +}; + +static char *siano_msgs[] = { + [MSG_TYPE_BASE_VAL - MSG_TYPE_BASE_VAL] = "MSG_TYPE_BASE_VAL", + [MSG_SMS_GET_VERSION_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_GET_VERSION_REQ", + [MSG_SMS_GET_VERSION_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_GET_VERSION_RES", + [MSG_SMS_MULTI_BRIDGE_CFG - MSG_TYPE_BASE_VAL] = "MSG_SMS_MULTI_BRIDGE_CFG", + [MSG_SMS_GPIO_CONFIG_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_GPIO_CONFIG_REQ", + [MSG_SMS_GPIO_CONFIG_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_GPIO_CONFIG_RES", + [MSG_SMS_GPIO_SET_LEVEL_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_GPIO_SET_LEVEL_REQ", + [MSG_SMS_GPIO_SET_LEVEL_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_GPIO_SET_LEVEL_RES", + [MSG_SMS_GPIO_GET_LEVEL_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_GPIO_GET_LEVEL_REQ", + [MSG_SMS_GPIO_GET_LEVEL_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_GPIO_GET_LEVEL_RES", + [MSG_SMS_EEPROM_BURN_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_EEPROM_BURN_IND", + [MSG_SMS_LOG_ENABLE_CHANGE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_LOG_ENABLE_CHANGE_REQ", + [MSG_SMS_LOG_ENABLE_CHANGE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_LOG_ENABLE_CHANGE_RES", + [MSG_SMS_SET_MAX_TX_MSG_LEN_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SET_MAX_TX_MSG_LEN_REQ", + [MSG_SMS_SET_MAX_TX_MSG_LEN_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SET_MAX_TX_MSG_LEN_RES", + [MSG_SMS_SPI_HALFDUPLEX_TOKEN_HOST_TO_DEVICE - MSG_TYPE_BASE_VAL] = "MSG_SMS_SPI_HALFDUPLEX_TOKEN_HOST_TO_DEVICE", + [MSG_SMS_SPI_HALFDUPLEX_TOKEN_DEVICE_TO_HOST - MSG_TYPE_BASE_VAL] = "MSG_SMS_SPI_HALFDUPLEX_TOKEN_DEVICE_TO_HOST", + [MSG_SMS_BACKGROUND_SCAN_FLAG_CHANGE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_BACKGROUND_SCAN_FLAG_CHANGE_REQ", + [MSG_SMS_BACKGROUND_SCAN_FLAG_CHANGE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_BACKGROUND_SCAN_FLAG_CHANGE_RES", + [MSG_SMS_BACKGROUND_SCAN_SIGNAL_DETECTED_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_BACKGROUND_SCAN_SIGNAL_DETECTED_IND", + [MSG_SMS_BACKGROUND_SCAN_NO_SIGNAL_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_BACKGROUND_SCAN_NO_SIGNAL_IND", + [MSG_SMS_CONFIGURE_RF_SWITCH_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_CONFIGURE_RF_SWITCH_REQ", + [MSG_SMS_CONFIGURE_RF_SWITCH_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_CONFIGURE_RF_SWITCH_RES", + [MSG_SMS_MRC_PATH_DISCONNECT_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_MRC_PATH_DISCONNECT_REQ", + [MSG_SMS_MRC_PATH_DISCONNECT_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_MRC_PATH_DISCONNECT_RES", + [MSG_SMS_RECEIVE_1SEG_THROUGH_FULLSEG_REQ - 
MSG_TYPE_BASE_VAL] = "MSG_SMS_RECEIVE_1SEG_THROUGH_FULLSEG_REQ", + [MSG_SMS_RECEIVE_1SEG_THROUGH_FULLSEG_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_RECEIVE_1SEG_THROUGH_FULLSEG_RES", + [MSG_SMS_RECEIVE_VHF_VIA_VHF_INPUT_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_RECEIVE_VHF_VIA_VHF_INPUT_REQ", + [MSG_SMS_RECEIVE_VHF_VIA_VHF_INPUT_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_RECEIVE_VHF_VIA_VHF_INPUT_RES", + [MSG_WR_REG_RFT_REQ - MSG_TYPE_BASE_VAL] = "MSG_WR_REG_RFT_REQ", + [MSG_WR_REG_RFT_RES - MSG_TYPE_BASE_VAL] = "MSG_WR_REG_RFT_RES", + [MSG_RD_REG_RFT_REQ - MSG_TYPE_BASE_VAL] = "MSG_RD_REG_RFT_REQ", + [MSG_RD_REG_RFT_RES - MSG_TYPE_BASE_VAL] = "MSG_RD_REG_RFT_RES", + [MSG_RD_REG_ALL_RFT_REQ - MSG_TYPE_BASE_VAL] = "MSG_RD_REG_ALL_RFT_REQ", + [MSG_RD_REG_ALL_RFT_RES - MSG_TYPE_BASE_VAL] = "MSG_RD_REG_ALL_RFT_RES", + [MSG_HELP_INT - MSG_TYPE_BASE_VAL] = "MSG_HELP_INT", + [MSG_RUN_SCRIPT_INT - MSG_TYPE_BASE_VAL] = "MSG_RUN_SCRIPT_INT", + [MSG_SMS_EWS_INBAND_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_EWS_INBAND_REQ", + [MSG_SMS_EWS_INBAND_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_EWS_INBAND_RES", + [MSG_SMS_RFS_SELECT_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_RFS_SELECT_REQ", + [MSG_SMS_RFS_SELECT_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_RFS_SELECT_RES", + [MSG_SMS_MB_GET_VER_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_MB_GET_VER_REQ", + [MSG_SMS_MB_GET_VER_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_MB_GET_VER_RES", + [MSG_SMS_MB_WRITE_CFGFILE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_MB_WRITE_CFGFILE_REQ", + [MSG_SMS_MB_WRITE_CFGFILE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_MB_WRITE_CFGFILE_RES", + [MSG_SMS_MB_READ_CFGFILE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_MB_READ_CFGFILE_REQ", + [MSG_SMS_MB_READ_CFGFILE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_MB_READ_CFGFILE_RES", + [MSG_SMS_RD_MEM_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_RD_MEM_REQ", + [MSG_SMS_RD_MEM_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_RD_MEM_RES", + [MSG_SMS_WR_MEM_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_WR_MEM_REQ", + [MSG_SMS_WR_MEM_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_WR_MEM_RES", + [MSG_SMS_UPDATE_MEM_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_UPDATE_MEM_REQ", + [MSG_SMS_UPDATE_MEM_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_UPDATE_MEM_RES", + [MSG_SMS_ISDBT_ENABLE_FULL_PARAMS_SET_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_ISDBT_ENABLE_FULL_PARAMS_SET_REQ", + [MSG_SMS_ISDBT_ENABLE_FULL_PARAMS_SET_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_ISDBT_ENABLE_FULL_PARAMS_SET_RES", + [MSG_SMS_RF_TUNE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_RF_TUNE_REQ", + [MSG_SMS_RF_TUNE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_RF_TUNE_RES", + [MSG_SMS_ISDBT_ENABLE_HIGH_MOBILITY_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_ISDBT_ENABLE_HIGH_MOBILITY_REQ", + [MSG_SMS_ISDBT_ENABLE_HIGH_MOBILITY_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_ISDBT_ENABLE_HIGH_MOBILITY_RES", + [MSG_SMS_ISDBT_SB_RECEPTION_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_ISDBT_SB_RECEPTION_REQ", + [MSG_SMS_ISDBT_SB_RECEPTION_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_ISDBT_SB_RECEPTION_RES", + [MSG_SMS_GENERIC_EPROM_WRITE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_GENERIC_EPROM_WRITE_REQ", + [MSG_SMS_GENERIC_EPROM_WRITE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_GENERIC_EPROM_WRITE_RES", + [MSG_SMS_GENERIC_EPROM_READ_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_GENERIC_EPROM_READ_REQ", + [MSG_SMS_GENERIC_EPROM_READ_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_GENERIC_EPROM_READ_RES", + [MSG_SMS_EEPROM_WRITE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_EEPROM_WRITE_REQ", + [MSG_SMS_EEPROM_WRITE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_EEPROM_WRITE_RES", + [MSG_SMS_CUSTOM_READ_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_CUSTOM_READ_REQ", + [MSG_SMS_CUSTOM_READ_RES - MSG_TYPE_BASE_VAL] = 
"MSG_SMS_CUSTOM_READ_RES", + [MSG_SMS_CUSTOM_WRITE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_CUSTOM_WRITE_REQ", + [MSG_SMS_CUSTOM_WRITE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_CUSTOM_WRITE_RES", + [MSG_SMS_INIT_DEVICE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_INIT_DEVICE_REQ", + [MSG_SMS_INIT_DEVICE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_INIT_DEVICE_RES", + [MSG_SMS_ATSC_SET_ALL_IP_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_ATSC_SET_ALL_IP_REQ", + [MSG_SMS_ATSC_SET_ALL_IP_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_ATSC_SET_ALL_IP_RES", + [MSG_SMS_ATSC_START_ENSEMBLE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_ATSC_START_ENSEMBLE_REQ", + [MSG_SMS_ATSC_START_ENSEMBLE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_ATSC_START_ENSEMBLE_RES", + [MSG_SMS_SET_OUTPUT_MODE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SET_OUTPUT_MODE_REQ", + [MSG_SMS_SET_OUTPUT_MODE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SET_OUTPUT_MODE_RES", + [MSG_SMS_ATSC_IP_FILTER_GET_LIST_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_ATSC_IP_FILTER_GET_LIST_REQ", + [MSG_SMS_ATSC_IP_FILTER_GET_LIST_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_ATSC_IP_FILTER_GET_LIST_RES", + [MSG_SMS_SUB_CHANNEL_START_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SUB_CHANNEL_START_REQ", + [MSG_SMS_SUB_CHANNEL_START_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SUB_CHANNEL_START_RES", + [MSG_SMS_SUB_CHANNEL_STOP_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SUB_CHANNEL_STOP_REQ", + [MSG_SMS_SUB_CHANNEL_STOP_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SUB_CHANNEL_STOP_RES", + [MSG_SMS_ATSC_IP_FILTER_ADD_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_ATSC_IP_FILTER_ADD_REQ", + [MSG_SMS_ATSC_IP_FILTER_ADD_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_ATSC_IP_FILTER_ADD_RES", + [MSG_SMS_ATSC_IP_FILTER_REMOVE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_ATSC_IP_FILTER_REMOVE_REQ", + [MSG_SMS_ATSC_IP_FILTER_REMOVE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_ATSC_IP_FILTER_REMOVE_RES", + [MSG_SMS_ATSC_IP_FILTER_REMOVE_ALL_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_ATSC_IP_FILTER_REMOVE_ALL_REQ", + [MSG_SMS_ATSC_IP_FILTER_REMOVE_ALL_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_ATSC_IP_FILTER_REMOVE_ALL_RES", + [MSG_SMS_WAIT_CMD - MSG_TYPE_BASE_VAL] = "MSG_SMS_WAIT_CMD", + [MSG_SMS_ADD_PID_FILTER_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_ADD_PID_FILTER_REQ", + [MSG_SMS_ADD_PID_FILTER_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_ADD_PID_FILTER_RES", + [MSG_SMS_REMOVE_PID_FILTER_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_REMOVE_PID_FILTER_REQ", + [MSG_SMS_REMOVE_PID_FILTER_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_REMOVE_PID_FILTER_RES", + [MSG_SMS_FAST_INFORMATION_CHANNEL_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_FAST_INFORMATION_CHANNEL_REQ", + [MSG_SMS_FAST_INFORMATION_CHANNEL_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_FAST_INFORMATION_CHANNEL_RES", + [MSG_SMS_DAB_CHANNEL - MSG_TYPE_BASE_VAL] = "MSG_SMS_DAB_CHANNEL", + [MSG_SMS_GET_PID_FILTER_LIST_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_GET_PID_FILTER_LIST_REQ", + [MSG_SMS_GET_PID_FILTER_LIST_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_GET_PID_FILTER_LIST_RES", + [MSG_SMS_POWER_DOWN_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_POWER_DOWN_REQ", + [MSG_SMS_POWER_DOWN_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_POWER_DOWN_RES", + [MSG_SMS_ATSC_SLT_EXIST_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_ATSC_SLT_EXIST_IND", + [MSG_SMS_ATSC_NO_SLT_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_ATSC_NO_SLT_IND", + [MSG_SMS_GET_STATISTICS_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_GET_STATISTICS_REQ", + [MSG_SMS_GET_STATISTICS_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_GET_STATISTICS_RES", + [MSG_SMS_SEND_DUMP - MSG_TYPE_BASE_VAL] = "MSG_SMS_SEND_DUMP", + [MSG_SMS_SCAN_START_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SCAN_START_REQ", + [MSG_SMS_SCAN_START_RES - MSG_TYPE_BASE_VAL] = 
"MSG_SMS_SCAN_START_RES", + [MSG_SMS_SCAN_STOP_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SCAN_STOP_REQ", + [MSG_SMS_SCAN_STOP_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SCAN_STOP_RES", + [MSG_SMS_SCAN_PROGRESS_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_SCAN_PROGRESS_IND", + [MSG_SMS_SCAN_COMPLETE_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_SCAN_COMPLETE_IND", + [MSG_SMS_LOG_ITEM - MSG_TYPE_BASE_VAL] = "MSG_SMS_LOG_ITEM", + [MSG_SMS_DAB_SUBCHANNEL_RECONFIG_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_DAB_SUBCHANNEL_RECONFIG_REQ", + [MSG_SMS_DAB_SUBCHANNEL_RECONFIG_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_DAB_SUBCHANNEL_RECONFIG_RES", + [MSG_SMS_HO_PER_SLICES_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_HO_PER_SLICES_IND", + [MSG_SMS_HO_INBAND_POWER_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_HO_INBAND_POWER_IND", + [MSG_SMS_MANUAL_DEMOD_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_MANUAL_DEMOD_REQ", + [MSG_SMS_HO_TUNE_ON_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_HO_TUNE_ON_REQ", + [MSG_SMS_HO_TUNE_ON_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_HO_TUNE_ON_RES", + [MSG_SMS_HO_TUNE_OFF_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_HO_TUNE_OFF_REQ", + [MSG_SMS_HO_TUNE_OFF_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_HO_TUNE_OFF_RES", + [MSG_SMS_HO_PEEK_FREQ_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_HO_PEEK_FREQ_REQ", + [MSG_SMS_HO_PEEK_FREQ_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_HO_PEEK_FREQ_RES", + [MSG_SMS_HO_PEEK_FREQ_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_HO_PEEK_FREQ_IND", + [MSG_SMS_MB_ATTEN_SET_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_MB_ATTEN_SET_REQ", + [MSG_SMS_MB_ATTEN_SET_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_MB_ATTEN_SET_RES", + [MSG_SMS_ENABLE_STAT_IN_I2C_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_ENABLE_STAT_IN_I2C_REQ", + [MSG_SMS_ENABLE_STAT_IN_I2C_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_ENABLE_STAT_IN_I2C_RES", + [MSG_SMS_SET_ANTENNA_CONFIG_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SET_ANTENNA_CONFIG_REQ", + [MSG_SMS_SET_ANTENNA_CONFIG_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SET_ANTENNA_CONFIG_RES", + [MSG_SMS_GET_STATISTICS_EX_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_GET_STATISTICS_EX_REQ", + [MSG_SMS_GET_STATISTICS_EX_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_GET_STATISTICS_EX_RES", + [MSG_SMS_SLEEP_RESUME_COMP_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_SLEEP_RESUME_COMP_IND", + [MSG_SMS_SWITCH_HOST_INTERFACE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SWITCH_HOST_INTERFACE_REQ", + [MSG_SMS_SWITCH_HOST_INTERFACE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SWITCH_HOST_INTERFACE_RES", + [MSG_SMS_DATA_DOWNLOAD_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_DATA_DOWNLOAD_REQ", + [MSG_SMS_DATA_DOWNLOAD_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_DATA_DOWNLOAD_RES", + [MSG_SMS_DATA_VALIDITY_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_DATA_VALIDITY_REQ", + [MSG_SMS_DATA_VALIDITY_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_DATA_VALIDITY_RES", + [MSG_SMS_SWDOWNLOAD_TRIGGER_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SWDOWNLOAD_TRIGGER_REQ", + [MSG_SMS_SWDOWNLOAD_TRIGGER_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SWDOWNLOAD_TRIGGER_RES", + [MSG_SMS_SWDOWNLOAD_BACKDOOR_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SWDOWNLOAD_BACKDOOR_REQ", + [MSG_SMS_SWDOWNLOAD_BACKDOOR_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SWDOWNLOAD_BACKDOOR_RES", + [MSG_SMS_GET_VERSION_EX_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_GET_VERSION_EX_REQ", + [MSG_SMS_GET_VERSION_EX_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_GET_VERSION_EX_RES", + [MSG_SMS_CLOCK_OUTPUT_CONFIG_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_CLOCK_OUTPUT_CONFIG_REQ", + [MSG_SMS_CLOCK_OUTPUT_CONFIG_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_CLOCK_OUTPUT_CONFIG_RES", + [MSG_SMS_I2C_SET_FREQ_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_I2C_SET_FREQ_REQ", + [MSG_SMS_I2C_SET_FREQ_RES - MSG_TYPE_BASE_VAL] = 
"MSG_SMS_I2C_SET_FREQ_RES", + [MSG_SMS_GENERIC_I2C_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_GENERIC_I2C_REQ", + [MSG_SMS_GENERIC_I2C_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_GENERIC_I2C_RES", + [MSG_SMS_DVBT_BDA_DATA - MSG_TYPE_BASE_VAL] = "MSG_SMS_DVBT_BDA_DATA", + [MSG_SW_RELOAD_REQ - MSG_TYPE_BASE_VAL] = "MSG_SW_RELOAD_REQ", + [MSG_SMS_DATA_MSG - MSG_TYPE_BASE_VAL] = "MSG_SMS_DATA_MSG", + [MSG_TABLE_UPLOAD_REQ - MSG_TYPE_BASE_VAL] = "MSG_TABLE_UPLOAD_REQ", + [MSG_TABLE_UPLOAD_RES - MSG_TYPE_BASE_VAL] = "MSG_TABLE_UPLOAD_RES", + [MSG_SW_RELOAD_START_REQ - MSG_TYPE_BASE_VAL] = "MSG_SW_RELOAD_START_REQ", + [MSG_SW_RELOAD_START_RES - MSG_TYPE_BASE_VAL] = "MSG_SW_RELOAD_START_RES", + [MSG_SW_RELOAD_EXEC_REQ - MSG_TYPE_BASE_VAL] = "MSG_SW_RELOAD_EXEC_REQ", + [MSG_SW_RELOAD_EXEC_RES - MSG_TYPE_BASE_VAL] = "MSG_SW_RELOAD_EXEC_RES", + [MSG_SMS_SPI_INT_LINE_SET_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SPI_INT_LINE_SET_REQ", + [MSG_SMS_SPI_INT_LINE_SET_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SPI_INT_LINE_SET_RES", + [MSG_SMS_GPIO_CONFIG_EX_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_GPIO_CONFIG_EX_REQ", + [MSG_SMS_GPIO_CONFIG_EX_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_GPIO_CONFIG_EX_RES", + [MSG_SMS_WATCHDOG_ACT_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_WATCHDOG_ACT_REQ", + [MSG_SMS_WATCHDOG_ACT_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_WATCHDOG_ACT_RES", + [MSG_SMS_LOOPBACK_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_LOOPBACK_REQ", + [MSG_SMS_LOOPBACK_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_LOOPBACK_RES", + [MSG_SMS_RAW_CAPTURE_START_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_RAW_CAPTURE_START_REQ", + [MSG_SMS_RAW_CAPTURE_START_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_RAW_CAPTURE_START_RES", + [MSG_SMS_RAW_CAPTURE_ABORT_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_RAW_CAPTURE_ABORT_REQ", + [MSG_SMS_RAW_CAPTURE_ABORT_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_RAW_CAPTURE_ABORT_RES", + [MSG_SMS_RAW_CAPTURE_COMPLETE_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_RAW_CAPTURE_COMPLETE_IND", + [MSG_SMS_DATA_PUMP_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_DATA_PUMP_IND", + [MSG_SMS_DATA_PUMP_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_DATA_PUMP_REQ", + [MSG_SMS_DATA_PUMP_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_DATA_PUMP_RES", + [MSG_SMS_FLASH_DL_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_FLASH_DL_REQ", + [MSG_SMS_EXEC_TEST_1_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_EXEC_TEST_1_REQ", + [MSG_SMS_EXEC_TEST_1_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_EXEC_TEST_1_RES", + [MSG_SMS_ENABLE_TS_INTERFACE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_ENABLE_TS_INTERFACE_REQ", + [MSG_SMS_ENABLE_TS_INTERFACE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_ENABLE_TS_INTERFACE_RES", + [MSG_SMS_SPI_SET_BUS_WIDTH_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SPI_SET_BUS_WIDTH_REQ", + [MSG_SMS_SPI_SET_BUS_WIDTH_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SPI_SET_BUS_WIDTH_RES", + [MSG_SMS_SEND_EMM_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SEND_EMM_REQ", + [MSG_SMS_SEND_EMM_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SEND_EMM_RES", + [MSG_SMS_DISABLE_TS_INTERFACE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_DISABLE_TS_INTERFACE_REQ", + [MSG_SMS_DISABLE_TS_INTERFACE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_DISABLE_TS_INTERFACE_RES", + [MSG_SMS_IS_BUF_FREE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_IS_BUF_FREE_REQ", + [MSG_SMS_IS_BUF_FREE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_IS_BUF_FREE_RES", + [MSG_SMS_EXT_ANTENNA_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_EXT_ANTENNA_REQ", + [MSG_SMS_EXT_ANTENNA_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_EXT_ANTENNA_RES", + [MSG_SMS_CMMB_GET_NET_OF_FREQ_REQ_OBSOLETE - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_GET_NET_OF_FREQ_REQ_OBSOLETE", + [MSG_SMS_CMMB_GET_NET_OF_FREQ_RES_OBSOLETE - MSG_TYPE_BASE_VAL] = 
"MSG_SMS_CMMB_GET_NET_OF_FREQ_RES_OBSOLETE", + [MSG_SMS_BATTERY_LEVEL_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_BATTERY_LEVEL_REQ", + [MSG_SMS_BATTERY_LEVEL_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_BATTERY_LEVEL_RES", + [MSG_SMS_CMMB_INJECT_TABLE_REQ_OBSOLETE - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_INJECT_TABLE_REQ_OBSOLETE", + [MSG_SMS_CMMB_INJECT_TABLE_RES_OBSOLETE - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_INJECT_TABLE_RES_OBSOLETE", + [MSG_SMS_FM_RADIO_BLOCK_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_FM_RADIO_BLOCK_IND", + [MSG_SMS_HOST_NOTIFICATION_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_HOST_NOTIFICATION_IND", + [MSG_SMS_CMMB_GET_CONTROL_TABLE_REQ_OBSOLETE - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_GET_CONTROL_TABLE_REQ_OBSOLETE", + [MSG_SMS_CMMB_GET_CONTROL_TABLE_RES_OBSOLETE - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_GET_CONTROL_TABLE_RES_OBSOLETE", + [MSG_SMS_CMMB_GET_NETWORKS_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_GET_NETWORKS_REQ", + [MSG_SMS_CMMB_GET_NETWORKS_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_GET_NETWORKS_RES", + [MSG_SMS_CMMB_START_SERVICE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_START_SERVICE_REQ", + [MSG_SMS_CMMB_START_SERVICE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_START_SERVICE_RES", + [MSG_SMS_CMMB_STOP_SERVICE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_STOP_SERVICE_REQ", + [MSG_SMS_CMMB_STOP_SERVICE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_STOP_SERVICE_RES", + [MSG_SMS_CMMB_ADD_CHANNEL_FILTER_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_ADD_CHANNEL_FILTER_REQ", + [MSG_SMS_CMMB_ADD_CHANNEL_FILTER_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_ADD_CHANNEL_FILTER_RES", + [MSG_SMS_CMMB_REMOVE_CHANNEL_FILTER_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_REMOVE_CHANNEL_FILTER_REQ", + [MSG_SMS_CMMB_REMOVE_CHANNEL_FILTER_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_REMOVE_CHANNEL_FILTER_RES", + [MSG_SMS_CMMB_START_CONTROL_INFO_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_START_CONTROL_INFO_REQ", + [MSG_SMS_CMMB_START_CONTROL_INFO_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_START_CONTROL_INFO_RES", + [MSG_SMS_CMMB_STOP_CONTROL_INFO_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_STOP_CONTROL_INFO_REQ", + [MSG_SMS_CMMB_STOP_CONTROL_INFO_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_STOP_CONTROL_INFO_RES", + [MSG_SMS_ISDBT_TUNE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_ISDBT_TUNE_REQ", + [MSG_SMS_ISDBT_TUNE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_ISDBT_TUNE_RES", + [MSG_SMS_TRANSMISSION_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_TRANSMISSION_IND", + [MSG_SMS_PID_STATISTICS_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_PID_STATISTICS_IND", + [MSG_SMS_POWER_DOWN_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_POWER_DOWN_IND", + [MSG_SMS_POWER_DOWN_CONF - MSG_TYPE_BASE_VAL] = "MSG_SMS_POWER_DOWN_CONF", + [MSG_SMS_POWER_UP_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_POWER_UP_IND", + [MSG_SMS_POWER_UP_CONF - MSG_TYPE_BASE_VAL] = "MSG_SMS_POWER_UP_CONF", + [MSG_SMS_POWER_MODE_SET_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_POWER_MODE_SET_REQ", + [MSG_SMS_POWER_MODE_SET_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_POWER_MODE_SET_RES", + [MSG_SMS_DEBUG_HOST_EVENT_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_DEBUG_HOST_EVENT_REQ", + [MSG_SMS_DEBUG_HOST_EVENT_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_DEBUG_HOST_EVENT_RES", + [MSG_SMS_NEW_CRYSTAL_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_NEW_CRYSTAL_REQ", + [MSG_SMS_NEW_CRYSTAL_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_NEW_CRYSTAL_RES", + [MSG_SMS_CONFIG_SPI_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_CONFIG_SPI_REQ", + [MSG_SMS_CONFIG_SPI_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_CONFIG_SPI_RES", + [MSG_SMS_I2C_SHORT_STAT_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_I2C_SHORT_STAT_IND", + [MSG_SMS_START_IR_REQ - 
MSG_TYPE_BASE_VAL] = "MSG_SMS_START_IR_REQ", + [MSG_SMS_START_IR_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_START_IR_RES", + [MSG_SMS_IR_SAMPLES_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_IR_SAMPLES_IND", + [MSG_SMS_CMMB_CA_SERVICE_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_CA_SERVICE_IND", + [MSG_SMS_SLAVE_DEVICE_DETECTED - MSG_TYPE_BASE_VAL] = "MSG_SMS_SLAVE_DEVICE_DETECTED", + [MSG_SMS_INTERFACE_LOCK_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_INTERFACE_LOCK_IND", + [MSG_SMS_INTERFACE_UNLOCK_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_INTERFACE_UNLOCK_IND", + [MSG_SMS_SEND_ROSUM_BUFF_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SEND_ROSUM_BUFF_REQ", + [MSG_SMS_SEND_ROSUM_BUFF_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SEND_ROSUM_BUFF_RES", + [MSG_SMS_ROSUM_BUFF - MSG_TYPE_BASE_VAL] = "MSG_SMS_ROSUM_BUFF", + [MSG_SMS_SET_AES128_KEY_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SET_AES128_KEY_REQ", + [MSG_SMS_SET_AES128_KEY_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SET_AES128_KEY_RES", + [MSG_SMS_MBBMS_WRITE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_MBBMS_WRITE_REQ", + [MSG_SMS_MBBMS_WRITE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_MBBMS_WRITE_RES", + [MSG_SMS_MBBMS_READ_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_MBBMS_READ_IND", + [MSG_SMS_IQ_STREAM_START_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_IQ_STREAM_START_REQ", + [MSG_SMS_IQ_STREAM_START_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_IQ_STREAM_START_RES", + [MSG_SMS_IQ_STREAM_STOP_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_IQ_STREAM_STOP_REQ", + [MSG_SMS_IQ_STREAM_STOP_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_IQ_STREAM_STOP_RES", + [MSG_SMS_IQ_STREAM_DATA_BLOCK - MSG_TYPE_BASE_VAL] = "MSG_SMS_IQ_STREAM_DATA_BLOCK", + [MSG_SMS_GET_EEPROM_VERSION_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_GET_EEPROM_VERSION_REQ", + [MSG_SMS_GET_EEPROM_VERSION_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_GET_EEPROM_VERSION_RES", + [MSG_SMS_SIGNAL_DETECTED_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_SIGNAL_DETECTED_IND", + [MSG_SMS_NO_SIGNAL_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_NO_SIGNAL_IND", + [MSG_SMS_MRC_SHUTDOWN_SLAVE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_MRC_SHUTDOWN_SLAVE_REQ", + [MSG_SMS_MRC_SHUTDOWN_SLAVE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_MRC_SHUTDOWN_SLAVE_RES", + [MSG_SMS_MRC_BRINGUP_SLAVE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_MRC_BRINGUP_SLAVE_REQ", + [MSG_SMS_MRC_BRINGUP_SLAVE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_MRC_BRINGUP_SLAVE_RES", + [MSG_SMS_EXTERNAL_LNA_CTRL_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_EXTERNAL_LNA_CTRL_REQ", + [MSG_SMS_EXTERNAL_LNA_CTRL_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_EXTERNAL_LNA_CTRL_RES", + [MSG_SMS_SET_PERIODIC_STATISTICS_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SET_PERIODIC_STATISTICS_REQ", + [MSG_SMS_SET_PERIODIC_STATISTICS_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SET_PERIODIC_STATISTICS_RES", + [MSG_SMS_CMMB_SET_AUTO_OUTPUT_TS0_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_SET_AUTO_OUTPUT_TS0_REQ", + [MSG_SMS_CMMB_SET_AUTO_OUTPUT_TS0_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_SET_AUTO_OUTPUT_TS0_RES", + [LOCAL_TUNE - MSG_TYPE_BASE_VAL] = "LOCAL_TUNE", + [LOCAL_IFFT_H_ICI - MSG_TYPE_BASE_VAL] = "LOCAL_IFFT_H_ICI", + [MSG_RESYNC_REQ - MSG_TYPE_BASE_VAL] = "MSG_RESYNC_REQ", + [MSG_SMS_CMMB_GET_MRC_STATISTICS_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_GET_MRC_STATISTICS_REQ", + [MSG_SMS_CMMB_GET_MRC_STATISTICS_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_GET_MRC_STATISTICS_RES", + [MSG_SMS_LOG_EX_ITEM - MSG_TYPE_BASE_VAL] = "MSG_SMS_LOG_EX_ITEM", + [MSG_SMS_DEVICE_DATA_LOSS_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_DEVICE_DATA_LOSS_IND", + [MSG_SMS_MRC_WATCHDOG_TRIGGERED_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_MRC_WATCHDOG_TRIGGERED_IND", + [MSG_SMS_USER_MSG_REQ - MSG_TYPE_BASE_VAL] = 
"MSG_SMS_USER_MSG_REQ", + [MSG_SMS_USER_MSG_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_USER_MSG_RES", + [MSG_SMS_SMART_CARD_INIT_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SMART_CARD_INIT_REQ", + [MSG_SMS_SMART_CARD_INIT_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SMART_CARD_INIT_RES", + [MSG_SMS_SMART_CARD_WRITE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SMART_CARD_WRITE_REQ", + [MSG_SMS_SMART_CARD_WRITE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SMART_CARD_WRITE_RES", + [MSG_SMS_SMART_CARD_READ_IND - MSG_TYPE_BASE_VAL] = "MSG_SMS_SMART_CARD_READ_IND", + [MSG_SMS_TSE_ENABLE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_TSE_ENABLE_REQ", + [MSG_SMS_TSE_ENABLE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_TSE_ENABLE_RES", + [MSG_SMS_CMMB_GET_SHORT_STATISTICS_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_GET_SHORT_STATISTICS_REQ", + [MSG_SMS_CMMB_GET_SHORT_STATISTICS_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_GET_SHORT_STATISTICS_RES", + [MSG_SMS_LED_CONFIG_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_LED_CONFIG_REQ", + [MSG_SMS_LED_CONFIG_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_LED_CONFIG_RES", + [MSG_PWM_ANTENNA_REQ - MSG_TYPE_BASE_VAL] = "MSG_PWM_ANTENNA_REQ", + [MSG_PWM_ANTENNA_RES - MSG_TYPE_BASE_VAL] = "MSG_PWM_ANTENNA_RES", + [MSG_SMS_CMMB_SMD_SN_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_SMD_SN_REQ", + [MSG_SMS_CMMB_SMD_SN_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_SMD_SN_RES", + [MSG_SMS_CMMB_SET_CA_CW_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_SET_CA_CW_REQ", + [MSG_SMS_CMMB_SET_CA_CW_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_SET_CA_CW_RES", + [MSG_SMS_CMMB_SET_CA_SALT_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_SET_CA_SALT_REQ", + [MSG_SMS_CMMB_SET_CA_SALT_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_CMMB_SET_CA_SALT_RES", + [MSG_SMS_NSCD_INIT_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_NSCD_INIT_REQ", + [MSG_SMS_NSCD_INIT_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_NSCD_INIT_RES", + [MSG_SMS_NSCD_PROCESS_SECTION_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_NSCD_PROCESS_SECTION_REQ", + [MSG_SMS_NSCD_PROCESS_SECTION_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_NSCD_PROCESS_SECTION_RES", + [MSG_SMS_DBD_CREATE_OBJECT_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_DBD_CREATE_OBJECT_REQ", + [MSG_SMS_DBD_CREATE_OBJECT_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_DBD_CREATE_OBJECT_RES", + [MSG_SMS_DBD_CONFIGURE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_DBD_CONFIGURE_REQ", + [MSG_SMS_DBD_CONFIGURE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_DBD_CONFIGURE_RES", + [MSG_SMS_DBD_SET_KEYS_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_DBD_SET_KEYS_REQ", + [MSG_SMS_DBD_SET_KEYS_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_DBD_SET_KEYS_RES", + [MSG_SMS_DBD_PROCESS_HEADER_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_DBD_PROCESS_HEADER_REQ", + [MSG_SMS_DBD_PROCESS_HEADER_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_DBD_PROCESS_HEADER_RES", + [MSG_SMS_DBD_PROCESS_DATA_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_DBD_PROCESS_DATA_REQ", + [MSG_SMS_DBD_PROCESS_DATA_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_DBD_PROCESS_DATA_RES", + [MSG_SMS_DBD_PROCESS_GET_DATA_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_DBD_PROCESS_GET_DATA_REQ", + [MSG_SMS_DBD_PROCESS_GET_DATA_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_DBD_PROCESS_GET_DATA_RES", + [MSG_SMS_NSCD_OPEN_SESSION_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_NSCD_OPEN_SESSION_REQ", + [MSG_SMS_NSCD_OPEN_SESSION_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_NSCD_OPEN_SESSION_RES", + [MSG_SMS_SEND_HOST_DATA_TO_DEMUX_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SEND_HOST_DATA_TO_DEMUX_REQ", + [MSG_SMS_SEND_HOST_DATA_TO_DEMUX_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SEND_HOST_DATA_TO_DEMUX_RES", + [MSG_LAST_MSG_TYPE - MSG_TYPE_BASE_VAL] = "MSG_LAST_MSG_TYPE", +}; + +char *smscore_translate_msg(enum msg_types msgtype) +{ + 
int i = msgtype - MSG_TYPE_BASE_VAL; + char *msg; + + if (i < 0 || i >= ARRAY_SIZE(siano_msgs)) + return "Unknown msg type"; + + msg = siano_msgs[i]; + + if (!*msg) + return "Unknown msg type"; + + return msg; +} +EXPORT_SYMBOL_GPL(smscore_translate_msg); + +void smscore_set_board_id(struct smscore_device_t *core, int id) +{ + core->board_id = id; +} + +int smscore_led_state(struct smscore_device_t *core, int led) +{ + if (led >= 0) + core->led_state = led; + return core->led_state; +} +EXPORT_SYMBOL_GPL(smscore_set_board_id); + +int smscore_get_board_id(struct smscore_device_t *core) +{ + return core->board_id; +} +EXPORT_SYMBOL_GPL(smscore_get_board_id); + +struct smscore_registry_entry_t { + struct list_head entry; + char devpath[32]; + int mode; + enum sms_device_type_st type; +}; + +static struct list_head g_smscore_notifyees; +static struct list_head g_smscore_devices; +static DEFINE_MUTEX(g_smscore_deviceslock); + +static struct list_head g_smscore_registry; +static DEFINE_MUTEX(g_smscore_registrylock); + +static int default_mode = DEVICE_MODE_NONE; + +module_param(default_mode, int, 0644); +MODULE_PARM_DESC(default_mode, "default firmware id (device mode)"); + +static struct smscore_registry_entry_t *smscore_find_registry(char *devpath) +{ + struct smscore_registry_entry_t *entry; + struct list_head *next; + + mutex_lock(&g_smscore_registrylock); + for (next = g_smscore_registry.next; + next != &g_smscore_registry; + next = next->next) { + entry = (struct smscore_registry_entry_t *) next; + if (!strncmp(entry->devpath, devpath, sizeof(entry->devpath))) { + mutex_unlock(&g_smscore_registrylock); + return entry; + } + } + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (entry) { + entry->mode = default_mode; + strscpy(entry->devpath, devpath, sizeof(entry->devpath)); + list_add(&entry->entry, &g_smscore_registry); + } else + pr_err("failed to create smscore_registry.\n"); + mutex_unlock(&g_smscore_registrylock); + return entry; +} + +int smscore_registry_getmode(char *devpath) +{ + struct smscore_registry_entry_t *entry; + + entry = smscore_find_registry(devpath); + if (entry) + return entry->mode; + else + pr_err("No registry found.\n"); + + return default_mode; +} +EXPORT_SYMBOL_GPL(smscore_registry_getmode); + +static enum sms_device_type_st smscore_registry_gettype(char *devpath) +{ + struct smscore_registry_entry_t *entry; + + entry = smscore_find_registry(devpath); + if (entry) + return entry->type; + else + pr_err("No registry found.\n"); + + return -EINVAL; +} + +static void smscore_registry_setmode(char *devpath, int mode) +{ + struct smscore_registry_entry_t *entry; + + entry = smscore_find_registry(devpath); + if (entry) + entry->mode = mode; + else + pr_err("No registry found.\n"); +} + +static void smscore_registry_settype(char *devpath, + enum sms_device_type_st type) +{ + struct smscore_registry_entry_t *entry; + + entry = smscore_find_registry(devpath); + if (entry) + entry->type = type; + else + pr_err("No registry found.\n"); +} + + +static void list_add_locked(struct list_head *new, struct list_head *head, + spinlock_t *lock) +{ + unsigned long flags; + + spin_lock_irqsave(lock, flags); + + list_add(new, head); + + spin_unlock_irqrestore(lock, flags); +} + +/* + * register a client callback that called when device plugged in/unplugged + * NOTE: if devices exist callback is called immediately for each device + * + * @param hotplug callback + * + * return: 0 on success, <0 on error. 
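+ *
+ * A minimal usage sketch (the callback and helper names below are
+ * illustrative only, not part of this driver):
+ *
+ *	static int my_hotplug(struct smscore_device_t *coredev,
+ *			      struct device *device, int arrival)
+ *	{
+ *		return arrival ? my_attach_frontend(coredev) : 0;
+ *	}
+ *
+ *	rc = smscore_register_hotplug(my_hotplug);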
+ */
+int smscore_register_hotplug(hotplug_t hotplug)
+{
+	struct smscore_device_notifyee_t *notifyee;
+	struct list_head *next, *first;
+	int rc = 0;
+
+	mutex_lock(&g_smscore_deviceslock);
+	notifyee = kmalloc(sizeof(*notifyee), GFP_KERNEL);
+	if (notifyee) {
+		/* now notify callback about existing devices */
+		first = &g_smscore_devices;
+		for (next = first->next;
+		     next != first && !rc;
+		     next = next->next) {
+			struct smscore_device_t *coredev =
+				(struct smscore_device_t *) next;
+			rc = hotplug(coredev, coredev->device, 1);
+		}
+
+		if (rc >= 0) {
+			notifyee->hotplug = hotplug;
+			list_add(&notifyee->entry, &g_smscore_notifyees);
+		} else
+			kfree(notifyee);
+	} else
+		rc = -ENOMEM;
+
+	mutex_unlock(&g_smscore_deviceslock);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(smscore_register_hotplug);
+
+/*
+ * unregister a client callback that is called when a device is plugged
+ * in/unplugged
+ *
+ * @param hotplug callback
+ *
+ */
+void smscore_unregister_hotplug(hotplug_t hotplug)
+{
+	struct list_head *next, *first;
+
+	mutex_lock(&g_smscore_deviceslock);
+
+	first = &g_smscore_notifyees;
+
+	for (next = first->next; next != first;) {
+		struct smscore_device_notifyee_t *notifyee =
+			(struct smscore_device_notifyee_t *) next;
+		next = next->next;
+
+		if (notifyee->hotplug == hotplug) {
+			list_del(&notifyee->entry);
+			kfree(notifyee);
+		}
+	}
+
+	mutex_unlock(&g_smscore_deviceslock);
+}
+EXPORT_SYMBOL_GPL(smscore_unregister_hotplug);
+
+static void smscore_notify_clients(struct smscore_device_t *coredev)
+{
+	struct smscore_client_t *client;
+
+	/* the client must call smscore_unregister_client from remove handler */
+	while (!list_empty(&coredev->clients)) {
+		client = (struct smscore_client_t *) coredev->clients.next;
+		client->onremove_handler(client->context);
+	}
+}
+
+static int smscore_notify_callbacks(struct smscore_device_t *coredev,
+				    struct device *device, int arrival)
+{
+	struct smscore_device_notifyee_t *elem;
+	int rc = 0;
+
+	/* note: must be called under g_deviceslock */
+
+	list_for_each_entry(elem, &g_smscore_notifyees, entry) {
+		rc = elem->hotplug(coredev, device, arrival);
+		if (rc < 0)
+			break;
+	}
+
+	return rc;
+}
+
+static struct
+smscore_buffer_t *smscore_createbuffer(u8 *buffer, void *common_buffer,
+				       dma_addr_t common_buffer_phys)
+{
+	struct smscore_buffer_t *cb;
+
+	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
+	if (!cb)
+		return NULL;
+
+	cb->p = buffer;
+	cb->offset_in_common = buffer - (u8 *) common_buffer;
+	if (common_buffer_phys)
+		cb->phys = common_buffer_phys + cb->offset_in_common;
+
+	return cb;
+}
+
+/*
+ * creates coredev object for a device, prepares buffers,
+ * creates buffer mappings, notifies registered hotplugs about new device.
+ *
+ * @param params device pointer to struct with device specific parameters
+ *               and handlers
+ * @param coredev pointer to a value that receives created coredev object
+ *
+ * return: 0 on success, <0 on error.
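+ *
+ * A minimal usage sketch for a bus front-end (the field values below are
+ * illustrative only; real values come from the interface driver):
+ *
+ *	struct smsdevice_params_t params = {
+ *		.device              = dev,
+ *		.buffer_size         = 0x1000,
+ *		.num_buffers         = 50,
+ *		.sendrequest_handler = my_sendrequest,
+ *		.device_type         = SMS_NOVA_B0,
+ *		.flags               = SMS_DEVICE_FAMILY2,
+ *		.context             = priv,
+ *	};
+ *
+ *	rc = smscore_register_device(&params, &coredev, 0, NULL);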
+ */ +int smscore_register_device(struct smsdevice_params_t *params, + struct smscore_device_t **coredev, + gfp_t gfp_buf_flags, + void *mdev) +{ + struct smscore_device_t *dev; + u8 *buffer; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + +#ifdef CONFIG_MEDIA_CONTROLLER_DVB + dev->media_dev = mdev; +#endif + dev->gfp_buf_flags = gfp_buf_flags; + + /* init list entry so it could be safe in smscore_unregister_device */ + INIT_LIST_HEAD(&dev->entry); + + /* init queues */ + INIT_LIST_HEAD(&dev->clients); + INIT_LIST_HEAD(&dev->buffers); + + /* init locks */ + spin_lock_init(&dev->clientslock); + spin_lock_init(&dev->bufferslock); + + /* init completion events */ + init_completion(&dev->version_ex_done); + init_completion(&dev->data_download_done); + init_completion(&dev->data_validity_done); + init_completion(&dev->trigger_done); + init_completion(&dev->init_device_done); + init_completion(&dev->reload_start_done); + init_completion(&dev->resume_done); + init_completion(&dev->gpio_configuration_done); + init_completion(&dev->gpio_set_level_done); + init_completion(&dev->gpio_get_level_done); + init_completion(&dev->ir_init_done); + + /* Buffer management */ + init_waitqueue_head(&dev->buffer_mng_waitq); + + /* alloc common buffer */ + dev->common_buffer_size = params->buffer_size * params->num_buffers; + if (params->usb_device) + buffer = kzalloc(dev->common_buffer_size, GFP_KERNEL); + else + buffer = dma_alloc_coherent(params->device, + dev->common_buffer_size, + &dev->common_buffer_phys, + GFP_KERNEL | dev->gfp_buf_flags); + if (!buffer) { + smscore_unregister_device(dev); + return -ENOMEM; + } + dev->common_buffer = buffer; + + /* prepare dma buffers */ + for (; dev->num_buffers < params->num_buffers; + dev->num_buffers++, buffer += params->buffer_size) { + struct smscore_buffer_t *cb; + + cb = smscore_createbuffer(buffer, dev->common_buffer, + dev->common_buffer_phys); + if (!cb) { + smscore_unregister_device(dev); + return -ENOMEM; + } + + smscore_putbuffer(dev, cb); + } + + pr_debug("allocated %d buffers\n", dev->num_buffers); + + dev->mode = DEVICE_MODE_NONE; + dev->board_id = SMS_BOARD_UNKNOWN; + dev->context = params->context; + dev->device = params->device; + dev->usb_device = params->usb_device; + dev->setmode_handler = params->setmode_handler; + dev->detectmode_handler = params->detectmode_handler; + dev->sendrequest_handler = params->sendrequest_handler; + dev->preload_handler = params->preload_handler; + dev->postload_handler = params->postload_handler; + + dev->device_flags = params->flags; + strscpy(dev->devpath, params->devpath, sizeof(dev->devpath)); + + smscore_registry_settype(dev->devpath, params->device_type); + + /* add device to devices list */ + mutex_lock(&g_smscore_deviceslock); + list_add(&dev->entry, &g_smscore_devices); + mutex_unlock(&g_smscore_deviceslock); + + *coredev = dev; + + pr_debug("device %p created\n", dev); + + return 0; +} +EXPORT_SYMBOL_GPL(smscore_register_device); + + +static int smscore_sendrequest_and_wait(struct smscore_device_t *coredev, + void *buffer, size_t size, struct completion *completion) { + int rc; + + if (!completion) + return -EINVAL; + init_completion(completion); + + rc = coredev->sendrequest_handler(coredev->context, buffer, size); + if (rc < 0) { + pr_info("sendrequest returned error %d\n", rc); + return rc; + } + + return wait_for_completion_timeout(completion, + msecs_to_jiffies(SMS_PROTOCOL_MAX_RAOUNDTRIP_MS)) ? 
+			0 : -ETIME;
+}
+
+/*
+ * Starts & enables IR operations
+ *
+ * return: 0 on success, < 0 on error.
+ */
+static int smscore_init_ir(struct smscore_device_t *coredev)
+{
+	int ir_io;
+	int rc;
+	void *buffer;
+
+	coredev->ir.dev = NULL;
+	ir_io = sms_get_board(smscore_get_board_id(coredev))->board_cfg.ir;
+	if (ir_io) { /* only use the IR sub-module if an IR port exists */
+		pr_debug("IR loading\n");
+		rc = sms_ir_init(coredev);
+
+		if (rc != 0)
+			pr_err("Error initializing DTV IR sub-module\n");
+		else {
+			buffer = kmalloc(sizeof(struct sms_msg_data2) +
+						SMS_DMA_ALIGNMENT,
+					 GFP_KERNEL | coredev->gfp_buf_flags);
+			if (buffer) {
+				struct sms_msg_data2 *msg =
+					(struct sms_msg_data2 *)
+					SMS_ALIGN_ADDRESS(buffer);
+
+				SMS_INIT_MSG(&msg->x_msg_header,
+					     MSG_SMS_START_IR_REQ,
+					     sizeof(struct sms_msg_data2));
+				msg->msg_data[0] = coredev->ir.controller;
+				msg->msg_data[1] = coredev->ir.timeout;
+
+				rc = smscore_sendrequest_and_wait(coredev, msg,
+						msg->x_msg_header.msg_length,
+						&coredev->ir_init_done);
+
+				kfree(buffer);
+			} else
+				pr_err("Failed to allocate IR initialization message\n");
+		}
+	} else
+		pr_info("IR port has not been detected\n");
+
+	return 0;
+}
+
+/*
+ * configures device features according to board configuration structure.
+ *
+ * @param coredev pointer to a coredev object returned by
+ *                smscore_register_device
+ *
+ * return: 0 on success, <0 on error.
+ */
+static int smscore_configure_board(struct smscore_device_t *coredev)
+{
+	struct sms_board *board;
+
+	board = sms_get_board(coredev->board_id);
+	if (!board) {
+		pr_err("no board configuration exists.\n");
+		return -EINVAL;
+	}
+
+	if (board->mtu) {
+		struct sms_msg_data mtu_msg;
+		pr_debug("set max transmit unit %d\n", board->mtu);
+
+		mtu_msg.x_msg_header.msg_src_id = 0;
+		mtu_msg.x_msg_header.msg_dst_id = HIF_TASK;
+		mtu_msg.x_msg_header.msg_flags = 0;
+		mtu_msg.x_msg_header.msg_type = MSG_SMS_SET_MAX_TX_MSG_LEN_REQ;
+		mtu_msg.x_msg_header.msg_length = sizeof(mtu_msg);
+		mtu_msg.msg_data[0] = board->mtu;
+
+		coredev->sendrequest_handler(coredev->context, &mtu_msg,
+					     sizeof(mtu_msg));
+	}
+
+	if (board->crystal) {
+		struct sms_msg_data crys_msg;
+		pr_debug("set crystal value %d\n", board->crystal);
+
+		SMS_INIT_MSG(&crys_msg.x_msg_header,
+			     MSG_SMS_NEW_CRYSTAL_REQ,
+			     sizeof(crys_msg));
+		crys_msg.msg_data[0] = board->crystal;
+
+		coredev->sendrequest_handler(coredev->context, &crys_msg,
+					     sizeof(crys_msg));
+	}
+
+	return 0;
+}
+
+/*
+ * sets initial device mode and notifies client hotplugs that device is ready
+ *
+ * @param coredev pointer to a coredev object returned by
+ *                smscore_register_device
+ *
+ * return: 0 on success, <0 on error.
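+ *
+ * Typically called right after smscore_register_device() succeeds, e.g.
+ * (illustrative only):
+ *
+ *	rc = smscore_register_device(&params, &coredev, 0, NULL);
+ *	if (!rc)
+ *		rc = smscore_start_device(coredev);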
+ */ +int smscore_start_device(struct smscore_device_t *coredev) +{ + int rc; + int board_id = smscore_get_board_id(coredev); + int mode = smscore_registry_getmode(coredev->devpath); + + /* Device is initialized as DEVICE_MODE_NONE */ + if (board_id != SMS_BOARD_UNKNOWN && mode == DEVICE_MODE_NONE) + mode = sms_get_board(board_id)->default_mode; + + rc = smscore_set_device_mode(coredev, mode); + if (rc < 0) { + pr_info("set device mode failed , rc %d\n", rc); + return rc; + } + rc = smscore_configure_board(coredev); + if (rc < 0) { + pr_info("configure board failed , rc %d\n", rc); + return rc; + } + + mutex_lock(&g_smscore_deviceslock); + + rc = smscore_notify_callbacks(coredev, coredev->device, 1); + smscore_init_ir(coredev); + + pr_debug("device %p started, rc %d\n", coredev, rc); + + mutex_unlock(&g_smscore_deviceslock); + + return rc; +} +EXPORT_SYMBOL_GPL(smscore_start_device); + + +static int smscore_load_firmware_family2(struct smscore_device_t *coredev, + void *buffer, size_t size) +{ + struct sms_firmware *firmware = (struct sms_firmware *) buffer; + struct sms_msg_data5 *msg; + u32 mem_address, calc_checksum = 0; + u32 i, *ptr; + u8 *payload = firmware->payload; + int rc = 0; + firmware->start_address = le32_to_cpup((__le32 *)&firmware->start_address); + firmware->length = le32_to_cpup((__le32 *)&firmware->length); + + mem_address = firmware->start_address; + + pr_debug("loading FW to addr 0x%x size %d\n", + mem_address, firmware->length); + if (coredev->preload_handler) { + rc = coredev->preload_handler(coredev->context); + if (rc < 0) + return rc; + } + + /* PAGE_SIZE buffer shall be enough and dma aligned */ + msg = kmalloc(PAGE_SIZE, GFP_KERNEL | coredev->gfp_buf_flags); + if (!msg) + return -ENOMEM; + + if (coredev->mode != DEVICE_MODE_NONE) { + pr_debug("sending reload command.\n"); + SMS_INIT_MSG(&msg->x_msg_header, MSG_SW_RELOAD_START_REQ, + sizeof(struct sms_msg_hdr)); + rc = smscore_sendrequest_and_wait(coredev, msg, + msg->x_msg_header.msg_length, + &coredev->reload_start_done); + if (rc < 0) { + pr_err("device reload failed, rc %d\n", rc); + goto exit_fw_download; + } + mem_address = *(u32 *) &payload[20]; + } + + for (i = 0, ptr = (u32 *)firmware->payload; i < firmware->length/4 ; + i++, ptr++) + calc_checksum += *ptr; + + while (size && rc >= 0) { + struct sms_data_download *data_msg = + (struct sms_data_download *) msg; + int payload_size = min_t(int, size, SMS_MAX_PAYLOAD_SIZE); + + SMS_INIT_MSG(&msg->x_msg_header, MSG_SMS_DATA_DOWNLOAD_REQ, + (u16)(sizeof(struct sms_msg_hdr) + + sizeof(u32) + payload_size)); + + data_msg->mem_addr = mem_address; + memcpy(data_msg->payload, payload, payload_size); + + rc = smscore_sendrequest_and_wait(coredev, data_msg, + data_msg->x_msg_header.msg_length, + &coredev->data_download_done); + + payload += payload_size; + size -= payload_size; + mem_address += payload_size; + } + + if (rc < 0) + goto exit_fw_download; + + pr_debug("sending MSG_SMS_DATA_VALIDITY_REQ expecting 0x%x\n", + calc_checksum); + SMS_INIT_MSG(&msg->x_msg_header, MSG_SMS_DATA_VALIDITY_REQ, + sizeof(msg->x_msg_header) + + sizeof(u32) * 3); + msg->msg_data[0] = firmware->start_address; + /* Entry point */ + msg->msg_data[1] = firmware->length; + msg->msg_data[2] = 0; /* Regular checksum*/ + rc = smscore_sendrequest_and_wait(coredev, msg, + msg->x_msg_header.msg_length, + &coredev->data_validity_done); + if (rc < 0) + goto exit_fw_download; + + if (coredev->mode == DEVICE_MODE_NONE) { + pr_debug("sending MSG_SMS_SWDOWNLOAD_TRIGGER_REQ\n"); + 
SMS_INIT_MSG(&msg->x_msg_header, + MSG_SMS_SWDOWNLOAD_TRIGGER_REQ, + sizeof(*msg)); + + msg->msg_data[0] = firmware->start_address; + /* Entry point */ + msg->msg_data[1] = 6; /* Priority */ + msg->msg_data[2] = 0x200; /* Stack size */ + msg->msg_data[3] = 0; /* Parameter */ + msg->msg_data[4] = 4; /* Task ID */ + + rc = smscore_sendrequest_and_wait(coredev, msg, + msg->x_msg_header.msg_length, + &coredev->trigger_done); + } else { + SMS_INIT_MSG(&msg->x_msg_header, MSG_SW_RELOAD_EXEC_REQ, + sizeof(struct sms_msg_hdr)); + rc = coredev->sendrequest_handler(coredev->context, msg, + msg->x_msg_header.msg_length); + } + + if (rc < 0) + goto exit_fw_download; + + /* + * backward compatibility - wait to device_ready_done for + * not more than 400 ms + */ + msleep(400); + +exit_fw_download: + kfree(msg); + + if (coredev->postload_handler) { + pr_debug("rc=%d, postload=0x%p\n", + rc, coredev->postload_handler); + if (rc >= 0) + return coredev->postload_handler(coredev->context); + } + + pr_debug("rc=%d\n", rc); + return rc; +} + +static char *smscore_fw_lkup[][DEVICE_MODE_MAX] = { + [SMS_NOVA_A0] = { + [DEVICE_MODE_DVBT] = SMS_FW_DVB_NOVA_12MHZ, + [DEVICE_MODE_DVBH] = SMS_FW_DVB_NOVA_12MHZ, + [DEVICE_MODE_DAB_TDMB] = SMS_FW_TDMB_NOVA_12MHZ, + [DEVICE_MODE_DVBT_BDA] = SMS_FW_DVB_NOVA_12MHZ, + [DEVICE_MODE_ISDBT] = SMS_FW_ISDBT_NOVA_12MHZ, + [DEVICE_MODE_ISDBT_BDA] = SMS_FW_ISDBT_NOVA_12MHZ, + }, + [SMS_NOVA_B0] = { + [DEVICE_MODE_DVBT] = SMS_FW_DVB_NOVA_12MHZ_B0, + [DEVICE_MODE_DVBH] = SMS_FW_DVB_NOVA_12MHZ_B0, + [DEVICE_MODE_DAB_TDMB] = SMS_FW_TDMB_NOVA_12MHZ_B0, + [DEVICE_MODE_DVBT_BDA] = SMS_FW_DVB_NOVA_12MHZ_B0, + [DEVICE_MODE_ISDBT] = SMS_FW_ISDBT_NOVA_12MHZ_B0, + [DEVICE_MODE_ISDBT_BDA] = SMS_FW_ISDBT_NOVA_12MHZ_B0, + [DEVICE_MODE_FM_RADIO] = SMS_FW_FM_RADIO, + [DEVICE_MODE_FM_RADIO_BDA] = SMS_FW_FM_RADIO, + }, + [SMS_VEGA] = { + [DEVICE_MODE_CMMB] = SMS_FW_CMMB_VEGA_12MHZ, + }, + [SMS_VENICE] = { + [DEVICE_MODE_CMMB] = SMS_FW_CMMB_VENICE_12MHZ, + }, + [SMS_MING] = { + [DEVICE_MODE_CMMB] = SMS_FW_CMMB_MING_APP, + }, + [SMS_PELE] = { + [DEVICE_MODE_ISDBT] = SMS_FW_ISDBT_PELE, + [DEVICE_MODE_ISDBT_BDA] = SMS_FW_ISDBT_PELE, + }, + [SMS_RIO] = { + [DEVICE_MODE_DVBT] = SMS_FW_DVB_RIO, + [DEVICE_MODE_DVBH] = SMS_FW_DVBH_RIO, + [DEVICE_MODE_DVBT_BDA] = SMS_FW_DVB_RIO, + [DEVICE_MODE_ISDBT] = SMS_FW_ISDBT_RIO, + [DEVICE_MODE_ISDBT_BDA] = SMS_FW_ISDBT_RIO, + [DEVICE_MODE_FM_RADIO] = SMS_FW_FM_RADIO_RIO, + [DEVICE_MODE_FM_RADIO_BDA] = SMS_FW_FM_RADIO_RIO, + }, + [SMS_DENVER_1530] = { + [DEVICE_MODE_ATSC] = SMS_FW_ATSC_DENVER, + }, + [SMS_DENVER_2160] = { + [DEVICE_MODE_DAB_TDMB] = SMS_FW_TDMB_DENVER, + }, +}; + +/* + * get firmware file name from one of the two mechanisms : sms_boards or + * smscore_fw_lkup. + * @param coredev pointer to a coredev object returned by + * smscore_register_device + * @param mode requested mode of operation + * @param lookup if 1, always get the fw filename from smscore_fw_lkup + * table. if 0, try first to get from sms_boards + * + * return: 0 on success, <0 on error. 
+ */
+static char *smscore_get_fw_filename(struct smscore_device_t *coredev,
+				      int mode)
+{
+	char **fw;
+	int board_id = smscore_get_board_id(coredev);
+	enum sms_device_type_st type;
+
+	type = smscore_registry_gettype(coredev->devpath);
+
+	/* Prevent looking outside the smscore_fw_lkup table */
+	if (type <= SMS_UNKNOWN_TYPE || type >= SMS_NUM_OF_DEVICE_TYPES)
+		return NULL;
+	if (mode <= DEVICE_MODE_NONE || mode >= DEVICE_MODE_MAX)
+		return NULL;
+
+	pr_debug("trying to get fw name from sms_boards board_id %d mode %d\n",
+		 board_id, mode);
+	fw = sms_get_board(board_id)->fw;
+	if (!fw || !fw[mode]) {
+		pr_debug("cannot find fw name in sms_boards, getting from lookup table mode %d type %d\n",
+			 mode, type);
+		return smscore_fw_lkup[type][mode];
+	}
+
+	return fw[mode];
+}
+
+/*
+ * loads specified firmware into a buffer and calls device loadfirmware_handler
+ *
+ * @param coredev pointer to a coredev object returned by
+ *                smscore_register_device
+ * @param mode requested mode of operation, used to pick the firmware
+ *             file name
+ * @param loadfirmware_handler device handler that loads firmware
+ *
+ * return: 0 on success, <0 on error.
+ */
+static int smscore_load_firmware_from_file(struct smscore_device_t *coredev,
+					   int mode,
+					   loadfirmware_t loadfirmware_handler)
+{
+	int rc = -ENOENT;
+	u8 *fw_buf;
+	u32 fw_buf_size;
+	const struct firmware *fw;
+
+	char *fw_filename = smscore_get_fw_filename(coredev, mode);
+	if (!fw_filename) {
+		pr_err("mode %d not supported on this device\n", mode);
+		return -ENOENT;
+	}
+	pr_debug("Firmware name: %s\n", fw_filename);
+
+	if (!loadfirmware_handler &&
+	    !(coredev->device_flags & SMS_DEVICE_FAMILY2))
+		return -EINVAL;
+
+	rc = request_firmware(&fw, fw_filename, coredev->device);
+	if (rc < 0) {
+		pr_err("failed to open firmware file '%s'\n", fw_filename);
+		return rc;
+	}
+	pr_debug("read fw %s, buffer size=0x%zx\n", fw_filename, fw->size);
+	fw_buf = kmalloc(ALIGN(fw->size + sizeof(struct sms_firmware),
+		      SMS_ALLOC_ALIGNMENT), GFP_KERNEL | coredev->gfp_buf_flags);
+	if (!fw_buf) {
+		pr_err("failed to allocate firmware buffer\n");
+		rc = -ENOMEM;
+	} else {
+		memcpy(fw_buf, fw->data, fw->size);
+		fw_buf_size = fw->size;
+
+		rc = (coredev->device_flags & SMS_DEVICE_FAMILY2) ?
+			smscore_load_firmware_family2(coredev, fw_buf, fw_buf_size)
+			: loadfirmware_handler(coredev->context, fw_buf,
+					       fw_buf_size);
+	}
+
+	kfree(fw_buf);
+	release_firmware(fw);
+
+	return rc;
+}
+
+/*
+ * notifies all clients registered with the device, notifies hotplugs,
+ * frees all buffers and coredev object
+ *
+ * @param coredev pointer to a coredev object returned by
+ *                smscore_register_device
+ */ +void smscore_unregister_device(struct smscore_device_t *coredev) +{ + struct smscore_buffer_t *cb; + int num_buffers = 0; + int retry = 0; + + mutex_lock(&g_smscore_deviceslock); + + /* Release input device (IR) resources */ + sms_ir_exit(coredev); + + smscore_notify_clients(coredev); + smscore_notify_callbacks(coredev, NULL, 0); + + /* at this point all buffers should be back + * onresponse must no longer be called */ + + while (1) { + while (!list_empty(&coredev->buffers)) { + cb = (struct smscore_buffer_t *) coredev->buffers.next; + list_del(&cb->entry); + kfree(cb); + num_buffers++; + } + if (num_buffers == coredev->num_buffers) + break; + if (++retry > 10) { + pr_info("exiting although not all buffers released.\n"); + break; + } + + pr_debug("waiting for %d buffer(s)\n", + coredev->num_buffers - num_buffers); + mutex_unlock(&g_smscore_deviceslock); + msleep(100); + mutex_lock(&g_smscore_deviceslock); + } + + pr_debug("freed %d buffers\n", num_buffers); + + if (coredev->common_buffer) { + if (coredev->usb_device) + kfree(coredev->common_buffer); + else + dma_free_coherent(coredev->device, + coredev->common_buffer_size, + coredev->common_buffer, + coredev->common_buffer_phys); + } + kfree(coredev->fw_buf); + + list_del(&coredev->entry); + kfree(coredev); + + mutex_unlock(&g_smscore_deviceslock); + + pr_debug("device %p destroyed\n", coredev); +} +EXPORT_SYMBOL_GPL(smscore_unregister_device); + +static int smscore_detect_mode(struct smscore_device_t *coredev) +{ + void *buffer = kmalloc(sizeof(struct sms_msg_hdr) + SMS_DMA_ALIGNMENT, + GFP_KERNEL | coredev->gfp_buf_flags); + struct sms_msg_hdr *msg = + (struct sms_msg_hdr *) SMS_ALIGN_ADDRESS(buffer); + int rc; + + if (!buffer) + return -ENOMEM; + + SMS_INIT_MSG(msg, MSG_SMS_GET_VERSION_EX_REQ, + sizeof(struct sms_msg_hdr)); + + rc = smscore_sendrequest_and_wait(coredev, msg, msg->msg_length, + &coredev->version_ex_done); + if (rc == -ETIME) { + pr_err("MSG_SMS_GET_VERSION_EX_REQ failed first try\n"); + + if (wait_for_completion_timeout(&coredev->resume_done, + msecs_to_jiffies(5000))) { + rc = smscore_sendrequest_and_wait( + coredev, msg, msg->msg_length, + &coredev->version_ex_done); + if (rc < 0) + pr_err("MSG_SMS_GET_VERSION_EX_REQ failed second try, rc %d\n", + rc); + } else + rc = -ETIME; + } + + kfree(buffer); + + return rc; +} + +/* + * send init device request and wait for response + * + * @param coredev pointer to a coredev object returned by + * smscore_register_device + * @param mode requested mode of operation + * + * return: 0 on success, <0 on error. + */ +static int smscore_init_device(struct smscore_device_t *coredev, int mode) +{ + void *buffer; + struct sms_msg_data *msg; + int rc = 0; + + buffer = kmalloc(sizeof(struct sms_msg_data) + + SMS_DMA_ALIGNMENT, GFP_KERNEL | coredev->gfp_buf_flags); + if (!buffer) + return -ENOMEM; + + msg = (struct sms_msg_data *)SMS_ALIGN_ADDRESS(buffer); + SMS_INIT_MSG(&msg->x_msg_header, MSG_SMS_INIT_DEVICE_REQ, + sizeof(struct sms_msg_data)); + msg->msg_data[0] = mode; + + rc = smscore_sendrequest_and_wait(coredev, msg, + msg->x_msg_header. msg_length, + &coredev->init_device_done); + + kfree(buffer); + return rc; +} + +/* + * calls device handler to change mode of operation + * NOTE: stellar/usb may disconnect when changing mode + * + * @param coredev pointer to a coredev object returned by + * smscore_register_device + * @param mode requested mode of operation + * + * return: 0 on success, <0 on error. 
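+ *
+ * For example (illustrative only), switching a family-2 device to DVB-T
+ * BDA mode:
+ *
+ *	rc = smscore_set_device_mode(coredev, DEVICE_MODE_DVBT_BDA);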
+ */ +int smscore_set_device_mode(struct smscore_device_t *coredev, int mode) +{ + int rc = 0; + + pr_debug("set device mode to %d\n", mode); + if (coredev->device_flags & SMS_DEVICE_FAMILY2) { + if (mode <= DEVICE_MODE_NONE || mode >= DEVICE_MODE_MAX) { + pr_err("invalid mode specified %d\n", mode); + return -EINVAL; + } + + smscore_registry_setmode(coredev->devpath, mode); + + if (!(coredev->device_flags & SMS_DEVICE_NOT_READY)) { + rc = smscore_detect_mode(coredev); + if (rc < 0) { + pr_err("mode detect failed %d\n", rc); + return rc; + } + } + + if (coredev->mode == mode) { + pr_debug("device mode %d already set\n", mode); + return 0; + } + + if (!(coredev->modes_supported & (1 << mode))) { + rc = smscore_load_firmware_from_file(coredev, + mode, NULL); + if (rc >= 0) + pr_debug("firmware download success\n"); + } else { + pr_debug("mode %d is already supported by running firmware\n", + mode); + } + if (coredev->fw_version >= 0x800) { + rc = smscore_init_device(coredev, mode); + if (rc < 0) + pr_err("device init failed, rc %d.\n", rc); + } + } else { + if (mode <= DEVICE_MODE_NONE || mode >= DEVICE_MODE_MAX) { + pr_err("invalid mode specified %d\n", mode); + return -EINVAL; + } + + smscore_registry_setmode(coredev->devpath, mode); + + if (coredev->detectmode_handler) + coredev->detectmode_handler(coredev->context, + &coredev->mode); + + if (coredev->mode != mode && coredev->setmode_handler) + rc = coredev->setmode_handler(coredev->context, mode); + } + + if (rc >= 0) { + char *buffer; + coredev->mode = mode; + coredev->device_flags &= ~SMS_DEVICE_NOT_READY; + + buffer = kmalloc(sizeof(struct sms_msg_data) + + SMS_DMA_ALIGNMENT, GFP_KERNEL | coredev->gfp_buf_flags); + if (buffer) { + struct sms_msg_data *msg = (struct sms_msg_data *) SMS_ALIGN_ADDRESS(buffer); + + SMS_INIT_MSG(&msg->x_msg_header, MSG_SMS_INIT_DEVICE_REQ, + sizeof(struct sms_msg_data)); + msg->msg_data[0] = mode; + + rc = smscore_sendrequest_and_wait( + coredev, msg, msg->x_msg_header.msg_length, + &coredev->init_device_done); + + kfree(buffer); + } + } + + if (rc < 0) + pr_err("return error code %d.\n", rc); + else + pr_debug("Success setting device mode.\n"); + + return rc; +} + +/* + * calls device handler to get current mode of operation + * + * @param coredev pointer to a coredev object returned by + * smscore_register_device + * + * return: current mode + */ +int smscore_get_device_mode(struct smscore_device_t *coredev) +{ + return coredev->mode; +} +EXPORT_SYMBOL_GPL(smscore_get_device_mode); + +/* + * find client by response id & type within the clients list. + * return client handle or NULL. 
+ * + * @param coredev pointer to a coredev object returned by + * smscore_register_device + * @param data_type client data type (SMS_DONT_CARE for all types) + * @param id client id (SMS_DONT_CARE for all id) + * + */ +static struct +smscore_client_t *smscore_find_client(struct smscore_device_t *coredev, + int data_type, int id) +{ + struct list_head *first; + struct smscore_client_t *client; + unsigned long flags; + struct list_head *firstid; + struct smscore_idlist_t *client_id; + + spin_lock_irqsave(&coredev->clientslock, flags); + first = &coredev->clients; + list_for_each_entry(client, first, entry) { + firstid = &client->idlist; + list_for_each_entry(client_id, firstid, entry) { + if ((client_id->id == id) && + (client_id->data_type == data_type || + (client_id->data_type == 0))) + goto found; + } + } + client = NULL; +found: + spin_unlock_irqrestore(&coredev->clientslock, flags); + return client; +} + +/* + * find client by response id/type, call clients onresponse handler + * return buffer to pool on error + * + * @param coredev pointer to a coredev object returned by + * smscore_register_device + * @param cb pointer to response buffer descriptor + * + */ +void smscore_onresponse(struct smscore_device_t *coredev, + struct smscore_buffer_t *cb) { + struct sms_msg_hdr *phdr = (struct sms_msg_hdr *) ((u8 *) cb->p + + cb->offset); + struct smscore_client_t *client; + int rc = -EBUSY; + static unsigned long last_sample_time; /* = 0; */ + static int data_total; /* = 0; */ + unsigned long time_now = jiffies_to_msecs(jiffies); + + if (!last_sample_time) + last_sample_time = time_now; + + if (time_now - last_sample_time > 10000) { + pr_debug("data rate %d bytes/secs\n", + (int)((data_total * 1000) / + (time_now - last_sample_time))); + + last_sample_time = time_now; + data_total = 0; + } + + data_total += cb->size; + /* Do we need to re-route? */ + if ((phdr->msg_type == MSG_SMS_HO_PER_SLICES_IND) || + (phdr->msg_type == MSG_SMS_TRANSMISSION_IND)) { + if (coredev->mode == DEVICE_MODE_DVBT_BDA) + phdr->msg_dst_id = DVBT_BDA_CONTROL_MSG_ID; + } + + + client = smscore_find_client(coredev, phdr->msg_type, phdr->msg_dst_id); + + /* If no client registered for type & id, + * check for control client where type is not registered */ + if (client) + rc = client->onresponse_handler(client->context, cb); + + if (rc < 0) { + switch (phdr->msg_type) { + case MSG_SMS_ISDBT_TUNE_RES: + break; + case MSG_SMS_RF_TUNE_RES: + break; + case MSG_SMS_SIGNAL_DETECTED_IND: + break; + case MSG_SMS_NO_SIGNAL_IND: + break; + case MSG_SMS_SPI_INT_LINE_SET_RES: + break; + case MSG_SMS_INTERFACE_LOCK_IND: + break; + case MSG_SMS_INTERFACE_UNLOCK_IND: + break; + case MSG_SMS_GET_VERSION_EX_RES: + { + struct sms_version_res *ver = + (struct sms_version_res *) phdr; + pr_debug("Firmware id %d prots 0x%x ver %d.%d\n", + ver->firmware_id, ver->supported_protocols, + ver->rom_ver_major, ver->rom_ver_minor); + + coredev->mode = ver->firmware_id == 255 ? 
+ DEVICE_MODE_NONE : ver->firmware_id; + coredev->modes_supported = ver->supported_protocols; + coredev->fw_version = ver->rom_ver_major << 8 | + ver->rom_ver_minor; + + complete(&coredev->version_ex_done); + break; + } + case MSG_SMS_INIT_DEVICE_RES: + complete(&coredev->init_device_done); + break; + case MSG_SW_RELOAD_START_RES: + complete(&coredev->reload_start_done); + break; + case MSG_SMS_DATA_VALIDITY_RES: + { + struct sms_msg_data *validity = (struct sms_msg_data *) phdr; + + pr_debug("MSG_SMS_DATA_VALIDITY_RES, checksum = 0x%x\n", + validity->msg_data[0]); + complete(&coredev->data_validity_done); + break; + } + case MSG_SMS_DATA_DOWNLOAD_RES: + complete(&coredev->data_download_done); + break; + case MSG_SW_RELOAD_EXEC_RES: + break; + case MSG_SMS_SWDOWNLOAD_TRIGGER_RES: + complete(&coredev->trigger_done); + break; + case MSG_SMS_SLEEP_RESUME_COMP_IND: + complete(&coredev->resume_done); + break; + case MSG_SMS_GPIO_CONFIG_EX_RES: + complete(&coredev->gpio_configuration_done); + break; + case MSG_SMS_GPIO_SET_LEVEL_RES: + complete(&coredev->gpio_set_level_done); + break; + case MSG_SMS_GPIO_GET_LEVEL_RES: + { + u32 *msgdata = (u32 *) phdr; + coredev->gpio_get_res = msgdata[1]; + pr_debug("gpio level %d\n", + coredev->gpio_get_res); + complete(&coredev->gpio_get_level_done); + break; + } + case MSG_SMS_START_IR_RES: + complete(&coredev->ir_init_done); + break; + case MSG_SMS_IR_SAMPLES_IND: + sms_ir_event(coredev, + (const char *) + ((char *)phdr + + sizeof(struct sms_msg_hdr)), + (int)phdr->msg_length + - sizeof(struct sms_msg_hdr)); + break; + + case MSG_SMS_DVBT_BDA_DATA: + /* + * It can be received here, if the frontend is + * tuned into a valid channel and the proper firmware + * is loaded. That happens when the module got removed + * and re-inserted, without powering the device off + */ + break; + + default: + pr_debug("message %s(%d) not handled.\n", + smscore_translate_msg(phdr->msg_type), + phdr->msg_type); + break; + } + smscore_putbuffer(coredev, cb); + } +} +EXPORT_SYMBOL_GPL(smscore_onresponse); + +/* + * return pointer to next free buffer descriptor from core pool + * + * @param coredev pointer to a coredev object returned by + * smscore_register_device + * + * return: pointer to descriptor on success, NULL on error. 
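+ *
+ * A typical receive path in an interface driver looks roughly like this
+ * (illustrative only):
+ *
+ *	cb = smscore_getbuffer(coredev);  (may sleep until a buffer is free)
+ *	cb->offset = 0;
+ *	cb->size = received_length;
+ *	smscore_onresponse(coredev, cb);  (unclaimed buffers go back to the pool)
+ *
+ * A buffer that is not handed to smscore_onresponse() must be returned
+ * with smscore_putbuffer().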
+ */
+
+static struct smscore_buffer_t *get_entry(struct smscore_device_t *coredev)
+{
+	struct smscore_buffer_t *cb = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&coredev->bufferslock, flags);
+	if (!list_empty(&coredev->buffers)) {
+		cb = (struct smscore_buffer_t *) coredev->buffers.next;
+		list_del(&cb->entry);
+	}
+	spin_unlock_irqrestore(&coredev->bufferslock, flags);
+	return cb;
+}
+
+struct smscore_buffer_t *smscore_getbuffer(struct smscore_device_t *coredev)
+{
+	struct smscore_buffer_t *cb = NULL;
+
+	wait_event(coredev->buffer_mng_waitq, (cb = get_entry(coredev)));
+
+	return cb;
+}
+EXPORT_SYMBOL_GPL(smscore_getbuffer);
+
+/*
+ * return buffer descriptor to a pool
+ *
+ * @param coredev pointer to a coredev object returned by
+ *                smscore_register_device
+ * @param cb pointer to a buffer descriptor
+ *
+ */
+void smscore_putbuffer(struct smscore_device_t *coredev,
+		       struct smscore_buffer_t *cb) {
+	wake_up_interruptible(&coredev->buffer_mng_waitq);
+	list_add_locked(&cb->entry, &coredev->buffers, &coredev->bufferslock);
+}
+EXPORT_SYMBOL_GPL(smscore_putbuffer);
+
+static int smscore_validate_client(struct smscore_device_t *coredev,
+				   struct smscore_client_t *client,
+				   int data_type, int id)
+{
+	struct smscore_idlist_t *listentry;
+	struct smscore_client_t *registered_client;
+
+	if (!client) {
+		pr_err("bad parameter.\n");
+		return -EINVAL;
+	}
+	registered_client = smscore_find_client(coredev, data_type, id);
+	if (registered_client == client)
+		return 0;
+
+	if (registered_client) {
+		pr_err("The msg ID is already registered to another client.\n");
+		return -EEXIST;
+	}
+	listentry = kzalloc(sizeof(*listentry), GFP_KERNEL);
+	if (!listentry)
+		return -ENOMEM;
+
+	listentry->id = id;
+	listentry->data_type = data_type;
+	list_add_locked(&listentry->entry, &client->idlist,
+			&coredev->clientslock);
+	return 0;
+}
+
+/*
+ * creates smsclient object, checks that the id is not already taken by
+ * another client
+ *
+ * @param coredev pointer to a coredev object from clients hotplug
+ * @param initial_id all messages with this id will be sent to this client
+ * @param data_type all messages of this type will be sent to this client
+ * @param onresponse_handler client handler that is called to
+ *                           process incoming messages
+ * @param onremove_handler client handler that is called when device is removed
+ * @param context client-specific context
+ * @param client pointer to a value that receives created smsclient object
+ *
+ * return: 0 on success, <0 on error.
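+ *
+ * A minimal usage sketch (the values and handler names below are
+ * illustrative only):
+ *
+ *	struct smsclient_params_t params = {
+ *		.initial_id         = 1,
+ *		.data_type          = MSG_SMS_DVBT_BDA_DATA,
+ *		.onresponse_handler = my_onresponse,
+ *		.onremove_handler   = my_onremove,
+ *		.context            = priv,
+ *	};
+ *
+ *	rc = smscore_register_client(coredev, &params, &client);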
+ */ +int smscore_register_client(struct smscore_device_t *coredev, + struct smsclient_params_t *params, + struct smscore_client_t **client) +{ + struct smscore_client_t *newclient; + /* check that no other channel with same parameters exists */ + if (smscore_find_client(coredev, params->data_type, + params->initial_id)) { + pr_err("Client already exist.\n"); + return -EEXIST; + } + + newclient = kzalloc(sizeof(*newclient), GFP_KERNEL); + if (!newclient) + return -ENOMEM; + + INIT_LIST_HEAD(&newclient->idlist); + newclient->coredev = coredev; + newclient->onresponse_handler = params->onresponse_handler; + newclient->onremove_handler = params->onremove_handler; + newclient->context = params->context; + list_add_locked(&newclient->entry, &coredev->clients, + &coredev->clientslock); + smscore_validate_client(coredev, newclient, params->data_type, + params->initial_id); + *client = newclient; + pr_debug("%p %d %d\n", params->context, params->data_type, + params->initial_id); + + return 0; +} +EXPORT_SYMBOL_GPL(smscore_register_client); + +/* + * frees smsclient object and all subclients associated with it + * + * @param client pointer to smsclient object returned by + * smscore_register_client + * + */ +void smscore_unregister_client(struct smscore_client_t *client) +{ + struct smscore_device_t *coredev = client->coredev; + unsigned long flags; + + spin_lock_irqsave(&coredev->clientslock, flags); + + + while (!list_empty(&client->idlist)) { + struct smscore_idlist_t *identry = + (struct smscore_idlist_t *) client->idlist.next; + list_del(&identry->entry); + kfree(identry); + } + + pr_debug("%p\n", client->context); + + list_del(&client->entry); + kfree(client); + + spin_unlock_irqrestore(&coredev->clientslock, flags); +} +EXPORT_SYMBOL_GPL(smscore_unregister_client); + +/* + * verifies that source id is not taken by another client, + * calls device handler to send requests to the device + * + * @param client pointer to smsclient object returned by + * smscore_register_client + * @param buffer pointer to a request buffer + * @param size size (in bytes) of request buffer + * + * return: 0 on success, <0 on error. + */ +int smsclient_sendrequest(struct smscore_client_t *client, + void *buffer, size_t size) +{ + struct smscore_device_t *coredev; + struct sms_msg_hdr *phdr = (struct sms_msg_hdr *) buffer; + int rc; + + if (!client) { + pr_err("Got NULL client\n"); + return -EINVAL; + } + + coredev = client->coredev; + + /* check that no other channel with same id exists */ + if (!coredev) { + pr_err("Got NULL coredev\n"); + return -EINVAL; + } + + rc = smscore_validate_client(client->coredev, client, 0, + phdr->msg_src_id); + if (rc < 0) + return rc; + + return coredev->sendrequest_handler(coredev->context, buffer, size); +} +EXPORT_SYMBOL_GPL(smsclient_sendrequest); + + +/* old GPIO managements implementation */ +int smscore_configure_gpio(struct smscore_device_t *coredev, u32 pin, + struct smscore_config_gpio *pinconfig) +{ + struct { + struct sms_msg_hdr hdr; + u32 data[6]; + } msg; + + if (coredev->device_flags & SMS_DEVICE_FAMILY2) { + msg.hdr.msg_src_id = DVBT_BDA_CONTROL_MSG_ID; + msg.hdr.msg_dst_id = HIF_TASK; + msg.hdr.msg_flags = 0; + msg.hdr.msg_type = MSG_SMS_GPIO_CONFIG_EX_REQ; + msg.hdr.msg_length = sizeof(msg); + + msg.data[0] = pin; + msg.data[1] = pinconfig->pullupdown; + + /* Convert slew rate for Nova: Fast(0) = 3 / Slow(1) = 0; */ + msg.data[2] = pinconfig->outputslewrate == 0 ? 
3 : 0; + + switch (pinconfig->outputdriving) { + case SMS_GPIO_OUTPUTDRIVING_S_16mA: + msg.data[3] = 7; /* Nova - 16mA */ + break; + case SMS_GPIO_OUTPUTDRIVING_S_12mA: + msg.data[3] = 5; /* Nova - 11mA */ + break; + case SMS_GPIO_OUTPUTDRIVING_S_8mA: + msg.data[3] = 3; /* Nova - 7mA */ + break; + case SMS_GPIO_OUTPUTDRIVING_S_4mA: + default: + msg.data[3] = 2; /* Nova - 4mA */ + break; + } + + msg.data[4] = pinconfig->direction; + msg.data[5] = 0; + } else /* TODO: SMS_DEVICE_FAMILY1 */ + return -EINVAL; + + return coredev->sendrequest_handler(coredev->context, + &msg, sizeof(msg)); +} + +int smscore_set_gpio(struct smscore_device_t *coredev, u32 pin, int level) +{ + struct { + struct sms_msg_hdr hdr; + u32 data[3]; + } msg; + + if (pin > MAX_GPIO_PIN_NUMBER) + return -EINVAL; + + msg.hdr.msg_src_id = DVBT_BDA_CONTROL_MSG_ID; + msg.hdr.msg_dst_id = HIF_TASK; + msg.hdr.msg_flags = 0; + msg.hdr.msg_type = MSG_SMS_GPIO_SET_LEVEL_REQ; + msg.hdr.msg_length = sizeof(msg); + + msg.data[0] = pin; + msg.data[1] = level ? 1 : 0; + msg.data[2] = 0; + + return coredev->sendrequest_handler(coredev->context, + &msg, sizeof(msg)); +} + +/* new GPIO management implementation */ +static int get_gpio_pin_params(u32 pin_num, u32 *p_translatedpin_num, + u32 *p_group_num, u32 *p_group_cfg) { + + *p_group_cfg = 1; + + if (pin_num <= 1) { + *p_translatedpin_num = 0; + *p_group_num = 9; + *p_group_cfg = 2; + } else if (pin_num >= 2 && pin_num <= 6) { + *p_translatedpin_num = 2; + *p_group_num = 0; + *p_group_cfg = 2; + } else if (pin_num >= 7 && pin_num <= 11) { + *p_translatedpin_num = 7; + *p_group_num = 1; + } else if (pin_num >= 12 && pin_num <= 15) { + *p_translatedpin_num = 12; + *p_group_num = 2; + *p_group_cfg = 3; + } else if (pin_num == 16) { + *p_translatedpin_num = 16; + *p_group_num = 23; + } else if (pin_num >= 17 && pin_num <= 24) { + *p_translatedpin_num = 17; + *p_group_num = 3; + } else if (pin_num == 25) { + *p_translatedpin_num = 25; + *p_group_num = 6; + } else if (pin_num >= 26 && pin_num <= 28) { + *p_translatedpin_num = 26; + *p_group_num = 4; + } else if (pin_num == 29) { + *p_translatedpin_num = 29; + *p_group_num = 5; + *p_group_cfg = 2; + } else if (pin_num == 30) { + *p_translatedpin_num = 30; + *p_group_num = 8; + } else if (pin_num == 31) { + *p_translatedpin_num = 31; + *p_group_num = 17; + } else + return -1; + + *p_group_cfg <<= 24; + + return 0; +} + +int smscore_gpio_configure(struct smscore_device_t *coredev, u8 pin_num, + struct smscore_config_gpio *p_gpio_config) { + + u32 total_len; + u32 translatedpin_num = 0; + u32 group_num = 0; + u32 electric_char; + u32 group_cfg; + void *buffer; + int rc; + + struct set_gpio_msg { + struct sms_msg_hdr x_msg_header; + u32 msg_data[6]; + } *p_msg; + + + if (pin_num > MAX_GPIO_PIN_NUMBER) + return -EINVAL; + + if (!p_gpio_config) + return -EINVAL; + + total_len = sizeof(struct sms_msg_hdr) + (sizeof(u32) * 6); + + buffer = kmalloc(total_len + SMS_DMA_ALIGNMENT, + GFP_KERNEL | coredev->gfp_buf_flags); + if (!buffer) + return -ENOMEM; + + p_msg = (struct set_gpio_msg *) SMS_ALIGN_ADDRESS(buffer); + + p_msg->x_msg_header.msg_src_id = DVBT_BDA_CONTROL_MSG_ID; + p_msg->x_msg_header.msg_dst_id = HIF_TASK; + p_msg->x_msg_header.msg_flags = 0; + p_msg->x_msg_header.msg_length = (u16) total_len; + p_msg->msg_data[0] = pin_num; + + if (!(coredev->device_flags & SMS_DEVICE_FAMILY2)) { + p_msg->x_msg_header.msg_type = MSG_SMS_GPIO_CONFIG_REQ; + if (get_gpio_pin_params(pin_num, &translatedpin_num, &group_num, + &group_cfg) != 0) { + rc = -EINVAL; + 
goto free; + } + + p_msg->msg_data[1] = translatedpin_num; + p_msg->msg_data[2] = group_num; + electric_char = (p_gpio_config->pullupdown) + | (p_gpio_config->inputcharacteristics << 2) + | (p_gpio_config->outputslewrate << 3) + | (p_gpio_config->outputdriving << 4); + p_msg->msg_data[3] = electric_char; + p_msg->msg_data[4] = p_gpio_config->direction; + p_msg->msg_data[5] = group_cfg; + } else { + p_msg->x_msg_header.msg_type = MSG_SMS_GPIO_CONFIG_EX_REQ; + p_msg->msg_data[1] = p_gpio_config->pullupdown; + p_msg->msg_data[2] = p_gpio_config->outputslewrate; + p_msg->msg_data[3] = p_gpio_config->outputdriving; + p_msg->msg_data[4] = p_gpio_config->direction; + p_msg->msg_data[5] = 0; + } + + rc = smscore_sendrequest_and_wait(coredev, p_msg, total_len, + &coredev->gpio_configuration_done); + + if (rc != 0) { + if (rc == -ETIME) + pr_err("smscore_gpio_configure timeout\n"); + else + pr_err("smscore_gpio_configure error\n"); + } +free: + kfree(buffer); + + return rc; +} + +int smscore_gpio_set_level(struct smscore_device_t *coredev, u8 pin_num, + u8 new_level) { + + u32 total_len; + int rc; + void *buffer; + + struct set_gpio_msg { + struct sms_msg_hdr x_msg_header; + u32 msg_data[3]; /* keep it 3 ! */ + } *p_msg; + + if ((new_level > 1) || (pin_num > MAX_GPIO_PIN_NUMBER)) + return -EINVAL; + + total_len = sizeof(struct sms_msg_hdr) + + (3 * sizeof(u32)); /* keep it 3 ! */ + + buffer = kmalloc(total_len + SMS_DMA_ALIGNMENT, + GFP_KERNEL | coredev->gfp_buf_flags); + if (!buffer) + return -ENOMEM; + + p_msg = (struct set_gpio_msg *) SMS_ALIGN_ADDRESS(buffer); + + p_msg->x_msg_header.msg_src_id = DVBT_BDA_CONTROL_MSG_ID; + p_msg->x_msg_header.msg_dst_id = HIF_TASK; + p_msg->x_msg_header.msg_flags = 0; + p_msg->x_msg_header.msg_type = MSG_SMS_GPIO_SET_LEVEL_REQ; + p_msg->x_msg_header.msg_length = (u16) total_len; + p_msg->msg_data[0] = pin_num; + p_msg->msg_data[1] = new_level; + + /* Send message to SMS */ + rc = smscore_sendrequest_and_wait(coredev, p_msg, total_len, + &coredev->gpio_set_level_done); + + if (rc != 0) { + if (rc == -ETIME) + pr_err("smscore_gpio_set_level timeout\n"); + else + pr_err("smscore_gpio_set_level error\n"); + } + kfree(buffer); + + return rc; +} + +int smscore_gpio_get_level(struct smscore_device_t *coredev, u8 pin_num, + u8 *level) { + + u32 total_len; + int rc; + void *buffer; + + struct set_gpio_msg { + struct sms_msg_hdr x_msg_header; + u32 msg_data[2]; + } *p_msg; + + + if (pin_num > MAX_GPIO_PIN_NUMBER) + return -EINVAL; + + total_len = sizeof(struct sms_msg_hdr) + (2 * sizeof(u32)); + + buffer = kmalloc(total_len + SMS_DMA_ALIGNMENT, + GFP_KERNEL | coredev->gfp_buf_flags); + if (!buffer) + return -ENOMEM; + + p_msg = (struct set_gpio_msg *) SMS_ALIGN_ADDRESS(buffer); + + p_msg->x_msg_header.msg_src_id = DVBT_BDA_CONTROL_MSG_ID; + p_msg->x_msg_header.msg_dst_id = HIF_TASK; + p_msg->x_msg_header.msg_flags = 0; + p_msg->x_msg_header.msg_type = MSG_SMS_GPIO_GET_LEVEL_REQ; + p_msg->x_msg_header.msg_length = (u16) total_len; + p_msg->msg_data[0] = pin_num; + p_msg->msg_data[1] = 0; + + /* Send message to SMS */ + rc = smscore_sendrequest_and_wait(coredev, p_msg, total_len, + &coredev->gpio_get_level_done); + + if (rc != 0) { + if (rc == -ETIME) + pr_err("smscore_gpio_get_level timeout\n"); + else + pr_err("smscore_gpio_get_level error\n"); + } + kfree(buffer); + + /* Its a race between other gpio_get_level() and the copy of the single + * global 'coredev->gpio_get_res' to the function's variable 'level' + */ + *level = coredev->gpio_get_res; + + return rc; +} + 
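+/*
+ * Usage sketch for the GPIO helpers above (illustrative only; the pin
+ * number is hypothetical and the field values assume the defines from
+ * smscoreapi.h):
+ *
+ *	struct smscore_config_gpio cfg = {
+ *		.direction     = SMS_GPIO_DIRECTION_OUTPUT,
+ *		.outputdriving = SMS_GPIO_OUTPUTDRIVING_S_4mA,
+ *	};
+ *
+ *	smscore_gpio_configure(coredev, 5, &cfg);
+ *	smscore_gpio_set_level(coredev, 5, 1);
+ */
+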
+static int __init smscore_module_init(void)
+{
+	INIT_LIST_HEAD(&g_smscore_notifyees);
+	INIT_LIST_HEAD(&g_smscore_devices);
+	INIT_LIST_HEAD(&g_smscore_registry);
+
+	return 0;
+}
+
+static void __exit smscore_module_exit(void)
+{
+	mutex_lock(&g_smscore_deviceslock);
+	while (!list_empty(&g_smscore_notifyees)) {
+		struct smscore_device_notifyee_t *notifyee =
+			(struct smscore_device_notifyee_t *)
+				g_smscore_notifyees.next;
+
+		list_del(&notifyee->entry);
+		kfree(notifyee);
+	}
+	mutex_unlock(&g_smscore_deviceslock);
+
+	mutex_lock(&g_smscore_registrylock);
+	while (!list_empty(&g_smscore_registry)) {
+		struct smscore_registry_entry_t *entry =
+			(struct smscore_registry_entry_t *)
+				g_smscore_registry.next;
+
+		list_del(&entry->entry);
+		kfree(entry);
+	}
+	mutex_unlock(&g_smscore_registrylock);
+
+	pr_debug("\n");
+}
+
+module_init(smscore_module_init);
+module_exit(smscore_module_exit);
+
+MODULE_DESCRIPTION("Siano MDTV Core module");
+MODULE_AUTHOR("Siano Mobile Silicon, Inc. (uris@siano-ms.com)");
+MODULE_LICENSE("GPL");
+
+/* This should match what's defined at smscoreapi.h */
+MODULE_FIRMWARE(SMS_FW_ATSC_DENVER);
+MODULE_FIRMWARE(SMS_FW_CMMB_MING_APP);
+MODULE_FIRMWARE(SMS_FW_CMMB_VEGA_12MHZ);
+MODULE_FIRMWARE(SMS_FW_CMMB_VENICE_12MHZ);
+MODULE_FIRMWARE(SMS_FW_DVBH_RIO);
+MODULE_FIRMWARE(SMS_FW_DVB_NOVA_12MHZ_B0);
+MODULE_FIRMWARE(SMS_FW_DVB_NOVA_12MHZ);
+MODULE_FIRMWARE(SMS_FW_DVB_RIO);
+MODULE_FIRMWARE(SMS_FW_FM_RADIO);
+MODULE_FIRMWARE(SMS_FW_FM_RADIO_RIO);
+MODULE_FIRMWARE(SMS_FW_DVBT_HCW_55XXX);
+MODULE_FIRMWARE(SMS_FW_ISDBT_HCW_55XXX);
+MODULE_FIRMWARE(SMS_FW_ISDBT_NOVA_12MHZ_B0);
+MODULE_FIRMWARE(SMS_FW_ISDBT_NOVA_12MHZ);
+MODULE_FIRMWARE(SMS_FW_ISDBT_PELE);
+MODULE_FIRMWARE(SMS_FW_ISDBT_RIO);
+MODULE_FIRMWARE(SMS_FW_DVBT_NOVA_A);
+MODULE_FIRMWARE(SMS_FW_DVBT_NOVA_B);
+MODULE_FIRMWARE(SMS_FW_DVBT_STELLAR);
+MODULE_FIRMWARE(SMS_FW_TDMB_DENVER);
+MODULE_FIRMWARE(SMS_FW_TDMB_NOVA_12MHZ_B0);
+MODULE_FIRMWARE(SMS_FW_TDMB_NOVA_12MHZ);
diff --git a/drivers/media/common/siano/smscoreapi.h b/drivers/media/common/siano/smscoreapi.h
new file mode 100644
index 0000000000..f8789ee0d5
--- /dev/null
+++ b/drivers/media/common/siano/smscoreapi.h
@@ -0,0 +1,1168 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/****************************************************************
+
+Siano Mobile Silicon, Inc.
+MDTV receiver kernel modules.
+Copyright (C) 2006-2008, Uri Shkolnik, Anatoly Greenblat
+
+
+****************************************************************/
+
+#ifndef __SMS_CORE_API_H__
+#define __SMS_CORE_API_H__
+
+#define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__
+
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <linux/wait.h>
+#include <linux/timer.h>
+
+#include <asm/page.h>
+
+#include <media/media-device.h>
+
+#include "smsir.h"
+
+/*
+ * Define the firmware names used by the driver.
+ * Those should match what's used at smscoreapi.c and sms-cards.c + * including the MODULE_FIRMWARE() macros at the end of smscoreapi.c + */ +#define SMS_FW_ATSC_DENVER "atsc_denver.inp" +#define SMS_FW_CMMB_MING_APP "cmmb_ming_app.inp" +#define SMS_FW_CMMB_VEGA_12MHZ "cmmb_vega_12mhz.inp" +#define SMS_FW_CMMB_VENICE_12MHZ "cmmb_venice_12mhz.inp" +#define SMS_FW_DVBH_RIO "dvbh_rio.inp" +#define SMS_FW_DVB_NOVA_12MHZ_B0 "dvb_nova_12mhz_b0.inp" +#define SMS_FW_DVB_NOVA_12MHZ "dvb_nova_12mhz.inp" +#define SMS_FW_DVB_RIO "dvb_rio.inp" +#define SMS_FW_FM_RADIO "fm_radio.inp" +#define SMS_FW_FM_RADIO_RIO "fm_radio_rio.inp" +#define SMS_FW_DVBT_HCW_55XXX "sms1xxx-hcw-55xxx-dvbt-02.fw" +#define SMS_FW_ISDBT_HCW_55XXX "sms1xxx-hcw-55xxx-isdbt-02.fw" +#define SMS_FW_ISDBT_NOVA_12MHZ_B0 "isdbt_nova_12mhz_b0.inp" +#define SMS_FW_ISDBT_NOVA_12MHZ "isdbt_nova_12mhz.inp" +#define SMS_FW_ISDBT_PELE "isdbt_pele.inp" +#define SMS_FW_ISDBT_RIO "isdbt_rio.inp" +#define SMS_FW_DVBT_NOVA_A "sms1xxx-nova-a-dvbt-01.fw" +#define SMS_FW_DVBT_NOVA_B "sms1xxx-nova-b-dvbt-01.fw" +#define SMS_FW_DVBT_STELLAR "sms1xxx-stellar-dvbt-01.fw" +#define SMS_FW_TDMB_DENVER "tdmb_denver.inp" +#define SMS_FW_TDMB_NOVA_12MHZ_B0 "tdmb_nova_12mhz_b0.inp" +#define SMS_FW_TDMB_NOVA_12MHZ "tdmb_nova_12mhz.inp" + +#define SMS_PROTOCOL_MAX_RAOUNDTRIP_MS (10000) +#define SMS_ALLOC_ALIGNMENT 128 +#define SMS_DMA_ALIGNMENT 16 +#define SMS_ALIGN_ADDRESS(addr) \ + ((((uintptr_t)(addr)) + (SMS_DMA_ALIGNMENT-1)) & ~(SMS_DMA_ALIGNMENT-1)) + +#define SMS_DEVICE_FAMILY1 0 +#define SMS_DEVICE_FAMILY2 1 +#define SMS_ROM_NO_RESPONSE 2 +#define SMS_DEVICE_NOT_READY 0x8000000 + +enum sms_device_type_st { + SMS_UNKNOWN_TYPE = -1, + SMS_STELLAR = 0, + SMS_NOVA_A0, + SMS_NOVA_B0, + SMS_VEGA, + SMS_VENICE, + SMS_MING, + SMS_PELE, + SMS_RIO, + SMS_DENVER_1530, + SMS_DENVER_2160, + SMS_NUM_OF_DEVICE_TYPES +}; + +enum sms_power_mode_st { + SMS_POWER_MODE_ACTIVE, + SMS_POWER_MODE_SUSPENDED +}; + +struct smscore_device_t; +struct smscore_client_t; +struct smscore_buffer_t; + +typedef int (*hotplug_t)(struct smscore_device_t *coredev, + struct device *device, int arrival); + +typedef int (*setmode_t)(void *context, int mode); +typedef void (*detectmode_t)(void *context, int *mode); +typedef int (*sendrequest_t)(void *context, void *buffer, size_t size); +typedef int (*loadfirmware_t)(void *context, void *buffer, size_t size); +typedef int (*preload_t)(void *context); +typedef int (*postload_t)(void *context); + +typedef int (*onresponse_t)(void *context, struct smscore_buffer_t *cb); +typedef void (*onremove_t)(void *context); + +struct smscore_buffer_t { + /* public members, once passed to clients can be changed freely */ + struct list_head entry; + int size; + int offset; + + /* private members, read-only for clients */ + void *p; + dma_addr_t phys; + unsigned long offset_in_common; +}; + +struct smsdevice_params_t { + struct device *device; + struct usb_device *usb_device; + + int buffer_size; + int num_buffers; + + char devpath[32]; + unsigned long flags; + + setmode_t setmode_handler; + detectmode_t detectmode_handler; + sendrequest_t sendrequest_handler; + preload_t preload_handler; + postload_t postload_handler; + + void *context; + enum sms_device_type_st device_type; +}; + +struct smsclient_params_t { + int initial_id; + int data_type; + onresponse_t onresponse_handler; + onremove_t onremove_handler; + void *context; +}; + +struct smscore_device_t { + struct list_head entry; + + struct list_head clients; + struct list_head subclients; + spinlock_t 
clientslock; + + struct list_head buffers; + spinlock_t bufferslock; + int num_buffers; + + void *common_buffer; + int common_buffer_size; + dma_addr_t common_buffer_phys; + + void *context; + struct device *device; + struct usb_device *usb_device; + + char devpath[32]; + unsigned long device_flags; + + setmode_t setmode_handler; + detectmode_t detectmode_handler; + sendrequest_t sendrequest_handler; + preload_t preload_handler; + postload_t postload_handler; + + int mode, modes_supported; + + gfp_t gfp_buf_flags; + + /* host <--> device messages */ + struct completion version_ex_done, data_download_done, trigger_done; + struct completion data_validity_done, device_ready_done; + struct completion init_device_done, reload_start_done, resume_done; + struct completion gpio_configuration_done, gpio_set_level_done; + struct completion gpio_get_level_done, ir_init_done; + + /* Buffer management */ + wait_queue_head_t buffer_mng_waitq; + + /* GPIO */ + int gpio_get_res; + + /* Target hardware board */ + int board_id; + + /* Firmware */ + u8 *fw_buf; + u32 fw_buf_size; + u16 fw_version; + + /* Infrared (IR) */ + struct ir_t ir; + + /* + * Identify if device is USB or not. + * Used by smsdvb-sysfs to know the root node for debugfs + */ + bool is_usb_device; + + int led_state; + +#if defined(CONFIG_MEDIA_CONTROLLER_DVB) + struct media_device *media_dev; +#endif +}; + +/* GPIO definitions for antenna frequency domain control (SMS8021) */ +#define SMS_ANTENNA_GPIO_0 1 +#define SMS_ANTENNA_GPIO_1 0 + +enum sms_bandwidth_mode { + BW_8_MHZ = 0, + BW_7_MHZ = 1, + BW_6_MHZ = 2, + BW_5_MHZ = 3, + BW_ISDBT_1SEG = 4, + BW_ISDBT_3SEG = 5, + BW_2_MHZ = 6, + BW_FM_RADIO = 7, + BW_ISDBT_13SEG = 8, + BW_1_5_MHZ = 15, + BW_UNKNOWN = 0xffff +}; + + +#define MSG_HDR_FLAG_SPLIT_MSG 4 + +#define MAX_GPIO_PIN_NUMBER 31 + +#define HIF_TASK 11 +#define HIF_TASK_SLAVE 22 +#define HIF_TASK_SLAVE2 33 +#define HIF_TASK_SLAVE3 44 +#define SMS_HOST_LIB 150 +#define DVBT_BDA_CONTROL_MSG_ID 201 + +#define SMS_MAX_PAYLOAD_SIZE 240 +#define SMS_TUNE_TIMEOUT 500 + +enum msg_types { + MSG_TYPE_BASE_VAL = 500, + MSG_SMS_GET_VERSION_REQ = 503, + MSG_SMS_GET_VERSION_RES = 504, + MSG_SMS_MULTI_BRIDGE_CFG = 505, + MSG_SMS_GPIO_CONFIG_REQ = 507, + MSG_SMS_GPIO_CONFIG_RES = 508, + MSG_SMS_GPIO_SET_LEVEL_REQ = 509, + MSG_SMS_GPIO_SET_LEVEL_RES = 510, + MSG_SMS_GPIO_GET_LEVEL_REQ = 511, + MSG_SMS_GPIO_GET_LEVEL_RES = 512, + MSG_SMS_EEPROM_BURN_IND = 513, + MSG_SMS_LOG_ENABLE_CHANGE_REQ = 514, + MSG_SMS_LOG_ENABLE_CHANGE_RES = 515, + MSG_SMS_SET_MAX_TX_MSG_LEN_REQ = 516, + MSG_SMS_SET_MAX_TX_MSG_LEN_RES = 517, + MSG_SMS_SPI_HALFDUPLEX_TOKEN_HOST_TO_DEVICE = 518, + MSG_SMS_SPI_HALFDUPLEX_TOKEN_DEVICE_TO_HOST = 519, + MSG_SMS_BACKGROUND_SCAN_FLAG_CHANGE_REQ = 520, + MSG_SMS_BACKGROUND_SCAN_FLAG_CHANGE_RES = 521, + MSG_SMS_BACKGROUND_SCAN_SIGNAL_DETECTED_IND = 522, + MSG_SMS_BACKGROUND_SCAN_NO_SIGNAL_IND = 523, + MSG_SMS_CONFIGURE_RF_SWITCH_REQ = 524, + MSG_SMS_CONFIGURE_RF_SWITCH_RES = 525, + MSG_SMS_MRC_PATH_DISCONNECT_REQ = 526, + MSG_SMS_MRC_PATH_DISCONNECT_RES = 527, + MSG_SMS_RECEIVE_1SEG_THROUGH_FULLSEG_REQ = 528, + MSG_SMS_RECEIVE_1SEG_THROUGH_FULLSEG_RES = 529, + MSG_SMS_RECEIVE_VHF_VIA_VHF_INPUT_REQ = 530, + MSG_SMS_RECEIVE_VHF_VIA_VHF_INPUT_RES = 531, + MSG_WR_REG_RFT_REQ = 533, + MSG_WR_REG_RFT_RES = 534, + MSG_RD_REG_RFT_REQ = 535, + MSG_RD_REG_RFT_RES = 536, + MSG_RD_REG_ALL_RFT_REQ = 537, + MSG_RD_REG_ALL_RFT_RES = 538, + MSG_HELP_INT = 539, + MSG_RUN_SCRIPT_INT = 540, + MSG_SMS_EWS_INBAND_REQ = 541, + MSG_SMS_EWS_INBAND_RES = 542, 
+ MSG_SMS_RFS_SELECT_REQ = 543, + MSG_SMS_RFS_SELECT_RES = 544, + MSG_SMS_MB_GET_VER_REQ = 545, + MSG_SMS_MB_GET_VER_RES = 546, + MSG_SMS_MB_WRITE_CFGFILE_REQ = 547, + MSG_SMS_MB_WRITE_CFGFILE_RES = 548, + MSG_SMS_MB_READ_CFGFILE_REQ = 549, + MSG_SMS_MB_READ_CFGFILE_RES = 550, + MSG_SMS_RD_MEM_REQ = 552, + MSG_SMS_RD_MEM_RES = 553, + MSG_SMS_WR_MEM_REQ = 554, + MSG_SMS_WR_MEM_RES = 555, + MSG_SMS_UPDATE_MEM_REQ = 556, + MSG_SMS_UPDATE_MEM_RES = 557, + MSG_SMS_ISDBT_ENABLE_FULL_PARAMS_SET_REQ = 558, + MSG_SMS_ISDBT_ENABLE_FULL_PARAMS_SET_RES = 559, + MSG_SMS_RF_TUNE_REQ = 561, + MSG_SMS_RF_TUNE_RES = 562, + MSG_SMS_ISDBT_ENABLE_HIGH_MOBILITY_REQ = 563, + MSG_SMS_ISDBT_ENABLE_HIGH_MOBILITY_RES = 564, + MSG_SMS_ISDBT_SB_RECEPTION_REQ = 565, + MSG_SMS_ISDBT_SB_RECEPTION_RES = 566, + MSG_SMS_GENERIC_EPROM_WRITE_REQ = 567, + MSG_SMS_GENERIC_EPROM_WRITE_RES = 568, + MSG_SMS_GENERIC_EPROM_READ_REQ = 569, + MSG_SMS_GENERIC_EPROM_READ_RES = 570, + MSG_SMS_EEPROM_WRITE_REQ = 571, + MSG_SMS_EEPROM_WRITE_RES = 572, + MSG_SMS_CUSTOM_READ_REQ = 574, + MSG_SMS_CUSTOM_READ_RES = 575, + MSG_SMS_CUSTOM_WRITE_REQ = 576, + MSG_SMS_CUSTOM_WRITE_RES = 577, + MSG_SMS_INIT_DEVICE_REQ = 578, + MSG_SMS_INIT_DEVICE_RES = 579, + MSG_SMS_ATSC_SET_ALL_IP_REQ = 580, + MSG_SMS_ATSC_SET_ALL_IP_RES = 581, + MSG_SMS_ATSC_START_ENSEMBLE_REQ = 582, + MSG_SMS_ATSC_START_ENSEMBLE_RES = 583, + MSG_SMS_SET_OUTPUT_MODE_REQ = 584, + MSG_SMS_SET_OUTPUT_MODE_RES = 585, + MSG_SMS_ATSC_IP_FILTER_GET_LIST_REQ = 586, + MSG_SMS_ATSC_IP_FILTER_GET_LIST_RES = 587, + MSG_SMS_SUB_CHANNEL_START_REQ = 589, + MSG_SMS_SUB_CHANNEL_START_RES = 590, + MSG_SMS_SUB_CHANNEL_STOP_REQ = 591, + MSG_SMS_SUB_CHANNEL_STOP_RES = 592, + MSG_SMS_ATSC_IP_FILTER_ADD_REQ = 593, + MSG_SMS_ATSC_IP_FILTER_ADD_RES = 594, + MSG_SMS_ATSC_IP_FILTER_REMOVE_REQ = 595, + MSG_SMS_ATSC_IP_FILTER_REMOVE_RES = 596, + MSG_SMS_ATSC_IP_FILTER_REMOVE_ALL_REQ = 597, + MSG_SMS_ATSC_IP_FILTER_REMOVE_ALL_RES = 598, + MSG_SMS_WAIT_CMD = 599, + MSG_SMS_ADD_PID_FILTER_REQ = 601, + MSG_SMS_ADD_PID_FILTER_RES = 602, + MSG_SMS_REMOVE_PID_FILTER_REQ = 603, + MSG_SMS_REMOVE_PID_FILTER_RES = 604, + MSG_SMS_FAST_INFORMATION_CHANNEL_REQ = 605, + MSG_SMS_FAST_INFORMATION_CHANNEL_RES = 606, + MSG_SMS_DAB_CHANNEL = 607, + MSG_SMS_GET_PID_FILTER_LIST_REQ = 608, + MSG_SMS_GET_PID_FILTER_LIST_RES = 609, + MSG_SMS_POWER_DOWN_REQ = 610, + MSG_SMS_POWER_DOWN_RES = 611, + MSG_SMS_ATSC_SLT_EXIST_IND = 612, + MSG_SMS_ATSC_NO_SLT_IND = 613, + MSG_SMS_GET_STATISTICS_REQ = 615, + MSG_SMS_GET_STATISTICS_RES = 616, + MSG_SMS_SEND_DUMP = 617, + MSG_SMS_SCAN_START_REQ = 618, + MSG_SMS_SCAN_START_RES = 619, + MSG_SMS_SCAN_STOP_REQ = 620, + MSG_SMS_SCAN_STOP_RES = 621, + MSG_SMS_SCAN_PROGRESS_IND = 622, + MSG_SMS_SCAN_COMPLETE_IND = 623, + MSG_SMS_LOG_ITEM = 624, + MSG_SMS_DAB_SUBCHANNEL_RECONFIG_REQ = 628, + MSG_SMS_DAB_SUBCHANNEL_RECONFIG_RES = 629, + MSG_SMS_HO_PER_SLICES_IND = 630, + MSG_SMS_HO_INBAND_POWER_IND = 631, + MSG_SMS_MANUAL_DEMOD_REQ = 632, + MSG_SMS_HO_TUNE_ON_REQ = 636, + MSG_SMS_HO_TUNE_ON_RES = 637, + MSG_SMS_HO_TUNE_OFF_REQ = 638, + MSG_SMS_HO_TUNE_OFF_RES = 639, + MSG_SMS_HO_PEEK_FREQ_REQ = 640, + MSG_SMS_HO_PEEK_FREQ_RES = 641, + MSG_SMS_HO_PEEK_FREQ_IND = 642, + MSG_SMS_MB_ATTEN_SET_REQ = 643, + MSG_SMS_MB_ATTEN_SET_RES = 644, + MSG_SMS_ENABLE_STAT_IN_I2C_REQ = 649, + MSG_SMS_ENABLE_STAT_IN_I2C_RES = 650, + MSG_SMS_SET_ANTENNA_CONFIG_REQ = 651, + MSG_SMS_SET_ANTENNA_CONFIG_RES = 652, + MSG_SMS_GET_STATISTICS_EX_REQ = 653, + MSG_SMS_GET_STATISTICS_EX_RES = 654, + MSG_SMS_SLEEP_RESUME_COMP_IND = 
655, + MSG_SMS_SWITCH_HOST_INTERFACE_REQ = 656, + MSG_SMS_SWITCH_HOST_INTERFACE_RES = 657, + MSG_SMS_DATA_DOWNLOAD_REQ = 660, + MSG_SMS_DATA_DOWNLOAD_RES = 661, + MSG_SMS_DATA_VALIDITY_REQ = 662, + MSG_SMS_DATA_VALIDITY_RES = 663, + MSG_SMS_SWDOWNLOAD_TRIGGER_REQ = 664, + MSG_SMS_SWDOWNLOAD_TRIGGER_RES = 665, + MSG_SMS_SWDOWNLOAD_BACKDOOR_REQ = 666, + MSG_SMS_SWDOWNLOAD_BACKDOOR_RES = 667, + MSG_SMS_GET_VERSION_EX_REQ = 668, + MSG_SMS_GET_VERSION_EX_RES = 669, + MSG_SMS_CLOCK_OUTPUT_CONFIG_REQ = 670, + MSG_SMS_CLOCK_OUTPUT_CONFIG_RES = 671, + MSG_SMS_I2C_SET_FREQ_REQ = 685, + MSG_SMS_I2C_SET_FREQ_RES = 686, + MSG_SMS_GENERIC_I2C_REQ = 687, + MSG_SMS_GENERIC_I2C_RES = 688, + MSG_SMS_DVBT_BDA_DATA = 693, + MSG_SW_RELOAD_REQ = 697, + MSG_SMS_DATA_MSG = 699, + MSG_TABLE_UPLOAD_REQ = 700, + MSG_TABLE_UPLOAD_RES = 701, + MSG_SW_RELOAD_START_REQ = 702, + MSG_SW_RELOAD_START_RES = 703, + MSG_SW_RELOAD_EXEC_REQ = 704, + MSG_SW_RELOAD_EXEC_RES = 705, + MSG_SMS_SPI_INT_LINE_SET_REQ = 710, + MSG_SMS_SPI_INT_LINE_SET_RES = 711, + MSG_SMS_GPIO_CONFIG_EX_REQ = 712, + MSG_SMS_GPIO_CONFIG_EX_RES = 713, + MSG_SMS_WATCHDOG_ACT_REQ = 716, + MSG_SMS_WATCHDOG_ACT_RES = 717, + MSG_SMS_LOOPBACK_REQ = 718, + MSG_SMS_LOOPBACK_RES = 719, + MSG_SMS_RAW_CAPTURE_START_REQ = 720, + MSG_SMS_RAW_CAPTURE_START_RES = 721, + MSG_SMS_RAW_CAPTURE_ABORT_REQ = 722, + MSG_SMS_RAW_CAPTURE_ABORT_RES = 723, + MSG_SMS_RAW_CAPTURE_COMPLETE_IND = 728, + MSG_SMS_DATA_PUMP_IND = 729, + MSG_SMS_DATA_PUMP_REQ = 730, + MSG_SMS_DATA_PUMP_RES = 731, + MSG_SMS_FLASH_DL_REQ = 732, + MSG_SMS_EXEC_TEST_1_REQ = 734, + MSG_SMS_EXEC_TEST_1_RES = 735, + MSG_SMS_ENABLE_TS_INTERFACE_REQ = 736, + MSG_SMS_ENABLE_TS_INTERFACE_RES = 737, + MSG_SMS_SPI_SET_BUS_WIDTH_REQ = 738, + MSG_SMS_SPI_SET_BUS_WIDTH_RES = 739, + MSG_SMS_SEND_EMM_REQ = 740, + MSG_SMS_SEND_EMM_RES = 741, + MSG_SMS_DISABLE_TS_INTERFACE_REQ = 742, + MSG_SMS_DISABLE_TS_INTERFACE_RES = 743, + MSG_SMS_IS_BUF_FREE_REQ = 744, + MSG_SMS_IS_BUF_FREE_RES = 745, + MSG_SMS_EXT_ANTENNA_REQ = 746, + MSG_SMS_EXT_ANTENNA_RES = 747, + MSG_SMS_CMMB_GET_NET_OF_FREQ_REQ_OBSOLETE = 748, + MSG_SMS_CMMB_GET_NET_OF_FREQ_RES_OBSOLETE = 749, + MSG_SMS_BATTERY_LEVEL_REQ = 750, + MSG_SMS_BATTERY_LEVEL_RES = 751, + MSG_SMS_CMMB_INJECT_TABLE_REQ_OBSOLETE = 752, + MSG_SMS_CMMB_INJECT_TABLE_RES_OBSOLETE = 753, + MSG_SMS_FM_RADIO_BLOCK_IND = 754, + MSG_SMS_HOST_NOTIFICATION_IND = 755, + MSG_SMS_CMMB_GET_CONTROL_TABLE_REQ_OBSOLETE = 756, + MSG_SMS_CMMB_GET_CONTROL_TABLE_RES_OBSOLETE = 757, + MSG_SMS_CMMB_GET_NETWORKS_REQ = 760, + MSG_SMS_CMMB_GET_NETWORKS_RES = 761, + MSG_SMS_CMMB_START_SERVICE_REQ = 762, + MSG_SMS_CMMB_START_SERVICE_RES = 763, + MSG_SMS_CMMB_STOP_SERVICE_REQ = 764, + MSG_SMS_CMMB_STOP_SERVICE_RES = 765, + MSG_SMS_CMMB_ADD_CHANNEL_FILTER_REQ = 768, + MSG_SMS_CMMB_ADD_CHANNEL_FILTER_RES = 769, + MSG_SMS_CMMB_REMOVE_CHANNEL_FILTER_REQ = 770, + MSG_SMS_CMMB_REMOVE_CHANNEL_FILTER_RES = 771, + MSG_SMS_CMMB_START_CONTROL_INFO_REQ = 772, + MSG_SMS_CMMB_START_CONTROL_INFO_RES = 773, + MSG_SMS_CMMB_STOP_CONTROL_INFO_REQ = 774, + MSG_SMS_CMMB_STOP_CONTROL_INFO_RES = 775, + MSG_SMS_ISDBT_TUNE_REQ = 776, + MSG_SMS_ISDBT_TUNE_RES = 777, + MSG_SMS_TRANSMISSION_IND = 782, + MSG_SMS_PID_STATISTICS_IND = 783, + MSG_SMS_POWER_DOWN_IND = 784, + MSG_SMS_POWER_DOWN_CONF = 785, + MSG_SMS_POWER_UP_IND = 786, + MSG_SMS_POWER_UP_CONF = 787, + MSG_SMS_POWER_MODE_SET_REQ = 790, + MSG_SMS_POWER_MODE_SET_RES = 791, + MSG_SMS_DEBUG_HOST_EVENT_REQ = 792, + MSG_SMS_DEBUG_HOST_EVENT_RES = 793, + MSG_SMS_NEW_CRYSTAL_REQ = 794, + 
MSG_SMS_NEW_CRYSTAL_RES = 795, + MSG_SMS_CONFIG_SPI_REQ = 796, + MSG_SMS_CONFIG_SPI_RES = 797, + MSG_SMS_I2C_SHORT_STAT_IND = 798, + MSG_SMS_START_IR_REQ = 800, + MSG_SMS_START_IR_RES = 801, + MSG_SMS_IR_SAMPLES_IND = 802, + MSG_SMS_CMMB_CA_SERVICE_IND = 803, + MSG_SMS_SLAVE_DEVICE_DETECTED = 804, + MSG_SMS_INTERFACE_LOCK_IND = 805, + MSG_SMS_INTERFACE_UNLOCK_IND = 806, + MSG_SMS_SEND_ROSUM_BUFF_REQ = 810, + MSG_SMS_SEND_ROSUM_BUFF_RES = 811, + MSG_SMS_ROSUM_BUFF = 812, + MSG_SMS_SET_AES128_KEY_REQ = 815, + MSG_SMS_SET_AES128_KEY_RES = 816, + MSG_SMS_MBBMS_WRITE_REQ = 817, + MSG_SMS_MBBMS_WRITE_RES = 818, + MSG_SMS_MBBMS_READ_IND = 819, + MSG_SMS_IQ_STREAM_START_REQ = 820, + MSG_SMS_IQ_STREAM_START_RES = 821, + MSG_SMS_IQ_STREAM_STOP_REQ = 822, + MSG_SMS_IQ_STREAM_STOP_RES = 823, + MSG_SMS_IQ_STREAM_DATA_BLOCK = 824, + MSG_SMS_GET_EEPROM_VERSION_REQ = 825, + MSG_SMS_GET_EEPROM_VERSION_RES = 826, + MSG_SMS_SIGNAL_DETECTED_IND = 827, + MSG_SMS_NO_SIGNAL_IND = 828, + MSG_SMS_MRC_SHUTDOWN_SLAVE_REQ = 830, + MSG_SMS_MRC_SHUTDOWN_SLAVE_RES = 831, + MSG_SMS_MRC_BRINGUP_SLAVE_REQ = 832, + MSG_SMS_MRC_BRINGUP_SLAVE_RES = 833, + MSG_SMS_EXTERNAL_LNA_CTRL_REQ = 834, + MSG_SMS_EXTERNAL_LNA_CTRL_RES = 835, + MSG_SMS_SET_PERIODIC_STATISTICS_REQ = 836, + MSG_SMS_SET_PERIODIC_STATISTICS_RES = 837, + MSG_SMS_CMMB_SET_AUTO_OUTPUT_TS0_REQ = 838, + MSG_SMS_CMMB_SET_AUTO_OUTPUT_TS0_RES = 839, + LOCAL_TUNE = 850, + LOCAL_IFFT_H_ICI = 851, + MSG_RESYNC_REQ = 852, + MSG_SMS_CMMB_GET_MRC_STATISTICS_REQ = 853, + MSG_SMS_CMMB_GET_MRC_STATISTICS_RES = 854, + MSG_SMS_LOG_EX_ITEM = 855, + MSG_SMS_DEVICE_DATA_LOSS_IND = 856, + MSG_SMS_MRC_WATCHDOG_TRIGGERED_IND = 857, + MSG_SMS_USER_MSG_REQ = 858, + MSG_SMS_USER_MSG_RES = 859, + MSG_SMS_SMART_CARD_INIT_REQ = 860, + MSG_SMS_SMART_CARD_INIT_RES = 861, + MSG_SMS_SMART_CARD_WRITE_REQ = 862, + MSG_SMS_SMART_CARD_WRITE_RES = 863, + MSG_SMS_SMART_CARD_READ_IND = 864, + MSG_SMS_TSE_ENABLE_REQ = 866, + MSG_SMS_TSE_ENABLE_RES = 867, + MSG_SMS_CMMB_GET_SHORT_STATISTICS_REQ = 868, + MSG_SMS_CMMB_GET_SHORT_STATISTICS_RES = 869, + MSG_SMS_LED_CONFIG_REQ = 870, + MSG_SMS_LED_CONFIG_RES = 871, + MSG_PWM_ANTENNA_REQ = 872, + MSG_PWM_ANTENNA_RES = 873, + MSG_SMS_CMMB_SMD_SN_REQ = 874, + MSG_SMS_CMMB_SMD_SN_RES = 875, + MSG_SMS_CMMB_SET_CA_CW_REQ = 876, + MSG_SMS_CMMB_SET_CA_CW_RES = 877, + MSG_SMS_CMMB_SET_CA_SALT_REQ = 878, + MSG_SMS_CMMB_SET_CA_SALT_RES = 879, + MSG_SMS_NSCD_INIT_REQ = 880, + MSG_SMS_NSCD_INIT_RES = 881, + MSG_SMS_NSCD_PROCESS_SECTION_REQ = 882, + MSG_SMS_NSCD_PROCESS_SECTION_RES = 883, + MSG_SMS_DBD_CREATE_OBJECT_REQ = 884, + MSG_SMS_DBD_CREATE_OBJECT_RES = 885, + MSG_SMS_DBD_CONFIGURE_REQ = 886, + MSG_SMS_DBD_CONFIGURE_RES = 887, + MSG_SMS_DBD_SET_KEYS_REQ = 888, + MSG_SMS_DBD_SET_KEYS_RES = 889, + MSG_SMS_DBD_PROCESS_HEADER_REQ = 890, + MSG_SMS_DBD_PROCESS_HEADER_RES = 891, + MSG_SMS_DBD_PROCESS_DATA_REQ = 892, + MSG_SMS_DBD_PROCESS_DATA_RES = 893, + MSG_SMS_DBD_PROCESS_GET_DATA_REQ = 894, + MSG_SMS_DBD_PROCESS_GET_DATA_RES = 895, + MSG_SMS_NSCD_OPEN_SESSION_REQ = 896, + MSG_SMS_NSCD_OPEN_SESSION_RES = 897, + MSG_SMS_SEND_HOST_DATA_TO_DEMUX_REQ = 898, + MSG_SMS_SEND_HOST_DATA_TO_DEMUX_RES = 899, + MSG_LAST_MSG_TYPE = 900, +}; + +#define SMS_INIT_MSG_EX(ptr, type, src, dst, len) do { \ + (ptr)->msg_type = type; \ + (ptr)->msg_src_id = src; \ + (ptr)->msg_dst_id = dst; \ + (ptr)->msg_length = len; \ + (ptr)->msg_flags = 0; \ +} while (0) + +#define SMS_INIT_MSG(ptr, type, len) \ + SMS_INIT_MSG_EX(ptr, type, 0, HIF_TASK, len) + +enum SMS_DVB3_EVENTS { + DVB3_EVENT_INIT 
= 0, + DVB3_EVENT_SLEEP, + DVB3_EVENT_HOTPLUG, + DVB3_EVENT_FE_LOCK, + DVB3_EVENT_FE_UNLOCK, + DVB3_EVENT_UNC_OK, + DVB3_EVENT_UNC_ERR +}; + +enum SMS_DEVICE_MODE { + DEVICE_MODE_NONE = -1, + DEVICE_MODE_DVBT = 0, + DEVICE_MODE_DVBH, + DEVICE_MODE_DAB_TDMB, + DEVICE_MODE_DAB_TDMB_DABIP, + DEVICE_MODE_DVBT_BDA, + DEVICE_MODE_ISDBT, + DEVICE_MODE_ISDBT_BDA, + DEVICE_MODE_CMMB, + DEVICE_MODE_RAW_TUNER, + DEVICE_MODE_FM_RADIO, + DEVICE_MODE_FM_RADIO_BDA, + DEVICE_MODE_ATSC, + DEVICE_MODE_MAX, +}; + +struct sms_msg_hdr { + u16 msg_type; + u8 msg_src_id; + u8 msg_dst_id; + u16 msg_length; /* length of entire message, including header */ + u16 msg_flags; +}; + +struct sms_msg_data { + struct sms_msg_hdr x_msg_header; + u32 msg_data[1]; +}; + +struct sms_msg_data2 { + struct sms_msg_hdr x_msg_header; + u32 msg_data[2]; +}; + +struct sms_msg_data5 { + struct sms_msg_hdr x_msg_header; + u32 msg_data[5]; +}; + +struct sms_data_download { + struct sms_msg_hdr x_msg_header; + u32 mem_addr; + u8 payload[SMS_MAX_PAYLOAD_SIZE]; +}; + +struct sms_version_res { + struct sms_msg_hdr x_msg_header; + + u16 chip_model; /* e.g. 0x1102 for SMS-1102 "Nova" */ + u8 step; /* 0 - step A */ + u8 metal_fix; /* 0 - Metal 0 */ + + /* firmware_id 0xFF if ROM, otherwise the + * value indicated by SMSHOSTLIB_DEVICE_MODES_E */ + u8 firmware_id; + /* supported_protocols Bitwise OR combination of + * supported protocols */ + u8 supported_protocols; + + u8 version_major; + u8 version_minor; + u8 version_patch; + u8 version_field_patch; + + u8 rom_ver_major; + u8 rom_ver_minor; + u8 rom_ver_patch; + u8 rom_ver_field_patch; + + u8 TextLabel[34]; +}; + +struct sms_firmware { + u32 check_sum; + u32 length; + u32 start_address; + u8 payload[1]; +}; + +/* statistics information returned as response for + * SmsHostApiGetstatistics_Req */ +struct sms_stats { + u32 reserved; /* reserved */ + + /* Common parameters */ + u32 is_rf_locked; /* 0 - not locked, 1 - locked */ + u32 is_demod_locked; /* 0 - not locked, 1 - locked */ + u32 is_external_lna_on; /* 0 - external LNA off, 1 - external LNA on */ + + /* Reception quality */ + s32 SNR; /* dB */ + u32 ber; /* Post Viterbi ber [1E-5] */ + u32 FIB_CRC; /* CRC errors percentage, valid only for DAB */ + u32 ts_per; /* Transport stream PER, + 0xFFFFFFFF indicate N/A, valid only for DVB-T/H */ + u32 MFER; /* DVB-H frame error rate in percentage, + 0xFFFFFFFF indicate N/A, valid only for DVB-H */ + s32 RSSI; /* dBm */ + s32 in_band_pwr; /* In band power in dBM */ + s32 carrier_offset; /* Carrier Offset in bin/1024 */ + + /* Transmission parameters */ + u32 frequency; /* frequency in Hz */ + u32 bandwidth; /* bandwidth in MHz, valid only for DVB-T/H */ + u32 transmission_mode; /* Transmission Mode, for DAB modes 1-4, + for DVB-T/H FFT mode carriers in Kilos */ + u32 modem_state; /* from SMSHOSTLIB_DVB_MODEM_STATE_ET, + valid only for DVB-T/H */ + u32 guard_interval; /* Guard Interval from + SMSHOSTLIB_GUARD_INTERVALS_ET, valid only for DVB-T/H */ + u32 code_rate; /* Code Rate from SMSHOSTLIB_CODE_RATE_ET, + valid only for DVB-T/H */ + u32 lp_code_rate; /* Low Priority Code Rate from + SMSHOSTLIB_CODE_RATE_ET, valid only for DVB-T/H */ + u32 hierarchy; /* hierarchy from SMSHOSTLIB_HIERARCHY_ET, + valid only for DVB-T/H */ + u32 constellation; /* constellation from + SMSHOSTLIB_CONSTELLATION_ET, valid only for DVB-T/H */ + + /* Burst parameters, valid only for DVB-H */ + u32 burst_size; /* Current burst size in bytes, + valid only for DVB-H */ + u32 burst_duration; /* Current burst duration in 
mSec, + valid only for DVB-H */ + u32 burst_cycle_time; /* Current burst cycle time in mSec, + valid only for DVB-H */ + u32 calc_burst_cycle_time;/* Current burst cycle time in mSec, + as calculated by demodulator, valid only for DVB-H */ + u32 num_of_rows; /* Number of rows in MPE table, + valid only for DVB-H */ + u32 num_of_padd_cols; /* Number of padding columns in MPE table, + valid only for DVB-H */ + u32 num_of_punct_cols; /* Number of puncturing columns in MPE table, + valid only for DVB-H */ + u32 error_ts_packets; /* Number of erroneous + transport-stream packets */ + u32 total_ts_packets; /* Total number of transport-stream packets */ + u32 num_of_valid_mpe_tlbs; /* Number of MPE tables which do not include + errors after MPE RS decoding */ + u32 num_of_invalid_mpe_tlbs;/* Number of MPE tables which include errors + after MPE RS decoding */ + u32 num_of_corrected_mpe_tlbs;/* Number of MPE tables which were + corrected by MPE RS decoding */ + /* Common params */ + u32 ber_error_count; /* Number of erroneous SYNC bits. */ + u32 ber_bit_count; /* Total number of SYNC bits. */ + + /* Interface information */ + u32 sms_to_host_tx_errors; /* Total number of transmission errors. */ + + /* DAB/T-DMB */ + u32 pre_ber; /* DAB/T-DMB only: Pre Viterbi ber [1E-5] */ + + /* DVB-H TPS parameters */ + u32 cell_id; /* TPS Cell ID in bits 15..0, bits 31..16 zero; + if set to 0xFFFFFFFF cell_id not yet recovered */ + u32 dvbh_srv_ind_hp; /* DVB-H service indication info, bit 1 - + Time Slicing indicator, bit 0 - MPE-FEC indicator */ + u32 dvbh_srv_ind_lp; /* DVB-H service indication info, bit 1 - + Time Slicing indicator, bit 0 - MPE-FEC indicator */ + + u32 num_mpe_received; /* DVB-H, Num MPE section received */ + + u32 reservedFields[10]; /* reserved */ +}; + +struct sms_msg_statistics_info { + u32 request_result; + + struct sms_stats stat; + + /* Split the calc of the SNR in DAB */ + u32 signal; /* dB */ + u32 noise; /* dB */ + +}; + +struct sms_isdbt_layer_stats { + /* Per-layer information */ + u32 code_rate; /* Code Rate from SMSHOSTLIB_CODE_RATE_ET, + * 255 means layer does not exist */ + u32 constellation; /* constellation from SMSHOSTLIB_CONSTELLATION_ET, + * 255 means layer does not exist */ + u32 ber; /* Post Viterbi ber [1E-5], 0xFFFFFFFF indicate N/A */ + u32 ber_error_count; /* Post Viterbi Error Bits Count */ + u32 ber_bit_count; /* Post Viterbi Total Bits Count */ + u32 pre_ber; /* Pre Viterbi ber [1E-5], 0xFFFFFFFF indicate N/A */ + u32 ts_per; /* Transport stream PER [%], 0xFFFFFFFF indicate N/A */ + u32 error_ts_packets; /* Number of erroneous transport-stream packets */ + u32 total_ts_packets; /* Total number of transport-stream packets */ + u32 ti_ldepth_i; /* Time interleaver depth I parameter, + * 255 means layer does not exist */ + u32 number_of_segments; /* Number of segments in layer A, + * 255 means layer does not exist */ + u32 tmcc_errors; /* TMCC errors */ +}; + +struct sms_isdbt_stats { + u32 statistics_type; /* Enumerator identifying the type of the + * structure. Values are the same as + * SMSHOSTLIB_DEVICE_MODES_E + * + * This field MUST always be first in any + * statistics structure */ + + u32 full_size; /* Total size of the structure returned by the modem. 
+ * If the size requested by the host is smaller than + * full_size, the struct will be truncated */ + + /* Common parameters */ + u32 is_rf_locked; /* 0 - not locked, 1 - locked */ + u32 is_demod_locked; /* 0 - not locked, 1 - locked */ + u32 is_external_lna_on; /* 0 - external LNA off, 1 - external LNA on */ + + /* Reception quality */ + s32 SNR; /* dB */ + s32 RSSI; /* dBm */ + s32 in_band_pwr; /* In band power in dBM */ + s32 carrier_offset; /* Carrier Offset in Hz */ + + /* Transmission parameters */ + u32 frequency; /* frequency in Hz */ + u32 bandwidth; /* bandwidth in MHz */ + u32 transmission_mode; /* ISDB-T transmission mode */ + u32 modem_state; /* 0 - Acquisition, 1 - Locked */ + u32 guard_interval; /* Guard Interval, 1 divided by value */ + u32 system_type; /* ISDB-T system type (ISDB-T / ISDB-Tsb) */ + u32 partial_reception; /* TRUE - partial reception, FALSE otherwise */ + u32 num_of_layers; /* Number of ISDB-T layers in the network */ + + /* Per-layer information */ + /* Layers A, B and C */ + struct sms_isdbt_layer_stats layer_info[3]; + /* Per-layer statistics, see sms_isdbt_layer_stats */ + + /* Interface information */ + u32 sms_to_host_tx_errors; /* Total number of transmission errors. */ +}; + +struct sms_isdbt_stats_ex { + u32 statistics_type; /* Enumerator identifying the type of the + * structure. Values are the same as + * SMSHOSTLIB_DEVICE_MODES_E + * + * This field MUST always be first in any + * statistics structure */ + + u32 full_size; /* Total size of the structure returned by the modem. + * If the size requested by the host is smaller than + * full_size, the struct will be truncated */ + + /* Common parameters */ + u32 is_rf_locked; /* 0 - not locked, 1 - locked */ + u32 is_demod_locked; /* 0 - not locked, 1 - locked */ + u32 is_external_lna_on; /* 0 - external LNA off, 1 - external LNA on */ + + /* Reception quality */ + s32 SNR; /* dB */ + s32 RSSI; /* dBm */ + s32 in_band_pwr; /* In band power in dBM */ + s32 carrier_offset; /* Carrier Offset in Hz */ + + /* Transmission parameters */ + u32 frequency; /* frequency in Hz */ + u32 bandwidth; /* bandwidth in MHz */ + u32 transmission_mode; /* ISDB-T transmission mode */ + u32 modem_state; /* 0 - Acquisition, 1 - Locked */ + u32 guard_interval; /* Guard Interval, 1 divided by value */ + u32 system_type; /* ISDB-T system type (ISDB-T / ISDB-Tsb) */ + u32 partial_reception; /* TRUE - partial reception, FALSE otherwise */ + u32 num_of_layers; /* Number of ISDB-T layers in the network */ + + u32 segment_number; /* Segment number for ISDB-Tsb */ + u32 tune_bw; /* Tuned bandwidth - BW_ISDBT_1SEG / BW_ISDBT_3SEG */ + + /* Per-layer information */ + /* Layers A, B and C */ + struct sms_isdbt_layer_stats layer_info[3]; + /* Per-layer statistics, see sms_isdbt_layer_stats */ + + /* Interface information */ + u32 reserved1; /* Was sms_to_host_tx_errors - obsolete . */ + /* Proprietary information */ + u32 ext_antenna; /* Obsolete field. 
*/ + u32 reception_quality; + u32 ews_alert_active; /* signals if EWS alert is currently on */ + u32 lna_on_off; /* Internal LNA state: 0: OFF, 1: ON */ + + u32 rf_agc_level; /* RF AGC Level [linear units], full gain = 65535 (20dB) */ + u32 bb_agc_level; /* Baseband AGC level [linear units], full gain = 65535 (71.5dB) */ + u32 fw_errors_counter; /* Application errors - should be always zero */ + u8 FwErrorsHistoryArr[8]; /* Last FW errors IDs - first is most recent, last is oldest */ + + s32 MRC_SNR; /* dB */ + u32 snr_full_res; /* dB x 65536 */ + u32 reserved4[4]; +}; + + +struct sms_pid_stats_data { + struct PID_BURST_S { + u32 size; + u32 padding_cols; + u32 punct_cols; + u32 duration; + u32 cycle; + u32 calc_cycle; + } burst; + + u32 tot_tbl_cnt; + u32 invalid_tbl_cnt; + u32 tot_cor_tbl; +}; + +struct sms_pid_data { + u32 pid; + u32 num_rows; + struct sms_pid_stats_data pid_statistics; +}; + +#define CORRECT_STAT_RSSI(_stat) ((_stat).RSSI *= -1) +#define CORRECT_STAT_BANDWIDTH(_stat) (_stat.bandwidth = 8 - _stat.bandwidth) +#define CORRECT_STAT_TRANSMISSON_MODE(_stat) \ + if (_stat.transmission_mode == 0) \ + _stat.transmission_mode = 2; \ + else if (_stat.transmission_mode == 1) \ + _stat.transmission_mode = 8; \ + else \ + _stat.transmission_mode = 4; + +struct sms_tx_stats { + u32 frequency; /* frequency in Hz */ + u32 bandwidth; /* bandwidth in MHz */ + u32 transmission_mode; /* FFT mode carriers in Kilos */ + u32 guard_interval; /* Guard Interval from + SMSHOSTLIB_GUARD_INTERVALS_ET */ + u32 code_rate; /* Code Rate from SMSHOSTLIB_CODE_RATE_ET */ + u32 lp_code_rate; /* Low Priority Code Rate from + SMSHOSTLIB_CODE_RATE_ET */ + u32 hierarchy; /* hierarchy from SMSHOSTLIB_HIERARCHY_ET */ + u32 constellation; /* constellation from + SMSHOSTLIB_CONSTELLATION_ET */ + + /* DVB-H TPS parameters */ + u32 cell_id; /* TPS Cell ID in bits 15..0, bits 31..16 zero; + if set to 0xFFFFFFFF cell_id not yet recovered */ + u32 dvbh_srv_ind_hp; /* DVB-H service indication info, bit 1 - + Time Slicing indicator, bit 0 - MPE-FEC indicator */ + u32 dvbh_srv_ind_lp; /* DVB-H service indication info, bit 1 - + Time Slicing indicator, bit 0 - MPE-FEC indicator */ + u32 is_demod_locked; /* 0 - not locked, 1 - locked */ +}; + +struct sms_rx_stats { + u32 is_rf_locked; /* 0 - not locked, 1 - locked */ + u32 is_demod_locked; /* 0 - not locked, 1 - locked */ + u32 is_external_lna_on; /* 0 - external LNA off, 1 - external LNA on */ + + u32 modem_state; /* from SMSHOSTLIB_DVB_MODEM_STATE_ET */ + s32 SNR; /* dB */ + u32 ber; /* Post Viterbi ber [1E-5] */ + u32 ber_error_count; /* Number of erroneous SYNC bits. */ + u32 ber_bit_count; /* Total number of SYNC bits. 
*/ + u32 ts_per; /* Transport stream PER, + 0xFFFFFFFF indicate N/A */ + u32 MFER; /* DVB-H frame error rate in percentage, + 0xFFFFFFFF indicate N/A, valid only for DVB-H */ + s32 RSSI; /* dBm */ + s32 in_band_pwr; /* In band power in dBM */ + s32 carrier_offset; /* Carrier Offset in bin/1024 */ + u32 error_ts_packets; /* Number of erroneous + transport-stream packets */ + u32 total_ts_packets; /* Total number of transport-stream packets */ + + s32 MRC_SNR; /* dB */ + s32 MRC_RSSI; /* dBm */ + s32 mrc_in_band_pwr; /* In band power in dBM */ +}; + +struct sms_rx_stats_ex { + u32 is_rf_locked; /* 0 - not locked, 1 - locked */ + u32 is_demod_locked; /* 0 - not locked, 1 - locked */ + u32 is_external_lna_on; /* 0 - external LNA off, 1 - external LNA on */ + + u32 modem_state; /* from SMSHOSTLIB_DVB_MODEM_STATE_ET */ + s32 SNR; /* dB */ + u32 ber; /* Post Viterbi ber [1E-5] */ + u32 ber_error_count; /* Number of erroneous SYNC bits. */ + u32 ber_bit_count; /* Total number of SYNC bits. */ + u32 ts_per; /* Transport stream PER, + 0xFFFFFFFF indicate N/A */ + u32 MFER; /* DVB-H frame error rate in percentage, + 0xFFFFFFFF indicate N/A, valid only for DVB-H */ + s32 RSSI; /* dBm */ + s32 in_band_pwr; /* In band power in dBM */ + s32 carrier_offset; /* Carrier Offset in bin/1024 */ + u32 error_ts_packets; /* Number of erroneous + transport-stream packets */ + u32 total_ts_packets; /* Total number of transport-stream packets */ + + s32 ref_dev_ppm; + s32 freq_dev_hz; + + s32 MRC_SNR; /* dB */ + s32 MRC_RSSI; /* dBm */ + s32 mrc_in_band_pwr; /* In band power in dBM */ +}; + +#define SRVM_MAX_PID_FILTERS 8 + +/* statistics information returned as response for + * SmsHostApiGetstatisticsEx_Req for DVB applications, SMS1100 and up */ +struct sms_stats_dvb { + /* Reception */ + struct sms_rx_stats reception_data; + + /* Transmission parameters */ + struct sms_tx_stats transmission_data; + + /* Burst parameters, valid only for DVB-H */ + struct sms_pid_data pid_data[SRVM_MAX_PID_FILTERS]; +}; + +/* statistics information returned as response for + * SmsHostApiGetstatisticsEx_Req for DVB applications, SMS1100 and up */ +struct sms_stats_dvb_ex { + /* Reception */ + struct sms_rx_stats_ex reception_data; + + /* Transmission parameters */ + struct sms_tx_stats transmission_data; + + /* Burst parameters, valid only for DVB-H */ + struct sms_pid_data pid_data[SRVM_MAX_PID_FILTERS]; +}; + +struct sms_srvm_signal_status { + u32 result; + u32 snr; + u32 ts_packets; + u32 ets_packets; + u32 constellation; + u32 hp_code; + u32 tps_srv_ind_lp; + u32 tps_srv_ind_hp; + u32 cell_id; + u32 reason; + + s32 in_band_power; + u32 request_id; +}; + +struct sms_i2c_req { + u32 device_address; /* I2c device address */ + u32 write_count; /* number of bytes to write */ + u32 read_count; /* number of bytes to read */ + u8 Data[1]; +}; + +struct sms_i2c_res { + u32 status; /* non-zero value in case of failure */ + u32 read_count; /* number of bytes read */ + u8 Data[1]; +}; + + +struct smscore_config_gpio { +#define SMS_GPIO_DIRECTION_INPUT 0 +#define SMS_GPIO_DIRECTION_OUTPUT 1 + u8 direction; + +#define SMS_GPIO_PULLUPDOWN_NONE 0 +#define SMS_GPIO_PULLUPDOWN_PULLDOWN 1 +#define SMS_GPIO_PULLUPDOWN_PULLUP 2 +#define SMS_GPIO_PULLUPDOWN_KEEPER 3 + u8 pullupdown; + +#define SMS_GPIO_INPUTCHARACTERISTICS_NORMAL 0 +#define SMS_GPIO_INPUTCHARACTERISTICS_SCHMITT 1 + u8 inputcharacteristics; + + /* 10xx */ +#define SMS_GPIO_OUTPUT_SLEW_RATE_FAST 0 +#define SMS_GPIO_OUTPUT_SLEW_WRATE_SLOW 1 + + /* 11xx */ +#define 
SMS_GPIO_OUTPUT_SLEW_RATE_0_45_V_NS 0 +#define SMS_GPIO_OUTPUT_SLEW_RATE_0_9_V_NS 1 +#define SMS_GPIO_OUTPUT_SLEW_RATE_1_7_V_NS 2 +#define SMS_GPIO_OUTPUT_SLEW_RATE_3_3_V_NS 3 + + u8 outputslewrate; + + /* 10xx */ +#define SMS_GPIO_OUTPUTDRIVING_S_4mA 0 +#define SMS_GPIO_OUTPUTDRIVING_S_8mA 1 +#define SMS_GPIO_OUTPUTDRIVING_S_12mA 2 +#define SMS_GPIO_OUTPUTDRIVING_S_16mA 3 + + /* 11xx*/ +#define SMS_GPIO_OUTPUTDRIVING_1_5mA 0 +#define SMS_GPIO_OUTPUTDRIVING_2_8mA 1 +#define SMS_GPIO_OUTPUTDRIVING_4mA 2 +#define SMS_GPIO_OUTPUTDRIVING_7mA 3 +#define SMS_GPIO_OUTPUTDRIVING_10mA 4 +#define SMS_GPIO_OUTPUTDRIVING_11mA 5 +#define SMS_GPIO_OUTPUTDRIVING_14mA 6 +#define SMS_GPIO_OUTPUTDRIVING_16mA 7 + + u8 outputdriving; +}; + +char *smscore_translate_msg(enum msg_types msgtype); + +extern int smscore_registry_getmode(char *devpath); + +extern int smscore_register_hotplug(hotplug_t hotplug); +extern void smscore_unregister_hotplug(hotplug_t hotplug); + +extern int smscore_register_device(struct smsdevice_params_t *params, + struct smscore_device_t **coredev, + gfp_t gfp_buf_flags, + void *mdev); +extern void smscore_unregister_device(struct smscore_device_t *coredev); + +extern int smscore_start_device(struct smscore_device_t *coredev); +extern int smscore_load_firmware(struct smscore_device_t *coredev, + char *filename, + loadfirmware_t loadfirmware_handler); + +extern int smscore_set_device_mode(struct smscore_device_t *coredev, int mode); +extern int smscore_get_device_mode(struct smscore_device_t *coredev); + +extern int smscore_register_client(struct smscore_device_t *coredev, + struct smsclient_params_t *params, + struct smscore_client_t **client); +extern void smscore_unregister_client(struct smscore_client_t *client); + +extern int smsclient_sendrequest(struct smscore_client_t *client, + void *buffer, size_t size); +extern void smscore_onresponse(struct smscore_device_t *coredev, + struct smscore_buffer_t *cb); + +extern int smscore_get_common_buffer_size(struct smscore_device_t *coredev); +extern int smscore_map_common_buffer(struct smscore_device_t *coredev, + struct vm_area_struct *vma); +extern int smscore_send_fw_file(struct smscore_device_t *coredev, + u8 *ufwbuf, int size); + +extern +struct smscore_buffer_t *smscore_getbuffer(struct smscore_device_t *coredev); +extern void smscore_putbuffer(struct smscore_device_t *coredev, + struct smscore_buffer_t *cb); + +/* old GPIO management */ +int smscore_configure_gpio(struct smscore_device_t *coredev, u32 pin, + struct smscore_config_gpio *pinconfig); +int smscore_set_gpio(struct smscore_device_t *coredev, u32 pin, int level); + +/* new GPIO management */ +extern int smscore_gpio_configure(struct smscore_device_t *coredev, u8 pin_num, + struct smscore_config_gpio *p_gpio_config); +extern int smscore_gpio_set_level(struct smscore_device_t *coredev, u8 pin_num, + u8 new_level); +extern int smscore_gpio_get_level(struct smscore_device_t *coredev, u8 pin_num, + u8 *level); + +void smscore_set_board_id(struct smscore_device_t *core, int id); +int smscore_get_board_id(struct smscore_device_t *core); + +int smscore_led_state(struct smscore_device_t *core, int led); + + +/* ------------------------------------------------------------------------ */ + +#endif /* __SMS_CORE_API_H__ */ diff --git a/drivers/media/common/siano/smsdvb-debugfs.c b/drivers/media/common/siano/smsdvb-debugfs.c new file mode 100644 index 0000000000..e0beefd80d --- /dev/null +++ b/drivers/media/common/siano/smsdvb-debugfs.c @@ -0,0 +1,437 @@ +// SPDX-License-Identifier: 
GPL-2.0+ +// +// Copyright(c) 2013 Mauro Carvalho Chehab + +#include "smscoreapi.h" + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "smsdvb.h" + +static struct dentry *smsdvb_debugfs_usb_root; + +struct smsdvb_debugfs { + struct kref refcount; + spinlock_t lock; + + char stats_data[PAGE_SIZE]; + unsigned stats_count; + bool stats_was_read; + + wait_queue_head_t stats_queue; +}; + +static void smsdvb_print_dvb_stats(struct smsdvb_debugfs *debug_data, + struct sms_stats *p) +{ + int n = 0; + char *buf; + + spin_lock(&debug_data->lock); + if (debug_data->stats_count) { + spin_unlock(&debug_data->lock); + return; + } + + buf = debug_data->stats_data; + + n += sysfs_emit_at(buf, n, "is_rf_locked = %d\n", p->is_rf_locked); + n += sysfs_emit_at(buf, n, "is_demod_locked = %d\n", p->is_demod_locked); + n += sysfs_emit_at(buf, n, "is_external_lna_on = %d\n", p->is_external_lna_on); + n += sysfs_emit_at(buf, n, "SNR = %d\n", p->SNR); + n += sysfs_emit_at(buf, n, "ber = %d\n", p->ber); + n += sysfs_emit_at(buf, n, "FIB_CRC = %d\n", p->FIB_CRC); + n += sysfs_emit_at(buf, n, "ts_per = %d\n", p->ts_per); + n += sysfs_emit_at(buf, n, "MFER = %d\n", p->MFER); + n += sysfs_emit_at(buf, n, "RSSI = %d\n", p->RSSI); + n += sysfs_emit_at(buf, n, "in_band_pwr = %d\n", p->in_band_pwr); + n += sysfs_emit_at(buf, n, "carrier_offset = %d\n", p->carrier_offset); + n += sysfs_emit_at(buf, n, "modem_state = %d\n", p->modem_state); + n += sysfs_emit_at(buf, n, "frequency = %d\n", p->frequency); + n += sysfs_emit_at(buf, n, "bandwidth = %d\n", p->bandwidth); + n += sysfs_emit_at(buf, n, "transmission_mode = %d\n", p->transmission_mode); + n += sysfs_emit_at(buf, n, "modem_state = %d\n", p->modem_state); + n += sysfs_emit_at(buf, n, "guard_interval = %d\n", p->guard_interval); + n += sysfs_emit_at(buf, n, "code_rate = %d\n", p->code_rate); + n += sysfs_emit_at(buf, n, "lp_code_rate = %d\n", p->lp_code_rate); + n += sysfs_emit_at(buf, n, "hierarchy = %d\n", p->hierarchy); + n += sysfs_emit_at(buf, n, "constellation = %d\n", p->constellation); + n += sysfs_emit_at(buf, n, "burst_size = %d\n", p->burst_size); + n += sysfs_emit_at(buf, n, "burst_duration = %d\n", p->burst_duration); + n += sysfs_emit_at(buf, n, "burst_cycle_time = %d\n", p->burst_cycle_time); + n += sysfs_emit_at(buf, n, "calc_burst_cycle_time = %d\n", p->calc_burst_cycle_time); + n += sysfs_emit_at(buf, n, "num_of_rows = %d\n", p->num_of_rows); + n += sysfs_emit_at(buf, n, "num_of_padd_cols = %d\n", p->num_of_padd_cols); + n += sysfs_emit_at(buf, n, "num_of_punct_cols = %d\n", p->num_of_punct_cols); + n += sysfs_emit_at(buf, n, "error_ts_packets = %d\n", p->error_ts_packets); + n += sysfs_emit_at(buf, n, "total_ts_packets = %d\n", p->total_ts_packets); + n += sysfs_emit_at(buf, n, "num_of_valid_mpe_tlbs = %d\n", p->num_of_valid_mpe_tlbs); + n += sysfs_emit_at(buf, n, "num_of_invalid_mpe_tlbs = %d\n", p->num_of_invalid_mpe_tlbs); + n += sysfs_emit_at(buf, n, "num_of_corrected_mpe_tlbs = %d\n", + p->num_of_corrected_mpe_tlbs); + n += sysfs_emit_at(buf, n, "ber_error_count = %d\n", p->ber_error_count); + n += sysfs_emit_at(buf, n, "ber_bit_count = %d\n", p->ber_bit_count); + n += sysfs_emit_at(buf, n, "sms_to_host_tx_errors = %d\n", p->sms_to_host_tx_errors); + n += sysfs_emit_at(buf, n, "pre_ber = %d\n", p->pre_ber); + n += sysfs_emit_at(buf, n, "cell_id = %d\n", p->cell_id); + n += sysfs_emit_at(buf, n, "dvbh_srv_ind_hp = %d\n", p->dvbh_srv_ind_hp); + n += sysfs_emit_at(buf, n, 
"dvbh_srv_ind_lp = %d\n", p->dvbh_srv_ind_lp); + n += sysfs_emit_at(buf, n, "num_mpe_received = %d\n", p->num_mpe_received); + + debug_data->stats_count = n; + spin_unlock(&debug_data->lock); + wake_up(&debug_data->stats_queue); +} + +static void smsdvb_print_isdb_stats(struct smsdvb_debugfs *debug_data, + struct sms_isdbt_stats *p) +{ + int i, n = 0; + char *buf; + + spin_lock(&debug_data->lock); + if (debug_data->stats_count) { + spin_unlock(&debug_data->lock); + return; + } + + buf = debug_data->stats_data; + + n += sysfs_emit_at(buf, n, "statistics_type = %d\t", p->statistics_type); + n += sysfs_emit_at(buf, n, "full_size = %d\n", p->full_size); + + n += sysfs_emit_at(buf, n, "is_rf_locked = %d\t\t", p->is_rf_locked); + n += sysfs_emit_at(buf, n, "is_demod_locked = %d\t", p->is_demod_locked); + n += sysfs_emit_at(buf, n, "is_external_lna_on = %d\n", p->is_external_lna_on); + n += sysfs_emit_at(buf, n, "SNR = %d dB\t\t", p->SNR); + n += sysfs_emit_at(buf, n, "RSSI = %d dBm\t\t", p->RSSI); + n += sysfs_emit_at(buf, n, "in_band_pwr = %d dBm\n", p->in_band_pwr); + n += sysfs_emit_at(buf, n, "carrier_offset = %d\t", p->carrier_offset); + n += sysfs_emit_at(buf, n, "bandwidth = %d\t\t", p->bandwidth); + n += sysfs_emit_at(buf, n, "frequency = %d Hz\n", p->frequency); + n += sysfs_emit_at(buf, n, "transmission_mode = %d\t", p->transmission_mode); + n += sysfs_emit_at(buf, n, "modem_state = %d\t\t", p->modem_state); + n += sysfs_emit_at(buf, n, "guard_interval = %d\n", p->guard_interval); + n += sysfs_emit_at(buf, n, "system_type = %d\t\t", p->system_type); + n += sysfs_emit_at(buf, n, "partial_reception = %d\t", p->partial_reception); + n += sysfs_emit_at(buf, n, "num_of_layers = %d\n", p->num_of_layers); + n += sysfs_emit_at(buf, n, "sms_to_host_tx_errors = %d\n", p->sms_to_host_tx_errors); + + for (i = 0; i < 3; i++) { + if (p->layer_info[i].number_of_segments < 1 || + p->layer_info[i].number_of_segments > 13) + continue; + + n += sysfs_emit_at(buf, n, "\nLayer %d\n", i); + n += sysfs_emit_at(buf, n, "\tcode_rate = %d\t", p->layer_info[i].code_rate); + n += sysfs_emit_at(buf, n, "constellation = %d\n", p->layer_info[i].constellation); + n += sysfs_emit_at(buf, n, "\tber = %-5d\t", p->layer_info[i].ber); + n += sysfs_emit_at(buf, n, "\tber_error_count = %-5d\t", + p->layer_info[i].ber_error_count); + n += sysfs_emit_at(buf, n, "ber_bit_count = %-5d\n", + p->layer_info[i].ber_bit_count); + n += sysfs_emit_at(buf, n, "\tpre_ber = %-5d\t", p->layer_info[i].pre_ber); + n += sysfs_emit_at(buf, n, "\tts_per = %-5d\n", p->layer_info[i].ts_per); + n += sysfs_emit_at(buf, n, "\terror_ts_packets = %-5d\t", + p->layer_info[i].error_ts_packets); + n += sysfs_emit_at(buf, n, "total_ts_packets = %-5d\t", + p->layer_info[i].total_ts_packets); + n += sysfs_emit_at(buf, n, "ti_ldepth_i = %d\n", p->layer_info[i].ti_ldepth_i); + n += sysfs_emit_at(buf, n, "\tnumber_of_segments = %d\t", + p->layer_info[i].number_of_segments); + n += sysfs_emit_at(buf, n, "tmcc_errors = %d\n", p->layer_info[i].tmcc_errors); + } + + debug_data->stats_count = n; + spin_unlock(&debug_data->lock); + wake_up(&debug_data->stats_queue); +} + +static void smsdvb_print_isdb_stats_ex(struct smsdvb_debugfs *debug_data, + struct sms_isdbt_stats_ex *p) +{ + int i, n = 0; + char *buf; + + spin_lock(&debug_data->lock); + if (debug_data->stats_count) { + spin_unlock(&debug_data->lock); + return; + } + + buf = debug_data->stats_data; + + n += sysfs_emit_at(buf, n, "statistics_type = %d\t", p->statistics_type); + n += sysfs_emit_at(buf, n, 
"full_size = %d\n", p->full_size); + + n += sysfs_emit_at(buf, n, "is_rf_locked = %d\t\t", p->is_rf_locked); + n += sysfs_emit_at(buf, n, "is_demod_locked = %d\t", p->is_demod_locked); + n += sysfs_emit_at(buf, n, "is_external_lna_on = %d\n", p->is_external_lna_on); + n += sysfs_emit_at(buf, n, "SNR = %d dB\t\t", p->SNR); + n += sysfs_emit_at(buf, n, "RSSI = %d dBm\t\t", p->RSSI); + n += sysfs_emit_at(buf, n, "in_band_pwr = %d dBm\n", p->in_band_pwr); + n += sysfs_emit_at(buf, n, "carrier_offset = %d\t", p->carrier_offset); + n += sysfs_emit_at(buf, n, "bandwidth = %d\t\t", p->bandwidth); + n += sysfs_emit_at(buf, n, "frequency = %d Hz\n", p->frequency); + n += sysfs_emit_at(buf, n, "transmission_mode = %d\t", p->transmission_mode); + n += sysfs_emit_at(buf, n, "modem_state = %d\t\t", p->modem_state); + n += sysfs_emit_at(buf, n, "guard_interval = %d\n", p->guard_interval); + n += sysfs_emit_at(buf, n, "system_type = %d\t\t", p->system_type); + n += sysfs_emit_at(buf, n, "partial_reception = %d\t", p->partial_reception); + n += sysfs_emit_at(buf, n, "num_of_layers = %d\n", p->num_of_layers); + n += sysfs_emit_at(buf, n, "segment_number = %d\t", p->segment_number); + n += sysfs_emit_at(buf, n, "tune_bw = %d\n", p->tune_bw); + + for (i = 0; i < 3; i++) { + if (p->layer_info[i].number_of_segments < 1 || + p->layer_info[i].number_of_segments > 13) + continue; + + n += sysfs_emit_at(buf, n, "\nLayer %d\n", i); + n += sysfs_emit_at(buf, n, "\tcode_rate = %d\t", p->layer_info[i].code_rate); + n += sysfs_emit_at(buf, n, "constellation = %d\n", p->layer_info[i].constellation); + n += sysfs_emit_at(buf, n, "\tber = %-5d\t", p->layer_info[i].ber); + n += sysfs_emit_at(buf, n, "\tber_error_count = %-5d\t", + p->layer_info[i].ber_error_count); + n += sysfs_emit_at(buf, n, "ber_bit_count = %-5d\n", + p->layer_info[i].ber_bit_count); + n += sysfs_emit_at(buf, n, "\tpre_ber = %-5d\t", p->layer_info[i].pre_ber); + n += sysfs_emit_at(buf, n, "\tts_per = %-5d\n", p->layer_info[i].ts_per); + n += sysfs_emit_at(buf, n, "\terror_ts_packets = %-5d\t", + p->layer_info[i].error_ts_packets); + n += sysfs_emit_at(buf, n, "total_ts_packets = %-5d\t", + p->layer_info[i].total_ts_packets); + n += sysfs_emit_at(buf, n, "ti_ldepth_i = %d\n", p->layer_info[i].ti_ldepth_i); + n += sysfs_emit_at(buf, n, "\tnumber_of_segments = %d\t", + p->layer_info[i].number_of_segments); + n += sysfs_emit_at(buf, n, "tmcc_errors = %d\n", p->layer_info[i].tmcc_errors); + } + + + debug_data->stats_count = n; + spin_unlock(&debug_data->lock); + + wake_up(&debug_data->stats_queue); +} + +static int smsdvb_stats_open(struct inode *inode, struct file *file) +{ + struct smsdvb_client_t *client = inode->i_private; + struct smsdvb_debugfs *debug_data = client->debug_data; + + kref_get(&debug_data->refcount); + + spin_lock(&debug_data->lock); + debug_data->stats_count = 0; + debug_data->stats_was_read = false; + spin_unlock(&debug_data->lock); + + file->private_data = debug_data; + + return 0; +} + +static void smsdvb_debugfs_data_release(struct kref *ref) +{ + struct smsdvb_debugfs *debug_data; + + debug_data = container_of(ref, struct smsdvb_debugfs, refcount); + kfree(debug_data); +} + +static int smsdvb_stats_wait_read(struct smsdvb_debugfs *debug_data) +{ + int rc = 1; + + spin_lock(&debug_data->lock); + + if (debug_data->stats_was_read) + goto exit; + + rc = debug_data->stats_count; + +exit: + spin_unlock(&debug_data->lock); + return rc; +} + +static __poll_t smsdvb_stats_poll(struct file *file, poll_table *wait) +{ + struct smsdvb_debugfs 
*debug_data = file->private_data; + int rc; + + kref_get(&debug_data->refcount); + + poll_wait(file, &debug_data->stats_queue, wait); + + rc = smsdvb_stats_wait_read(debug_data); + kref_put(&debug_data->refcount, smsdvb_debugfs_data_release); + + return rc > 0 ? EPOLLIN | EPOLLRDNORM : 0; +} + +static ssize_t smsdvb_stats_read(struct file *file, char __user *user_buf, + size_t nbytes, loff_t *ppos) +{ + int rc = 0, len; + struct smsdvb_debugfs *debug_data = file->private_data; + + kref_get(&debug_data->refcount); + + if (file->f_flags & O_NONBLOCK) { + rc = smsdvb_stats_wait_read(debug_data); + if (!rc) { + rc = -EWOULDBLOCK; + goto ret; + } + } else { + rc = wait_event_interruptible(debug_data->stats_queue, + smsdvb_stats_wait_read(debug_data)); + if (rc < 0) + goto ret; + } + + if (debug_data->stats_was_read) { + rc = 0; /* EOF */ + goto ret; + } + + len = debug_data->stats_count - *ppos; + if (len >= 0) + rc = simple_read_from_buffer(user_buf, nbytes, ppos, + debug_data->stats_data, len); + else + rc = 0; + + if (*ppos >= debug_data->stats_count) { + spin_lock(&debug_data->lock); + debug_data->stats_was_read = true; + spin_unlock(&debug_data->lock); + } +ret: + kref_put(&debug_data->refcount, smsdvb_debugfs_data_release); + return rc; +} + +static int smsdvb_stats_release(struct inode *inode, struct file *file) +{ + struct smsdvb_debugfs *debug_data = file->private_data; + + spin_lock(&debug_data->lock); + debug_data->stats_was_read = true; /* return EOF to read() */ + spin_unlock(&debug_data->lock); + wake_up_interruptible_sync(&debug_data->stats_queue); + + kref_put(&debug_data->refcount, smsdvb_debugfs_data_release); + file->private_data = NULL; + + return 0; +} + +static const struct file_operations debugfs_stats_ops = { + .open = smsdvb_stats_open, + .poll = smsdvb_stats_poll, + .read = smsdvb_stats_read, + .release = smsdvb_stats_release, + .llseek = generic_file_llseek, +}; + +/* + * Functions used by smsdvb, in order to create the interfaces + */ + +int smsdvb_debugfs_create(struct smsdvb_client_t *client) +{ + struct smscore_device_t *coredev = client->coredev; + struct dentry *d; + struct smsdvb_debugfs *debug_data; + + if (!smsdvb_debugfs_usb_root || !coredev->is_usb_device) + return -ENODEV; + + client->debugfs = debugfs_create_dir(coredev->devpath, + smsdvb_debugfs_usb_root); + if (IS_ERR_OR_NULL(client->debugfs)) { + pr_info("Unable to create debugfs %s directory.\n", + coredev->devpath); + return -ENODEV; + } + + d = debugfs_create_file("stats", S_IRUGO | S_IWUSR, client->debugfs, + client, &debugfs_stats_ops); + if (!d) { + debugfs_remove(client->debugfs); + return -ENOMEM; + } + + debug_data = kzalloc(sizeof(*client->debug_data), GFP_KERNEL); + if (!debug_data) + return -ENOMEM; + + client->debug_data = debug_data; + client->prt_dvb_stats = smsdvb_print_dvb_stats; + client->prt_isdb_stats = smsdvb_print_isdb_stats; + client->prt_isdb_stats_ex = smsdvb_print_isdb_stats_ex; + + init_waitqueue_head(&debug_data->stats_queue); + spin_lock_init(&debug_data->lock); + kref_init(&debug_data->refcount); + + return 0; +} + +void smsdvb_debugfs_release(struct smsdvb_client_t *client) +{ + if (!client->debugfs) + return; + + client->prt_dvb_stats = NULL; + client->prt_isdb_stats = NULL; + client->prt_isdb_stats_ex = NULL; + + debugfs_remove_recursive(client->debugfs); + kref_put(&client->debug_data->refcount, smsdvb_debugfs_data_release); + + client->debug_data = NULL; + client->debugfs = NULL; +} + +void smsdvb_debugfs_register(void) +{ + struct dentry *d; + + /* + * FIXME: This 
was written to debug Siano USB devices. So, it creates + * the debugfs node under /usb. + * A similar logic would be needed for Siano sdio devices, but, in that + * case, usb_debug_root is not a good choice. + * + * Perhaps the right fix here would be to create another sysfs root + * node for sdio-based boards, but this may need some logic at sdio + * subsystem. + */ + d = debugfs_create_dir("smsdvb", usb_debug_root); + if (IS_ERR_OR_NULL(d)) { + pr_err("Couldn't create sysfs node for smsdvb\n"); + return; + } + smsdvb_debugfs_usb_root = d; +} + +void smsdvb_debugfs_unregister(void) +{ + if (!smsdvb_debugfs_usb_root) + return; + debugfs_remove_recursive(smsdvb_debugfs_usb_root); + smsdvb_debugfs_usb_root = NULL; +} diff --git a/drivers/media/common/siano/smsdvb-main.c b/drivers/media/common/siano/smsdvb-main.c new file mode 100644 index 0000000000..f80caaa333 --- /dev/null +++ b/drivers/media/common/siano/smsdvb-main.c @@ -0,0 +1,1271 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/**************************************************************** + +Siano Mobile Silicon, Inc. +MDTV receiver kernel modules. +Copyright (C) 2006-2008, Uri Shkolnik + + +****************************************************************/ + +#include "smscoreapi.h" + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "sms-cards.h" + +#include "smsdvb.h" + +DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); + +static LIST_HEAD(g_smsdvb_clients); +static DEFINE_MUTEX(g_smsdvb_clientslock); + +static u32 sms_to_guard_interval_table[] = { + [0] = GUARD_INTERVAL_1_32, + [1] = GUARD_INTERVAL_1_16, + [2] = GUARD_INTERVAL_1_8, + [3] = GUARD_INTERVAL_1_4, +}; + +static u32 sms_to_code_rate_table[] = { + [0] = FEC_1_2, + [1] = FEC_2_3, + [2] = FEC_3_4, + [3] = FEC_5_6, + [4] = FEC_7_8, +}; + + +static u32 sms_to_hierarchy_table[] = { + [0] = HIERARCHY_NONE, + [1] = HIERARCHY_1, + [2] = HIERARCHY_2, + [3] = HIERARCHY_4, +}; + +static u32 sms_to_modulation_table[] = { + [0] = QPSK, + [1] = QAM_16, + [2] = QAM_64, + [3] = DQPSK, +}; + + +/* Events that may come from DVB v3 adapter */ +static void sms_board_dvb3_event(struct smsdvb_client_t *client, + enum SMS_DVB3_EVENTS event) { + + struct smscore_device_t *coredev = client->coredev; + switch (event) { + case DVB3_EVENT_INIT: + pr_debug("DVB3_EVENT_INIT\n"); + sms_board_event(coredev, BOARD_EVENT_BIND); + break; + case DVB3_EVENT_SLEEP: + pr_debug("DVB3_EVENT_SLEEP\n"); + sms_board_event(coredev, BOARD_EVENT_POWER_SUSPEND); + break; + case DVB3_EVENT_HOTPLUG: + pr_debug("DVB3_EVENT_HOTPLUG\n"); + sms_board_event(coredev, BOARD_EVENT_POWER_INIT); + break; + case DVB3_EVENT_FE_LOCK: + if (client->event_fe_state != DVB3_EVENT_FE_LOCK) { + client->event_fe_state = DVB3_EVENT_FE_LOCK; + pr_debug("DVB3_EVENT_FE_LOCK\n"); + sms_board_event(coredev, BOARD_EVENT_FE_LOCK); + } + break; + case DVB3_EVENT_FE_UNLOCK: + if (client->event_fe_state != DVB3_EVENT_FE_UNLOCK) { + client->event_fe_state = DVB3_EVENT_FE_UNLOCK; + pr_debug("DVB3_EVENT_FE_UNLOCK\n"); + sms_board_event(coredev, BOARD_EVENT_FE_UNLOCK); + } + break; + case DVB3_EVENT_UNC_OK: + if (client->event_unc_state != DVB3_EVENT_UNC_OK) { + client->event_unc_state = DVB3_EVENT_UNC_OK; + pr_debug("DVB3_EVENT_UNC_OK\n"); + sms_board_event(coredev, BOARD_EVENT_MULTIPLEX_OK); + } + break; + case DVB3_EVENT_UNC_ERR: + if (client->event_unc_state != DVB3_EVENT_UNC_ERR) { + client->event_unc_state = DVB3_EVENT_UNC_ERR; + pr_debug("DVB3_EVENT_UNC_ERR\n"); + sms_board_event(coredev, 
BOARD_EVENT_MULTIPLEX_ERRORS); + } + break; + + default: + pr_err("Unknown dvb3 api event\n"); + break; + } +} + +static void smsdvb_stats_not_ready(struct dvb_frontend *fe) +{ + struct smsdvb_client_t *client = + container_of(fe, struct smsdvb_client_t, frontend); + struct smscore_device_t *coredev = client->coredev; + struct dtv_frontend_properties *c = &fe->dtv_property_cache; + int i, n_layers; + + switch (smscore_get_device_mode(coredev)) { + case DEVICE_MODE_ISDBT: + case DEVICE_MODE_ISDBT_BDA: + n_layers = 4; + break; + default: + n_layers = 1; + } + + /* Global stats */ + c->strength.len = 1; + c->cnr.len = 1; + c->strength.stat[0].scale = FE_SCALE_DECIBEL; + c->cnr.stat[0].scale = FE_SCALE_DECIBEL; + + /* Per-layer stats */ + c->post_bit_error.len = n_layers; + c->post_bit_count.len = n_layers; + c->block_error.len = n_layers; + c->block_count.len = n_layers; + + /* + * Put all of them at FE_SCALE_NOT_AVAILABLE. They're dynamically + * changed when the stats become available. + */ + for (i = 0; i < n_layers; i++) { + c->post_bit_error.stat[i].scale = FE_SCALE_NOT_AVAILABLE; + c->post_bit_count.stat[i].scale = FE_SCALE_NOT_AVAILABLE; + c->block_error.stat[i].scale = FE_SCALE_NOT_AVAILABLE; + c->block_count.stat[i].scale = FE_SCALE_NOT_AVAILABLE; + } +} + +static inline int sms_to_mode(u32 mode) +{ + switch (mode) { + case 2: + return TRANSMISSION_MODE_2K; + case 4: + return TRANSMISSION_MODE_4K; + case 8: + return TRANSMISSION_MODE_8K; + } + return TRANSMISSION_MODE_AUTO; +} + +static inline int sms_to_isdbt_mode(u32 mode) +{ + switch (mode) { + case 1: + return TRANSMISSION_MODE_2K; + case 2: + return TRANSMISSION_MODE_4K; + case 3: + return TRANSMISSION_MODE_8K; + } + return TRANSMISSION_MODE_AUTO; +} + +static inline int sms_to_isdbt_guard_interval(u32 interval) +{ + switch (interval) { + case 4: + return GUARD_INTERVAL_1_4; + case 8: + return GUARD_INTERVAL_1_8; + case 16: + return GUARD_INTERVAL_1_16; + case 32: + return GUARD_INTERVAL_1_32; + } + return GUARD_INTERVAL_AUTO; +} + +static inline int sms_to_status(u32 is_demod_locked, u32 is_rf_locked) +{ + if (is_demod_locked) + return FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI | + FE_HAS_SYNC | FE_HAS_LOCK; + + if (is_rf_locked) + return FE_HAS_SIGNAL | FE_HAS_CARRIER; + + return 0; +} + +static inline u32 sms_to_bw(u32 value) +{ + return value * 1000000; +} + +#define convert_from_table(value, table, defval) ({ \ + u32 __ret; \ + if (value < ARRAY_SIZE(table)) \ + __ret = table[value]; \ + else \ + __ret = defval; \ + __ret; \ +}) + +#define sms_to_guard_interval(value) \ + convert_from_table(value, sms_to_guard_interval_table, \ + GUARD_INTERVAL_AUTO); + +#define sms_to_code_rate(value) \ + convert_from_table(value, sms_to_code_rate_table, \ + FEC_NONE); + +#define sms_to_hierarchy(value) \ + convert_from_table(value, sms_to_hierarchy_table, \ + FEC_NONE); + +#define sms_to_modulation(value) \ + convert_from_table(value, sms_to_modulation_table, \ + FEC_NONE); + +static void smsdvb_update_tx_params(struct smsdvb_client_t *client, + struct sms_tx_stats *p) +{ + struct dvb_frontend *fe = &client->frontend; + struct dtv_frontend_properties *c = &fe->dtv_property_cache; + + c->frequency = p->frequency; + client->fe_status = sms_to_status(p->is_demod_locked, 0); + c->bandwidth_hz = sms_to_bw(p->bandwidth); + c->transmission_mode = sms_to_mode(p->transmission_mode); + c->guard_interval = sms_to_guard_interval(p->guard_interval); + c->code_rate_HP = sms_to_code_rate(p->code_rate); + c->code_rate_LP = 
sms_to_code_rate(p->lp_code_rate); + c->hierarchy = sms_to_hierarchy(p->hierarchy); + c->modulation = sms_to_modulation(p->constellation); +} + +static void smsdvb_update_per_slices(struct smsdvb_client_t *client, + struct RECEPTION_STATISTICS_PER_SLICES_S *p) +{ + struct dvb_frontend *fe = &client->frontend; + struct dtv_frontend_properties *c = &fe->dtv_property_cache; + u64 tmp; + + client->fe_status = sms_to_status(p->is_demod_locked, p->is_rf_locked); + c->modulation = sms_to_modulation(p->constellation); + + /* signal Strength, in DBm */ + c->strength.stat[0].uvalue = p->in_band_power * 1000; + + /* Carrier to noise ratio, in DB */ + c->cnr.stat[0].svalue = p->snr * 1000; + + /* PER/BER requires demod lock */ + if (!p->is_demod_locked) + return; + + /* TS PER */ + client->last_per = c->block_error.stat[0].uvalue; + c->block_error.stat[0].scale = FE_SCALE_COUNTER; + c->block_count.stat[0].scale = FE_SCALE_COUNTER; + c->block_error.stat[0].uvalue += p->ets_packets; + c->block_count.stat[0].uvalue += p->ets_packets + p->ts_packets; + + /* ber */ + c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; + c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER; + c->post_bit_error.stat[0].uvalue += p->ber_error_count; + c->post_bit_count.stat[0].uvalue += p->ber_bit_count; + + /* Legacy PER/BER */ + tmp = p->ets_packets * 65535ULL; + if (p->ts_packets + p->ets_packets) + do_div(tmp, p->ts_packets + p->ets_packets); + client->legacy_per = tmp; +} + +static void smsdvb_update_dvb_stats(struct smsdvb_client_t *client, + struct sms_stats *p) +{ + struct dvb_frontend *fe = &client->frontend; + struct dtv_frontend_properties *c = &fe->dtv_property_cache; + + if (client->prt_dvb_stats) + client->prt_dvb_stats(client->debug_data, p); + + client->fe_status = sms_to_status(p->is_demod_locked, p->is_rf_locked); + + /* Update DVB modulation parameters */ + c->frequency = p->frequency; + client->fe_status = sms_to_status(p->is_demod_locked, 0); + c->bandwidth_hz = sms_to_bw(p->bandwidth); + c->transmission_mode = sms_to_mode(p->transmission_mode); + c->guard_interval = sms_to_guard_interval(p->guard_interval); + c->code_rate_HP = sms_to_code_rate(p->code_rate); + c->code_rate_LP = sms_to_code_rate(p->lp_code_rate); + c->hierarchy = sms_to_hierarchy(p->hierarchy); + c->modulation = sms_to_modulation(p->constellation); + + /* update reception data */ + c->lna = p->is_external_lna_on ? 
1 : 0; + + /* Carrier to noise ratio, in DB */ + c->cnr.stat[0].svalue = p->SNR * 1000; + + /* signal Strength, in DBm */ + c->strength.stat[0].uvalue = p->in_band_pwr * 1000; + + /* PER/BER requires demod lock */ + if (!p->is_demod_locked) + return; + + /* TS PER */ + client->last_per = c->block_error.stat[0].uvalue; + c->block_error.stat[0].scale = FE_SCALE_COUNTER; + c->block_count.stat[0].scale = FE_SCALE_COUNTER; + c->block_error.stat[0].uvalue += p->error_ts_packets; + c->block_count.stat[0].uvalue += p->total_ts_packets; + + /* ber */ + c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; + c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER; + c->post_bit_error.stat[0].uvalue += p->ber_error_count; + c->post_bit_count.stat[0].uvalue += p->ber_bit_count; + + /* Legacy PER/BER */ + client->legacy_ber = p->ber; +}; + +static void smsdvb_update_isdbt_stats(struct smsdvb_client_t *client, + struct sms_isdbt_stats *p) +{ + struct dvb_frontend *fe = &client->frontend; + struct dtv_frontend_properties *c = &fe->dtv_property_cache; + struct sms_isdbt_layer_stats *lr; + int i, n_layers; + + if (client->prt_isdb_stats) + client->prt_isdb_stats(client->debug_data, p); + + client->fe_status = sms_to_status(p->is_demod_locked, p->is_rf_locked); + + /* + * Firmware 2.1 seems to report only lock status and + * signal strength. The signal strength indicator is at the + * wrong field. + */ + if (p->statistics_type == 0) { + c->strength.stat[0].uvalue = ((s32)p->transmission_mode) * 1000; + c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; + return; + } + + /* Update ISDB-T transmission parameters */ + c->frequency = p->frequency; + c->bandwidth_hz = sms_to_bw(p->bandwidth); + c->transmission_mode = sms_to_isdbt_mode(p->transmission_mode); + c->guard_interval = sms_to_isdbt_guard_interval(p->guard_interval); + c->isdbt_partial_reception = p->partial_reception ? 1 : 0; + n_layers = p->num_of_layers; + if (n_layers < 1) + n_layers = 1; + if (n_layers > 3) + n_layers = 3; + c->isdbt_layer_enabled = 0; + + /* update reception data */ + c->lna = p->is_external_lna_on ? 
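
[Editorial aside, not part of the patch: DVBv5 statistics store decibel values as signed integers scaled by 1000 (0.001 dB, or 0.001 dBm for signal strength), which is why the code multiplies the firmware's SNR and in-band power readings by 1000 before storing them. A tiny sketch of that fixed-point conversion.]

    /* Illustrative sketch only. */
    #include <stdint.h>

    static int64_t ex_db_to_millidb(int32_t db)
    {
            return (int64_t)db * 1000;      /* e.g.  21 dB  ->  21000 */
    }

    static int64_t ex_dbm_to_millidbm(int32_t dbm)
    {
            return (int64_t)dbm * 1000;     /* e.g. -47 dBm -> -47000 */
    }
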
1 : 0; + + /* Carrier to noise ratio, in DB */ + c->cnr.stat[0].svalue = p->SNR * 1000; + + /* signal Strength, in DBm */ + c->strength.stat[0].uvalue = p->in_band_pwr * 1000; + + /* PER/BER and per-layer stats require demod lock */ + if (!p->is_demod_locked) + return; + + client->last_per = c->block_error.stat[0].uvalue; + + /* Clears global counters, as the code below will sum it again */ + c->block_error.stat[0].uvalue = 0; + c->block_count.stat[0].uvalue = 0; + c->block_error.stat[0].scale = FE_SCALE_COUNTER; + c->block_count.stat[0].scale = FE_SCALE_COUNTER; + c->post_bit_error.stat[0].uvalue = 0; + c->post_bit_count.stat[0].uvalue = 0; + c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; + c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER; + + for (i = 0; i < n_layers; i++) { + lr = &p->layer_info[i]; + + /* Update per-layer transmission parameters */ + if (lr->number_of_segments > 0 && lr->number_of_segments < 13) { + c->isdbt_layer_enabled |= 1 << i; + c->layer[i].segment_count = lr->number_of_segments; + } else { + continue; + } + c->layer[i].modulation = sms_to_modulation(lr->constellation); + c->layer[i].fec = sms_to_code_rate(lr->code_rate); + + /* Time interleaving */ + c->layer[i].interleaving = (u8)lr->ti_ldepth_i; + + /* TS PER */ + c->block_error.stat[i + 1].scale = FE_SCALE_COUNTER; + c->block_count.stat[i + 1].scale = FE_SCALE_COUNTER; + c->block_error.stat[i + 1].uvalue += lr->error_ts_packets; + c->block_count.stat[i + 1].uvalue += lr->total_ts_packets; + + /* Update global PER counter */ + c->block_error.stat[0].uvalue += lr->error_ts_packets; + c->block_count.stat[0].uvalue += lr->total_ts_packets; + + /* BER */ + c->post_bit_error.stat[i + 1].scale = FE_SCALE_COUNTER; + c->post_bit_count.stat[i + 1].scale = FE_SCALE_COUNTER; + c->post_bit_error.stat[i + 1].uvalue += lr->ber_error_count; + c->post_bit_count.stat[i + 1].uvalue += lr->ber_bit_count; + + /* Update global BER counter */ + c->post_bit_error.stat[0].uvalue += lr->ber_error_count; + c->post_bit_count.stat[0].uvalue += lr->ber_bit_count; + } +} + +static void smsdvb_update_isdbt_stats_ex(struct smsdvb_client_t *client, + struct sms_isdbt_stats_ex *p) +{ + struct dvb_frontend *fe = &client->frontend; + struct dtv_frontend_properties *c = &fe->dtv_property_cache; + struct sms_isdbt_layer_stats *lr; + int i, n_layers; + + if (client->prt_isdb_stats_ex) + client->prt_isdb_stats_ex(client->debug_data, p); + + /* Update ISDB-T transmission parameters */ + c->frequency = p->frequency; + client->fe_status = sms_to_status(p->is_demod_locked, 0); + c->bandwidth_hz = sms_to_bw(p->bandwidth); + c->transmission_mode = sms_to_isdbt_mode(p->transmission_mode); + c->guard_interval = sms_to_isdbt_guard_interval(p->guard_interval); + c->isdbt_partial_reception = p->partial_reception ? 1 : 0; + n_layers = p->num_of_layers; + if (n_layers < 1) + n_layers = 1; + if (n_layers > 3) + n_layers = 3; + c->isdbt_layer_enabled = 0; + + /* update reception data */ + c->lna = p->is_external_lna_on ? 
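
[Editorial aside, not part of the patch: the ISDB-T loops above maintain one DVBv5 counter layout throughout -- index 0 of each stat array carries the sum over all layers, while indices 1..n carry the per-layer counters. A compact sketch of that layout; the names below are invented for the example.]

    /* Illustrative sketch only. */
    #define EX_MAX_LAYERS 3

    struct ex_counters {
            unsigned long long global;                   /* like stat[0]     */
            unsigned long long layer[EX_MAX_LAYERS];     /* like stat[1..n]  */
    };

    static void ex_add_errors(struct ex_counters *c, int layer,
                              unsigned int errors)
    {
            c->layer[layer] += errors;      /* per-layer counter            */
            c->global      += errors;       /* global value = sum of layers */
    }
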
1 : 0; + + /* Carrier to noise ratio, in DB */ + c->cnr.stat[0].svalue = p->SNR * 1000; + + /* signal Strength, in DBm */ + c->strength.stat[0].uvalue = p->in_band_pwr * 1000; + + /* PER/BER and per-layer stats require demod lock */ + if (!p->is_demod_locked) + return; + + client->last_per = c->block_error.stat[0].uvalue; + + /* Clears global counters, as the code below will sum it again */ + c->block_error.stat[0].uvalue = 0; + c->block_count.stat[0].uvalue = 0; + c->block_error.stat[0].scale = FE_SCALE_COUNTER; + c->block_count.stat[0].scale = FE_SCALE_COUNTER; + c->post_bit_error.stat[0].uvalue = 0; + c->post_bit_count.stat[0].uvalue = 0; + c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; + c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER; + + c->post_bit_error.len = n_layers + 1; + c->post_bit_count.len = n_layers + 1; + c->block_error.len = n_layers + 1; + c->block_count.len = n_layers + 1; + for (i = 0; i < n_layers; i++) { + lr = &p->layer_info[i]; + + /* Update per-layer transmission parameters */ + if (lr->number_of_segments > 0 && lr->number_of_segments < 13) { + c->isdbt_layer_enabled |= 1 << i; + c->layer[i].segment_count = lr->number_of_segments; + } else { + continue; + } + c->layer[i].modulation = sms_to_modulation(lr->constellation); + c->layer[i].fec = sms_to_code_rate(lr->code_rate); + + /* Time interleaving */ + c->layer[i].interleaving = (u8)lr->ti_ldepth_i; + + /* TS PER */ + c->block_error.stat[i + 1].scale = FE_SCALE_COUNTER; + c->block_count.stat[i + 1].scale = FE_SCALE_COUNTER; + c->block_error.stat[i + 1].uvalue += lr->error_ts_packets; + c->block_count.stat[i + 1].uvalue += lr->total_ts_packets; + + /* Update global PER counter */ + c->block_error.stat[0].uvalue += lr->error_ts_packets; + c->block_count.stat[0].uvalue += lr->total_ts_packets; + + /* ber */ + c->post_bit_error.stat[i + 1].scale = FE_SCALE_COUNTER; + c->post_bit_count.stat[i + 1].scale = FE_SCALE_COUNTER; + c->post_bit_error.stat[i + 1].uvalue += lr->ber_error_count; + c->post_bit_count.stat[i + 1].uvalue += lr->ber_bit_count; + + /* Update global ber counter */ + c->post_bit_error.stat[0].uvalue += lr->ber_error_count; + c->post_bit_count.stat[0].uvalue += lr->ber_bit_count; + } +} + +static int smsdvb_onresponse(void *context, struct smscore_buffer_t *cb) +{ + struct smsdvb_client_t *client = (struct smsdvb_client_t *) context; + struct sms_msg_hdr *phdr = (struct sms_msg_hdr *) (((u8 *) cb->p) + + cb->offset); + void *p = phdr + 1; + struct dvb_frontend *fe = &client->frontend; + struct dtv_frontend_properties *c = &fe->dtv_property_cache; + bool is_status_update = false; + + switch (phdr->msg_type) { + case MSG_SMS_DVBT_BDA_DATA: + /* + * Only feed data to dvb demux if are there any feed listening + * to it and if the device has tuned + */ + if (client->feed_users && client->has_tuned) + dvb_dmx_swfilter(&client->demux, p, + cb->size - sizeof(struct sms_msg_hdr)); + break; + + case MSG_SMS_RF_TUNE_RES: + case MSG_SMS_ISDBT_TUNE_RES: + complete(&client->tune_done); + break; + + case MSG_SMS_SIGNAL_DETECTED_IND: + client->fe_status = FE_HAS_SIGNAL | FE_HAS_CARRIER | + FE_HAS_VITERBI | FE_HAS_SYNC | + FE_HAS_LOCK; + + is_status_update = true; + break; + + case MSG_SMS_NO_SIGNAL_IND: + client->fe_status = 0; + + is_status_update = true; + break; + + case MSG_SMS_TRANSMISSION_IND: + smsdvb_update_tx_params(client, p); + + is_status_update = true; + break; + + case MSG_SMS_HO_PER_SLICES_IND: + smsdvb_update_per_slices(client, p); + + is_status_update = true; + break; + + case 
MSG_SMS_GET_STATISTICS_RES: + switch (smscore_get_device_mode(client->coredev)) { + case DEVICE_MODE_ISDBT: + case DEVICE_MODE_ISDBT_BDA: + smsdvb_update_isdbt_stats(client, p); + break; + default: + /* Skip sms_msg_statistics_info:request_result field */ + smsdvb_update_dvb_stats(client, p + sizeof(u32)); + } + + is_status_update = true; + break; + + /* Only for ISDB-T */ + case MSG_SMS_GET_STATISTICS_EX_RES: + /* Skip sms_msg_statistics_info:request_result field? */ + smsdvb_update_isdbt_stats_ex(client, p + sizeof(u32)); + is_status_update = true; + break; + default: + pr_debug("message not handled\n"); + } + smscore_putbuffer(client->coredev, cb); + + if (is_status_update) { + if (client->fe_status & FE_HAS_LOCK) { + sms_board_dvb3_event(client, DVB3_EVENT_FE_LOCK); + if (client->last_per == c->block_error.stat[0].uvalue) + sms_board_dvb3_event(client, DVB3_EVENT_UNC_OK); + else + sms_board_dvb3_event(client, DVB3_EVENT_UNC_ERR); + client->has_tuned = true; + } else { + smsdvb_stats_not_ready(fe); + client->has_tuned = false; + sms_board_dvb3_event(client, DVB3_EVENT_FE_UNLOCK); + } + complete(&client->stats_done); + } + + return 0; +} + +static void smsdvb_media_device_unregister(struct smsdvb_client_t *client) +{ +#ifdef CONFIG_MEDIA_CONTROLLER_DVB + struct smscore_device_t *coredev = client->coredev; + + if (!coredev->media_dev) + return; + media_device_unregister(coredev->media_dev); + media_device_cleanup(coredev->media_dev); + kfree(coredev->media_dev); + coredev->media_dev = NULL; +#endif +} + +static void smsdvb_unregister_client(struct smsdvb_client_t *client) +{ + /* must be called under clientslock */ + + list_del(&client->entry); + + smsdvb_debugfs_release(client); + smscore_unregister_client(client->smsclient); + dvb_unregister_frontend(&client->frontend); + dvb_dmxdev_release(&client->dmxdev); + dvb_dmx_release(&client->demux); + smsdvb_media_device_unregister(client); + dvb_unregister_adapter(&client->adapter); + kfree(client); +} + +static void smsdvb_onremove(void *context) +{ + mutex_lock(&g_smsdvb_clientslock); + + smsdvb_unregister_client((struct smsdvb_client_t *) context); + + mutex_unlock(&g_smsdvb_clientslock); +} + +static int smsdvb_start_feed(struct dvb_demux_feed *feed) +{ + struct smsdvb_client_t *client = + container_of(feed->demux, struct smsdvb_client_t, demux); + struct sms_msg_data pid_msg; + + pr_debug("add pid %d(%x)\n", + feed->pid, feed->pid); + + client->feed_users++; + + pid_msg.x_msg_header.msg_src_id = DVBT_BDA_CONTROL_MSG_ID; + pid_msg.x_msg_header.msg_dst_id = HIF_TASK; + pid_msg.x_msg_header.msg_flags = 0; + pid_msg.x_msg_header.msg_type = MSG_SMS_ADD_PID_FILTER_REQ; + pid_msg.x_msg_header.msg_length = sizeof(pid_msg); + pid_msg.msg_data[0] = feed->pid; + + return smsclient_sendrequest(client->smsclient, + &pid_msg, sizeof(pid_msg)); +} + +static int smsdvb_stop_feed(struct dvb_demux_feed *feed) +{ + struct smsdvb_client_t *client = + container_of(feed->demux, struct smsdvb_client_t, demux); + struct sms_msg_data pid_msg; + + pr_debug("remove pid %d(%x)\n", + feed->pid, feed->pid); + + client->feed_users--; + + pid_msg.x_msg_header.msg_src_id = DVBT_BDA_CONTROL_MSG_ID; + pid_msg.x_msg_header.msg_dst_id = HIF_TASK; + pid_msg.x_msg_header.msg_flags = 0; + pid_msg.x_msg_header.msg_type = MSG_SMS_REMOVE_PID_FILTER_REQ; + pid_msg.x_msg_header.msg_length = sizeof(pid_msg); + pid_msg.msg_data[0] = feed->pid; + + return smsclient_sendrequest(client->smsclient, + &pid_msg, sizeof(pid_msg)); +} + +static int smsdvb_sendrequest_and_wait(struct 
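
[Editorial aside, not part of the patch: every request sent to the firmware follows the same framing used by smsdvb_start_feed()/smsdvb_stop_feed() above -- a fixed message header followed by a small payload, with the length field covering the whole message. A simplified sketch of that framing; the struct and constant names are invented stand-ins, not the driver's real types.]

    /* Illustrative sketch only. */
    #include <stdint.h>
    #include <string.h>

    struct ex_msg_hdr {
            uint16_t src_id, dst_id;
            uint16_t type, length, flags;
    };

    struct ex_pid_msg {
            struct ex_msg_hdr hdr;
            uint32_t pid;                   /* single-element payload */
    };

    static void ex_build_pid_request(struct ex_pid_msg *m, uint32_t pid,
                                     uint16_t request_type)
    {
            memset(m, 0, sizeof(*m));
            m->hdr.type   = request_type;   /* e.g. an "add PID filter" request */
            m->hdr.length = sizeof(*m);     /* header + payload                 */
            m->pid        = pid;
    }
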
smsdvb_client_t *client, + void *buffer, size_t size, + struct completion *completion) +{ + int rc; + + rc = smsclient_sendrequest(client->smsclient, buffer, size); + if (rc < 0) + return rc; + + return wait_for_completion_timeout(completion, + msecs_to_jiffies(2000)) ? + 0 : -ETIME; +} + +static int smsdvb_send_statistics_request(struct smsdvb_client_t *client) +{ + int rc; + struct sms_msg_hdr msg; + + /* Don't request stats too fast */ + if (client->get_stats_jiffies && + (!time_after(jiffies, client->get_stats_jiffies))) + return 0; + client->get_stats_jiffies = jiffies + msecs_to_jiffies(100); + + msg.msg_src_id = DVBT_BDA_CONTROL_MSG_ID; + msg.msg_dst_id = HIF_TASK; + msg.msg_flags = 0; + msg.msg_length = sizeof(msg); + + switch (smscore_get_device_mode(client->coredev)) { + case DEVICE_MODE_ISDBT: + case DEVICE_MODE_ISDBT_BDA: + /* + * Check for firmware version, to avoid breaking for old cards + */ + if (client->coredev->fw_version >= 0x800) + msg.msg_type = MSG_SMS_GET_STATISTICS_EX_REQ; + else + msg.msg_type = MSG_SMS_GET_STATISTICS_REQ; + break; + default: + msg.msg_type = MSG_SMS_GET_STATISTICS_REQ; + } + + rc = smsdvb_sendrequest_and_wait(client, &msg, sizeof(msg), + &client->stats_done); + + return rc; +} + +static inline int led_feedback(struct smsdvb_client_t *client) +{ + if (!(client->fe_status & FE_HAS_LOCK)) + return sms_board_led_feedback(client->coredev, SMS_LED_OFF); + + return sms_board_led_feedback(client->coredev, + (client->legacy_ber == 0) ? + SMS_LED_HI : SMS_LED_LO); +} + +static int smsdvb_read_status(struct dvb_frontend *fe, enum fe_status *stat) +{ + int rc; + struct smsdvb_client_t *client; + client = container_of(fe, struct smsdvb_client_t, frontend); + + rc = smsdvb_send_statistics_request(client); + + *stat = client->fe_status; + + led_feedback(client); + + return rc; +} + +static int smsdvb_read_ber(struct dvb_frontend *fe, u32 *ber) +{ + int rc; + struct smsdvb_client_t *client; + + client = container_of(fe, struct smsdvb_client_t, frontend); + + rc = smsdvb_send_statistics_request(client); + + *ber = client->legacy_ber; + + led_feedback(client); + + return rc; +} + +static int smsdvb_read_signal_strength(struct dvb_frontend *fe, u16 *strength) +{ + struct dtv_frontend_properties *c = &fe->dtv_property_cache; + int rc; + s32 power = (s32) c->strength.stat[0].uvalue; + struct smsdvb_client_t *client; + + client = container_of(fe, struct smsdvb_client_t, frontend); + + rc = smsdvb_send_statistics_request(client); + + if (power < -95) + *strength = 0; + else if (power > -29) + *strength = 65535; + else + *strength = (power + 95) * 65535 / 66; + + led_feedback(client); + + return rc; +} + +static int smsdvb_read_snr(struct dvb_frontend *fe, u16 *snr) +{ + struct dtv_frontend_properties *c = &fe->dtv_property_cache; + int rc; + struct smsdvb_client_t *client; + + client = container_of(fe, struct smsdvb_client_t, frontend); + + rc = smsdvb_send_statistics_request(client); + + /* Preferred scale for SNR with legacy API: 0.1 dB */ + *snr = ((u32)c->cnr.stat[0].svalue) / 100; + + led_feedback(client); + + return rc; +} + +static int smsdvb_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) +{ + int rc; + struct dtv_frontend_properties *c = &fe->dtv_property_cache; + struct smsdvb_client_t *client; + + client = container_of(fe, struct smsdvb_client_t, frontend); + + rc = smsdvb_send_statistics_request(client); + + *ucblocks = c->block_error.stat[0].uvalue; + + led_feedback(client); + + return rc; +} + +static int smsdvb_get_tune_settings(struct dvb_frontend 
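
[Editorial aside, not part of the patch: the legacy read_signal_strength() path above clamps the reported power to a -95..-29 window and spreads that 66-unit span linearly over the 16-bit range the DVBv3 ioctl expects. The same mapping in isolation, assuming the input is expressed in dBm.]

    /* Illustrative sketch only. */
    #include <stdint.h>

    static uint16_t ex_power_to_strength(int32_t power_dbm)
    {
            if (power_dbm < -95)
                    return 0;
            if (power_dbm > -29)
                    return 65535;
            /* -95..-29 dBm (a 66 dB span) mapped linearly onto 0..65535 */
            return (uint16_t)(((int64_t)power_dbm + 95) * 65535 / 66);
    }
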
*fe, + struct dvb_frontend_tune_settings *tune) +{ + pr_debug("\n"); + + tune->min_delay_ms = 400; + tune->step_size = 250000; + tune->max_drift = 0; + return 0; +} + +static int smsdvb_dvbt_set_frontend(struct dvb_frontend *fe) +{ + struct dtv_frontend_properties *c = &fe->dtv_property_cache; + struct smsdvb_client_t *client = + container_of(fe, struct smsdvb_client_t, frontend); + + struct { + struct sms_msg_hdr msg; + u32 Data[3]; + } msg; + + int ret; + + client->fe_status = 0; + client->event_fe_state = -1; + client->event_unc_state = -1; + fe->dtv_property_cache.delivery_system = SYS_DVBT; + + msg.msg.msg_src_id = DVBT_BDA_CONTROL_MSG_ID; + msg.msg.msg_dst_id = HIF_TASK; + msg.msg.msg_flags = 0; + msg.msg.msg_type = MSG_SMS_RF_TUNE_REQ; + msg.msg.msg_length = sizeof(msg); + msg.Data[0] = c->frequency; + msg.Data[2] = 12000000; + + pr_debug("%s: freq %d band %d\n", __func__, c->frequency, + c->bandwidth_hz); + + switch (c->bandwidth_hz / 1000000) { + case 8: + msg.Data[1] = BW_8_MHZ; + break; + case 7: + msg.Data[1] = BW_7_MHZ; + break; + case 6: + msg.Data[1] = BW_6_MHZ; + break; + case 0: + return -EOPNOTSUPP; + default: + return -EINVAL; + } + /* Disable LNA, if any. An error is returned if no LNA is present */ + ret = sms_board_lna_control(client->coredev, 0); + if (ret == 0) { + enum fe_status status; + + /* tune with LNA off at first */ + ret = smsdvb_sendrequest_and_wait(client, &msg, sizeof(msg), + &client->tune_done); + + smsdvb_read_status(fe, &status); + + if (status & FE_HAS_LOCK) + return ret; + + /* previous tune didn't lock - enable LNA and tune again */ + sms_board_lna_control(client->coredev, 1); + } + + return smsdvb_sendrequest_and_wait(client, &msg, sizeof(msg), + &client->tune_done); +} + +static int smsdvb_isdbt_set_frontend(struct dvb_frontend *fe) +{ + struct dtv_frontend_properties *c = &fe->dtv_property_cache; + struct smsdvb_client_t *client = + container_of(fe, struct smsdvb_client_t, frontend); + int board_id = smscore_get_board_id(client->coredev); + struct sms_board *board = sms_get_board(board_id); + enum sms_device_type_st type = board->type; + int ret; + + struct { + struct sms_msg_hdr msg; + u32 Data[4]; + } msg; + + fe->dtv_property_cache.delivery_system = SYS_ISDBT; + + msg.msg.msg_src_id = DVBT_BDA_CONTROL_MSG_ID; + msg.msg.msg_dst_id = HIF_TASK; + msg.msg.msg_flags = 0; + msg.msg.msg_type = MSG_SMS_ISDBT_TUNE_REQ; + msg.msg.msg_length = sizeof(msg); + + if (c->isdbt_sb_segment_idx == -1) + c->isdbt_sb_segment_idx = 0; + + if (!c->isdbt_layer_enabled) + c->isdbt_layer_enabled = 7; + + msg.Data[0] = c->frequency; + msg.Data[1] = BW_ISDBT_1SEG; + msg.Data[2] = 12000000; + msg.Data[3] = c->isdbt_sb_segment_idx; + + if (c->isdbt_partial_reception) { + if ((type == SMS_PELE || type == SMS_RIO) && + c->isdbt_sb_segment_count > 3) + msg.Data[1] = BW_ISDBT_13SEG; + else if (c->isdbt_sb_segment_count > 1) + msg.Data[1] = BW_ISDBT_3SEG; + } else if (type == SMS_PELE || type == SMS_RIO) + msg.Data[1] = BW_ISDBT_13SEG; + + c->bandwidth_hz = 6000000; + + pr_debug("freq %d segwidth %d segindex %d\n", + c->frequency, c->isdbt_sb_segment_count, + c->isdbt_sb_segment_idx); + + /* Disable LNA, if any. 
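
[Editorial aside, not part of the patch: the DVB-T tune request above converts the DVBv5 bandwidth_hz property into one of the firmware's discrete bandwidth codes and rejects anything else (the driver distinguishes "0 / auto" from genuinely invalid widths). A stand-alone sketch of that mapping; the EX_BW_* codes are invented stand-ins for the driver's constants.]

    /* Illustrative sketch only. */
    enum ex_bw { EX_BW_8_MHZ, EX_BW_7_MHZ, EX_BW_6_MHZ };

    static int ex_bw_hz_to_code(unsigned int bandwidth_hz, enum ex_bw *code)
    {
            switch (bandwidth_hz / 1000000) {
            case 8: *code = EX_BW_8_MHZ; return 0;
            case 7: *code = EX_BW_7_MHZ; return 0;
            case 6: *code = EX_BW_6_MHZ; return 0;
            default: return -1;             /* unsupported bandwidth */
            }
    }
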
An error is returned if no LNA is present */ + ret = sms_board_lna_control(client->coredev, 0); + if (ret == 0) { + enum fe_status status; + + /* tune with LNA off at first */ + ret = smsdvb_sendrequest_and_wait(client, &msg, sizeof(msg), + &client->tune_done); + + smsdvb_read_status(fe, &status); + + if (status & FE_HAS_LOCK) + return ret; + + /* previous tune didn't lock - enable LNA and tune again */ + sms_board_lna_control(client->coredev, 1); + } + return smsdvb_sendrequest_and_wait(client, &msg, sizeof(msg), + &client->tune_done); +} + +static int smsdvb_set_frontend(struct dvb_frontend *fe) +{ + struct dtv_frontend_properties *c = &fe->dtv_property_cache; + struct smsdvb_client_t *client = + container_of(fe, struct smsdvb_client_t, frontend); + struct smscore_device_t *coredev = client->coredev; + + smsdvb_stats_not_ready(fe); + c->strength.stat[0].uvalue = 0; + c->cnr.stat[0].uvalue = 0; + + client->has_tuned = false; + + switch (smscore_get_device_mode(coredev)) { + case DEVICE_MODE_DVBT: + case DEVICE_MODE_DVBT_BDA: + return smsdvb_dvbt_set_frontend(fe); + case DEVICE_MODE_ISDBT: + case DEVICE_MODE_ISDBT_BDA: + return smsdvb_isdbt_set_frontend(fe); + default: + return -EINVAL; + } +} + +static int smsdvb_init(struct dvb_frontend *fe) +{ + struct smsdvb_client_t *client = + container_of(fe, struct smsdvb_client_t, frontend); + + sms_board_power(client->coredev, 1); + + sms_board_dvb3_event(client, DVB3_EVENT_INIT); + return 0; +} + +static int smsdvb_sleep(struct dvb_frontend *fe) +{ + struct smsdvb_client_t *client = + container_of(fe, struct smsdvb_client_t, frontend); + + sms_board_led_feedback(client->coredev, SMS_LED_OFF); + sms_board_power(client->coredev, 0); + + sms_board_dvb3_event(client, DVB3_EVENT_SLEEP); + + return 0; +} + +static void smsdvb_release(struct dvb_frontend *fe) +{ + /* do nothing */ +} + +static const struct dvb_frontend_ops smsdvb_fe_ops = { + .info = { + .name = "Siano Mobile Digital MDTV Receiver", + .frequency_min_hz = 44250 * kHz, + .frequency_max_hz = 867250 * kHz, + .frequency_stepsize_hz = 250 * kHz, + .caps = FE_CAN_INVERSION_AUTO | + FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | + FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | + FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | + FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO | + FE_CAN_GUARD_INTERVAL_AUTO | + FE_CAN_RECOVER | + FE_CAN_HIERARCHY_AUTO, + }, + + .release = smsdvb_release, + + .set_frontend = smsdvb_set_frontend, + .get_tune_settings = smsdvb_get_tune_settings, + + .read_status = smsdvb_read_status, + .read_ber = smsdvb_read_ber, + .read_signal_strength = smsdvb_read_signal_strength, + .read_snr = smsdvb_read_snr, + .read_ucblocks = smsdvb_read_ucblocks, + + .init = smsdvb_init, + .sleep = smsdvb_sleep, +}; + +static int smsdvb_hotplug(struct smscore_device_t *coredev, + struct device *device, int arrival) +{ + struct smsclient_params_t params; + struct smsdvb_client_t *client; + int rc; + + /* device removal handled by onremove callback */ + if (!arrival) + return 0; + client = kzalloc(sizeof(struct smsdvb_client_t), GFP_KERNEL); + if (!client) + return -ENOMEM; + + /* register dvb adapter */ + rc = dvb_register_adapter(&client->adapter, + sms_get_board( + smscore_get_board_id(coredev))->name, + THIS_MODULE, device, adapter_nr); + if (rc < 0) { + pr_err("dvb_register_adapter() failed %d\n", rc); + goto adapter_error; + } + dvb_register_media_controller(&client->adapter, coredev->media_dev); + + /* init dvb demux */ + client->demux.dmx.capabilities = DMX_TS_FILTERING; + 
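
[Editorial aside, not part of the patch: both set_frontend paths above use the same tuning sequence -- tune once with the LNA disabled, and only if that attempt fails to lock, enable the LNA and tune again (the driver additionally skips the first pass when no LNA is present). A generic sketch of that retry shape; the callback types are invented.]

    /* Illustrative sketch only. */
    typedef int  (*ex_tune_fn)(void *ctx);      /* 0 on success          */
    typedef int  (*ex_locked_fn)(void *ctx);    /* non-zero when locked  */
    typedef void (*ex_lna_fn)(void *ctx, int on);

    static int ex_tune_with_lna_retry(void *ctx, ex_tune_fn tune,
                                      ex_locked_fn locked, ex_lna_fn lna)
    {
            int ret;

            lna(ctx, 0);                    /* first attempt with LNA off */
            ret = tune(ctx);
            if (locked(ctx))
                    return ret;

            lna(ctx, 1);                    /* no lock: retry with LNA on */
            return tune(ctx);
    }
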
client->demux.filternum = 32; /* todo: nova ??? */ + client->demux.feednum = 32; + client->demux.start_feed = smsdvb_start_feed; + client->demux.stop_feed = smsdvb_stop_feed; + + rc = dvb_dmx_init(&client->demux); + if (rc < 0) { + pr_err("dvb_dmx_init failed %d\n", rc); + goto dvbdmx_error; + } + + /* init dmxdev */ + client->dmxdev.filternum = 32; + client->dmxdev.demux = &client->demux.dmx; + client->dmxdev.capabilities = 0; + + rc = dvb_dmxdev_init(&client->dmxdev, &client->adapter); + if (rc < 0) { + pr_err("dvb_dmxdev_init failed %d\n", rc); + goto dmxdev_error; + } + + /* init and register frontend */ + memcpy(&client->frontend.ops, &smsdvb_fe_ops, + sizeof(struct dvb_frontend_ops)); + + switch (smscore_get_device_mode(coredev)) { + case DEVICE_MODE_DVBT: + case DEVICE_MODE_DVBT_BDA: + client->frontend.ops.delsys[0] = SYS_DVBT; + break; + case DEVICE_MODE_ISDBT: + case DEVICE_MODE_ISDBT_BDA: + client->frontend.ops.delsys[0] = SYS_ISDBT; + break; + } + + rc = dvb_register_frontend(&client->adapter, &client->frontend); + if (rc < 0) { + pr_err("frontend registration failed %d\n", rc); + goto frontend_error; + } + + params.initial_id = 1; + params.data_type = MSG_SMS_DVBT_BDA_DATA; + params.onresponse_handler = smsdvb_onresponse; + params.onremove_handler = smsdvb_onremove; + params.context = client; + + rc = smscore_register_client(coredev, ¶ms, &client->smsclient); + if (rc < 0) { + pr_err("smscore_register_client() failed %d\n", rc); + goto client_error; + } + + client->coredev = coredev; + + init_completion(&client->tune_done); + init_completion(&client->stats_done); + + mutex_lock(&g_smsdvb_clientslock); + + list_add(&client->entry, &g_smsdvb_clients); + + mutex_unlock(&g_smsdvb_clientslock); + + client->event_fe_state = -1; + client->event_unc_state = -1; + sms_board_dvb3_event(client, DVB3_EVENT_HOTPLUG); + + sms_board_setup(coredev); + + if (smsdvb_debugfs_create(client) < 0) + pr_info("failed to create debugfs node\n"); + + rc = dvb_create_media_graph(&client->adapter, true); + if (rc < 0) { + pr_err("dvb_create_media_graph failed %d\n", rc); + goto media_graph_error; + } + + pr_info("DVB interface registered.\n"); + return 0; + +media_graph_error: + mutex_lock(&g_smsdvb_clientslock); + list_del(&client->entry); + mutex_unlock(&g_smsdvb_clientslock); + + smsdvb_debugfs_release(client); + +client_error: + dvb_unregister_frontend(&client->frontend); + +frontend_error: + dvb_dmxdev_release(&client->dmxdev); + +dmxdev_error: + dvb_dmx_release(&client->demux); + +dvbdmx_error: + smsdvb_media_device_unregister(client); + dvb_unregister_adapter(&client->adapter); + +adapter_error: + kfree(client); + return rc; +} + +static int __init smsdvb_module_init(void) +{ + int rc; + + smsdvb_debugfs_register(); + + rc = smscore_register_hotplug(smsdvb_hotplug); + + pr_debug("\n"); + + return rc; +} + +static void __exit smsdvb_module_exit(void) +{ + smscore_unregister_hotplug(smsdvb_hotplug); + + mutex_lock(&g_smsdvb_clientslock); + + while (!list_empty(&g_smsdvb_clients)) + smsdvb_unregister_client((struct smsdvb_client_t *)g_smsdvb_clients.next); + + smsdvb_debugfs_unregister(); + + mutex_unlock(&g_smsdvb_clientslock); +} + +module_init(smsdvb_module_init); +module_exit(smsdvb_module_exit); + +MODULE_DESCRIPTION("SMS DVB subsystem adaptation module"); +MODULE_AUTHOR("Siano Mobile Silicon, Inc. 
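
[Editorial aside, not part of the patch: the hotplug registration path above follows the usual kernel idiom of unwinding with goto labels in reverse order of setup, so every failure point releases exactly what was already acquired. A compact stand-alone sketch of that idiom; the ex_* steps are invented placeholders.]

    /* Illustrative sketch only. */
    static int  ex_step_a(void) { return 0; }
    static int  ex_step_b(void) { return 0; }
    static int  ex_step_c(void) { return 0; }
    static void ex_undo_a(void) { }
    static void ex_undo_b(void) { }

    static int ex_setup(void)
    {
            int rc;

            rc = ex_step_a();               /* e.g. register adapter  */
            if (rc)
                    return rc;
            rc = ex_step_b();               /* e.g. init demux        */
            if (rc)
                    goto undo_a;
            rc = ex_step_c();               /* e.g. register frontend */
            if (rc)
                    goto undo_b;
            return 0;

    undo_b:
            ex_undo_b();
    undo_a:
            ex_undo_a();
            return rc;
    }
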
(uris@siano-ms.com)"); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/common/siano/smsdvb.h b/drivers/media/common/siano/smsdvb.h new file mode 100644 index 0000000000..b43cbb5c0d --- /dev/null +++ b/drivers/media/common/siano/smsdvb.h @@ -0,0 +1,115 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/*********************************************************************** + * + ***********************************************************************/ + +struct smsdvb_debugfs; +struct smsdvb_client_t; + +typedef void (*sms_prt_dvb_stats_t)(struct smsdvb_debugfs *debug_data, + struct sms_stats *p); + +typedef void (*sms_prt_isdb_stats_t)(struct smsdvb_debugfs *debug_data, + struct sms_isdbt_stats *p); + +typedef void (*sms_prt_isdb_stats_ex_t) + (struct smsdvb_debugfs *debug_data, + struct sms_isdbt_stats_ex *p); + + +struct smsdvb_client_t { + struct list_head entry; + + struct smscore_device_t *coredev; + struct smscore_client_t *smsclient; + + struct dvb_adapter adapter; + struct dvb_demux demux; + struct dmxdev dmxdev; + struct dvb_frontend frontend; + + enum fe_status fe_status; + + struct completion tune_done; + struct completion stats_done; + + int last_per; + + int legacy_ber, legacy_per; + + int event_fe_state; + int event_unc_state; + + unsigned long get_stats_jiffies; + + int feed_users; + bool has_tuned; + + /* stats debugfs data */ + struct dentry *debugfs; + + struct smsdvb_debugfs *debug_data; + + sms_prt_dvb_stats_t prt_dvb_stats; + sms_prt_isdb_stats_t prt_isdb_stats; + sms_prt_isdb_stats_ex_t prt_isdb_stats_ex; +}; + +/* + * This struct is a mix of struct sms_rx_stats_ex and + * struct sms_srvm_signal_status. + * It was obtained by comparing the way it was filled by the original code + */ +struct RECEPTION_STATISTICS_PER_SLICES_S { + u32 result; + u32 snr; + s32 in_band_power; + u32 ts_packets; + u32 ets_packets; + u32 constellation; + u32 hp_code; + u32 tps_srv_ind_lp; + u32 tps_srv_ind_hp; + u32 cell_id; + u32 reason; + u32 request_id; + u32 modem_state; /* from SMSHOSTLIB_DVB_MODEM_STATE_ET */ + + u32 ber; /* Post Viterbi BER [1E-5] */ + s32 RSSI; /* dBm */ + s32 carrier_offset; /* Carrier Offset in bin/1024 */ + + u32 is_rf_locked; /* 0 - not locked, 1 - locked */ + u32 is_demod_locked; /* 0 - not locked, 1 - locked */ + + u32 ber_bit_count; /* Total number of SYNC bits. */ + u32 ber_error_count; /* Number of erroneous SYNC bits. */ + + s32 MRC_SNR; /* dB */ + s32 mrc_in_band_pwr; /* In band power in dBM */ + s32 MRC_RSSI; /* dBm */ +}; + +/* From smsdvb-debugfs.c */ +#ifdef CONFIG_SMS_SIANO_DEBUGFS + +int smsdvb_debugfs_create(struct smsdvb_client_t *client); +void smsdvb_debugfs_release(struct smsdvb_client_t *client); +void smsdvb_debugfs_register(void); +void smsdvb_debugfs_unregister(void); + +#else + +static inline int smsdvb_debugfs_create(struct smsdvb_client_t *client) +{ + return 0; +} + +static inline void smsdvb_debugfs_release(struct smsdvb_client_t *client) {} + +static inline void smsdvb_debugfs_register(void) {} + +static inline void smsdvb_debugfs_unregister(void) {}; + +#endif + diff --git a/drivers/media/common/siano/smsendian.c b/drivers/media/common/siano/smsendian.c new file mode 100644 index 0000000000..a357381491 --- /dev/null +++ b/drivers/media/common/siano/smsendian.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/**************************************************************** + + Siano Mobile Silicon, Inc. + MDTV receiver kernel modules. 
+ Copyright (C) 2006-2009, Uri Shkolnik + + + ****************************************************************/ + +#include +#include + +#include "smsendian.h" +#include "smscoreapi.h" + +void smsendian_handle_tx_message(void *buffer) +{ +#ifdef __BIG_ENDIAN + struct sms_msg_data *msg = buffer; + int i; + int msg_words; + + switch (msg->x_msg_header.msg_type) { + case MSG_SMS_DATA_DOWNLOAD_REQ: + { + msg->msg_data[0] = le32_to_cpu((__force __le32)(msg->msg_data[0])); + break; + } + + default: + msg_words = (msg->x_msg_header.msg_length - + sizeof(struct sms_msg_hdr))/4; + + for (i = 0; i < msg_words; i++) + msg->msg_data[i] = le32_to_cpu((__force __le32)msg->msg_data[i]); + + break; + } +#endif /* __BIG_ENDIAN */ +} +EXPORT_SYMBOL_GPL(smsendian_handle_tx_message); + +void smsendian_handle_rx_message(void *buffer) +{ +#ifdef __BIG_ENDIAN + struct sms_msg_data *msg = (struct sms_msg_data *)buffer; + int i; + int msg_words; + + switch (msg->x_msg_header.msg_type) { + case MSG_SMS_GET_VERSION_EX_RES: + { + struct sms_version_res *ver = + (struct sms_version_res *) msg; + ver->chip_model = le16_to_cpu((__force __le16)ver->chip_model); + break; + } + + case MSG_SMS_DVBT_BDA_DATA: + case MSG_SMS_DAB_CHANNEL: + case MSG_SMS_DATA_MSG: + { + break; + } + + default: + { + msg_words = (msg->x_msg_header.msg_length - + sizeof(struct sms_msg_hdr))/4; + + for (i = 0; i < msg_words; i++) + msg->msg_data[i] = le32_to_cpu((__force __le32)msg->msg_data[i]); + + break; + } + } +#endif /* __BIG_ENDIAN */ +} +EXPORT_SYMBOL_GPL(smsendian_handle_rx_message); + +void smsendian_handle_message_header(void *msg) +{ +#ifdef __BIG_ENDIAN + struct sms_msg_hdr *phdr = (struct sms_msg_hdr *)msg; + + phdr->msg_type = le16_to_cpu((__force __le16)phdr->msg_type); + phdr->msg_length = le16_to_cpu((__force __le16)phdr->msg_length); + phdr->msg_flags = le16_to_cpu((__force __le16)phdr->msg_flags); +#endif /* __BIG_ENDIAN */ +} +EXPORT_SYMBOL_GPL(smsendian_handle_message_header); diff --git a/drivers/media/common/siano/smsendian.h b/drivers/media/common/siano/smsendian.h new file mode 100644 index 0000000000..f64215c47a --- /dev/null +++ b/drivers/media/common/siano/smsendian.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/**************************************************************** + +Siano Mobile Silicon, Inc. +MDTV receiver kernel modules. +Copyright (C) 2006-2009, Uri Shkolnik + + +****************************************************************/ + +#ifndef __SMS_ENDIAN_H__ +#define __SMS_ENDIAN_H__ + +#include + +extern void smsendian_handle_tx_message(void *buffer); +extern void smsendian_handle_rx_message(void *buffer); +extern void smsendian_handle_message_header(void *msg); + +#endif /* __SMS_ENDIAN_H__ */ + diff --git a/drivers/media/common/siano/smsir.c b/drivers/media/common/siano/smsir.c new file mode 100644 index 0000000000..d85c78c104 --- /dev/null +++ b/drivers/media/common/siano/smsir.c @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-2.0+ +// +// Siano Mobile Silicon, Inc. +// MDTV receiver kernel modules. 
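
[Editorial aside, not part of the patch: on big-endian hosts, the smsendian_handle_*_message() helpers above walk the 32-bit payload words of each message and convert them from the device's little-endian layout to host order. A user-space sketch of the same loop, using a portable byte-swap in place of the kernel's le32_to_cpu().]

    /* Illustrative sketch only; stand-in for le32_to_cpu(). */
    #include <stdint.h>
    #include <stddef.h>

    static uint32_t ex_le32_to_host(uint32_t v)
    {
            const uint8_t *b = (const uint8_t *)&v;

            return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
                   ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
    }

    static void ex_fix_payload(uint32_t *words, size_t payload_bytes)
    {
            size_t i, n = payload_bytes / 4;

            for (i = 0; i < n; i++)
                    words[i] = ex_le32_to_host(words[i]);
    }
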
+// Copyright (C) 2006-2009, Uri Shkolnik +// +// Copyright (c) 2010 - Mauro Carvalho Chehab +// - Ported the driver to use rc-core +// - IR raw event decoding is now done at rc-core +// - Code almost re-written + + +#include "smscoreapi.h" + +#include +#include + +#include "smsir.h" +#include "sms-cards.h" + +#define MODULE_NAME "smsmdtv" + +void sms_ir_event(struct smscore_device_t *coredev, const char *buf, int len) +{ + int i; + const s32 *samples = (const void *)buf; + + for (i = 0; i < len >> 2; i++) { + struct ir_raw_event ev = { + .duration = abs(samples[i]), + .pulse = (samples[i] > 0) ? false : true + }; + + ir_raw_event_store(coredev->ir.dev, &ev); + } + ir_raw_event_handle(coredev->ir.dev); +} + +int sms_ir_init(struct smscore_device_t *coredev) +{ + int err; + int board_id = smscore_get_board_id(coredev); + struct rc_dev *dev; + + pr_debug("Allocating rc device\n"); + dev = rc_allocate_device(RC_DRIVER_IR_RAW); + if (!dev) + return -ENOMEM; + + coredev->ir.controller = 0; /* Todo: vega/nova SPI number */ + coredev->ir.timeout = US_TO_NS(IR_DEFAULT_TIMEOUT); + pr_debug("IR port %d, timeout %d ms\n", + coredev->ir.controller, coredev->ir.timeout); + + snprintf(coredev->ir.name, sizeof(coredev->ir.name), + "SMS IR (%s)", sms_get_board(board_id)->name); + + strscpy(coredev->ir.phys, coredev->devpath, sizeof(coredev->ir.phys)); + strlcat(coredev->ir.phys, "/ir0", sizeof(coredev->ir.phys)); + + dev->device_name = coredev->ir.name; + dev->input_phys = coredev->ir.phys; + dev->dev.parent = coredev->device; + +#if 0 + /* TODO: properly initialize the parameters below */ + dev->input_id.bustype = BUS_USB; + dev->input_id.version = 1; + dev->input_id.vendor = le16_to_cpu(dev->udev->descriptor.idVendor); + dev->input_id.product = le16_to_cpu(dev->udev->descriptor.idProduct); +#endif + + dev->priv = coredev; + dev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER; + dev->map_name = sms_get_board(board_id)->rc_codes; + dev->driver_name = MODULE_NAME; + + pr_debug("Input device (IR) %s is set for key events\n", + dev->device_name); + + err = rc_register_device(dev); + if (err < 0) { + pr_err("Failed to register device\n"); + rc_free_device(dev); + return err; + } + + coredev->ir.dev = dev; + return 0; +} + +void sms_ir_exit(struct smscore_device_t *coredev) +{ + rc_unregister_device(coredev->ir.dev); + + pr_debug("\n"); +} diff --git a/drivers/media/common/siano/smsir.h b/drivers/media/common/siano/smsir.h new file mode 100644 index 0000000000..ada41d5c4e --- /dev/null +++ b/drivers/media/common/siano/smsir.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * + * Siano Mobile Silicon, Inc. + * MDTV receiver kernel modules. 
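
[Editorial aside, not part of the patch: sms_ir_event() above receives signed 32-bit samples in which the magnitude is the duration and the sign distinguishes pulse from space (negative samples are pulses in this driver), and forwards each one to rc-core. The sign/magnitude split as a stand-alone sketch, without rc-core itself.]

    /* Illustrative sketch only. */
    #include <stdint.h>
    #include <stdbool.h>

    struct ex_ir_sample {
            uint32_t duration;      /* magnitude of the raw sample          */
            bool     pulse;         /* true for a pulse (negative sample)   */
    };

    static struct ex_ir_sample ex_decode_sample(int32_t raw)
    {
            struct ex_ir_sample s = {
                    .duration = (uint32_t)(raw < 0 ? -(int64_t)raw : raw),
                    .pulse    = raw < 0,
            };
            return s;
    }
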
+ * Copyright (C) 2006-2009, Uri Shkolnik + * + * Copyright (c) 2010 - Mauro Carvalho Chehab + * - Ported the driver to use rc-core + * - IR raw event decoding is now done at rc-core + * - Code almost re-written + */ + +#ifndef __SMS_IR_H__ +#define __SMS_IR_H__ + +#include +#include + +struct smscore_device_t; + +struct ir_t { + struct rc_dev *dev; + char name[40]; + char phys[32]; + + char *rc_codes; + + u32 timeout; + u32 controller; +}; + +#ifdef CONFIG_SMS_SIANO_RC +int sms_ir_init(struct smscore_device_t *coredev); +void sms_ir_exit(struct smscore_device_t *coredev); +void sms_ir_event(struct smscore_device_t *coredev, + const char *buf, int len); +#else +inline static int sms_ir_init(struct smscore_device_t *coredev) { + return 0; +} +inline static void sms_ir_exit(struct smscore_device_t *coredev) {}; +inline static void sms_ir_event(struct smscore_device_t *coredev, + const char *buf, int len) {}; +#endif + +#endif /* __SMS_IR_H__ */ + diff --git a/drivers/media/common/ttpci-eeprom.c b/drivers/media/common/ttpci-eeprom.c new file mode 100644 index 0000000000..ef8746684d --- /dev/null +++ b/drivers/media/common/ttpci-eeprom.c @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + Retrieve encoded MAC address from 24C16 serial 2-wire EEPROM, + decode it and store it in the associated adapter struct for + use by dvb_net.c + + This card appear to have the 24C16 write protect held to ground, + thus permitting normal read/write operation. Theoretically it + would be possible to write routines to burn a different (encoded) + MAC address into the EEPROM. + + Robert Schlabbach GMX + Michael Glaum KVH Industries + Holger Waechtler Convergence + + Copyright (C) 2002-2003 Ralph Metzler + Metzler Brothers Systementwicklung GbR + + +*/ + +#include +#include +#include +#include +#include +#include + +#include "ttpci-eeprom.h" + +#if 1 +#define dprintk(x...) do { printk(x); } while (0) +#else +#define dprintk(x...) 
do { } while (0) +#endif + + +static int check_mac_tt(u8 *buf) +{ + int i; + u16 tmp = 0xffff; + + for (i = 0; i < 8; i++) { + tmp = (tmp << 8) | ((tmp >> 8) ^ buf[i]); + tmp ^= (tmp >> 4) & 0x0f; + tmp ^= (tmp << 12) ^ ((tmp & 0xff) << 5); + } + tmp ^= 0xffff; + return (((tmp >> 8) ^ buf[8]) | ((tmp & 0xff) ^ buf[9])); +} + +static int getmac_tt(u8 * decodedMAC, u8 * encodedMAC) +{ + u8 xor[20] = { 0x72, 0x23, 0x68, 0x19, 0x5c, 0xa8, 0x71, 0x2c, + 0x54, 0xd3, 0x7b, 0xf1, 0x9E, 0x23, 0x16, 0xf6, + 0x1d, 0x36, 0x64, 0x78}; + u8 data[20]; + int i; + + /* In case there is a sig check failure have the orig contents available */ + memcpy(data, encodedMAC, 20); + + for (i = 0; i < 20; i++) + data[i] ^= xor[i]; + for (i = 0; i < 10; i++) + data[i] = ((data[2 * i + 1] << 8) | data[2 * i]) + >> ((data[2 * i + 1] >> 6) & 3); + + if (check_mac_tt(data)) + return -ENODEV; + + decodedMAC[0] = data[2]; decodedMAC[1] = data[1]; decodedMAC[2] = data[0]; + decodedMAC[3] = data[6]; decodedMAC[4] = data[5]; decodedMAC[5] = data[4]; + return 0; +} + +int ttpci_eeprom_decode_mac(u8 *decodedMAC, u8 *encodedMAC) +{ + u8 xor[20] = { 0x72, 0x23, 0x68, 0x19, 0x5c, 0xa8, 0x71, 0x2c, + 0x54, 0xd3, 0x7b, 0xf1, 0x9E, 0x23, 0x16, 0xf6, + 0x1d, 0x36, 0x64, 0x78}; + u8 data[20]; + int i; + + memcpy(data, encodedMAC, 20); + + for (i = 0; i < 20; i++) + data[i] ^= xor[i]; + for (i = 0; i < 10; i++) + data[i] = ((data[2 * i + 1] << 8) | data[2 * i]) + >> ((data[2 * i + 1] >> 6) & 3); + + if (check_mac_tt(data)) + return -ENODEV; + + decodedMAC[0] = data[2]; + decodedMAC[1] = data[1]; + decodedMAC[2] = data[0]; + decodedMAC[3] = data[6]; + decodedMAC[4] = data[5]; + decodedMAC[5] = data[4]; + return 0; +} +EXPORT_SYMBOL(ttpci_eeprom_decode_mac); + +static int ttpci_eeprom_read_encodedMAC(struct i2c_adapter *adapter, u8 * encodedMAC) +{ + int ret; + u8 b0[] = { 0xcc }; + + struct i2c_msg msg[] = { + { .addr = 0x50, .flags = 0, .buf = b0, .len = 1 }, + { .addr = 0x50, .flags = I2C_M_RD, .buf = encodedMAC, .len = 20 } + }; + + /* dprintk("%s\n", __func__); */ + + ret = i2c_transfer(adapter, msg, 2); + + if (ret != 2) /* Assume EEPROM isn't there */ + return (-ENODEV); + + return 0; +} + + +int ttpci_eeprom_parse_mac(struct i2c_adapter *adapter, u8 *proposed_mac) +{ + int ret; + u8 encodedMAC[20]; + u8 decodedMAC[6]; + + ret = ttpci_eeprom_read_encodedMAC(adapter, encodedMAC); + + if (ret != 0) { /* Will only be -ENODEV */ + dprintk("Couldn't read from EEPROM: not there?\n"); + eth_zero_addr(proposed_mac); + return ret; + } + + ret = getmac_tt(decodedMAC, encodedMAC); + if( ret != 0 ) { + dprintk("adapter failed MAC signature check\n"); + dprintk("encoded MAC from EEPROM was %*phC", + (int)sizeof(encodedMAC), &encodedMAC); + eth_zero_addr(proposed_mac); + return ret; + } + + memcpy(proposed_mac, decodedMAC, 6); + dprintk("adapter has MAC addr = %pM\n", decodedMAC); + return 0; +} + +EXPORT_SYMBOL(ttpci_eeprom_parse_mac); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Ralph Metzler, Marcus Metzler, others"); +MODULE_DESCRIPTION("Decode dvb_net MAC address from EEPROM of PCI DVB cards made by Siemens, Technotrend, Hauppauge"); diff --git a/drivers/media/common/ttpci-eeprom.h b/drivers/media/common/ttpci-eeprom.h new file mode 100644 index 0000000000..ee741867ba --- /dev/null +++ b/drivers/media/common/ttpci-eeprom.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + Retrieve encoded MAC address from ATMEL ttpci_eeprom serial 2-wire EEPROM, + decode it and store it in associated adapter net device + + Robert 
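
[Editorial aside, not part of the patch: ttpci_eeprom_read_encodedMAC() above uses the standard two-message I2C pattern -- write the EEPROM offset in the first message, then read a block back in the second message of the same i2c_transfer() call. A hedged sketch of that pattern using the same kernel API as the code above; the offset and length here are arbitrary example parameters, not values from the driver.]

    /* Illustrative sketch only. */
    #include <linux/errno.h>
    #include <linux/i2c.h>

    static int ex_read_block(struct i2c_adapter *adapter, u8 addr,
                             u8 offset, u8 *buf, u16 len)
    {
            struct i2c_msg msg[] = {
                    { .addr = addr, .flags = 0,        .buf = &offset, .len = 1   },
                    { .addr = addr, .flags = I2C_M_RD, .buf = buf,     .len = len },
            };

            /* i2c_transfer() returns the number of messages processed */
            return (i2c_transfer(adapter, msg, 2) == 2) ? 0 : -ENODEV;
    }
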
Schlabbach GMX + Michael Glaum KVH Industries + Holger Waechtler Convergence + + +*/ + +#ifndef __TTPCI_EEPROM_H__ +#define __TTPCI_EEPROM_H__ + +#include +#include + +extern int ttpci_eeprom_decode_mac(u8 *decodedMAC, u8 *encodedMAC); +extern int ttpci_eeprom_parse_mac(struct i2c_adapter *adapter, u8 *propsed_mac); + +#endif diff --git a/drivers/media/common/tveeprom.c b/drivers/media/common/tveeprom.c new file mode 100644 index 0000000000..b5b9d6de6d --- /dev/null +++ b/drivers/media/common/tveeprom.c @@ -0,0 +1,759 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * tveeprom - eeprom decoder for tvcard configuration eeproms + * + * Data and decoding routines shamelessly borrowed from bttv-cards.c + * eeprom access routine shamelessly borrowed from bttv-if.c + * which are: + + Copyright (C) 1996,97,98 Ralph Metzler (rjkm@thp.uni-koeln.de) + & Marcus Metzler (mocm@thp.uni-koeln.de) + (c) 1999-2001 Gerd Knorr + + * Adjustments to fit a more general model and all bugs: + + Copyright (C) 2003 John Klar + + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +MODULE_DESCRIPTION("i2c Hauppauge eeprom decoder driver"); +MODULE_AUTHOR("John Klar"); +MODULE_LICENSE("GPL"); + +#define STRM(array, i) \ + (i < sizeof(array) / sizeof(char *) ? array[i] : "unknown") + + +/* + * The Hauppauge eeprom uses an 8bit field to determine which + * tuner formats the tuner supports. + */ +static const struct { + int id; + const char * const name; +} hauppauge_tuner_fmt[] = { + { V4L2_STD_UNKNOWN, " UNKNOWN" }, + { V4L2_STD_UNKNOWN, " FM" }, + { V4L2_STD_B|V4L2_STD_GH, " PAL(B/G)" }, + { V4L2_STD_MN, " NTSC(M)" }, + { V4L2_STD_PAL_I, " PAL(I)" }, + { V4L2_STD_SECAM_L|V4L2_STD_SECAM_LC, " SECAM(L/L')" }, + { V4L2_STD_DK, " PAL(D/D1/K)" }, + { V4L2_STD_ATSC, " ATSC/DVB Digital" }, +}; + +/* This is the full list of possible tuners. Many thanks to Hauppauge for + supplying this information. Note that many tuners where only used for + testing and never made it to the outside world. So you will only see + a subset in actual produced cards. 
*/ +static const struct { + int id; + const char * const name; +} hauppauge_tuner[] = { + /* 0-9 */ + { TUNER_ABSENT, "None" }, + { TUNER_ABSENT, "External" }, + { TUNER_ABSENT, "Unspecified" }, + { TUNER_PHILIPS_PAL, "Philips FI1216" }, + { TUNER_PHILIPS_SECAM, "Philips FI1216MF" }, + { TUNER_PHILIPS_NTSC, "Philips FI1236" }, + { TUNER_PHILIPS_PAL_I, "Philips FI1246" }, + { TUNER_PHILIPS_PAL_DK, "Philips FI1256" }, + { TUNER_PHILIPS_PAL, "Philips FI1216 MK2" }, + { TUNER_PHILIPS_SECAM, "Philips FI1216MF MK2" }, + /* 10-19 */ + { TUNER_PHILIPS_NTSC, "Philips FI1236 MK2" }, + { TUNER_PHILIPS_PAL_I, "Philips FI1246 MK2" }, + { TUNER_PHILIPS_PAL_DK, "Philips FI1256 MK2" }, + { TUNER_TEMIC_NTSC, "Temic 4032FY5" }, + { TUNER_TEMIC_PAL, "Temic 4002FH5" }, + { TUNER_TEMIC_PAL_I, "Temic 4062FY5" }, + { TUNER_PHILIPS_PAL, "Philips FR1216 MK2" }, + { TUNER_PHILIPS_SECAM, "Philips FR1216MF MK2" }, + { TUNER_PHILIPS_NTSC, "Philips FR1236 MK2" }, + { TUNER_PHILIPS_PAL_I, "Philips FR1246 MK2" }, + /* 20-29 */ + { TUNER_PHILIPS_PAL_DK, "Philips FR1256 MK2" }, + { TUNER_PHILIPS_PAL, "Philips FM1216" }, + { TUNER_PHILIPS_SECAM, "Philips FM1216MF" }, + { TUNER_PHILIPS_NTSC, "Philips FM1236" }, + { TUNER_PHILIPS_PAL_I, "Philips FM1246" }, + { TUNER_PHILIPS_PAL_DK, "Philips FM1256" }, + { TUNER_TEMIC_4036FY5_NTSC, "Temic 4036FY5" }, + { TUNER_ABSENT, "Samsung TCPN9082D" }, + { TUNER_ABSENT, "Samsung TCPM9092P" }, + { TUNER_TEMIC_4006FH5_PAL, "Temic 4006FH5" }, + /* 30-39 */ + { TUNER_ABSENT, "Samsung TCPN9085D" }, + { TUNER_ABSENT, "Samsung TCPB9085P" }, + { TUNER_ABSENT, "Samsung TCPL9091P" }, + { TUNER_TEMIC_4039FR5_NTSC, "Temic 4039FR5" }, + { TUNER_PHILIPS_FQ1216ME, "Philips FQ1216 ME" }, + { TUNER_TEMIC_4066FY5_PAL_I, "Temic 4066FY5" }, + { TUNER_PHILIPS_NTSC, "Philips TD1536" }, + { TUNER_PHILIPS_NTSC, "Philips TD1536D" }, + { TUNER_PHILIPS_NTSC, "Philips FMR1236" }, /* mono radio */ + { TUNER_ABSENT, "Philips FI1256MP" }, + /* 40-49 */ + { TUNER_ABSENT, "Samsung TCPQ9091P" }, + { TUNER_TEMIC_4006FN5_MULTI_PAL,"Temic 4006FN5" }, + { TUNER_TEMIC_4009FR5_PAL, "Temic 4009FR5" }, + { TUNER_TEMIC_4046FM5, "Temic 4046FM5" }, + { TUNER_TEMIC_4009FN5_MULTI_PAL_FM, "Temic 4009FN5" }, + { TUNER_ABSENT, "Philips TD1536D FH 44"}, + { TUNER_LG_NTSC_FM, "LG TP18NSR01F"}, + { TUNER_LG_PAL_FM, "LG TP18PSB01D"}, + { TUNER_LG_PAL, "LG TP18PSB11D"}, + { TUNER_LG_PAL_I_FM, "LG TAPC-I001D"}, + /* 50-59 */ + { TUNER_LG_PAL_I, "LG TAPC-I701D"}, + { TUNER_ABSENT, "Temic 4042FI5"}, + { TUNER_MICROTUNE_4049FM5, "Microtune 4049 FM5"}, + { TUNER_ABSENT, "LG TPI8NSR11F"}, + { TUNER_ABSENT, "Microtune 4049 FM5 Alt I2C"}, + { TUNER_PHILIPS_FM1216ME_MK3, "Philips FQ1216ME MK3"}, + { TUNER_ABSENT, "Philips FI1236 MK3"}, + { TUNER_PHILIPS_FM1216ME_MK3, "Philips FM1216 ME MK3"}, + { TUNER_PHILIPS_FM1236_MK3, "Philips FM1236 MK3"}, + { TUNER_ABSENT, "Philips FM1216MP MK3"}, + /* 60-69 */ + { TUNER_PHILIPS_FM1216ME_MK3, "LG S001D MK3"}, + { TUNER_ABSENT, "LG M001D MK3"}, + { TUNER_PHILIPS_FM1216ME_MK3, "LG S701D MK3"}, + { TUNER_ABSENT, "LG M701D MK3"}, + { TUNER_ABSENT, "Temic 4146FM5"}, + { TUNER_ABSENT, "Temic 4136FY5"}, + { TUNER_ABSENT, "Temic 4106FH5"}, + { TUNER_ABSENT, "Philips FQ1216LMP MK3"}, + { TUNER_LG_NTSC_TAPE, "LG TAPE H001F MK3"}, + { TUNER_LG_NTSC_TAPE, "LG TAPE H701F MK3"}, + /* 70-79 */ + { TUNER_ABSENT, "LG TALN H200T"}, + { TUNER_ABSENT, "LG TALN H250T"}, + { TUNER_ABSENT, "LG TALN M200T"}, + { TUNER_ABSENT, "LG TALN Z200T"}, + { TUNER_ABSENT, "LG TALN S200T"}, + { TUNER_ABSENT, "Thompson DTT7595"}, + { TUNER_ABSENT, 
"Thompson DTT7592"}, + { TUNER_ABSENT, "Silicon TDA8275C1 8290"}, + { TUNER_ABSENT, "Silicon TDA8275C1 8290 FM"}, + { TUNER_ABSENT, "Thompson DTT757"}, + /* 80-89 */ + { TUNER_PHILIPS_FQ1216LME_MK3, "Philips FQ1216LME MK3"}, + { TUNER_LG_PAL_NEW_TAPC, "LG TAPC G701D"}, + { TUNER_LG_NTSC_NEW_TAPC, "LG TAPC H791F"}, + { TUNER_LG_PAL_NEW_TAPC, "TCL 2002MB 3"}, + { TUNER_LG_PAL_NEW_TAPC, "TCL 2002MI 3"}, + { TUNER_TCL_2002N, "TCL 2002N 6A"}, + { TUNER_PHILIPS_FM1236_MK3, "Philips FQ1236 MK3"}, + { TUNER_SAMSUNG_TCPN_2121P30A, "Samsung TCPN 2121P30A"}, + { TUNER_ABSENT, "Samsung TCPE 4121P30A"}, + { TUNER_PHILIPS_FM1216ME_MK3, "TCL MFPE05 2"}, + /* 90-99 */ + { TUNER_ABSENT, "LG TALN H202T"}, + { TUNER_PHILIPS_FQ1216AME_MK4, "Philips FQ1216AME MK4"}, + { TUNER_PHILIPS_FQ1236A_MK4, "Philips FQ1236A MK4"}, + { TUNER_ABSENT, "Philips FQ1286A MK4"}, + { TUNER_ABSENT, "Philips FQ1216ME MK5"}, + { TUNER_ABSENT, "Philips FQ1236 MK5"}, + { TUNER_SAMSUNG_TCPG_6121P30A, "Samsung TCPG 6121P30A"}, + { TUNER_TCL_2002MB, "TCL 2002MB_3H"}, + { TUNER_ABSENT, "TCL 2002MI_3H"}, + { TUNER_TCL_2002N, "TCL 2002N 5H"}, + /* 100-109 */ + { TUNER_PHILIPS_FMD1216ME_MK3, "Philips FMD1216ME"}, + { TUNER_TEA5767, "Philips TEA5768HL FM Radio"}, + { TUNER_ABSENT, "Panasonic ENV57H12D5"}, + { TUNER_PHILIPS_FM1236_MK3, "TCL MFNM05-4"}, + { TUNER_PHILIPS_FM1236_MK3, "TCL MNM05-4"}, + { TUNER_PHILIPS_FM1216ME_MK3, "TCL MPE05-2"}, + { TUNER_ABSENT, "TCL MQNM05-4"}, + { TUNER_ABSENT, "LG TAPC-W701D"}, + { TUNER_ABSENT, "TCL 9886P-WM"}, + { TUNER_ABSENT, "TCL 1676NM-WM"}, + /* 110-119 */ + { TUNER_ABSENT, "Thompson DTT75105"}, + { TUNER_ABSENT, "Conexant_CX24109"}, + { TUNER_TCL_2002N, "TCL M2523_5N_E"}, + { TUNER_TCL_2002MB, "TCL M2523_3DB_E"}, + { TUNER_ABSENT, "Philips 8275A"}, + { TUNER_ABSENT, "Microtune MT2060"}, + { TUNER_PHILIPS_FM1236_MK3, "Philips FM1236 MK5"}, + { TUNER_PHILIPS_FM1216ME_MK3, "Philips FM1216ME MK5"}, + { TUNER_ABSENT, "TCL M2523_3DI_E"}, + { TUNER_ABSENT, "Samsung THPD5222FG30A"}, + /* 120-129 */ + { TUNER_XC2028, "Xceive XC3028"}, + { TUNER_PHILIPS_FQ1216LME_MK3, "Philips FQ1216LME MK5"}, + { TUNER_ABSENT, "Philips FQD1216LME"}, + { TUNER_ABSENT, "Conexant CX24118A"}, + { TUNER_ABSENT, "TCL DMF11WIP"}, + { TUNER_ABSENT, "TCL MFNM05_4H_E"}, + { TUNER_ABSENT, "TCL MNM05_4H_E"}, + { TUNER_ABSENT, "TCL MPE05_2H_E"}, + { TUNER_ABSENT, "TCL MQNM05_4_U"}, + { TUNER_ABSENT, "TCL M2523_5NH_E"}, + /* 130-139 */ + { TUNER_ABSENT, "TCL M2523_3DBH_E"}, + { TUNER_ABSENT, "TCL M2523_3DIH_E"}, + { TUNER_ABSENT, "TCL MFPE05_2_U"}, + { TUNER_PHILIPS_FMD1216MEX_MK3, "Philips FMD1216MEX"}, + { TUNER_ABSENT, "Philips FRH2036B"}, + { TUNER_ABSENT, "Panasonic ENGF75_01GF"}, + { TUNER_ABSENT, "MaxLinear MXL5005"}, + { TUNER_ABSENT, "MaxLinear MXL5003"}, + { TUNER_ABSENT, "Xceive XC2028"}, + { TUNER_ABSENT, "Microtune MT2131"}, + /* 140-149 */ + { TUNER_ABSENT, "Philips 8275A_8295"}, + { TUNER_ABSENT, "TCL MF02GIP_5N_E"}, + { TUNER_ABSENT, "TCL MF02GIP_3DB_E"}, + { TUNER_ABSENT, "TCL MF02GIP_3DI_E"}, + { TUNER_ABSENT, "Microtune MT2266"}, + { TUNER_ABSENT, "TCL MF10WPP_4N_E"}, + { TUNER_ABSENT, "LG TAPQ_H702F"}, + { TUNER_ABSENT, "TCL M09WPP_4N_E"}, + { TUNER_ABSENT, "MaxLinear MXL5005_v2"}, + { TUNER_PHILIPS_TDA8290, "Philips 18271_8295"}, + /* 150-159 */ + { TUNER_XC5000, "Xceive XC5000"}, + { TUNER_ABSENT, "Xceive XC3028L"}, + { TUNER_ABSENT, "NXP 18271C2_716x"}, + { TUNER_ABSENT, "Xceive XC4000"}, + { TUNER_ABSENT, "Dibcom 7070"}, + { TUNER_PHILIPS_TDA8290, "NXP 18271C2"}, + { TUNER_ABSENT, "Siano SMS1010"}, + { 
TUNER_ABSENT, "Siano SMS1150"}, + { TUNER_ABSENT, "MaxLinear 5007"}, + { TUNER_ABSENT, "TCL M09WPP_2P_E"}, + /* 160-169 */ + { TUNER_ABSENT, "Siano SMS1180"}, + { TUNER_ABSENT, "Maxim_MAX2165"}, + { TUNER_ABSENT, "Siano SMS1140"}, + { TUNER_ABSENT, "Siano SMS1150 B1"}, + { TUNER_ABSENT, "MaxLinear 111"}, + { TUNER_ABSENT, "Dibcom 7770"}, + { TUNER_ABSENT, "Siano SMS1180VNS"}, + { TUNER_ABSENT, "Siano SMS1184"}, + { TUNER_PHILIPS_FQ1236_MK5, "TCL M30WTP-4N-E"}, + { TUNER_ABSENT, "TCL_M11WPP_2PN_E"}, + /* 170-179 */ + { TUNER_ABSENT, "MaxLinear 301"}, + { TUNER_ABSENT, "Mirics MSi001"}, + { TUNER_ABSENT, "MaxLinear MxL241SF"}, + { TUNER_XC5000C, "Xceive XC5000C"}, + { TUNER_ABSENT, "Montage M68TS2020"}, + { TUNER_ABSENT, "Siano SMS1530"}, + { TUNER_ABSENT, "Dibcom 7090"}, + { TUNER_ABSENT, "Xceive XC5200C"}, + { TUNER_ABSENT, "NXP 18273"}, + { TUNER_ABSENT, "Montage M88TS2022"}, + /* 180-188 */ + { TUNER_ABSENT, "NXP 18272M"}, + { TUNER_ABSENT, "NXP 18272S"}, + + { TUNER_ABSENT, "Mirics MSi003"}, + { TUNER_ABSENT, "MaxLinear MxL256"}, + { TUNER_ABSENT, "SiLabs Si2158"}, + { TUNER_ABSENT, "SiLabs Si2178"}, + { TUNER_ABSENT, "SiLabs Si2157"}, + { TUNER_ABSENT, "SiLabs Si2177"}, + { TUNER_ABSENT, "ITE IT9137FN"}, +}; + +/* Use TVEEPROM_AUDPROC_INTERNAL for those audio 'chips' that are + * internal to a video chip, i.e. not a separate audio chip. */ +static const struct { + u32 id; + const char * const name; +} audio_ic[] = { + /* 0-4 */ + { TVEEPROM_AUDPROC_NONE, "None" }, + { TVEEPROM_AUDPROC_OTHER, "TEA6300" }, + { TVEEPROM_AUDPROC_OTHER, "TEA6320" }, + { TVEEPROM_AUDPROC_OTHER, "TDA9850" }, + { TVEEPROM_AUDPROC_MSP, "MSP3400C" }, + /* 5-9 */ + { TVEEPROM_AUDPROC_MSP, "MSP3410D" }, + { TVEEPROM_AUDPROC_MSP, "MSP3415" }, + { TVEEPROM_AUDPROC_MSP, "MSP3430" }, + { TVEEPROM_AUDPROC_MSP, "MSP3438" }, + { TVEEPROM_AUDPROC_OTHER, "CS5331" }, + /* 10-14 */ + { TVEEPROM_AUDPROC_MSP, "MSP3435" }, + { TVEEPROM_AUDPROC_MSP, "MSP3440" }, + { TVEEPROM_AUDPROC_MSP, "MSP3445" }, + { TVEEPROM_AUDPROC_MSP, "MSP3411" }, + { TVEEPROM_AUDPROC_MSP, "MSP3416" }, + /* 15-19 */ + { TVEEPROM_AUDPROC_MSP, "MSP3425" }, + { TVEEPROM_AUDPROC_MSP, "MSP3451" }, + { TVEEPROM_AUDPROC_MSP, "MSP3418" }, + { TVEEPROM_AUDPROC_OTHER, "Type 0x12" }, + { TVEEPROM_AUDPROC_OTHER, "OKI7716" }, + /* 20-24 */ + { TVEEPROM_AUDPROC_MSP, "MSP4410" }, + { TVEEPROM_AUDPROC_MSP, "MSP4420" }, + { TVEEPROM_AUDPROC_MSP, "MSP4440" }, + { TVEEPROM_AUDPROC_MSP, "MSP4450" }, + { TVEEPROM_AUDPROC_MSP, "MSP4408" }, + /* 25-29 */ + { TVEEPROM_AUDPROC_MSP, "MSP4418" }, + { TVEEPROM_AUDPROC_MSP, "MSP4428" }, + { TVEEPROM_AUDPROC_MSP, "MSP4448" }, + { TVEEPROM_AUDPROC_MSP, "MSP4458" }, + { TVEEPROM_AUDPROC_MSP, "Type 0x1d" }, + /* 30-34 */ + { TVEEPROM_AUDPROC_INTERNAL, "CX880" }, + { TVEEPROM_AUDPROC_INTERNAL, "CX881" }, + { TVEEPROM_AUDPROC_INTERNAL, "CX883" }, + { TVEEPROM_AUDPROC_INTERNAL, "CX882" }, + { TVEEPROM_AUDPROC_INTERNAL, "CX25840" }, + /* 35-39 */ + { TVEEPROM_AUDPROC_INTERNAL, "CX25841" }, + { TVEEPROM_AUDPROC_INTERNAL, "CX25842" }, + { TVEEPROM_AUDPROC_INTERNAL, "CX25843" }, + { TVEEPROM_AUDPROC_INTERNAL, "CX23418" }, + { TVEEPROM_AUDPROC_INTERNAL, "CX23885" }, + /* 40-44 */ + { TVEEPROM_AUDPROC_INTERNAL, "CX23888" }, + { TVEEPROM_AUDPROC_INTERNAL, "SAA7131" }, + { TVEEPROM_AUDPROC_INTERNAL, "CX23887" }, + { TVEEPROM_AUDPROC_INTERNAL, "SAA7164" }, + { TVEEPROM_AUDPROC_INTERNAL, "AU8522" }, + /* 45-49 */ + { TVEEPROM_AUDPROC_INTERNAL, "AVF4910B" }, + { TVEEPROM_AUDPROC_INTERNAL, "SAA7231" }, + { TVEEPROM_AUDPROC_INTERNAL, "CX23102" }, + { 
TVEEPROM_AUDPROC_INTERNAL, "SAA7163" }, + { TVEEPROM_AUDPROC_OTHER, "AK4113" }, + /* 50-52 */ + { TVEEPROM_AUDPROC_OTHER, "CS5340" }, + { TVEEPROM_AUDPROC_OTHER, "CS8416" }, + { TVEEPROM_AUDPROC_OTHER, "CX20810" }, +}; + +/* This list is supplied by Hauppauge. Thanks! */ +static const char *decoderIC[] = { + /* 0-4 */ + "None", "BT815", "BT817", "BT819", "BT815A", + /* 5-9 */ + "BT817A", "BT819A", "BT827", "BT829", "BT848", + /* 10-14 */ + "BT848A", "BT849A", "BT829A", "BT827A", "BT878", + /* 15-19 */ + "BT879", "BT880", "VPX3226E", "SAA7114", "SAA7115", + /* 20-24 */ + "CX880", "CX881", "CX883", "SAA7111", "SAA7113", + /* 25-29 */ + "CX882", "TVP5150A", "CX25840", "CX25841", "CX25842", + /* 30-34 */ + "CX25843", "CX23418", "NEC61153", "CX23885", "CX23888", + /* 35-39 */ + "SAA7131", "CX25837", "CX23887", "CX23885A", "CX23887A", + /* 40-44 */ + "SAA7164", "CX23885B", "AU8522", "ADV7401", "AVF4910B", + /* 45-49 */ + "SAA7231", "CX23102", "SAA7163", "ADV7441A", "ADV7181C", + /* 50-53 */ + "CX25836", "TDA9955", "TDA19977", "ADV7842" +}; + +static int hasRadioTuner(int tunerType) +{ + switch (tunerType) { + case 18: /* PNPEnv_TUNER_FR1236_MK2 */ + case 23: /* PNPEnv_TUNER_FM1236 */ + case 38: /* PNPEnv_TUNER_FMR1236 */ + case 16: /* PNPEnv_TUNER_FR1216_MK2 */ + case 19: /* PNPEnv_TUNER_FR1246_MK2 */ + case 21: /* PNPEnv_TUNER_FM1216 */ + case 24: /* PNPEnv_TUNER_FM1246 */ + case 17: /* PNPEnv_TUNER_FR1216MF_MK2 */ + case 22: /* PNPEnv_TUNER_FM1216MF */ + case 20: /* PNPEnv_TUNER_FR1256_MK2 */ + case 25: /* PNPEnv_TUNER_FM1256 */ + case 33: /* PNPEnv_TUNER_4039FR5 */ + case 42: /* PNPEnv_TUNER_4009FR5 */ + case 52: /* PNPEnv_TUNER_4049FM5 */ + case 54: /* PNPEnv_TUNER_4049FM5_AltI2C */ + case 44: /* PNPEnv_TUNER_4009FN5 */ + case 31: /* PNPEnv_TUNER_TCPB9085P */ + case 30: /* PNPEnv_TUNER_TCPN9085D */ + case 46: /* PNPEnv_TUNER_TP18NSR01F */ + case 47: /* PNPEnv_TUNER_TP18PSB01D */ + case 49: /* PNPEnv_TUNER_TAPC_I001D */ + case 60: /* PNPEnv_TUNER_TAPE_S001D_MK3 */ + case 57: /* PNPEnv_TUNER_FM1216ME_MK3 */ + case 59: /* PNPEnv_TUNER_FM1216MP_MK3 */ + case 58: /* PNPEnv_TUNER_FM1236_MK3 */ + case 68: /* PNPEnv_TUNER_TAPE_H001F_MK3 */ + case 61: /* PNPEnv_TUNER_TAPE_M001D_MK3 */ + case 78: /* PNPEnv_TUNER_TDA8275C1_8290_FM */ + case 89: /* PNPEnv_TUNER_TCL_MFPE05_2 */ + case 92: /* PNPEnv_TUNER_PHILIPS_FQ1236A_MK4 */ + case 105: + return 1; + } + return 0; +} + +void tveeprom_hauppauge_analog(struct tveeprom *tvee, + unsigned char *eeprom_data) +{ + /* ---------------------------------------------- + ** The hauppauge eeprom format is tagged + ** + ** if packet[0] == 0x84, then packet[0..1] == length + ** else length = packet[0] & 3f; + ** if packet[0] & f8 == f8, then EOD and packet[1] == checksum + ** + ** In our (ivtv) case we're interested in the following: + ** tuner type: tag [00].05 or [0a].01 (index into hauppauge_tuner) + ** tuner fmts: tag [00].04 or [0a].00 (bitmask index into + ** hauppauge_tuner_fmt) + ** radio: tag [00].{last} or [0e].00 (bitmask. bit2=FM) + ** audio proc: tag [02].01 or [05].00 (mask with 0x7f) + ** decoder proc: tag [09].01) + + ** Fun info: + ** model: tag [00].07-08 or [06].00-01 + ** revision: tag [00].09-0b or [06].04-06 + ** serial#: tag [01].05-07 or [04].04-06 + + ** # of inputs/outputs ??? 
+ */ + + int i, j, len, done, beenhere, tag, start; + + int tuner1 = 0, t_format1 = 0, audioic = -1; + const char *t_name1 = NULL; + const char *t_fmt_name1[8] = { " none", "", "", "", "", "", "", "" }; + + int tuner2 = 0, t_format2 = 0; + const char *t_name2 = NULL; + const char *t_fmt_name2[8] = { " none", "", "", "", "", "", "", "" }; + + memset(tvee, 0, sizeof(*tvee)); + tvee->tuner_type = TUNER_ABSENT; + tvee->tuner2_type = TUNER_ABSENT; + + done = len = beenhere = 0; + + /* Different eeprom start offsets for em28xx, cx2388x and cx23418 */ + if (eeprom_data[0] == 0x1a && + eeprom_data[1] == 0xeb && + eeprom_data[2] == 0x67 && + eeprom_data[3] == 0x95) + start = 0xa0; /* Generic em28xx offset */ + else if ((eeprom_data[0] & 0xe1) == 0x01 && + eeprom_data[1] == 0x00 && + eeprom_data[2] == 0x00 && + eeprom_data[8] == 0x84) + start = 8; /* Generic cx2388x offset */ + else if (eeprom_data[1] == 0x70 && + eeprom_data[2] == 0x00 && + eeprom_data[4] == 0x74 && + eeprom_data[8] == 0x84) + start = 8; /* Generic cx23418 offset (models 74xxx) */ + else + start = 0; + + for (i = start; !done && i < 256; i += len) { + if (eeprom_data[i] == 0x84) { + len = eeprom_data[i + 1] + (eeprom_data[i + 2] << 8); + i += 3; + } else if ((eeprom_data[i] & 0xf0) == 0x70) { + if (eeprom_data[i] & 0x08) { + /* verify checksum! */ + done = 1; + break; + } + len = eeprom_data[i] & 0x07; + ++i; + } else { + pr_warn("Encountered bad packet header [%02x]. Corrupt or not a Hauppauge eeprom.\n", + eeprom_data[i]); + return; + } + + pr_debug("Tag [%02x] + %d bytes: %*ph\n", + eeprom_data[i], len - 1, len, &eeprom_data[i]); + + /* process by tag */ + tag = eeprom_data[i]; + switch (tag) { + case 0x00: + /* tag: 'Comprehensive' */ + tuner1 = eeprom_data[i+6]; + t_format1 = eeprom_data[i+5]; + tvee->has_radio = eeprom_data[i+len-1]; + /* old style tag, don't know how to detect + IR presence, mark as unknown. */ + tvee->has_ir = 0; + tvee->model = + eeprom_data[i+8] + + (eeprom_data[i+9] << 8); + tvee->revision = eeprom_data[i+10] + + (eeprom_data[i+11] << 8) + + (eeprom_data[i+12] << 16); + break; + + case 0x01: + /* tag: 'SerialID' */ + tvee->serial_number = + eeprom_data[i+6] + + (eeprom_data[i+7] << 8) + + (eeprom_data[i+8] << 16); + break; + + case 0x02: + /* tag 'AudioInfo' + Note mask with 0x7F, high bit used on some older models + to indicate 4052 mux was removed in favor of using MSP + inputs directly. */ + audioic = eeprom_data[i+2] & 0x7f; + if (audioic < ARRAY_SIZE(audio_ic)) + tvee->audio_processor = audio_ic[audioic].id; + else + tvee->audio_processor = TVEEPROM_AUDPROC_OTHER; + break; + + /* case 0x03: tag 'EEInfo' */ + + case 0x04: + /* tag 'SerialID2' */ + tvee->serial_number = + eeprom_data[i+5] + + (eeprom_data[i+6] << 8) + + (eeprom_data[i+7] << 16)+ + (eeprom_data[i+8] << 24); + + if (eeprom_data[i + 8] == 0xf0) { + tvee->MAC_address[0] = 0x00; + tvee->MAC_address[1] = 0x0D; + tvee->MAC_address[2] = 0xFE; + tvee->MAC_address[3] = eeprom_data[i + 7]; + tvee->MAC_address[4] = eeprom_data[i + 6]; + tvee->MAC_address[5] = eeprom_data[i + 5]; + tvee->has_MAC_address = 1; + } + break; + + case 0x05: + /* tag 'Audio2' + Note mask with 0x7F, high bit used on some older models + to indicate 4052 mux was removed in favor of using MSP + inputs directly. 
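
[Editorial aside, not part of the patch: the parsing loop above walks Hauppauge's tagged EEPROM layout exactly as the block comment describes -- a 0x84 byte introduces a 16-bit length, a 0x7x byte carries the length in its low three bits, and 0x78..0x7f marks end of data. A compact stand-alone sketch of that walk, which only measures packets and decodes nothing.]

    /* Illustrative sketch only. */
    #include <stddef.h>

    static void ex_walk_tags(const unsigned char *ee, size_t size)
    {
            size_t i = 0, len;

            while (i + 2 < size) {
                    if (ee[i] == 0x84) {            /* long packet: 16-bit length */
                            len = ee[i + 1] | ((size_t)ee[i + 2] << 8);
                            i += 3;
                    } else if ((ee[i] & 0xf0) == 0x70) {
                            if (ee[i] & 0x08)       /* 0x78..0x7f: end of data */
                                    break;
                            len = ee[i] & 0x07;     /* short packet */
                            i++;
                    } else {
                            break;                  /* not a valid packet header */
                    }
                    /* ee[i] is now the tag byte, ee[i + 1 ..] its payload */
                    i += len;
            }
    }
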
*/ + audioic = eeprom_data[i+1] & 0x7f; + if (audioic < ARRAY_SIZE(audio_ic)) + tvee->audio_processor = audio_ic[audioic].id; + else + tvee->audio_processor = TVEEPROM_AUDPROC_OTHER; + + break; + + case 0x06: + /* tag 'ModelRev' */ + tvee->model = + eeprom_data[i + 1] + + (eeprom_data[i + 2] << 8) + + (eeprom_data[i + 3] << 16) + + (eeprom_data[i + 4] << 24); + tvee->revision = + eeprom_data[i + 5] + + (eeprom_data[i + 6] << 8) + + (eeprom_data[i + 7] << 16); + break; + + case 0x07: + /* tag 'Details': according to Hauppauge not interesting + on any PCI-era or later boards. */ + break; + + /* there is no tag 0x08 defined */ + + case 0x09: + /* tag 'Video' */ + tvee->decoder_processor = eeprom_data[i + 1]; + break; + + case 0x0a: + /* tag 'Tuner' */ + if (beenhere == 0) { + tuner1 = eeprom_data[i + 2]; + t_format1 = eeprom_data[i + 1]; + beenhere = 1; + } else { + /* a second (radio) tuner may be present */ + tuner2 = eeprom_data[i + 2]; + t_format2 = eeprom_data[i + 1]; + /* not a TV tuner? */ + if (t_format2 == 0) + tvee->has_radio = 1; /* must be radio */ + } + break; + + case 0x0b: + /* tag 'Inputs': according to Hauppauge this is specific + to each driver family, so no good assumptions can be + made. */ + break; + + /* case 0x0c: tag 'Balun' */ + /* case 0x0d: tag 'Teletext' */ + + case 0x0e: + /* tag: 'Radio' */ + tvee->has_radio = eeprom_data[i+1]; + break; + + case 0x0f: + /* tag 'IRInfo' */ + tvee->has_ir = 1 | (eeprom_data[i+1] << 1); + break; + + /* case 0x10: tag 'VBIInfo' */ + /* case 0x11: tag 'QCInfo' */ + /* case 0x12: tag 'InfoBits' */ + + default: + pr_debug("Not sure what to do with tag [%02x]\n", + tag); + /* dump the rest of the packet? */ + } + } + + if (!done) { + pr_warn("Ran out of data!\n"); + return; + } + + if (tvee->revision != 0) { + tvee->rev_str[0] = 32 + ((tvee->revision >> 18) & 0x3f); + tvee->rev_str[1] = 32 + ((tvee->revision >> 12) & 0x3f); + tvee->rev_str[2] = 32 + ((tvee->revision >> 6) & 0x3f); + tvee->rev_str[3] = 32 + (tvee->revision & 0x3f); + tvee->rev_str[4] = 0; + } + + if (hasRadioTuner(tuner1) && !tvee->has_radio) { + pr_info("The eeprom says no radio is present, but the tuner type\n"); + pr_info("indicates otherwise. 
I will assume that radio is present.\n"); + tvee->has_radio = 1; + } + + if (tuner1 < ARRAY_SIZE(hauppauge_tuner)) { + tvee->tuner_type = hauppauge_tuner[tuner1].id; + t_name1 = hauppauge_tuner[tuner1].name; + } else { + t_name1 = "unknown"; + } + + if (tuner2 < ARRAY_SIZE(hauppauge_tuner)) { + tvee->tuner2_type = hauppauge_tuner[tuner2].id; + t_name2 = hauppauge_tuner[tuner2].name; + } else { + t_name2 = "unknown"; + } + + tvee->tuner_hauppauge_model = tuner1; + tvee->tuner2_hauppauge_model = tuner2; + tvee->tuner_formats = 0; + tvee->tuner2_formats = 0; + for (i = j = 0; i < 8; i++) { + if (t_format1 & (1 << i)) { + tvee->tuner_formats |= hauppauge_tuner_fmt[i].id; + t_fmt_name1[j++] = hauppauge_tuner_fmt[i].name; + } + } + for (i = j = 0; i < 8; i++) { + if (t_format2 & (1 << i)) { + tvee->tuner2_formats |= hauppauge_tuner_fmt[i].id; + t_fmt_name2[j++] = hauppauge_tuner_fmt[i].name; + } + } + + pr_info("Hauppauge model %d, rev %s, serial# %u\n", + tvee->model, tvee->rev_str, tvee->serial_number); + if (tvee->has_MAC_address == 1) + pr_info("MAC address is %pM\n", tvee->MAC_address); + pr_info("tuner model is %s (idx %d, type %d)\n", + t_name1, tuner1, tvee->tuner_type); + pr_info("TV standards%s%s%s%s%s%s%s%s (eeprom 0x%02x)\n", + t_fmt_name1[0], t_fmt_name1[1], t_fmt_name1[2], + t_fmt_name1[3], t_fmt_name1[4], t_fmt_name1[5], + t_fmt_name1[6], t_fmt_name1[7], t_format1); + if (tuner2) + pr_info("second tuner model is %s (idx %d, type %d)\n", + t_name2, tuner2, tvee->tuner2_type); + if (t_format2) + pr_info("TV standards%s%s%s%s%s%s%s%s (eeprom 0x%02x)\n", + t_fmt_name2[0], t_fmt_name2[1], t_fmt_name2[2], + t_fmt_name2[3], t_fmt_name2[4], t_fmt_name2[5], + t_fmt_name2[6], t_fmt_name2[7], t_format2); + if (audioic < 0) { + pr_info("audio processor is unknown (no idx)\n"); + tvee->audio_processor = TVEEPROM_AUDPROC_OTHER; + } else { + if (audioic < ARRAY_SIZE(audio_ic)) + pr_info("audio processor is %s (idx %d)\n", + audio_ic[audioic].name, audioic); + else + pr_info("audio processor is unknown (idx %d)\n", + audioic); + } + if (tvee->decoder_processor) + pr_info("decoder processor is %s (idx %d)\n", + STRM(decoderIC, tvee->decoder_processor), + tvee->decoder_processor); + if (tvee->has_ir) + pr_info("has %sradio, has %sIR receiver, has %sIR transmitter\n", + tvee->has_radio ? "" : "no ", + (tvee->has_ir & 2) ? "" : "no ", + (tvee->has_ir & 4) ? "" : "no "); + else + pr_info("has %sradio\n", + tvee->has_radio ? 
"" : "no "); +} +EXPORT_SYMBOL(tveeprom_hauppauge_analog); + +/* ----------------------------------------------------------------------- */ +/* generic helper functions */ + +int tveeprom_read(struct i2c_client *c, unsigned char *eedata, int len) +{ + unsigned char buf; + int err; + + buf = 0; + err = i2c_master_send(c, &buf, 1); + if (err != 1) { + pr_info("Huh, no eeprom present (err=%d)?\n", err); + return -1; + } + err = i2c_master_recv(c, eedata, len); + if (err != len) { + pr_warn("i2c eeprom read error (err=%d)\n", err); + return -1; + } + + print_hex_dump_debug("full 256-byte eeprom dump:", DUMP_PREFIX_NONE, + 16, 1, eedata, len, true); + return 0; +} +EXPORT_SYMBOL(tveeprom_read); diff --git a/drivers/media/common/uvc.c b/drivers/media/common/uvc.c new file mode 100644 index 0000000000..9c0ba7a6c1 --- /dev/null +++ b/drivers/media/common/uvc.c @@ -0,0 +1,183 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include +#include +#include +#include +#include +#include + +/* ------------------------------------------------------------------------ + * Video formats + */ + +static const struct uvc_format_desc uvc_fmts[] = { + { + .guid = UVC_GUID_FORMAT_YUY2, + .fcc = V4L2_PIX_FMT_YUYV, + }, + { + .guid = UVC_GUID_FORMAT_YUY2_ISIGHT, + .fcc = V4L2_PIX_FMT_YUYV, + }, + { + .guid = UVC_GUID_FORMAT_NV12, + .fcc = V4L2_PIX_FMT_NV12, + }, + { + .guid = UVC_GUID_FORMAT_MJPEG, + .fcc = V4L2_PIX_FMT_MJPEG, + }, + { + .guid = UVC_GUID_FORMAT_YV12, + .fcc = V4L2_PIX_FMT_YVU420, + }, + { + .guid = UVC_GUID_FORMAT_I420, + .fcc = V4L2_PIX_FMT_YUV420, + }, + { + .guid = UVC_GUID_FORMAT_M420, + .fcc = V4L2_PIX_FMT_M420, + }, + { + .guid = UVC_GUID_FORMAT_UYVY, + .fcc = V4L2_PIX_FMT_UYVY, + }, + { + .guid = UVC_GUID_FORMAT_Y800, + .fcc = V4L2_PIX_FMT_GREY, + }, + { + .guid = UVC_GUID_FORMAT_Y8, + .fcc = V4L2_PIX_FMT_GREY, + }, + { + .guid = UVC_GUID_FORMAT_D3DFMT_L8, + .fcc = V4L2_PIX_FMT_GREY, + }, + { + .guid = UVC_GUID_FORMAT_KSMEDIA_L8_IR, + .fcc = V4L2_PIX_FMT_GREY, + }, + { + .guid = UVC_GUID_FORMAT_Y10, + .fcc = V4L2_PIX_FMT_Y10, + }, + { + .guid = UVC_GUID_FORMAT_Y12, + .fcc = V4L2_PIX_FMT_Y12, + }, + { + .guid = UVC_GUID_FORMAT_Y16, + .fcc = V4L2_PIX_FMT_Y16, + }, + { + .guid = UVC_GUID_FORMAT_BY8, + .fcc = V4L2_PIX_FMT_SBGGR8, + }, + { + .guid = UVC_GUID_FORMAT_BA81, + .fcc = V4L2_PIX_FMT_SBGGR8, + }, + { + .guid = UVC_GUID_FORMAT_GBRG, + .fcc = V4L2_PIX_FMT_SGBRG8, + }, + { + .guid = UVC_GUID_FORMAT_GRBG, + .fcc = V4L2_PIX_FMT_SGRBG8, + }, + { + .guid = UVC_GUID_FORMAT_RGGB, + .fcc = V4L2_PIX_FMT_SRGGB8, + }, + { + .guid = UVC_GUID_FORMAT_RGBP, + .fcc = V4L2_PIX_FMT_RGB565, + }, + { + .guid = UVC_GUID_FORMAT_BGR3, + .fcc = V4L2_PIX_FMT_BGR24, + }, + { + .guid = UVC_GUID_FORMAT_BGR4, + .fcc = V4L2_PIX_FMT_XBGR32, + }, + { + .guid = UVC_GUID_FORMAT_H264, + .fcc = V4L2_PIX_FMT_H264, + }, + { + .guid = UVC_GUID_FORMAT_H265, + .fcc = V4L2_PIX_FMT_HEVC, + }, + { + .guid = UVC_GUID_FORMAT_Y8I, + .fcc = V4L2_PIX_FMT_Y8I, + }, + { + .guid = UVC_GUID_FORMAT_Y12I, + .fcc = V4L2_PIX_FMT_Y12I, + }, + { + .guid = UVC_GUID_FORMAT_Z16, + .fcc = V4L2_PIX_FMT_Z16, + }, + { + .guid = UVC_GUID_FORMAT_RW10, + .fcc = V4L2_PIX_FMT_SRGGB10P, + }, + { + .guid = UVC_GUID_FORMAT_BG16, + .fcc = V4L2_PIX_FMT_SBGGR16, + }, + { + .guid = UVC_GUID_FORMAT_GB16, + .fcc = V4L2_PIX_FMT_SGBRG16, + }, + { + .guid = UVC_GUID_FORMAT_RG16, + .fcc = V4L2_PIX_FMT_SRGGB16, + }, + { + .guid = UVC_GUID_FORMAT_GR16, + .fcc = V4L2_PIX_FMT_SGRBG16, + }, + { + .guid = UVC_GUID_FORMAT_INVZ, + .fcc = V4L2_PIX_FMT_Z16, + }, + { + .guid = 
UVC_GUID_FORMAT_INVI, + .fcc = V4L2_PIX_FMT_Y10, + }, + { + .guid = UVC_GUID_FORMAT_INZI, + .fcc = V4L2_PIX_FMT_INZI, + }, + { + .guid = UVC_GUID_FORMAT_CNF4, + .fcc = V4L2_PIX_FMT_CNF4, + }, + { + .guid = UVC_GUID_FORMAT_HEVC, + .fcc = V4L2_PIX_FMT_HEVC, + }, +}; + +const struct uvc_format_desc *uvc_format_by_guid(const u8 guid[16]) +{ + unsigned int len = ARRAY_SIZE(uvc_fmts); + unsigned int i; + + for (i = 0; i < len; ++i) { + if (memcmp(guid, uvc_fmts[i].guid, 16) == 0) + return &uvc_fmts[i]; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(uvc_format_by_guid); + +MODULE_LICENSE("GPL"); diff --git a/drivers/media/common/v4l2-tpg/Kconfig b/drivers/media/common/v4l2-tpg/Kconfig new file mode 100644 index 0000000000..7ec4efd12e --- /dev/null +++ b/drivers/media/common/v4l2-tpg/Kconfig @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only +config VIDEO_V4L2_TPG + tristate diff --git a/drivers/media/common/v4l2-tpg/Makefile b/drivers/media/common/v4l2-tpg/Makefile new file mode 100644 index 0000000000..f6278ca661 --- /dev/null +++ b/drivers/media/common/v4l2-tpg/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only +v4l2-tpg-objs := v4l2-tpg-core.o v4l2-tpg-colors.o + +obj-$(CONFIG_VIDEO_V4L2_TPG) += v4l2-tpg.o diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c new file mode 100644 index 0000000000..a4341205c1 --- /dev/null +++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c @@ -0,0 +1,1409 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * v4l2-tpg-colors.c - A table that converts colors to various colorspaces + * + * The test pattern generator uses the tpg_colors for its test patterns. + * For testing colorspaces the first 8 colors of that table need to be + * converted to their equivalent in the target colorspace. + * + * The tpg_csc_colors[] table is the result of that conversion and since + * it is precalculated the colorspace conversion is just a simple table + * lookup. + * + * This source also contains the code used to generate the tpg_csc_colors + * table. Run the following command to compile it: + * + * gcc v4l2-tpg-colors.c -DCOMPILE_APP -o gen-colors -lm + * + * and run the utility. + * + * Note that the converted colors are in the range 0x000-0xff0 (so times 16) + * in order to preserve precision. + * + * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved. + */ + +#include +#include + +/* sRGB colors with range [0-255] */ +const struct tpg_rbg_color8 tpg_colors[TPG_COLOR_MAX] = { + /* + * Colors to test colorspace conversion: converting these colors + * to other colorspaces will never lead to out-of-gamut colors. 
+ */ + { 191, 191, 191 }, /* TPG_COLOR_CSC_WHITE */ + { 191, 191, 50 }, /* TPG_COLOR_CSC_YELLOW */ + { 50, 191, 191 }, /* TPG_COLOR_CSC_CYAN */ + { 50, 191, 50 }, /* TPG_COLOR_CSC_GREEN */ + { 191, 50, 191 }, /* TPG_COLOR_CSC_MAGENTA */ + { 191, 50, 50 }, /* TPG_COLOR_CSC_RED */ + { 50, 50, 191 }, /* TPG_COLOR_CSC_BLUE */ + { 50, 50, 50 }, /* TPG_COLOR_CSC_BLACK */ + + /* 75% colors */ + { 191, 191, 0 }, /* TPG_COLOR_75_YELLOW */ + { 0, 191, 191 }, /* TPG_COLOR_75_CYAN */ + { 0, 191, 0 }, /* TPG_COLOR_75_GREEN */ + { 191, 0, 191 }, /* TPG_COLOR_75_MAGENTA */ + { 191, 0, 0 }, /* TPG_COLOR_75_RED */ + { 0, 0, 191 }, /* TPG_COLOR_75_BLUE */ + + /* 100% colors */ + { 255, 255, 255 }, /* TPG_COLOR_100_WHITE */ + { 255, 255, 0 }, /* TPG_COLOR_100_YELLOW */ + { 0, 255, 255 }, /* TPG_COLOR_100_CYAN */ + { 0, 255, 0 }, /* TPG_COLOR_100_GREEN */ + { 255, 0, 255 }, /* TPG_COLOR_100_MAGENTA */ + { 255, 0, 0 }, /* TPG_COLOR_100_RED */ + { 0, 0, 255 }, /* TPG_COLOR_100_BLUE */ + { 0, 0, 0 }, /* TPG_COLOR_100_BLACK */ + + { 0, 0, 0 }, /* TPG_COLOR_RANDOM placeholder */ +}; + +#ifndef COMPILE_APP + +/* Generated table */ +const unsigned short tpg_rec709_to_linear[255 * 16 + 1] = { + 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, + 4, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 6, 7, 7, + 7, 7, 8, 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, + 11, 11, 11, 11, 12, 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, + 14, 14, 15, 15, 15, 15, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, + 18, 18, 18, 18, 19, 19, 19, 19, 20, 20, 20, 20, 20, 21, 21, 21, + 21, 22, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 24, 24, 25, + 25, 25, 25, 26, 26, 26, 26, 26, 27, 27, 27, 27, 28, 28, 28, 28, + 28, 29, 29, 29, 29, 30, 30, 30, 30, 30, 31, 31, 31, 31, 32, 32, + 32, 32, 32, 33, 33, 33, 33, 34, 34, 34, 34, 34, 35, 35, 35, 35, + 36, 36, 36, 36, 36, 37, 37, 37, 37, 38, 38, 38, 38, 38, 39, 39, + 39, 39, 40, 40, 40, 40, 40, 41, 41, 41, 41, 42, 42, 42, 42, 42, + 43, 43, 43, 43, 44, 44, 44, 44, 44, 45, 45, 45, 45, 46, 46, 46, + 46, 46, 47, 47, 47, 47, 48, 48, 48, 48, 48, 49, 49, 49, 49, 50, + 50, 50, 50, 50, 51, 51, 51, 51, 52, 52, 52, 52, 52, 53, 53, 53, + 53, 54, 54, 54, 54, 54, 55, 55, 55, 55, 56, 56, 56, 56, 56, 57, + 57, 57, 57, 58, 58, 58, 58, 58, 59, 59, 59, 59, 60, 60, 60, 60, + 60, 61, 61, 61, 61, 62, 62, 62, 62, 62, 63, 63, 63, 63, 64, 64, + 64, 64, 64, 65, 65, 65, 65, 66, 66, 66, 66, 66, 67, 67, 67, 67, + 68, 68, 68, 68, 68, 69, 69, 69, 69, 70, 70, 70, 70, 70, 71, 71, + 71, 71, 72, 72, 72, 72, 72, 73, 73, 73, 73, 73, 74, 74, 74, 74, + 74, 75, 75, 75, 75, 76, 76, 76, 76, 76, 77, 77, 77, 77, 78, 78, + 78, 78, 79, 79, 79, 79, 79, 80, 80, 80, 80, 81, 81, 81, 81, 82, + 82, 82, 82, 82, 83, 83, 83, 83, 84, 84, 84, 84, 85, 85, 85, 85, + 86, 86, 86, 86, 87, 87, 87, 87, 88, 88, 88, 88, 89, 89, 89, 89, + 90, 90, 90, 90, 91, 91, 91, 91, 92, 92, 92, 92, 93, 93, 93, 93, + 94, 94, 94, 94, 95, 95, 95, 95, 96, 96, 96, 96, 97, 97, 97, 97, + 98, 98, 98, 98, 99, 99, 99, 99, 100, 100, 100, 101, 101, 101, 101, 102, + 102, 102, 102, 103, 103, 103, 103, 104, 104, 104, 105, 105, 105, 105, 106, 106, + 106, 106, 107, 107, 107, 107, 108, 108, 108, 109, 109, 109, 109, 110, 110, 110, + 111, 111, 111, 111, 112, 112, 112, 112, 113, 113, 113, 114, 114, 114, 114, 115, + 115, 115, 116, 116, 116, 116, 117, 117, 117, 118, 118, 118, 118, 119, 119, 119, + 120, 120, 120, 120, 121, 121, 121, 122, 122, 122, 123, 123, 123, 123, 124, 124, + 124, 125, 125, 125, 125, 126, 126, 126, 127, 127, 127, 128, 128, 128, 128, 129, + 129, 129, 130, 130, 130, 131, 131, 131, 132, 132, 132, 132, 
133, 133, 133, 134, + 134, 134, 135, 135, 135, 136, 136, 136, 136, 137, 137, 137, 138, 138, 138, 139, + 139, 139, 140, 140, 140, 141, 141, 141, 142, 142, 142, 142, 143, 143, 143, 144, + 144, 144, 145, 145, 145, 146, 146, 146, 147, 147, 147, 148, 148, 148, 149, 149, + 149, 150, 150, 150, 151, 151, 151, 152, 152, 152, 153, 153, 153, 154, 154, 154, + 155, 155, 155, 156, 156, 156, 157, 157, 157, 158, 158, 158, 159, 159, 159, 160, + 160, 160, 161, 161, 161, 162, 162, 162, 163, 163, 163, 164, 164, 164, 165, 165, + 165, 166, 166, 167, 167, 167, 168, 168, 168, 169, 169, 169, 170, 170, 170, 171, + 171, 171, 172, 172, 172, 173, 173, 174, 174, 174, 175, 175, 175, 176, 176, 176, + 177, 177, 177, 178, 178, 179, 179, 179, 180, 180, 180, 181, 181, 181, 182, 182, + 183, 183, 183, 184, 184, 184, 185, 185, 186, 186, 186, 187, 187, 187, 188, 188, + 188, 189, 189, 190, 190, 190, 191, 191, 191, 192, 192, 193, 193, 193, 194, 194, + 194, 195, 195, 196, 196, 196, 197, 197, 198, 198, 198, 199, 199, 199, 200, 200, + 201, 201, 201, 202, 202, 203, 203, 203, 204, 204, 204, 205, 205, 206, 206, 206, + 207, 207, 208, 208, 208, 209, 209, 210, 210, 210, 211, 211, 212, 212, 212, 213, + 213, 214, 214, 214, 215, 215, 216, 216, 216, 217, 217, 218, 218, 218, 219, 219, + 220, 220, 220, 221, 221, 222, 222, 222, 223, 223, 224, 224, 224, 225, 225, 226, + 226, 227, 227, 227, 228, 228, 229, 229, 229, 230, 230, 231, 231, 232, 232, 232, + 233, 233, 234, 234, 234, 235, 235, 236, 236, 237, 237, 237, 238, 238, 239, 239, + 240, 240, 240, 241, 241, 242, 242, 243, 243, 243, 244, 244, 245, 245, 246, 246, + 246, 247, 247, 248, 248, 249, 249, 249, 250, 250, 251, 251, 252, 252, 252, 253, + 253, 254, 254, 255, 255, 256, 256, 256, 257, 257, 258, 258, 259, 259, 260, 260, + 260, 261, 261, 262, 262, 263, 263, 264, 264, 264, 265, 265, 266, 266, 267, 267, + 268, 268, 269, 269, 269, 270, 270, 271, 271, 272, 272, 273, 273, 274, 274, 274, + 275, 275, 276, 276, 277, 277, 278, 278, 279, 279, 279, 280, 280, 281, 281, 282, + 282, 283, 283, 284, 284, 285, 285, 286, 286, 286, 287, 287, 288, 288, 289, 289, + 290, 290, 291, 291, 292, 292, 293, 293, 294, 294, 295, 295, 295, 296, 296, 297, + 297, 298, 298, 299, 299, 300, 300, 301, 301, 302, 302, 303, 303, 304, 304, 305, + 305, 306, 306, 307, 307, 308, 308, 309, 309, 309, 310, 310, 311, 311, 312, 312, + 313, 313, 314, 314, 315, 315, 316, 316, 317, 317, 318, 318, 319, 319, 320, 320, + 321, 321, 322, 322, 323, 323, 324, 324, 325, 325, 326, 326, 327, 327, 328, 328, + 329, 329, 330, 330, 331, 331, 332, 332, 333, 333, 334, 335, 335, 336, 336, 337, + 337, 338, 338, 339, 339, 340, 340, 341, 341, 342, 342, 343, 343, 344, 344, 345, + 345, 346, 346, 347, 347, 348, 348, 349, 349, 350, 351, 351, 352, 352, 353, 353, + 354, 354, 355, 355, 356, 356, 357, 357, 358, 358, 359, 360, 360, 361, 361, 362, + 362, 363, 363, 364, 364, 365, 365, 366, 366, 367, 368, 368, 369, 369, 370, 370, + 371, 371, 372, 372, 373, 373, 374, 375, 375, 376, 376, 377, 377, 378, 378, 379, + 379, 380, 381, 381, 382, 382, 383, 383, 384, 384, 385, 386, 386, 387, 387, 388, + 388, 389, 389, 390, 391, 391, 392, 392, 393, 393, 394, 394, 395, 396, 396, 397, + 397, 398, 398, 399, 399, 400, 401, 401, 402, 402, 403, 403, 404, 405, 405, 406, + 406, 407, 407, 408, 409, 409, 410, 410, 411, 411, 412, 413, 413, 414, 414, 415, + 415, 416, 417, 417, 418, 418, 419, 419, 420, 421, 421, 422, 422, 423, 424, 424, + 425, 425, 426, 426, 427, 428, 428, 429, 429, 430, 431, 431, 432, 432, 433, 433, + 434, 435, 435, 436, 436, 437, 438, 438, 439, 439, 440, 441, 441, 442, 442, 443, + 444, 
444, 445, 445, 446, 447, 447, 448, 448, 449, 450, 450, 451, 451, 452, 453, + 453, 454, 454, 455, 456, 456, 457, 457, 458, 459, 459, 460, 460, 461, 462, 462, + 463, 463, 464, 465, 465, 466, 467, 467, 468, 468, 469, 470, 470, 471, 471, 472, + 473, 473, 474, 475, 475, 476, 476, 477, 478, 478, 479, 480, 480, 481, 481, 482, + 483, 483, 484, 485, 485, 486, 486, 487, 488, 488, 489, 490, 490, 491, 491, 492, + 493, 493, 494, 495, 495, 496, 497, 497, 498, 498, 499, 500, 500, 501, 502, 502, + 503, 504, 504, 505, 505, 506, 507, 507, 508, 509, 509, 510, 511, 511, 512, 513, + 513, 514, 514, 515, 516, 516, 517, 518, 518, 519, 520, 520, 521, 522, 522, 523, + 524, 524, 525, 526, 526, 527, 528, 528, 529, 529, 530, 531, 531, 532, 533, 533, + 534, 535, 535, 536, 537, 537, 538, 539, 539, 540, 541, 541, 542, 543, 543, 544, + 545, 545, 546, 547, 547, 548, 549, 549, 550, 551, 551, 552, 553, 553, 554, 555, + 555, 556, 557, 557, 558, 559, 560, 560, 561, 562, 562, 563, 564, 564, 565, 566, + 566, 567, 568, 568, 569, 570, 570, 571, 572, 572, 573, 574, 575, 575, 576, 577, + 577, 578, 579, 579, 580, 581, 581, 582, 583, 584, 584, 585, 586, 586, 587, 588, + 588, 589, 590, 590, 591, 592, 593, 593, 594, 595, 595, 596, 597, 598, 598, 599, + 600, 600, 601, 602, 602, 603, 604, 605, 605, 606, 607, 607, 608, 609, 610, 610, + 611, 612, 612, 613, 614, 615, 615, 616, 617, 617, 618, 619, 620, 620, 621, 622, + 622, 623, 624, 625, 625, 626, 627, 627, 628, 629, 630, 630, 631, 632, 632, 633, + 634, 635, 635, 636, 637, 638, 638, 639, 640, 640, 641, 642, 643, 643, 644, 645, + 646, 646, 647, 648, 649, 649, 650, 651, 652, 652, 653, 654, 654, 655, 656, 657, + 657, 658, 659, 660, 660, 661, 662, 663, 663, 664, 665, 666, 666, 667, 668, 669, + 669, 670, 671, 672, 672, 673, 674, 675, 675, 676, 677, 678, 678, 679, 680, 681, + 681, 682, 683, 684, 684, 685, 686, 687, 687, 688, 689, 690, 690, 691, 692, 693, + 694, 694, 695, 696, 697, 697, 698, 699, 700, 700, 701, 702, 703, 703, 704, 705, + 706, 707, 707, 708, 709, 710, 710, 711, 712, 713, 714, 714, 715, 716, 717, 717, + 718, 719, 720, 720, 721, 722, 723, 724, 724, 725, 726, 727, 728, 728, 729, 730, + 731, 731, 732, 733, 734, 735, 735, 736, 737, 738, 739, 739, 740, 741, 742, 742, + 743, 744, 745, 746, 746, 747, 748, 749, 750, 750, 751, 752, 753, 754, 754, 755, + 756, 757, 758, 758, 759, 760, 761, 762, 762, 763, 764, 765, 766, 766, 767, 768, + 769, 770, 771, 771, 772, 773, 774, 775, 775, 776, 777, 778, 779, 779, 780, 781, + 782, 783, 783, 784, 785, 786, 787, 788, 788, 789, 790, 791, 792, 793, 793, 794, + 795, 796, 797, 797, 798, 799, 800, 801, 802, 802, 803, 804, 805, 806, 807, 807, + 808, 809, 810, 811, 812, 812, 813, 814, 815, 816, 817, 817, 818, 819, 820, 821, + 822, 822, 823, 824, 825, 826, 827, 827, 828, 829, 830, 831, 832, 832, 833, 834, + 835, 836, 837, 838, 838, 839, 840, 841, 842, 843, 843, 844, 845, 846, 847, 848, + 849, 849, 850, 851, 852, 853, 854, 855, 855, 856, 857, 858, 859, 860, 861, 861, + 862, 863, 864, 865, 866, 867, 867, 868, 869, 870, 871, 872, 873, 873, 874, 875, + 876, 877, 878, 879, 880, 880, 881, 882, 883, 884, 885, 886, 887, 887, 888, 889, + 890, 891, 892, 893, 894, 894, 895, 896, 897, 898, 899, 900, 901, 901, 902, 903, + 904, 905, 906, 907, 908, 909, 909, 910, 911, 912, 913, 914, 915, 916, 916, 917, + 918, 919, 920, 921, 922, 923, 924, 925, 925, 926, 927, 928, 929, 930, 931, 932, + 933, 933, 934, 935, 936, 937, 938, 939, 940, 941, 942, 942, 943, 944, 945, 946, + 947, 948, 949, 950, 951, 952, 952, 953, 954, 955, 956, 957, 958, 959, 960, 961, + 962, 962, 963, 964, 965, 966, 
967, 968, 969, 970, 971, 972, 973, 973, 974, 975, + 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, 985, 986, 987, 988, 989, 990, + 991, 992, 993, 994, 995, 996, 997, 998, 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, + 1006, 1007, 1008, 1009, 1010, 1011, 1012, 1013, 1013, 1014, 1015, 1016, 1017, 1018, 1019, 1020, + 1021, 1022, 1023, 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1030, 1031, 1032, 1033, 1034, 1035, + 1036, 1037, 1038, 1039, 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049, 1050, 1050, + 1051, 1052, 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063, 1064, 1065, 1066, + 1067, 1068, 1069, 1070, 1071, 1072, 1073, 1074, 1075, 1076, 1077, 1078, 1078, 1079, 1080, 1081, + 1082, 1083, 1084, 1085, 1086, 1087, 1088, 1089, 1090, 1091, 1092, 1093, 1094, 1095, 1096, 1097, + 1098, 1099, 1100, 1101, 1102, 1103, 1104, 1105, 1106, 1107, 1108, 1109, 1110, 1111, 1112, 1113, + 1114, 1115, 1116, 1117, 1118, 1119, 1120, 1121, 1122, 1123, 1124, 1125, 1126, 1127, 1128, 1129, + 1130, 1131, 1132, 1133, 1134, 1135, 1136, 1137, 1138, 1139, 1140, 1141, 1142, 1143, 1144, 1145, + 1146, 1147, 1148, 1149, 1150, 1151, 1152, 1153, 1154, 1155, 1156, 1157, 1158, 1159, 1160, 1161, + 1162, 1163, 1164, 1165, 1166, 1167, 1168, 1169, 1170, 1171, 1172, 1173, 1174, 1175, 1176, 1177, + 1178, 1179, 1180, 1181, 1182, 1183, 1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191, 1193, 1194, + 1195, 1196, 1197, 1198, 1199, 1200, 1201, 1202, 1203, 1204, 1205, 1206, 1207, 1208, 1209, 1210, + 1211, 1212, 1213, 1214, 1215, 1216, 1217, 1218, 1219, 1220, 1221, 1223, 1224, 1225, 1226, 1227, + 1228, 1229, 1230, 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1238, 1239, 1240, 1241, 1242, 1243, + 1245, 1246, 1247, 1248, 1249, 1250, 1251, 1252, 1253, 1254, 1255, 1256, 1257, 1258, 1259, 1260, + 1261, 1262, 1264, 1265, 1266, 1267, 1268, 1269, 1270, 1271, 1272, 1273, 1274, 1275, 1276, 1277, + 1278, 1280, 1281, 1282, 1283, 1284, 1285, 1286, 1287, 1288, 1289, 1290, 1291, 1292, 1293, 1295, + 1296, 1297, 1298, 1299, 1300, 1301, 1302, 1303, 1304, 1305, 1306, 1307, 1309, 1310, 1311, 1312, + 1313, 1314, 1315, 1316, 1317, 1318, 1319, 1320, 1322, 1323, 1324, 1325, 1326, 1327, 1328, 1329, + 1330, 1331, 1332, 1334, 1335, 1336, 1337, 1338, 1339, 1340, 1341, 1342, 1343, 1345, 1346, 1347, + 1348, 1349, 1350, 1351, 1352, 1353, 1354, 1356, 1357, 1358, 1359, 1360, 1361, 1362, 1363, 1364, + 1365, 1367, 1368, 1369, 1370, 1371, 1372, 1373, 1374, 1375, 1377, 1378, 1379, 1380, 1381, 1382, + 1383, 1384, 1385, 1387, 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1396, 1397, 1398, 1399, 1400, + 1401, 1402, 1403, 1405, 1406, 1407, 1408, 1409, 1410, 1411, 1412, 1414, 1415, 1416, 1417, 1418, + 1419, 1420, 1421, 1423, 1424, 1425, 1426, 1427, 1428, 1429, 1431, 1432, 1433, 1434, 1435, 1436, + 1437, 1439, 1440, 1441, 1442, 1443, 1444, 1445, 1446, 1448, 1449, 1450, 1451, 1452, 1453, 1455, + 1456, 1457, 1458, 1459, 1460, 1461, 1463, 1464, 1465, 1466, 1467, 1468, 1469, 1471, 1472, 1473, + 1474, 1475, 1476, 1478, 1479, 1480, 1481, 1482, 1483, 1484, 1486, 1487, 1488, 1489, 1490, 1491, + 1493, 1494, 1495, 1496, 1497, 1498, 1500, 1501, 1502, 1503, 1504, 1505, 1507, 1508, 1509, 1510, + 1511, 1512, 1514, 1515, 1516, 1517, 1518, 1519, 1521, 1522, 1523, 1524, 1525, 1527, 1528, 1529, + 1530, 1531, 1532, 1534, 1535, 1536, 1537, 1538, 1540, 1541, 1542, 1543, 1544, 1545, 1547, 1548, + 1549, 1550, 1551, 1553, 1554, 1555, 1556, 1557, 1559, 1560, 1561, 1562, 1563, 1564, 1566, 1567, + 1568, 1569, 1570, 1572, 1573, 1574, 1575, 1576, 1578, 1579, 1580, 1581, 1582, 1584, 1585, 1586, + 
1587, 1588, 1590, 1591, 1592, 1593, 1594, 1596, 1597, 1598, 1599, 1601, 1602, 1603, 1604, 1605, + 1607, 1608, 1609, 1610, 1611, 1613, 1614, 1615, 1616, 1617, 1619, 1620, 1621, 1622, 1624, 1625, + 1626, 1627, 1628, 1630, 1631, 1632, 1633, 1635, 1636, 1637, 1638, 1639, 1641, 1642, 1643, 1644, + 1646, 1647, 1648, 1649, 1650, 1652, 1653, 1654, 1655, 1657, 1658, 1659, 1660, 1662, 1663, 1664, + 1665, 1667, 1668, 1669, 1670, 1671, 1673, 1674, 1675, 1676, 1678, 1679, 1680, 1681, 1683, 1684, + 1685, 1686, 1688, 1689, 1690, 1691, 1693, 1694, 1695, 1696, 1698, 1699, 1700, 1701, 1703, 1704, + 1705, 1706, 1708, 1709, 1710, 1711, 1713, 1714, 1715, 1716, 1718, 1719, 1720, 1721, 1723, 1724, + 1725, 1726, 1728, 1729, 1730, 1731, 1733, 1734, 1735, 1737, 1738, 1739, 1740, 1742, 1743, 1744, + 1745, 1747, 1748, 1749, 1750, 1752, 1753, 1754, 1756, 1757, 1758, 1759, 1761, 1762, 1763, 1764, + 1766, 1767, 1768, 1770, 1771, 1772, 1773, 1775, 1776, 1777, 1778, 1780, 1781, 1782, 1784, 1785, + 1786, 1787, 1789, 1790, 1791, 1793, 1794, 1795, 1796, 1798, 1799, 1800, 1802, 1803, 1804, 1806, + 1807, 1808, 1809, 1811, 1812, 1813, 1815, 1816, 1817, 1818, 1820, 1821, 1822, 1824, 1825, 1826, + 1828, 1829, 1830, 1831, 1833, 1834, 1835, 1837, 1838, 1839, 1841, 1842, 1843, 1844, 1846, 1847, + 1848, 1850, 1851, 1852, 1854, 1855, 1856, 1858, 1859, 1860, 1862, 1863, 1864, 1865, 1867, 1868, + 1869, 1871, 1872, 1873, 1875, 1876, 1877, 1879, 1880, 1881, 1883, 1884, 1885, 1887, 1888, 1889, + 1891, 1892, 1893, 1894, 1896, 1897, 1898, 1900, 1901, 1902, 1904, 1905, 1906, 1908, 1909, 1910, + 1912, 1913, 1914, 1916, 1917, 1918, 1920, 1921, 1922, 1924, 1925, 1926, 1928, 1929, 1930, 1932, + 1933, 1935, 1936, 1937, 1939, 1940, 1941, 1943, 1944, 1945, 1947, 1948, 1949, 1951, 1952, 1953, + 1955, 1956, 1957, 1959, 1960, 1961, 1963, 1964, 1965, 1967, 1968, 1970, 1971, 1972, 1974, 1975, + 1976, 1978, 1979, 1980, 1982, 1983, 1984, 1986, 1987, 1989, 1990, 1991, 1993, 1994, 1995, 1997, + 1998, 1999, 2001, 2002, 2004, 2005, 2006, 2008, 2009, 2010, 2012, 2013, 2015, 2016, 2017, 2019, + 2020, 2021, 2023, 2024, 2026, 2027, 2028, 2030, 2031, 2032, 2034, 2035, 2037, 2038, 2039, 2041, + 2042, 2043, 2045, 2046, 2048, 2049, 2050, 2052, 2053, 2055, 2056, 2057, 2059, 2060, 2061, 2063, + 2064, 2066, 2067, 2068, 2070, 2071, 2073, 2074, 2075, 2077, 2078, 2080, 2081, 2082, 2084, 2085, + 2087, 2088, 2089, 2091, 2092, 2094, 2095, 2096, 2098, 2099, 2101, 2102, 2103, 2105, 2106, 2108, + 2109, 2110, 2112, 2113, 2115, 2116, 2117, 2119, 2120, 2122, 2123, 2124, 2126, 2127, 2129, 2130, + 2132, 2133, 2134, 2136, 2137, 2139, 2140, 2141, 2143, 2144, 2146, 2147, 2149, 2150, 2151, 2153, + 2154, 2156, 2157, 2159, 2160, 2161, 2163, 2164, 2166, 2167, 2169, 2170, 2171, 2173, 2174, 2176, + 2177, 2179, 2180, 2181, 2183, 2184, 2186, 2187, 2189, 2190, 2191, 2193, 2194, 2196, 2197, 2199, + 2200, 2202, 2203, 2204, 2206, 2207, 2209, 2210, 2212, 2213, 2214, 2216, 2217, 2219, 2220, 2222, + 2223, 2225, 2226, 2228, 2229, 2230, 2232, 2233, 2235, 2236, 2238, 2239, 2241, 2242, 2243, 2245, + 2246, 2248, 2249, 2251, 2252, 2254, 2255, 2257, 2258, 2260, 2261, 2262, 2264, 2265, 2267, 2268, + 2270, 2271, 2273, 2274, 2276, 2277, 2279, 2280, 2282, 2283, 2284, 2286, 2287, 2289, 2290, 2292, + 2293, 2295, 2296, 2298, 2299, 2301, 2302, 2304, 2305, 2307, 2308, 2310, 2311, 2312, 2314, 2315, + 2317, 2318, 2320, 2321, 2323, 2324, 2326, 2327, 2329, 2330, 2332, 2333, 2335, 2336, 2338, 2339, + 2341, 2342, 2344, 2345, 2347, 2348, 2350, 2351, 2353, 2354, 2356, 2357, 2359, 2360, 2362, 2363, + 2365, 2366, 2368, 2369, 
2371, 2372, 2374, 2375, 2377, 2378, 2380, 2381, 2383, 2384, 2386, 2387, + 2389, 2390, 2392, 2393, 2395, 2396, 2398, 2399, 2401, 2402, 2404, 2405, 2407, 2408, 2410, 2411, + 2413, 2414, 2416, 2417, 2419, 2420, 2422, 2423, 2425, 2426, 2428, 2429, 2431, 2433, 2434, 2436, + 2437, 2439, 2440, 2442, 2443, 2445, 2446, 2448, 2449, 2451, 2452, 2454, 2455, 2457, 2458, 2460, + 2462, 2463, 2465, 2466, 2468, 2469, 2471, 2472, 2474, 2475, 2477, 2478, 2480, 2481, 2483, 2485, + 2486, 2488, 2489, 2491, 2492, 2494, 2495, 2497, 2498, 2500, 2502, 2503, 2505, 2506, 2508, 2509, + 2511, 2512, 2514, 2515, 2517, 2519, 2520, 2522, 2523, 2525, 2526, 2528, 2529, 2531, 2533, 2534, + 2536, 2537, 2539, 2540, 2542, 2543, 2545, 2547, 2548, 2550, 2551, 2553, 2554, 2556, 2557, 2559, + 2561, 2562, 2564, 2565, 2567, 2568, 2570, 2572, 2573, 2575, 2576, 2578, 2579, 2581, 2583, 2584, + 2586, 2587, 2589, 2590, 2592, 2594, 2595, 2597, 2598, 2600, 2601, 2603, 2605, 2606, 2608, 2609, + 2611, 2613, 2614, 2616, 2617, 2619, 2620, 2622, 2624, 2625, 2627, 2628, 2630, 2632, 2633, 2635, + 2636, 2638, 2640, 2641, 2643, 2644, 2646, 2647, 2649, 2651, 2652, 2654, 2655, 2657, 2659, 2660, + 2662, 2663, 2665, 2667, 2668, 2670, 2671, 2673, 2675, 2676, 2678, 2679, 2681, 2683, 2684, 2686, + 2687, 2689, 2691, 2692, 2694, 2696, 2697, 2699, 2700, 2702, 2704, 2705, 2707, 2708, 2710, 2712, + 2713, 2715, 2716, 2718, 2720, 2721, 2723, 2725, 2726, 2728, 2729, 2731, 2733, 2734, 2736, 2738, + 2739, 2741, 2742, 2744, 2746, 2747, 2749, 2751, 2752, 2754, 2755, 2757, 2759, 2760, 2762, 2764, + 2765, 2767, 2769, 2770, 2772, 2773, 2775, 2777, 2778, 2780, 2782, 2783, 2785, 2787, 2788, 2790, + 2791, 2793, 2795, 2796, 2798, 2800, 2801, 2803, 2805, 2806, 2808, 2810, 2811, 2813, 2814, 2816, + 2818, 2819, 2821, 2823, 2824, 2826, 2828, 2829, 2831, 2833, 2834, 2836, 2838, 2839, 2841, 2843, + 2844, 2846, 2848, 2849, 2851, 2853, 2854, 2856, 2857, 2859, 2861, 2862, 2864, 2866, 2867, 2869, + 2871, 2872, 2874, 2876, 2877, 2879, 2881, 2882, 2884, 2886, 2888, 2889, 2891, 2893, 2894, 2896, + 2898, 2899, 2901, 2903, 2904, 2906, 2908, 2909, 2911, 2913, 2914, 2916, 2918, 2919, 2921, 2923, + 2924, 2926, 2928, 2929, 2931, 2933, 2935, 2936, 2938, 2940, 2941, 2943, 2945, 2946, 2948, 2950, + 2951, 2953, 2955, 2956, 2958, 2960, 2962, 2963, 2965, 2967, 2968, 2970, 2972, 2973, 2975, 2977, + 2979, 2980, 2982, 2984, 2985, 2987, 2989, 2990, 2992, 2994, 2996, 2997, 2999, 3001, 3002, 3004, + 3006, 3008, 3009, 3011, 3013, 3014, 3016, 3018, 3020, 3021, 3023, 3025, 3026, 3028, 3030, 3032, + 3033, 3035, 3037, 3038, 3040, 3042, 3044, 3045, 3047, 3049, 3050, 3052, 3054, 3056, 3057, 3059, + 3061, 3063, 3064, 3066, 3068, 3069, 3071, 3073, 3075, 3076, 3078, 3080, 3082, 3083, 3085, 3087, + 3089, 3090, 3092, 3094, 3095, 3097, 3099, 3101, 3102, 3104, 3106, 3108, 3109, 3111, 3113, 3115, + 3116, 3118, 3120, 3122, 3123, 3125, 3127, 3129, 3130, 3132, 3134, 3136, 3137, 3139, 3141, 3143, + 3144, 3146, 3148, 3150, 3151, 3153, 3155, 3157, 3158, 3160, 3162, 3164, 3165, 3167, 3169, 3171, + 3172, 3174, 3176, 3178, 3179, 3181, 3183, 3185, 3187, 3188, 3190, 3192, 3194, 3195, 3197, 3199, + 3201, 3202, 3204, 3206, 3208, 3209, 3211, 3213, 3215, 3217, 3218, 3220, 3222, 3224, 3225, 3227, + 3229, 3231, 3233, 3234, 3236, 3238, 3240, 3241, 3243, 3245, 3247, 3249, 3250, 3252, 3254, 3256, + 3258, 3259, 3261, 3263, 3265, 3266, 3268, 3270, 3272, 3274, 3275, 3277, 3279, 3281, 3283, 3284, + 3286, 3288, 3290, 3292, 3293, 3295, 3297, 3299, 3301, 3302, 3304, 3306, 3308, 3310, 3311, 3313, + 3315, 3317, 3319, 3320, 3322, 3324, 3326, 3328, 
3329, 3331, 3333, 3335, 3337, 3338, 3340, 3342, + 3344, 3346, 3348, 3349, 3351, 3353, 3355, 3357, 3358, 3360, 3362, 3364, 3366, 3368, 3369, 3371, + 3373, 3375, 3377, 3378, 3380, 3382, 3384, 3386, 3388, 3389, 3391, 3393, 3395, 3397, 3399, 3400, + 3402, 3404, 3406, 3408, 3410, 3411, 3413, 3415, 3417, 3419, 3421, 3422, 3424, 3426, 3428, 3430, + 3432, 3433, 3435, 3437, 3439, 3441, 3443, 3444, 3446, 3448, 3450, 3452, 3454, 3455, 3457, 3459, + 3461, 3463, 3465, 3467, 3468, 3470, 3472, 3474, 3476, 3478, 3480, 3481, 3483, 3485, 3487, 3489, + 3491, 3492, 3494, 3496, 3498, 3500, 3502, 3504, 3506, 3507, 3509, 3511, 3513, 3515, 3517, 3519, + 3520, 3522, 3524, 3526, 3528, 3530, 3532, 3533, 3535, 3537, 3539, 3541, 3543, 3545, 3547, 3548, + 3550, 3552, 3554, 3556, 3558, 3560, 3562, 3563, 3565, 3567, 3569, 3571, 3573, 3575, 3577, 3578, + 3580, 3582, 3584, 3586, 3588, 3590, 3592, 3594, 3595, 3597, 3599, 3601, 3603, 3605, 3607, 3609, + 3611, 3612, 3614, 3616, 3618, 3620, 3622, 3624, 3626, 3628, 3629, 3631, 3633, 3635, 3637, 3639, + 3641, 3643, 3645, 3647, 3648, 3650, 3652, 3654, 3656, 3658, 3660, 3662, 3664, 3666, 3667, 3669, + 3671, 3673, 3675, 3677, 3679, 3681, 3683, 3685, 3687, 3688, 3690, 3692, 3694, 3696, 3698, 3700, + 3702, 3704, 3706, 3708, 3710, 3711, 3713, 3715, 3717, 3719, 3721, 3723, 3725, 3727, 3729, 3731, + 3733, 3735, 3736, 3738, 3740, 3742, 3744, 3746, 3748, 3750, 3752, 3754, 3756, 3758, 3760, 3762, + 3764, 3765, 3767, 3769, 3771, 3773, 3775, 3777, 3779, 3781, 3783, 3785, 3787, 3789, 3791, 3793, + 3795, 3796, 3798, 3800, 3802, 3804, 3806, 3808, 3810, 3812, 3814, 3816, 3818, 3820, 3822, 3824, + 3826, 3828, 3830, 3832, 3833, 3835, 3837, 3839, 3841, 3843, 3845, 3847, 3849, 3851, 3853, 3855, + 3857, 3859, 3861, 3863, 3865, 3867, 3869, 3871, 3873, 3875, 3877, 3879, 3881, 3883, 3884, 3886, + 3888, 3890, 3892, 3894, 3896, 3898, 3900, 3902, 3904, 3906, 3908, 3910, 3912, 3914, 3916, 3918, + 3920, 3922, 3924, 3926, 3928, 3930, 3932, 3934, 3936, 3938, 3940, 3942, 3944, 3946, 3948, 3950, + 3952, 3954, 3956, 3958, 3960, 3962, 3964, 3966, 3968, 3970, 3972, 3974, 3976, 3978, 3980, 3982, + 3984, 3986, 3988, 3990, 3992, 3994, 3996, 3998, 4000, 4002, 4004, 4006, 4008, 4010, 4012, 4014, + 4016, 4018, 4020, 4022, 4024, 4026, 4028, 4030, 4032, 4034, 4036, 4038, 4040, 4042, 4044, 4046, + 4048, 4050, 4052, 4054, 4056, 4058, 4060, 4062, 4064, 4066, 4068, 4070, 4072, 4074, 4076, 4078, + 4080, +}; + +/* Generated table */ +const unsigned short tpg_linear_to_rec709[255 * 16 + 1] = { + 0, 5, 9, 14, 18, 22, 27, 32, 36, 41, 45, 50, 54, 59, 63, 68, + 72, 77, 81, 86, 90, 95, 99, 104, 108, 113, 117, 122, 126, 131, 135, 139, + 144, 149, 153, 158, 162, 167, 171, 176, 180, 185, 189, 194, 198, 203, 207, 212, + 216, 221, 225, 230, 234, 239, 243, 248, 252, 257, 261, 266, 270, 275, 279, 284, + 288, 293, 297, 302, 306, 311, 315, 320, 324, 328, 334, 338, 343, 347, 352, 356, + 360, 365, 369, 373, 377, 381, 386, 390, 394, 398, 402, 406, 410, 414, 418, 422, + 426, 430, 433, 437, 441, 445, 449, 452, 456, 460, 464, 467, 471, 475, 478, 482, + 485, 489, 492, 496, 499, 503, 506, 510, 513, 517, 520, 524, 527, 530, 534, 537, + 540, 544, 547, 550, 554, 557, 560, 563, 566, 570, 573, 576, 579, 582, 586, 589, + 592, 595, 598, 601, 604, 607, 610, 613, 616, 619, 622, 625, 628, 631, 634, 637, + 640, 643, 646, 649, 652, 655, 658, 660, 663, 666, 669, 672, 675, 677, 680, 683, + 686, 689, 691, 694, 697, 700, 702, 705, 708, 711, 713, 716, 719, 721, 724, 727, + 729, 732, 735, 737, 740, 743, 745, 748, 750, 753, 756, 758, 761, 763, 766, 768, + 771, 773, 776, 
779, 781, 784, 786, 789, 791, 794, 796, 799, 801, 803, 806, 808, + 811, 813, 816, 818, 821, 823, 825, 828, 830, 833, 835, 837, 840, 842, 844, 847, + 849, 851, 854, 856, 858, 861, 863, 865, 868, 870, 872, 875, 877, 879, 881, 884, + 886, 888, 891, 893, 895, 897, 900, 902, 904, 906, 908, 911, 913, 915, 917, 919, + 922, 924, 926, 928, 930, 933, 935, 937, 939, 941, 943, 946, 948, 950, 952, 954, + 956, 958, 960, 963, 965, 967, 969, 971, 973, 975, 977, 979, 981, 984, 986, 988, + 990, 992, 994, 996, 998, 1000, 1002, 1004, 1006, 1008, 1010, 1012, 1014, 1016, 1018, 1020, + 1022, 1024, 1026, 1028, 1030, 1032, 1034, 1036, 1038, 1040, 1042, 1044, 1046, 1048, 1050, 1052, + 1054, 1056, 1058, 1060, 1062, 1064, 1066, 1068, 1069, 1071, 1073, 1075, 1077, 1079, 1081, 1083, + 1085, 1087, 1089, 1090, 1092, 1094, 1096, 1098, 1100, 1102, 1104, 1106, 1107, 1109, 1111, 1113, + 1115, 1117, 1119, 1120, 1122, 1124, 1126, 1128, 1130, 1131, 1133, 1135, 1137, 1139, 1141, 1142, + 1144, 1146, 1148, 1150, 1151, 1153, 1155, 1157, 1159, 1160, 1162, 1164, 1166, 1168, 1169, 1171, + 1173, 1175, 1176, 1178, 1180, 1182, 1184, 1185, 1187, 1189, 1191, 1192, 1194, 1196, 1198, 1199, + 1201, 1203, 1204, 1206, 1208, 1210, 1211, 1213, 1215, 1217, 1218, 1220, 1222, 1223, 1225, 1227, + 1228, 1230, 1232, 1234, 1235, 1237, 1239, 1240, 1242, 1244, 1245, 1247, 1249, 1250, 1252, 1254, + 1255, 1257, 1259, 1260, 1262, 1264, 1265, 1267, 1269, 1270, 1272, 1274, 1275, 1277, 1279, 1280, + 1282, 1283, 1285, 1287, 1288, 1290, 1292, 1293, 1295, 1296, 1298, 1300, 1301, 1303, 1305, 1306, + 1308, 1309, 1311, 1313, 1314, 1316, 1317, 1319, 1321, 1322, 1324, 1325, 1327, 1328, 1330, 1332, + 1333, 1335, 1336, 1338, 1339, 1341, 1343, 1344, 1346, 1347, 1349, 1350, 1352, 1354, 1355, 1357, + 1358, 1360, 1361, 1363, 1364, 1366, 1367, 1369, 1371, 1372, 1374, 1375, 1377, 1378, 1380, 1381, + 1383, 1384, 1386, 1387, 1389, 1390, 1392, 1393, 1395, 1396, 1398, 1399, 1401, 1402, 1404, 1405, + 1407, 1408, 1410, 1411, 1413, 1414, 1416, 1417, 1419, 1420, 1422, 1423, 1425, 1426, 1428, 1429, + 1431, 1432, 1434, 1435, 1437, 1438, 1440, 1441, 1442, 1444, 1445, 1447, 1448, 1450, 1451, 1453, + 1454, 1456, 1457, 1458, 1460, 1461, 1463, 1464, 1466, 1467, 1469, 1470, 1471, 1473, 1474, 1476, + 1477, 1479, 1480, 1481, 1483, 1484, 1486, 1487, 1489, 1490, 1491, 1493, 1494, 1496, 1497, 1498, + 1500, 1501, 1503, 1504, 1505, 1507, 1508, 1510, 1511, 1512, 1514, 1515, 1517, 1518, 1519, 1521, + 1522, 1524, 1525, 1526, 1528, 1529, 1531, 1532, 1533, 1535, 1536, 1537, 1539, 1540, 1542, 1543, + 1544, 1546, 1547, 1548, 1550, 1551, 1553, 1554, 1555, 1557, 1558, 1559, 1561, 1562, 1563, 1565, + 1566, 1567, 1569, 1570, 1571, 1573, 1574, 1576, 1577, 1578, 1580, 1581, 1582, 1584, 1585, 1586, + 1588, 1589, 1590, 1592, 1593, 1594, 1596, 1597, 1598, 1600, 1601, 1602, 1603, 1605, 1606, 1607, + 1609, 1610, 1611, 1613, 1614, 1615, 1617, 1618, 1619, 1621, 1622, 1623, 1624, 1626, 1627, 1628, + 1630, 1631, 1632, 1634, 1635, 1636, 1637, 1639, 1640, 1641, 1643, 1644, 1645, 1647, 1648, 1649, + 1650, 1652, 1653, 1654, 1655, 1657, 1658, 1659, 1661, 1662, 1663, 1664, 1666, 1667, 1668, 1670, + 1671, 1672, 1673, 1675, 1676, 1677, 1678, 1680, 1681, 1682, 1683, 1685, 1686, 1687, 1688, 1690, + 1691, 1692, 1693, 1695, 1696, 1697, 1698, 1700, 1701, 1702, 1703, 1705, 1706, 1707, 1708, 1710, + 1711, 1712, 1713, 1715, 1716, 1717, 1718, 1720, 1721, 1722, 1723, 1724, 1726, 1727, 1728, 1729, + 1731, 1732, 1733, 1734, 1736, 1737, 1738, 1739, 1740, 1742, 1743, 1744, 1745, 1746, 1748, 1749, + 1750, 1751, 1753, 1754, 1755, 1756, 1757, 
1759, 1760, 1761, 1762, 1763, 1765, 1766, 1767, 1768, + 1769, 1771, 1772, 1773, 1774, 1775, 1777, 1778, 1779, 1780, 1781, 1783, 1784, 1785, 1786, 1787, + 1788, 1790, 1791, 1792, 1793, 1794, 1796, 1797, 1798, 1799, 1800, 1801, 1803, 1804, 1805, 1806, + 1807, 1809, 1810, 1811, 1812, 1813, 1814, 1816, 1817, 1818, 1819, 1820, 1821, 1823, 1824, 1825, + 1826, 1827, 1828, 1829, 1831, 1832, 1833, 1834, 1835, 1836, 1838, 1839, 1840, 1841, 1842, 1843, + 1845, 1846, 1847, 1848, 1849, 1850, 1851, 1853, 1854, 1855, 1856, 1857, 1858, 1859, 1861, 1862, + 1863, 1864, 1865, 1866, 1867, 1868, 1870, 1871, 1872, 1873, 1874, 1875, 1876, 1878, 1879, 1880, + 1881, 1882, 1883, 1884, 1885, 1887, 1888, 1889, 1890, 1891, 1892, 1893, 1894, 1896, 1897, 1898, + 1899, 1900, 1901, 1902, 1903, 1904, 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, 1914, 1916, + 1917, 1918, 1919, 1920, 1921, 1922, 1923, 1924, 1925, 1927, 1928, 1929, 1930, 1931, 1932, 1933, + 1934, 1935, 1936, 1938, 1939, 1940, 1941, 1942, 1943, 1944, 1945, 1946, 1947, 1948, 1950, 1951, + 1952, 1953, 1954, 1955, 1956, 1957, 1958, 1959, 1960, 1961, 1963, 1964, 1965, 1966, 1967, 1968, + 1969, 1970, 1971, 1972, 1973, 1974, 1975, 1977, 1978, 1979, 1980, 1981, 1982, 1983, 1984, 1985, + 1986, 1987, 1988, 1989, 1990, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, + 2003, 2004, 2005, 2006, 2007, 2008, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, + 2020, 2021, 2022, 2023, 2024, 2025, 2026, 2027, 2028, 2029, 2031, 2032, 2033, 2034, 2035, 2036, + 2037, 2038, 2039, 2040, 2041, 2042, 2043, 2044, 2045, 2046, 2047, 2048, 2049, 2050, 2051, 2052, + 2053, 2054, 2055, 2056, 2057, 2058, 2060, 2061, 2062, 2063, 2064, 2065, 2066, 2067, 2068, 2069, + 2070, 2071, 2072, 2073, 2074, 2075, 2076, 2077, 2078, 2079, 2080, 2081, 2082, 2083, 2084, 2085, + 2086, 2087, 2088, 2089, 2090, 2091, 2092, 2093, 2094, 2095, 2096, 2097, 2098, 2099, 2100, 2101, + 2102, 2103, 2104, 2105, 2106, 2107, 2108, 2109, 2110, 2111, 2112, 2113, 2114, 2115, 2116, 2117, + 2118, 2119, 2120, 2121, 2122, 2123, 2124, 2125, 2126, 2127, 2128, 2129, 2130, 2131, 2132, 2133, + 2134, 2135, 2136, 2137, 2138, 2139, 2140, 2141, 2142, 2143, 2144, 2145, 2146, 2147, 2148, 2149, + 2150, 2151, 2152, 2153, 2154, 2155, 2156, 2157, 2158, 2159, 2160, 2161, 2162, 2163, 2164, 2165, + 2166, 2167, 2168, 2169, 2170, 2171, 2172, 2173, 2173, 2174, 2175, 2176, 2177, 2178, 2179, 2180, + 2181, 2182, 2183, 2184, 2185, 2186, 2187, 2188, 2189, 2190, 2191, 2192, 2193, 2194, 2195, 2196, + 2197, 2198, 2199, 2200, 2201, 2202, 2202, 2203, 2204, 2205, 2206, 2207, 2208, 2209, 2210, 2211, + 2212, 2213, 2214, 2215, 2216, 2217, 2218, 2219, 2220, 2221, 2222, 2223, 2224, 2224, 2225, 2226, + 2227, 2228, 2229, 2230, 2231, 2232, 2233, 2234, 2235, 2236, 2237, 2238, 2239, 2240, 2241, 2241, + 2242, 2243, 2244, 2245, 2246, 2247, 2248, 2249, 2250, 2251, 2252, 2253, 2254, 2255, 2256, 2257, + 2257, 2258, 2259, 2260, 2261, 2262, 2263, 2264, 2265, 2266, 2267, 2268, 2269, 2270, 2271, 2271, + 2272, 2273, 2274, 2275, 2276, 2277, 2278, 2279, 2280, 2281, 2282, 2283, 2283, 2284, 2285, 2286, + 2287, 2288, 2289, 2290, 2291, 2292, 2293, 2294, 2295, 2295, 2296, 2297, 2298, 2299, 2300, 2301, + 2302, 2303, 2304, 2305, 2306, 2306, 2307, 2308, 2309, 2310, 2311, 2312, 2313, 2314, 2315, 2316, + 2317, 2317, 2318, 2319, 2320, 2321, 2322, 2323, 2324, 2325, 2326, 2327, 2327, 2328, 2329, 2330, + 2331, 2332, 2333, 2334, 2335, 2336, 2336, 2337, 2338, 2339, 2340, 2341, 2342, 2343, 2344, 2345, + 2345, 2346, 2347, 2348, 2349, 2350, 2351, 2352, 2353, 2354, 2354, 
2355, 2356, 2357, 2358, 2359, + 2360, 2361, 2362, 2363, 2363, 2364, 2365, 2366, 2367, 2368, 2369, 2370, 2371, 2371, 2372, 2373, + 2374, 2375, 2376, 2377, 2378, 2379, 2379, 2380, 2381, 2382, 2383, 2384, 2385, 2386, 2386, 2387, + 2388, 2389, 2390, 2391, 2392, 2393, 2394, 2394, 2395, 2396, 2397, 2398, 2399, 2400, 2401, 2401, + 2402, 2403, 2404, 2405, 2406, 2407, 2408, 2408, 2409, 2410, 2411, 2412, 2413, 2414, 2415, 2415, + 2416, 2417, 2418, 2419, 2420, 2421, 2422, 2422, 2423, 2424, 2425, 2426, 2427, 2428, 2428, 2429, + 2430, 2431, 2432, 2433, 2434, 2435, 2435, 2436, 2437, 2438, 2439, 2440, 2441, 2441, 2442, 2443, + 2444, 2445, 2446, 2447, 2447, 2448, 2449, 2450, 2451, 2452, 2453, 2453, 2454, 2455, 2456, 2457, + 2458, 2459, 2459, 2460, 2461, 2462, 2463, 2464, 2465, 2465, 2466, 2467, 2468, 2469, 2470, 2471, + 2471, 2472, 2473, 2474, 2475, 2476, 2477, 2477, 2478, 2479, 2480, 2481, 2482, 2482, 2483, 2484, + 2485, 2486, 2487, 2488, 2488, 2489, 2490, 2491, 2492, 2493, 2493, 2494, 2495, 2496, 2497, 2498, + 2499, 2499, 2500, 2501, 2502, 2503, 2504, 2504, 2505, 2506, 2507, 2508, 2509, 2509, 2510, 2511, + 2512, 2513, 2514, 2514, 2515, 2516, 2517, 2518, 2519, 2519, 2520, 2521, 2522, 2523, 2524, 2524, + 2525, 2526, 2527, 2528, 2529, 2529, 2530, 2531, 2532, 2533, 2534, 2534, 2535, 2536, 2537, 2538, + 2539, 2539, 2540, 2541, 2542, 2543, 2544, 2544, 2545, 2546, 2547, 2548, 2548, 2549, 2550, 2551, + 2552, 2553, 2553, 2554, 2555, 2556, 2557, 2558, 2558, 2559, 2560, 2561, 2562, 2562, 2563, 2564, + 2565, 2566, 2567, 2567, 2568, 2569, 2570, 2571, 2571, 2572, 2573, 2574, 2575, 2576, 2576, 2577, + 2578, 2579, 2580, 2580, 2581, 2582, 2583, 2584, 2584, 2585, 2586, 2587, 2588, 2589, 2589, 2590, + 2591, 2592, 2593, 2593, 2594, 2595, 2596, 2597, 2597, 2598, 2599, 2600, 2601, 2601, 2602, 2603, + 2604, 2605, 2605, 2606, 2607, 2608, 2609, 2610, 2610, 2611, 2612, 2613, 2614, 2614, 2615, 2616, + 2617, 2618, 2618, 2619, 2620, 2621, 2622, 2622, 2623, 2624, 2625, 2626, 2626, 2627, 2628, 2629, + 2630, 2630, 2631, 2632, 2633, 2634, 2634, 2635, 2636, 2637, 2637, 2638, 2639, 2640, 2641, 2641, + 2642, 2643, 2644, 2645, 2645, 2646, 2647, 2648, 2649, 2649, 2650, 2651, 2652, 2653, 2653, 2654, + 2655, 2656, 2656, 2657, 2658, 2659, 2660, 2660, 2661, 2662, 2663, 2664, 2664, 2665, 2666, 2667, + 2668, 2668, 2669, 2670, 2671, 2671, 2672, 2673, 2674, 2675, 2675, 2676, 2677, 2678, 2678, 2679, + 2680, 2681, 2682, 2682, 2683, 2684, 2685, 2686, 2686, 2687, 2688, 2689, 2689, 2690, 2691, 2692, + 2693, 2693, 2694, 2695, 2696, 2696, 2697, 2698, 2699, 2700, 2700, 2701, 2702, 2703, 2703, 2704, + 2705, 2706, 2706, 2707, 2708, 2709, 2710, 2710, 2711, 2712, 2713, 2713, 2714, 2715, 2716, 2717, + 2717, 2718, 2719, 2720, 2720, 2721, 2722, 2723, 2723, 2724, 2725, 2726, 2727, 2727, 2728, 2729, + 2730, 2730, 2731, 2732, 2733, 2733, 2734, 2735, 2736, 2736, 2737, 2738, 2739, 2740, 2740, 2741, + 2742, 2743, 2743, 2744, 2745, 2746, 2746, 2747, 2748, 2749, 2749, 2750, 2751, 2752, 2752, 2753, + 2754, 2755, 2755, 2756, 2757, 2758, 2759, 2759, 2760, 2761, 2762, 2762, 2763, 2764, 2765, 2765, + 2766, 2767, 2768, 2768, 2769, 2770, 2771, 2771, 2772, 2773, 2774, 2774, 2775, 2776, 2777, 2777, + 2778, 2779, 2780, 2780, 2781, 2782, 2783, 2783, 2784, 2785, 2786, 2786, 2787, 2788, 2789, 2789, + 2790, 2791, 2792, 2792, 2793, 2794, 2795, 2795, 2796, 2797, 2798, 2798, 2799, 2800, 2801, 2801, + 2802, 2803, 2804, 2804, 2805, 2806, 2807, 2807, 2808, 2809, 2810, 2810, 2811, 2812, 2813, 2813, + 2814, 2815, 2815, 2816, 2817, 2818, 2818, 2819, 2820, 2821, 2821, 2822, 2823, 2824, 2824, 
2825, + 2826, 2827, 2827, 2828, 2829, 2830, 2830, 2831, 2832, 2832, 2833, 2834, 2835, 2835, 2836, 2837, + 2838, 2838, 2839, 2840, 2841, 2841, 2842, 2843, 2844, 2844, 2845, 2846, 2846, 2847, 2848, 2849, + 2849, 2850, 2851, 2852, 2852, 2853, 2854, 2855, 2855, 2856, 2857, 2857, 2858, 2859, 2860, 2860, + 2861, 2862, 2863, 2863, 2864, 2865, 2865, 2866, 2867, 2868, 2868, 2869, 2870, 2871, 2871, 2872, + 2873, 2873, 2874, 2875, 2876, 2876, 2877, 2878, 2879, 2879, 2880, 2881, 2881, 2882, 2883, 2884, + 2884, 2885, 2886, 2886, 2887, 2888, 2889, 2889, 2890, 2891, 2892, 2892, 2893, 2894, 2894, 2895, + 2896, 2897, 2897, 2898, 2899, 2899, 2900, 2901, 2902, 2902, 2903, 2904, 2904, 2905, 2906, 2907, + 2907, 2908, 2909, 2909, 2910, 2911, 2912, 2912, 2913, 2914, 2914, 2915, 2916, 2917, 2917, 2918, + 2919, 2919, 2920, 2921, 2922, 2922, 2923, 2924, 2924, 2925, 2926, 2927, 2927, 2928, 2929, 2929, + 2930, 2931, 2932, 2932, 2933, 2934, 2934, 2935, 2936, 2937, 2937, 2938, 2939, 2939, 2940, 2941, + 2941, 2942, 2943, 2944, 2944, 2945, 2946, 2946, 2947, 2948, 2949, 2949, 2950, 2951, 2951, 2952, + 2953, 2953, 2954, 2955, 2956, 2956, 2957, 2958, 2958, 2959, 2960, 2961, 2961, 2962, 2963, 2963, + 2964, 2965, 2965, 2966, 2967, 2968, 2968, 2969, 2970, 2970, 2971, 2972, 2972, 2973, 2974, 2975, + 2975, 2976, 2977, 2977, 2978, 2979, 2979, 2980, 2981, 2982, 2982, 2983, 2984, 2984, 2985, 2986, + 2986, 2987, 2988, 2988, 2989, 2990, 2991, 2991, 2992, 2993, 2993, 2994, 2995, 2995, 2996, 2997, + 2998, 2998, 2999, 3000, 3000, 3001, 3002, 3002, 3003, 3004, 3004, 3005, 3006, 3006, 3007, 3008, + 3009, 3009, 3010, 3011, 3011, 3012, 3013, 3013, 3014, 3015, 3015, 3016, 3017, 3018, 3018, 3019, + 3020, 3020, 3021, 3022, 3022, 3023, 3024, 3024, 3025, 3026, 3026, 3027, 3028, 3029, 3029, 3030, + 3031, 3031, 3032, 3033, 3033, 3034, 3035, 3035, 3036, 3037, 3037, 3038, 3039, 3039, 3040, 3041, + 3042, 3042, 3043, 3044, 3044, 3045, 3046, 3046, 3047, 3048, 3048, 3049, 3050, 3050, 3051, 3052, + 3052, 3053, 3054, 3054, 3055, 3056, 3056, 3057, 3058, 3059, 3059, 3060, 3061, 3061, 3062, 3063, + 3063, 3064, 3065, 3065, 3066, 3067, 3067, 3068, 3069, 3069, 3070, 3071, 3071, 3072, 3073, 3073, + 3074, 3075, 3075, 3076, 3077, 3077, 3078, 3079, 3079, 3080, 3081, 3081, 3082, 3083, 3084, 3084, + 3085, 3086, 3086, 3087, 3088, 3088, 3089, 3090, 3090, 3091, 3092, 3092, 3093, 3094, 3094, 3095, + 3096, 3096, 3097, 3098, 3098, 3099, 3100, 3100, 3101, 3102, 3102, 3103, 3104, 3104, 3105, 3106, + 3106, 3107, 3108, 3108, 3109, 3110, 3110, 3111, 3112, 3112, 3113, 3114, 3114, 3115, 3116, 3116, + 3117, 3118, 3118, 3119, 3120, 3120, 3121, 3122, 3122, 3123, 3124, 3124, 3125, 3126, 3126, 3127, + 3128, 3128, 3129, 3130, 3130, 3131, 3132, 3132, 3133, 3134, 3134, 3135, 3135, 3136, 3137, 3137, + 3138, 3139, 3139, 3140, 3141, 3141, 3142, 3143, 3143, 3144, 3145, 3145, 3146, 3147, 3147, 3148, + 3149, 3149, 3150, 3151, 3151, 3152, 3153, 3153, 3154, 3155, 3155, 3156, 3157, 3157, 3158, 3159, + 3159, 3160, 3160, 3161, 3162, 3162, 3163, 3164, 3164, 3165, 3166, 3166, 3167, 3168, 3168, 3169, + 3170, 3170, 3171, 3172, 3172, 3173, 3174, 3174, 3175, 3175, 3176, 3177, 3177, 3178, 3179, 3179, + 3180, 3181, 3181, 3182, 3183, 3183, 3184, 3185, 3185, 3186, 3187, 3187, 3188, 3188, 3189, 3190, + 3190, 3191, 3192, 3192, 3193, 3194, 3194, 3195, 3196, 3196, 3197, 3198, 3198, 3199, 3199, 3200, + 3201, 3201, 3202, 3203, 3203, 3204, 3205, 3205, 3206, 3207, 3207, 3208, 3209, 3209, 3210, 3210, + 3211, 3212, 3212, 3213, 3214, 3214, 3215, 3216, 3216, 3217, 3218, 3218, 3219, 3219, 3220, 3221, + 3221, 3222, 3223, 
3223, 3224, 3225, 3225, 3226, 3227, 3227, 3228, 3228, 3229, 3230, 3230, 3231, + 3232, 3232, 3233, 3234, 3234, 3235, 3235, 3236, 3237, 3237, 3238, 3239, 3239, 3240, 3241, 3241, + 3242, 3242, 3243, 3244, 3244, 3245, 3246, 3246, 3247, 3248, 3248, 3249, 3249, 3250, 3251, 3251, + 3252, 3253, 3253, 3254, 3255, 3255, 3256, 3256, 3257, 3258, 3258, 3259, 3260, 3260, 3261, 3262, + 3262, 3263, 3263, 3264, 3265, 3265, 3266, 3267, 3267, 3268, 3268, 3269, 3270, 3270, 3271, 3272, + 3272, 3273, 3274, 3274, 3275, 3275, 3276, 3277, 3277, 3278, 3279, 3279, 3280, 3280, 3281, 3282, + 3282, 3283, 3284, 3284, 3285, 3285, 3286, 3287, 3287, 3288, 3289, 3289, 3290, 3290, 3291, 3292, + 3292, 3293, 3294, 3294, 3295, 3295, 3296, 3297, 3297, 3298, 3299, 3299, 3300, 3300, 3301, 3302, + 3302, 3303, 3304, 3304, 3305, 3305, 3306, 3307, 3307, 3308, 3309, 3309, 3310, 3310, 3311, 3312, + 3312, 3313, 3314, 3314, 3315, 3315, 3316, 3317, 3317, 3318, 3319, 3319, 3320, 3320, 3321, 3322, + 3322, 3323, 3323, 3324, 3325, 3325, 3326, 3327, 3327, 3328, 3328, 3329, 3330, 3330, 3331, 3332, + 3332, 3333, 3333, 3334, 3335, 3335, 3336, 3336, 3337, 3338, 3338, 3339, 3340, 3340, 3341, 3341, + 3342, 3343, 3343, 3344, 3345, 3345, 3346, 3346, 3347, 3348, 3348, 3349, 3349, 3350, 3351, 3351, + 3352, 3352, 3353, 3354, 3354, 3355, 3356, 3356, 3357, 3357, 3358, 3359, 3359, 3360, 3360, 3361, + 3362, 3362, 3363, 3364, 3364, 3365, 3365, 3366, 3367, 3367, 3368, 3368, 3369, 3370, 3370, 3371, + 3371, 3372, 3373, 3373, 3374, 3375, 3375, 3376, 3376, 3377, 3378, 3378, 3379, 3379, 3380, 3381, + 3381, 3382, 3382, 3383, 3384, 3384, 3385, 3385, 3386, 3387, 3387, 3388, 3389, 3389, 3390, 3390, + 3391, 3392, 3392, 3393, 3393, 3394, 3395, 3395, 3396, 3396, 3397, 3398, 3398, 3399, 3399, 3400, + 3401, 3401, 3402, 3402, 3403, 3404, 3404, 3405, 3405, 3406, 3407, 3407, 3408, 3408, 3409, 3410, + 3410, 3411, 3411, 3412, 3413, 3413, 3414, 3414, 3415, 3416, 3416, 3417, 3418, 3418, 3419, 3419, + 3420, 3421, 3421, 3422, 3422, 3423, 3424, 3424, 3425, 3425, 3426, 3427, 3427, 3428, 3428, 3429, + 3430, 3430, 3431, 3431, 3432, 3433, 3433, 3434, 3434, 3435, 3435, 3436, 3437, 3437, 3438, 3438, + 3439, 3440, 3440, 3441, 3441, 3442, 3443, 3443, 3444, 3444, 3445, 3446, 3446, 3447, 3447, 3448, + 3449, 3449, 3450, 3450, 3451, 3452, 3452, 3453, 3453, 3454, 3455, 3455, 3456, 3456, 3457, 3458, + 3458, 3459, 3459, 3460, 3461, 3461, 3462, 3462, 3463, 3463, 3464, 3465, 3465, 3466, 3466, 3467, + 3468, 3468, 3469, 3469, 3470, 3471, 3471, 3472, 3472, 3473, 3474, 3474, 3475, 3475, 3476, 3476, + 3477, 3478, 3478, 3479, 3479, 3480, 3481, 3481, 3482, 3482, 3483, 3484, 3484, 3485, 3485, 3486, + 3486, 3487, 3488, 3488, 3489, 3489, 3490, 3491, 3491, 3492, 3492, 3493, 3494, 3494, 3495, 3495, + 3496, 3496, 3497, 3498, 3498, 3499, 3499, 3500, 3501, 3501, 3502, 3502, 3503, 3504, 3504, 3505, + 3505, 3506, 3506, 3507, 3508, 3508, 3509, 3509, 3510, 3511, 3511, 3512, 3512, 3513, 3513, 3514, + 3515, 3515, 3516, 3516, 3517, 3518, 3518, 3519, 3519, 3520, 3520, 3521, 3522, 3522, 3523, 3523, + 3524, 3525, 3525, 3526, 3526, 3527, 3527, 3528, 3529, 3529, 3530, 3530, 3531, 3531, 3532, 3533, + 3533, 3534, 3534, 3535, 3536, 3536, 3537, 3537, 3538, 3538, 3539, 3540, 3540, 3541, 3541, 3542, + 3542, 3543, 3544, 3544, 3545, 3545, 3546, 3547, 3547, 3548, 3548, 3549, 3549, 3550, 3551, 3551, + 3552, 3552, 3553, 3553, 3554, 3555, 3555, 3556, 3556, 3557, 3557, 3558, 3559, 3559, 3560, 3560, + 3561, 3561, 3562, 3563, 3563, 3564, 3564, 3565, 3566, 3566, 3567, 3567, 3568, 3568, 3569, 3570, + 3570, 3571, 3571, 3572, 3572, 3573, 3574, 
3574, 3575, 3575, 3576, 3576, 3577, 3578, 3578, 3579, + 3579, 3580, 3580, 3581, 3582, 3582, 3583, 3583, 3584, 3584, 3585, 3586, 3586, 3587, 3587, 3588, + 3588, 3589, 3590, 3590, 3591, 3591, 3592, 3592, 3593, 3594, 3594, 3595, 3595, 3596, 3596, 3597, + 3597, 3598, 3599, 3599, 3600, 3600, 3601, 3601, 3602, 3603, 3603, 3604, 3604, 3605, 3605, 3606, + 3607, 3607, 3608, 3608, 3609, 3609, 3610, 3611, 3611, 3612, 3612, 3613, 3613, 3614, 3615, 3615, + 3616, 3616, 3617, 3617, 3618, 3618, 3619, 3620, 3620, 3621, 3621, 3622, 3622, 3623, 3624, 3624, + 3625, 3625, 3626, 3626, 3627, 3627, 3628, 3629, 3629, 3630, 3630, 3631, 3631, 3632, 3633, 3633, + 3634, 3634, 3635, 3635, 3636, 3636, 3637, 3638, 3638, 3639, 3639, 3640, 3640, 3641, 3642, 3642, + 3643, 3643, 3644, 3644, 3645, 3645, 3646, 3647, 3647, 3648, 3648, 3649, 3649, 3650, 3650, 3651, + 3652, 3652, 3653, 3653, 3654, 3654, 3655, 3656, 3656, 3657, 3657, 3658, 3658, 3659, 3659, 3660, + 3661, 3661, 3662, 3662, 3663, 3663, 3664, 3664, 3665, 3666, 3666, 3667, 3667, 3668, 3668, 3669, + 3669, 3670, 3671, 3671, 3672, 3672, 3673, 3673, 3674, 3674, 3675, 3676, 3676, 3677, 3677, 3678, + 3678, 3679, 3679, 3680, 3681, 3681, 3682, 3682, 3683, 3683, 3684, 3684, 3685, 3686, 3686, 3687, + 3687, 3688, 3688, 3689, 3689, 3690, 3691, 3691, 3692, 3692, 3693, 3693, 3694, 3694, 3695, 3695, + 3696, 3697, 3697, 3698, 3698, 3699, 3699, 3700, 3700, 3701, 3702, 3702, 3703, 3703, 3704, 3704, + 3705, 3705, 3706, 3707, 3707, 3708, 3708, 3709, 3709, 3710, 3710, 3711, 3711, 3712, 3713, 3713, + 3714, 3714, 3715, 3715, 3716, 3716, 3717, 3717, 3718, 3719, 3719, 3720, 3720, 3721, 3721, 3722, + 3722, 3723, 3724, 3724, 3725, 3725, 3726, 3726, 3727, 3727, 3728, 3728, 3729, 3730, 3730, 3731, + 3731, 3732, 3732, 3733, 3733, 3734, 3734, 3735, 3736, 3736, 3737, 3737, 3738, 3738, 3739, 3739, + 3740, 3740, 3741, 3742, 3742, 3743, 3743, 3744, 3744, 3745, 3745, 3746, 3746, 3747, 3748, 3748, + 3749, 3749, 3750, 3750, 3751, 3751, 3752, 3752, 3753, 3753, 3754, 3755, 3755, 3756, 3756, 3757, + 3757, 3758, 3758, 3759, 3759, 3760, 3761, 3761, 3762, 3762, 3763, 3763, 3764, 3764, 3765, 3765, + 3766, 3766, 3767, 3768, 3768, 3769, 3769, 3770, 3770, 3771, 3771, 3772, 3772, 3773, 3773, 3774, + 3775, 3775, 3776, 3776, 3777, 3777, 3778, 3778, 3779, 3779, 3780, 3781, 3781, 3782, 3782, 3783, + 3783, 3784, 3784, 3785, 3785, 3786, 3786, 3787, 3787, 3788, 3789, 3789, 3790, 3790, 3791, 3791, + 3792, 3792, 3793, 3793, 3794, 3794, 3795, 3796, 3796, 3797, 3797, 3798, 3798, 3799, 3799, 3800, + 3800, 3801, 3801, 3802, 3802, 3803, 3804, 3804, 3805, 3805, 3806, 3806, 3807, 3807, 3808, 3808, + 3809, 3809, 3810, 3811, 3811, 3812, 3812, 3813, 3813, 3814, 3814, 3815, 3815, 3816, 3816, 3817, + 3817, 3818, 3819, 3819, 3820, 3820, 3821, 3821, 3822, 3822, 3823, 3823, 3824, 3824, 3825, 3825, + 3826, 3826, 3827, 3828, 3828, 3829, 3829, 3830, 3830, 3831, 3831, 3832, 3832, 3833, 3833, 3834, + 3834, 3835, 3835, 3836, 3837, 3837, 3838, 3838, 3839, 3839, 3840, 3840, 3841, 3841, 3842, 3842, + 3843, 3843, 3844, 3844, 3845, 3846, 3846, 3847, 3847, 3848, 3848, 3849, 3849, 3850, 3850, 3851, + 3851, 3852, 3852, 3853, 3853, 3854, 3855, 3855, 3856, 3856, 3857, 3857, 3858, 3858, 3859, 3859, + 3860, 3860, 3861, 3861, 3862, 3862, 3863, 3863, 3864, 3864, 3865, 3866, 3866, 3867, 3867, 3868, + 3868, 3869, 3869, 3870, 3870, 3871, 3871, 3872, 3872, 3873, 3873, 3874, 3874, 3875, 3876, 3876, + 3877, 3877, 3878, 3878, 3879, 3879, 3880, 3880, 3881, 3881, 3882, 3882, 3883, 3883, 3884, 3884, + 3885, 3885, 3886, 3886, 3887, 3888, 3888, 3889, 3889, 3890, 3890, 
3891, 3891, 3892, 3892, 3893, + 3893, 3894, 3894, 3895, 3895, 3896, 3896, 3897, 3897, 3898, 3898, 3899, 3900, 3900, 3901, 3901, + 3902, 3902, 3903, 3903, 3904, 3904, 3905, 3905, 3906, 3906, 3907, 3907, 3908, 3908, 3909, 3909, + 3910, 3910, 3911, 3911, 3912, 3912, 3913, 3914, 3914, 3915, 3915, 3916, 3916, 3917, 3917, 3918, + 3918, 3919, 3919, 3920, 3920, 3921, 3921, 3922, 3922, 3923, 3923, 3924, 3924, 3925, 3925, 3926, + 3926, 3927, 3927, 3928, 3929, 3929, 3930, 3930, 3931, 3931, 3932, 3932, 3933, 3933, 3934, 3934, + 3935, 3935, 3936, 3936, 3937, 3937, 3938, 3938, 3939, 3939, 3940, 3940, 3941, 3941, 3942, 3942, + 3943, 3943, 3944, 3944, 3945, 3945, 3946, 3947, 3947, 3948, 3948, 3949, 3949, 3950, 3950, 3951, + 3951, 3952, 3952, 3953, 3953, 3954, 3954, 3955, 3955, 3956, 3956, 3957, 3957, 3958, 3958, 3959, + 3959, 3960, 3960, 3961, 3961, 3962, 3962, 3963, 3963, 3964, 3964, 3965, 3965, 3966, 3966, 3967, + 3967, 3968, 3969, 3969, 3970, 3970, 3971, 3971, 3972, 3972, 3973, 3973, 3974, 3974, 3975, 3975, + 3976, 3976, 3977, 3977, 3978, 3978, 3979, 3979, 3980, 3980, 3981, 3981, 3982, 3982, 3983, 3983, + 3984, 3984, 3985, 3985, 3986, 3986, 3987, 3987, 3988, 3988, 3989, 3989, 3990, 3990, 3991, 3991, + 3992, 3992, 3993, 3993, 3994, 3994, 3995, 3995, 3996, 3996, 3997, 3997, 3998, 3998, 3999, 3999, + 4000, 4001, 4001, 4002, 4002, 4003, 4003, 4004, 4004, 4005, 4005, 4006, 4006, 4007, 4007, 4008, + 4008, 4009, 4009, 4010, 4010, 4011, 4011, 4012, 4012, 4013, 4013, 4014, 4014, 4015, 4015, 4016, + 4016, 4017, 4017, 4018, 4018, 4019, 4019, 4020, 4020, 4021, 4021, 4022, 4022, 4023, 4023, 4024, + 4024, 4025, 4025, 4026, 4026, 4027, 4027, 4028, 4028, 4029, 4029, 4030, 4030, 4031, 4031, 4032, + 4032, 4033, 4033, 4034, 4034, 4035, 4035, 4036, 4036, 4037, 4037, 4038, 4038, 4039, 4039, 4040, + 4040, 4041, 4041, 4042, 4042, 4043, 4043, 4044, 4044, 4045, 4045, 4046, 4046, 4047, 4047, 4048, + 4048, 4049, 4049, 4050, 4050, 4051, 4051, 4052, 4052, 4053, 4053, 4054, 4054, 4055, 4055, 4056, + 4056, 4057, 4057, 4058, 4058, 4059, 4059, 4060, 4060, 4061, 4061, 4062, 4062, 4063, 4063, 4064, + 4064, 4065, 4065, 4066, 4066, 4067, 4067, 4068, 4068, 4069, 4069, 4070, 4070, 4071, 4071, 4072, + 4072, 4073, 4073, 4074, 4074, 4075, 4075, 4076, 4076, 4077, 4077, 4078, 4078, 4079, 4079, 4080, + 4080, +}; + +/* Generated table */ +const struct tpg_rbg_color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFER_FUNC_SMPTE2084 + 1][TPG_COLOR_CSC_BLACK + 1] = { + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_709][1] = { 2953, 2963, 586 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_709][2] = { 0, 2967, 2937 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_709][3] = { 88, 2990, 575 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_709][4] = { 3016, 259, 2933 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_709][5] = { 3030, 405, 558 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_709][6] = { 478, 428, 2931 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_709][7] = { 547, 547, 547 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SRGB][0] = { 3056, 3056, 3056 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SRGB][1] = { 3068, 3077, 838 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SRGB][2] = { 0, 3081, 3053 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SRGB][3] = { 241, 3102, 828 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SRGB][4] = { 3126, 504, 3050 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SRGB][5] = { 3138, 657, 810 }, + 
[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SRGB][6] = { 731, 680, 3048 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SRGB][7] = { 800, 799, 800 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_OPRGB][0] = { 3033, 3033, 3033 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_OPRGB][1] = { 3046, 3054, 886 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_OPRGB][2] = { 0, 3058, 3031 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_OPRGB][3] = { 360, 3079, 877 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_OPRGB][4] = { 3103, 587, 3027 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_OPRGB][5] = { 3116, 723, 861 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_OPRGB][6] = { 789, 744, 3025 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_OPRGB][7] = { 851, 851, 851 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE240M][1] = { 2941, 2950, 546 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE240M][2] = { 0, 2954, 2924 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE240M][3] = { 78, 2978, 536 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE240M][4] = { 3004, 230, 2920 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE240M][5] = { 3018, 363, 518 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE240M][6] = { 437, 387, 2918 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE240M][7] = { 507, 507, 507 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_NONE][0] = { 2125, 2125, 2125 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_NONE][1] = { 2145, 2159, 142 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_NONE][2] = { 0, 2164, 2122 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_NONE][3] = { 19, 2198, 138 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_NONE][4] = { 2236, 57, 2116 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_NONE][5] = { 2256, 90, 133 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_NONE][6] = { 110, 96, 2113 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][1] = { 3186, 3194, 1121 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][2] = { 0, 3197, 3173 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][3] = { 523, 3216, 1112 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][4] = { 3237, 792, 3169 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][5] = { 3248, 944, 1094 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][6] = { 1017, 967, 3168 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][0] = { 1812, 1812, 1812 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][1] = { 1815, 1818, 910 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][2] = { 0, 1819, 1811 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][3] = { 472, 1825, 904 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][4] = { 1832, 686, 1810 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][5] = { 1835, 794, 893 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][6] = { 843, 809, 1810 }, + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][7] = { 886, 886, 886 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_709][1] = { 2953, 2963, 586 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_709][2] = { 0, 2967, 2937 }, + 
[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_709][3] = { 88, 2990, 575 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_709][4] = { 3016, 259, 2933 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_709][5] = { 3030, 405, 558 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_709][6] = { 478, 428, 2931 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_709][7] = { 547, 547, 547 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SRGB][0] = { 3056, 3056, 3056 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SRGB][1] = { 3068, 3077, 838 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SRGB][2] = { 0, 3081, 3053 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SRGB][3] = { 241, 3102, 828 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SRGB][4] = { 3126, 504, 3050 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SRGB][5] = { 3138, 657, 810 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SRGB][6] = { 731, 680, 3048 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SRGB][7] = { 800, 799, 800 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_OPRGB][0] = { 3033, 3033, 3033 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_OPRGB][1] = { 3046, 3054, 886 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_OPRGB][2] = { 0, 3058, 3031 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_OPRGB][3] = { 360, 3079, 877 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_OPRGB][4] = { 3103, 587, 3027 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_OPRGB][5] = { 3116, 723, 861 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_OPRGB][6] = { 789, 744, 3025 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_OPRGB][7] = { 851, 851, 851 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE240M][1] = { 2941, 2950, 546 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE240M][2] = { 0, 2954, 2924 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE240M][3] = { 78, 2978, 536 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE240M][4] = { 3004, 230, 2920 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE240M][5] = { 3018, 363, 518 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE240M][6] = { 437, 387, 2918 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE240M][7] = { 507, 507, 507 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_NONE][0] = { 2125, 2125, 2125 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_NONE][1] = { 2145, 2159, 142 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_NONE][2] = { 0, 2164, 2122 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_NONE][3] = { 19, 2198, 138 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_NONE][4] = { 2236, 57, 2116 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_NONE][5] = { 2256, 90, 133 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_NONE][6] = { 110, 96, 2113 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][1] = { 3186, 3194, 1121 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][2] = { 0, 3197, 3173 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][3] = { 523, 3216, 1112 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][4] = { 3237, 792, 3169 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][5] = { 3248, 944, 1094 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][6] = { 1017, 967, 3168 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][0] = { 
1812, 1812, 1812 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][1] = { 1815, 1818, 910 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][2] = { 0, 1819, 1811 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][3] = { 472, 1825, 904 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][4] = { 1832, 686, 1810 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][5] = { 1835, 794, 893 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][6] = { 843, 809, 1810 }, + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][7] = { 886, 886, 886 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_709][1] = { 2939, 2939, 547 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_709][2] = { 547, 2939, 2939 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_709][3] = { 547, 2939, 547 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_709][4] = { 2939, 547, 2939 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_709][5] = { 2939, 547, 547 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_709][6] = { 547, 547, 2939 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_709][7] = { 547, 547, 547 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SRGB][0] = { 3056, 3056, 3056 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SRGB][1] = { 3056, 3056, 800 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SRGB][2] = { 800, 3056, 3056 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SRGB][3] = { 800, 3056, 800 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SRGB][4] = { 3056, 800, 3056 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SRGB][5] = { 3056, 800, 800 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SRGB][6] = { 800, 800, 3056 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 800 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_OPRGB][0] = { 3033, 3033, 3033 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_OPRGB][1] = { 3033, 3033, 851 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_OPRGB][2] = { 851, 3033, 3033 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_OPRGB][3] = { 851, 3033, 851 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_OPRGB][4] = { 3033, 851, 3033 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_OPRGB][5] = { 3033, 851, 851 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_OPRGB][6] = { 851, 851, 3033 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_OPRGB][7] = { 851, 851, 851 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE240M][1] = { 2926, 2926, 507 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE240M][2] = { 507, 2926, 2926 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE240M][3] = { 507, 2926, 507 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE240M][4] = { 2926, 507, 2926 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE240M][5] = { 2926, 507, 507 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE240M][6] = { 507, 507, 2926 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE240M][7] = { 507, 507, 507 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_NONE][0] = { 2125, 2125, 2125 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_NONE][1] = { 2125, 2125, 130 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_NONE][2] = { 130, 2125, 2125 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_NONE][3] = { 130, 2125, 130 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_NONE][4] = { 2125, 130, 2125 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_NONE][5] = { 2125, 130, 130 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_NONE][6] = { 130, 130, 2125 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_NONE][7] = { 130, 
130, 130 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][1] = { 3175, 3175, 1084 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][2] = { 1084, 3175, 3175 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][3] = { 1084, 3175, 1084 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][4] = { 3175, 1084, 3175 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][5] = { 3175, 1084, 1084 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][6] = { 1084, 1084, 3175 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][0] = { 1812, 1812, 1812 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][1] = { 1812, 1812, 886 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][2] = { 886, 1812, 1812 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][3] = { 886, 1812, 886 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][4] = { 1812, 886, 1812 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][5] = { 1812, 886, 886 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][6] = { 886, 886, 1812 }, + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][7] = { 886, 886, 886 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][1] = { 2892, 3034, 910 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][2] = { 1715, 2916, 2914 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][3] = { 1631, 3012, 828 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][4] = { 2497, 119, 2867 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][5] = { 2440, 649, 657 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][6] = { 740, 0, 2841 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][7] = { 547, 547, 547 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][0] = { 3056, 3055, 3056 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][1] = { 3013, 3142, 1157 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][2] = { 1926, 3034, 3032 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][3] = { 1847, 3121, 1076 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][4] = { 2651, 304, 2990 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][5] = { 2599, 901, 909 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][6] = { 991, 0, 2966 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][7] = { 800, 799, 800 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_OPRGB][0] = { 3033, 3033, 3033 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_OPRGB][1] = { 2989, 3120, 1180 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_OPRGB][2] = { 1913, 3011, 3009 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_OPRGB][3] = { 1836, 3099, 1105 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_OPRGB][4] = { 2627, 413, 2966 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_OPRGB][5] = { 2576, 943, 951 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_OPRGB][6] = { 1026, 0, 2942 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_OPRGB][7] = { 851, 851, 851 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][1] = { 2879, 3022, 874 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][2] = { 1688, 2903, 2901 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][3] = { 1603, 2999, 791 }, + 
[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][4] = { 2479, 106, 2853 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][5] = { 2422, 610, 618 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][6] = { 702, 0, 2827 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][7] = { 507, 507, 507 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][0] = { 2125, 2125, 2125 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][1] = { 2059, 2262, 266 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][2] = { 771, 2092, 2089 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][3] = { 705, 2229, 231 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][4] = { 1550, 26, 2024 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][5] = { 1484, 163, 165 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][6] = { 196, 0, 1988 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][1] = { 3136, 3251, 1429 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][2] = { 2150, 3156, 3154 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][3] = { 2077, 3233, 1352 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][4] = { 2812, 589, 3116 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][5] = { 2765, 1182, 1190 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][6] = { 1270, 0, 3094 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][0] = { 1812, 1812, 1812 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][1] = { 1800, 1836, 1090 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][2] = { 1436, 1806, 1805 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][3] = { 1405, 1830, 1047 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][4] = { 1691, 527, 1793 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][5] = { 1674, 947, 952 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][6] = { 1000, 0, 1786 }, + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][7] = { 886, 886, 886 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_709][1] = { 2939, 2939, 464 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_709][2] = { 786, 2939, 2939 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_709][3] = { 786, 2939, 464 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_709][4] = { 2879, 547, 2956 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_709][5] = { 2879, 547, 547 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_709][6] = { 547, 547, 2956 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_709][7] = { 547, 547, 547 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SRGB][0] = { 3056, 3056, 3056 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SRGB][1] = { 3056, 3056, 717 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SRGB][2] = { 1036, 3056, 3056 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SRGB][3] = { 1036, 3056, 717 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SRGB][4] = { 3001, 800, 3071 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SRGB][5] = { 3001, 800, 799 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SRGB][6] = { 800, 800, 3071 }, + 
[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 799 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_OPRGB][0] = { 3033, 3033, 3033 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_OPRGB][1] = { 3033, 3033, 776 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_OPRGB][2] = { 1068, 3033, 3033 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_OPRGB][3] = { 1068, 3033, 776 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_OPRGB][4] = { 2977, 851, 3048 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_OPRGB][5] = { 2977, 851, 851 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_OPRGB][6] = { 851, 851, 3048 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_OPRGB][7] = { 851, 851, 851 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE240M][1] = { 2926, 2926, 423 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE240M][2] = { 749, 2926, 2926 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE240M][3] = { 749, 2926, 423 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE240M][4] = { 2865, 507, 2943 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE240M][5] = { 2865, 507, 507 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE240M][6] = { 507, 507, 2943 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE240M][7] = { 507, 507, 507 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_NONE][0] = { 2125, 2125, 2125 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_NONE][1] = { 2125, 2125, 106 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_NONE][2] = { 214, 2125, 2125 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_NONE][3] = { 214, 2125, 106 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_NONE][4] = { 2041, 130, 2149 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_NONE][5] = { 2041, 130, 130 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_NONE][6] = { 130, 130, 2149 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][1] = { 3175, 3175, 1003 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][2] = { 1313, 3175, 3175 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][3] = { 1313, 3175, 1003 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][4] = { 3126, 1084, 3188 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][5] = { 3126, 1084, 1084 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][6] = { 1084, 1084, 3188 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][0] = { 1812, 1812, 1812 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][1] = { 1812, 1812, 833 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][2] = { 1025, 1812, 1812 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][3] = { 1025, 1812, 833 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][4] = { 1796, 886, 1816 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][5] = { 1796, 886, 886 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][6] = { 886, 886, 1816 }, + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][7] = { 886, 886, 886 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_709][1] = { 2939, 2939, 
547 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_709][2] = { 547, 2939, 2939 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_709][3] = { 547, 2939, 547 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_709][4] = { 2939, 547, 2939 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_709][5] = { 2939, 547, 547 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_709][6] = { 547, 547, 2939 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_709][7] = { 547, 547, 547 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SRGB][0] = { 3056, 3056, 3056 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SRGB][1] = { 3056, 3056, 800 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SRGB][2] = { 800, 3056, 3056 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SRGB][3] = { 800, 3056, 800 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SRGB][4] = { 3056, 800, 3056 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SRGB][5] = { 3056, 800, 800 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SRGB][6] = { 800, 800, 3056 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 800 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_OPRGB][0] = { 3033, 3033, 3033 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_OPRGB][1] = { 3033, 3033, 851 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_OPRGB][2] = { 851, 3033, 3033 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_OPRGB][3] = { 851, 3033, 851 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_OPRGB][4] = { 3033, 851, 3033 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_OPRGB][5] = { 3033, 851, 851 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_OPRGB][6] = { 851, 851, 3033 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_OPRGB][7] = { 851, 851, 851 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE240M][1] = { 2926, 2926, 507 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE240M][2] = { 507, 2926, 2926 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE240M][3] = { 507, 2926, 507 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE240M][4] = { 2926, 507, 2926 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE240M][5] = { 2926, 507, 507 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE240M][6] = { 507, 507, 2926 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE240M][7] = { 507, 507, 507 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_NONE][0] = { 2125, 2125, 2125 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_NONE][1] = { 2125, 2125, 130 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_NONE][2] = { 130, 2125, 2125 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_NONE][3] = { 130, 2125, 130 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_NONE][4] = { 2125, 130, 2125 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_NONE][5] = { 2125, 130, 130 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_NONE][6] = { 130, 130, 2125 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][1] = { 3175, 3175, 1084 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][2] = { 1084, 3175, 3175 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][3] = { 1084, 3175, 1084 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][4] = { 3175, 1084, 3175 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][5] = { 3175, 1084, 1084 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][6] = { 1084, 1084, 3175 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][0] = { 1812, 1812, 1812 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][1] = { 1812, 1812, 886 }, + 
[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][2] = { 886, 1812, 1812 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][3] = { 886, 1812, 886 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][4] = { 1812, 886, 1812 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][5] = { 1812, 886, 886 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][6] = { 886, 886, 1812 }, + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][7] = { 886, 886, 886 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_709][1] = { 2939, 2939, 781 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_709][2] = { 1622, 2939, 2939 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_709][3] = { 1622, 2939, 781 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_709][4] = { 2502, 547, 2881 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_709][5] = { 2502, 547, 547 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_709][6] = { 547, 547, 2881 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_709][7] = { 547, 547, 547 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SRGB][0] = { 3056, 3056, 3056 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SRGB][1] = { 3056, 3056, 1031 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SRGB][2] = { 1838, 3056, 3056 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SRGB][3] = { 1838, 3056, 1031 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SRGB][4] = { 2657, 800, 3002 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SRGB][5] = { 2657, 800, 800 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SRGB][6] = { 800, 800, 3002 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 800 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_OPRGB][0] = { 3033, 3033, 3033 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_OPRGB][1] = { 3033, 3033, 1063 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_OPRGB][2] = { 1828, 3033, 3033 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_OPRGB][3] = { 1828, 3033, 1063 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_OPRGB][4] = { 2633, 851, 2979 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_OPRGB][5] = { 2633, 851, 851 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_OPRGB][6] = { 851, 851, 2979 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_OPRGB][7] = { 851, 851, 851 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE240M][1] = { 2926, 2926, 744 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE240M][2] = { 1594, 2926, 2926 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE240M][3] = { 1594, 2926, 744 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE240M][4] = { 2484, 507, 2867 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE240M][5] = { 2484, 507, 507 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE240M][6] = { 507, 507, 2867 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE240M][7] = { 507, 507, 507 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_NONE][0] = { 2125, 2125, 2125 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_NONE][1] = { 2125, 2125, 212 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_NONE][2] = { 698, 2125, 2125 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_NONE][3] = { 698, 2125, 212 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_NONE][4] = { 1557, 130, 2043 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_NONE][5] = { 1557, 130, 130 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_NONE][6] = { 130, 130, 2043 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_DCI_P3][1] = { 3175, 3175, 
1308 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_DCI_P3][2] = { 2069, 3175, 3175 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_DCI_P3][3] = { 2069, 3175, 1308 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_DCI_P3][4] = { 2816, 1084, 3127 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_DCI_P3][5] = { 2816, 1084, 1084 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_DCI_P3][6] = { 1084, 1084, 3127 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE2084][0] = { 1812, 1812, 1812 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE2084][1] = { 1812, 1812, 1022 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE2084][2] = { 1402, 1812, 1812 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE2084][3] = { 1402, 1812, 1022 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE2084][4] = { 1692, 886, 1797 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE2084][5] = { 1692, 886, 886 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE2084][6] = { 886, 886, 1797 }, + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE2084][7] = { 886, 886, 886 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][1] = { 2877, 2923, 1058 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][2] = { 1837, 2840, 2916 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][3] = { 1734, 2823, 993 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][4] = { 2427, 961, 2812 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][5] = { 2351, 912, 648 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][6] = { 792, 618, 2788 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][7] = { 547, 547, 547 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SRGB][0] = { 3056, 3056, 3056 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SRGB][1] = { 2999, 3041, 1301 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SRGB][2] = { 2040, 2965, 3034 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SRGB][3] = { 1944, 2950, 1238 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SRGB][4] = { 2587, 1207, 2940 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SRGB][5] = { 2517, 1159, 900 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SRGB][6] = { 1042, 870, 2917 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 800 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_OPRGB][0] = { 3033, 3033, 3033 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_OPRGB][1] = { 2976, 3018, 1315 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_OPRGB][2] = { 2024, 2942, 3011 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_OPRGB][3] = { 1930, 2926, 1256 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_OPRGB][4] = { 2563, 1227, 2916 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_OPRGB][5] = { 2494, 1183, 943 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_OPRGB][6] = { 1073, 916, 2894 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_OPRGB][7] = { 851, 851, 851 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE240M][1] = { 2864, 2910, 1024 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE240M][2] = { 1811, 2826, 2903 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE240M][3] = { 1707, 2809, 958 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE240M][4] = { 2408, 926, 2798 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE240M][5] = { 2331, 876, 609 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE240M][6] = { 755, 579, 2773 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE240M][7] = { 507, 507, 507 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_NONE][0] = { 
2125, 2125, 2125 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_NONE][1] = { 2039, 2102, 338 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_NONE][2] = { 873, 1987, 2092 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_NONE][3] = { 787, 1965, 305 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_NONE][4] = { 1468, 290, 1949 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_NONE][5] = { 1382, 268, 162 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_NONE][6] = { 216, 152, 1917 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][1] = { 3124, 3161, 1566 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][2] = { 2255, 3094, 3156 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][3] = { 2166, 3080, 1506 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][4] = { 2754, 1477, 3071 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][5] = { 2690, 1431, 1182 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][6] = { 1318, 1153, 3051 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][0] = { 1812, 1812, 1812 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][1] = { 1796, 1808, 1163 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][2] = { 1480, 1786, 1806 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][3] = { 1443, 1781, 1131 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][4] = { 1670, 1116, 1778 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][5] = { 1648, 1091, 947 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][6] = { 1028, 929, 1772 }, + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][7] = { 886, 886, 886 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][1] = { 2936, 2934, 992 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][2] = { 1159, 2890, 2916 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][3] = { 1150, 2885, 921 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][4] = { 2751, 766, 2837 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][5] = { 2747, 747, 650 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][6] = { 563, 570, 2812 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][7] = { 547, 547, 547 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][0] = { 3056, 3056, 3055 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][1] = { 3052, 3051, 1237 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][2] = { 1397, 3011, 3034 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][3] = { 1389, 3006, 1168 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][4] = { 2884, 1016, 2962 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][5] = { 2880, 998, 902 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][6] = { 816, 823, 2940 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 799 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_OPRGB][0] = { 3033, 3033, 3033 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_OPRGB][1] = { 3029, 3028, 1255 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_OPRGB][2] = { 1406, 2988, 3011 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_OPRGB][3] = { 1398, 2983, 1190 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_OPRGB][4] = { 2860, 1050, 2939 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_OPRGB][5] = { 2857, 1033, 945 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_OPRGB][6] = { 866, 873, 2916 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_OPRGB][7] = { 851, 851, 
851 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][1] = { 2923, 2921, 957 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][2] = { 1125, 2877, 2902 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][3] = { 1116, 2871, 885 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][4] = { 2736, 729, 2823 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][5] = { 2732, 710, 611 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][6] = { 523, 531, 2798 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][7] = { 507, 507, 507 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_NONE][0] = { 2125, 2125, 2125 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_NONE][1] = { 2120, 2118, 305 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_NONE][2] = { 392, 2056, 2092 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_NONE][3] = { 387, 2049, 271 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_NONE][4] = { 1868, 206, 1983 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_NONE][5] = { 1863, 199, 163 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_NONE][6] = { 135, 137, 1950 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][1] = { 3172, 3170, 1505 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][2] = { 1657, 3135, 3155 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][3] = { 1649, 3130, 1439 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][4] = { 3021, 1294, 3091 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][5] = { 3018, 1276, 1184 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][6] = { 1100, 1107, 3071 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][0] = { 1812, 1812, 1812 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][1] = { 1811, 1810, 1131 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][2] = { 1210, 1799, 1806 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][3] = { 1206, 1798, 1096 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][4] = { 1762, 1014, 1785 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][5] = { 1761, 1004, 948 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][6] = { 896, 901, 1778 }, + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][7] = { 886, 886, 886 }, +}; + +#else + +/* This code generates the table above */ + +#include +#include +#include + +static const double rec709_to_ntsc1953[3][3] = { + /* + * This transform uses the Bradford method to compensate for + * the different whitepoints. 
+ */ + { 0.6785011, 0.2883441, 0.0331548 }, + { 0.0165284, 1.0518725, -0.0684009 }, + { 0.0179230, 0.0506096, 0.9314674 } +}; + +static const double rec709_to_ebu[3][3] = { + { 0.9578221, 0.0421779, -0.0000000 }, + { -0.0000000, 1.0000000, 0.0000000 }, + { -0.0000000, -0.0119367, 1.0119367 } +}; + +static const double rec709_to_170m[3][3] = { + { 1.0653640, -0.0553900, -0.0099740 }, + { -0.0196361, 1.0363630, -0.0167269 }, + { 0.0016327, 0.0044133, 0.9939540 }, +}; + +static const double rec709_to_240m[3][3] = { + { 1.0653640, -0.0553900, -0.0099740 }, + { -0.0196361, 1.0363630, -0.0167269 }, + { 0.0016327, 0.0044133, 0.9939540 }, +}; + +static const double rec709_to_oprgb[3][3] = { + { 0.7151627, 0.2848373, -0.0000000 }, + { 0.0000000, 1.0000000, 0.0000000 }, + { -0.0000000, 0.0411705, 0.9588295 }, +}; + +static const double rec709_to_bt2020[3][3] = { + { 0.6274524, 0.3292485, 0.0432991 }, + { 0.0691092, 0.9195311, 0.0113597 }, + { 0.0163976, 0.0880301, 0.8955723 }, +}; + +static const double rec709_to_dcip3[3][3] = { + /* + * This transform uses the Bradford method to compensate for + * the different whitepoints. + */ + { 0.8686648, 0.1288456, 0.0024896 }, + { 0.0345479, 0.9618084, 0.0036437 }, + { 0.0167785, 0.0710559, 0.9121655 } +}; + +static void mult_matrix(double *r, double *g, double *b, const double m[3][3]) +{ + double ir, ig, ib; + + ir = m[0][0] * (*r) + m[0][1] * (*g) + m[0][2] * (*b); + ig = m[1][0] * (*r) + m[1][1] * (*g) + m[1][2] * (*b); + ib = m[2][0] * (*r) + m[2][1] * (*g) + m[2][2] * (*b); + *r = ir; + *g = ig; + *b = ib; +} + +static double transfer_srgb_to_rgb(double v) +{ + if (v < -0.04045) + return pow((-v + 0.055) / 1.055, 2.4); + return (v <= 0.04045) ? v / 12.92 : pow((v + 0.055) / 1.055, 2.4); +} + +static double transfer_rgb_to_srgb(double v) +{ + if (v <= -0.0031308) + return -1.055 * pow(-v, 1.0 / 2.4) + 0.055; + if (v <= 0.0031308) + return v * 12.92; + return 1.055 * pow(v, 1.0 / 2.4) - 0.055; +} + +static double transfer_rgb_to_smpte240m(double v) +{ + return (v <= 0.0228) ? v * 4.0 : 1.1115 * pow(v, 0.45) - 0.1115; +} + +static double transfer_rgb_to_rec709(double v) +{ + if (v <= -0.018) + return -1.099 * pow(-v, 0.45) + 0.099; + return (v < 0.018) ? v * 4.5 : 1.099 * pow(v, 0.45) - 0.099; +} + +static double transfer_rec709_to_rgb(double v) +{ + return (v < 0.081) ? v / 4.5 : pow((v + 0.099) / 1.099, 1.0 / 0.45); +} + +static double transfer_rgb_to_oprgb(double v) +{ + return pow(v, 1.0 / 2.19921875); +} + +static double transfer_rgb_to_dcip3(double v) +{ + return pow(v, 1.0 / 2.6); +} + +static double transfer_rgb_to_smpte2084(double v) +{ + const double m1 = (2610.0 / 4096.0) / 4.0; + const double m2 = 128.0 * 2523.0 / 4096.0; + const double c1 = 3424.0 / 4096.0; + const double c2 = 32.0 * 2413.0 / 4096.0; + const double c3 = 32.0 * 2392.0 / 4096.0; + + /* + * The RGB input maps to the luminance range 0-100 cd/m^2, while + * SMPTE-2084 maps values to the luminance range of 0-10000 cd/m^2. + * Hence the factor 100. + */ + v /= 100.0; + v = pow(v, m1); + return pow((c1 + c2 * v) / (1 + c3 * v), m2); +} + +static double transfer_srgb_to_rec709(double v) +{ + return transfer_rgb_to_rec709(transfer_srgb_to_rgb(v)); +} + +static void csc(enum v4l2_colorspace colorspace, enum v4l2_xfer_func xfer_func, + double *r, double *g, double *b) +{ + int clamp = 1; + + *r = transfer_srgb_to_rgb(*r); + *g = transfer_srgb_to_rgb(*g); + *b = transfer_srgb_to_rgb(*b); + + /* Convert the primaries of Rec. 
709 Linear RGB */ + switch (colorspace) { + case V4L2_COLORSPACE_SMPTE240M: + mult_matrix(r, g, b, rec709_to_240m); + break; + case V4L2_COLORSPACE_SMPTE170M: + mult_matrix(r, g, b, rec709_to_170m); + break; + case V4L2_COLORSPACE_470_SYSTEM_BG: + mult_matrix(r, g, b, rec709_to_ebu); + break; + case V4L2_COLORSPACE_470_SYSTEM_M: + mult_matrix(r, g, b, rec709_to_ntsc1953); + break; + case V4L2_COLORSPACE_OPRGB: + mult_matrix(r, g, b, rec709_to_oprgb); + break; + case V4L2_COLORSPACE_BT2020: + mult_matrix(r, g, b, rec709_to_bt2020); + break; + case V4L2_COLORSPACE_DCI_P3: + mult_matrix(r, g, b, rec709_to_dcip3); + break; + case V4L2_COLORSPACE_SRGB: + case V4L2_COLORSPACE_REC709: + break; + default: + break; + } + + if (clamp) { + *r = ((*r) < 0) ? 0 : (((*r) > 1) ? 1 : (*r)); + *g = ((*g) < 0) ? 0 : (((*g) > 1) ? 1 : (*g)); + *b = ((*b) < 0) ? 0 : (((*b) > 1) ? 1 : (*b)); + } + + switch (xfer_func) { + case V4L2_XFER_FUNC_709: + *r = transfer_rgb_to_rec709(*r); + *g = transfer_rgb_to_rec709(*g); + *b = transfer_rgb_to_rec709(*b); + break; + case V4L2_XFER_FUNC_SRGB: + *r = transfer_rgb_to_srgb(*r); + *g = transfer_rgb_to_srgb(*g); + *b = transfer_rgb_to_srgb(*b); + break; + case V4L2_XFER_FUNC_OPRGB: + *r = transfer_rgb_to_oprgb(*r); + *g = transfer_rgb_to_oprgb(*g); + *b = transfer_rgb_to_oprgb(*b); + break; + case V4L2_XFER_FUNC_DCI_P3: + *r = transfer_rgb_to_dcip3(*r); + *g = transfer_rgb_to_dcip3(*g); + *b = transfer_rgb_to_dcip3(*b); + break; + case V4L2_XFER_FUNC_SMPTE2084: + *r = transfer_rgb_to_smpte2084(*r); + *g = transfer_rgb_to_smpte2084(*g); + *b = transfer_rgb_to_smpte2084(*b); + break; + case V4L2_XFER_FUNC_SMPTE240M: + *r = transfer_rgb_to_smpte240m(*r); + *g = transfer_rgb_to_smpte240m(*g); + *b = transfer_rgb_to_smpte240m(*b); + break; + case V4L2_XFER_FUNC_NONE: + break; + } +} + +int main(int argc, char **argv) +{ + static const unsigned colorspaces[] = { + 0, + V4L2_COLORSPACE_SMPTE170M, + V4L2_COLORSPACE_SMPTE240M, + V4L2_COLORSPACE_REC709, + 0, + V4L2_COLORSPACE_470_SYSTEM_M, + V4L2_COLORSPACE_470_SYSTEM_BG, + 0, + V4L2_COLORSPACE_SRGB, + V4L2_COLORSPACE_OPRGB, + V4L2_COLORSPACE_BT2020, + 0, + V4L2_COLORSPACE_DCI_P3, + }; + static const char * const colorspace_names[] = { + "", + "V4L2_COLORSPACE_SMPTE170M", + "V4L2_COLORSPACE_SMPTE240M", + "V4L2_COLORSPACE_REC709", + "", + "V4L2_COLORSPACE_470_SYSTEM_M", + "V4L2_COLORSPACE_470_SYSTEM_BG", + "", + "V4L2_COLORSPACE_SRGB", + "V4L2_COLORSPACE_OPRGB", + "V4L2_COLORSPACE_BT2020", + "", + "V4L2_COLORSPACE_DCI_P3", + }; + static const char * const xfer_func_names[] = { + "", + "V4L2_XFER_FUNC_709", + "V4L2_XFER_FUNC_SRGB", + "V4L2_XFER_FUNC_OPRGB", + "V4L2_XFER_FUNC_SMPTE240M", + "V4L2_XFER_FUNC_NONE", + "V4L2_XFER_FUNC_DCI_P3", + "V4L2_XFER_FUNC_SMPTE2084", + }; + int i; + int x; + int c; + + printf("/* Generated table */\n"); + printf("const unsigned short tpg_rec709_to_linear[255 * 16 + 1] = {"); + for (i = 0; i <= 255 * 16; i++) { + if (i % 16 == 0) + printf("\n\t"); + printf("%4d,%s", + (int)(0.5 + 16.0 * 255.0 * + transfer_rec709_to_rgb(i / (16.0 * 255.0))), + i % 16 == 15 || i == 255 * 16 ? "" : " "); + } + printf("\n};\n\n"); + + printf("/* Generated table */\n"); + printf("const unsigned short tpg_linear_to_rec709[255 * 16 + 1] = {"); + for (i = 0; i <= 255 * 16; i++) { + if (i % 16 == 0) + printf("\n\t"); + printf("%4d,%s", + (int)(0.5 + 16.0 * 255.0 * + transfer_rgb_to_rec709(i / (16.0 * 255.0))), + i % 16 == 15 || i == 255 * 16 ? 
"" : " "); + } + printf("\n};\n\n"); + + printf("/* Generated table */\n"); + printf("const struct tpg_rbg_color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFER_FUNC_SMPTE2084 + 1][TPG_COLOR_CSC_BLACK + 1] = {\n"); + for (c = 0; c <= V4L2_COLORSPACE_DCI_P3; c++) { + for (x = 1; x <= V4L2_XFER_FUNC_SMPTE2084; x++) { + for (i = 0; i <= TPG_COLOR_CSC_BLACK; i++) { + double r, g, b; + + if (colorspaces[c] == 0) + continue; + + r = tpg_colors[i].r / 255.0; + g = tpg_colors[i].g / 255.0; + b = tpg_colors[i].b / 255.0; + + csc(c, x, &r, &g, &b); + + printf("\t[%s][%s][%d] = { %d, %d, %d },\n", + colorspace_names[c], + xfer_func_names[x], i, + (int)(r * 4080), (int)(g * 4080), (int)(b * 4080)); + } + } + } + printf("};\n\n"); + return 0; +} + +#endif diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c new file mode 100644 index 0000000000..a366566f22 --- /dev/null +++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c @@ -0,0 +1,2697 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * v4l2-tpg-core.c - Test Pattern Generator + * + * Note: gen_twopix and tpg_gen_text are based on code from vivi.c. See the + * vivi.c source for the copyright information of those functions. + * + * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved. + */ + +#include +#include + +/* Must remain in sync with enum tpg_pattern */ +const char * const tpg_pattern_strings[] = { + "75% Colorbar", + "100% Colorbar", + "CSC Colorbar", + "Horizontal 100% Colorbar", + "100% Color Squares", + "100% Black", + "100% White", + "100% Red", + "100% Green", + "100% Blue", + "16x16 Checkers", + "2x2 Checkers", + "1x1 Checkers", + "2x2 Red/Green Checkers", + "1x1 Red/Green Checkers", + "Alternating Hor Lines", + "Alternating Vert Lines", + "One Pixel Wide Cross", + "Two Pixels Wide Cross", + "Ten Pixels Wide Cross", + "Gray Ramp", + "Noise", + NULL +}; +EXPORT_SYMBOL_GPL(tpg_pattern_strings); + +/* Must remain in sync with enum tpg_aspect */ +const char * const tpg_aspect_strings[] = { + "Source Width x Height", + "4x3", + "14x9", + "16x9", + "16x9 Anamorphic", + NULL +}; +EXPORT_SYMBOL_GPL(tpg_aspect_strings); + +/* + * Sine table: sin[0] = 127 * sin(-180 degrees) + * sin[128] = 127 * sin(0 degrees) + * sin[256] = 127 * sin(180 degrees) + */ +static const s8 sin[257] = { + 0, -4, -7, -11, -13, -18, -20, -22, -26, -29, -33, -35, -37, -41, -43, -48, + -50, -52, -56, -58, -62, -63, -65, -69, -71, -75, -76, -78, -82, -83, -87, -88, + -90, -93, -94, -97, -99, -101, -103, -104, -107, -108, -110, -111, -112, -114, -115, -117, + -118, -119, -120, -121, -122, -123, -123, -124, -125, -125, -126, -126, -127, -127, -127, -127, + -127, -127, -127, -127, -126, -126, -125, -125, -124, -124, -123, -122, -121, -120, -119, -118, + -117, -116, -114, -113, -111, -110, -109, -107, -105, -103, -101, -100, -97, -96, -93, -91, + -90, -87, -85, -82, -80, -76, -75, -73, -69, -67, -63, -62, -60, -56, -54, -50, + -48, -46, -41, -39, -35, -33, -31, -26, -24, -20, -18, -15, -11, -9, -4, -2, + 0, 2, 4, 9, 11, 15, 18, 20, 24, 26, 31, 33, 35, 39, 41, 46, + 48, 50, 54, 56, 60, 62, 64, 67, 69, 73, 75, 76, 80, 82, 85, 87, + 90, 91, 93, 96, 97, 100, 101, 103, 105, 107, 109, 110, 111, 113, 114, 116, + 117, 118, 119, 120, 121, 122, 123, 124, 124, 125, 125, 126, 126, 127, 127, 127, + 127, 127, 127, 127, 127, 126, 126, 125, 125, 124, 123, 123, 122, 121, 120, 119, + 118, 117, 115, 114, 112, 111, 110, 108, 107, 104, 103, 101, 99, 97, 94, 93, + 90, 88, 87, 83, 82, 78, 76, 75, 71, 69, 65, 64, 
62, 58, 56, 52, + 50, 48, 43, 41, 37, 35, 33, 29, 26, 22, 20, 18, 13, 11, 7, 4, + 0, +}; + +#define cos(idx) sin[((idx) + 64) % sizeof(sin)] + +/* Global font descriptor */ +static const u8 *font8x16; + +void tpg_set_font(const u8 *f) +{ + font8x16 = f; +} +EXPORT_SYMBOL_GPL(tpg_set_font); + +void tpg_init(struct tpg_data *tpg, unsigned w, unsigned h) +{ + memset(tpg, 0, sizeof(*tpg)); + tpg->scaled_width = tpg->src_width = w; + tpg->src_height = tpg->buf_height = h; + tpg->crop.width = tpg->compose.width = w; + tpg->crop.height = tpg->compose.height = h; + tpg->recalc_colors = true; + tpg->recalc_square_border = true; + tpg->brightness = 128; + tpg->contrast = 128; + tpg->saturation = 128; + tpg->hue = 0; + tpg->mv_hor_mode = TPG_MOVE_NONE; + tpg->mv_vert_mode = TPG_MOVE_NONE; + tpg->field = V4L2_FIELD_NONE; + tpg_s_fourcc(tpg, V4L2_PIX_FMT_RGB24); + tpg->colorspace = V4L2_COLORSPACE_SRGB; + tpg->perc_fill = 100; + tpg->hsv_enc = V4L2_HSV_ENC_180; +} +EXPORT_SYMBOL_GPL(tpg_init); + +int tpg_alloc(struct tpg_data *tpg, unsigned max_w) +{ + unsigned pat; + unsigned plane; + + tpg->max_line_width = max_w; + for (pat = 0; pat < TPG_MAX_PAT_LINES; pat++) { + for (plane = 0; plane < TPG_MAX_PLANES; plane++) { + unsigned pixelsz = plane ? 2 : 4; + + tpg->lines[pat][plane] = + vzalloc(array3_size(max_w, 2, pixelsz)); + if (!tpg->lines[pat][plane]) + return -ENOMEM; + if (plane == 0) + continue; + tpg->downsampled_lines[pat][plane] = + vzalloc(array3_size(max_w, 2, pixelsz)); + if (!tpg->downsampled_lines[pat][plane]) + return -ENOMEM; + } + } + for (plane = 0; plane < TPG_MAX_PLANES; plane++) { + unsigned pixelsz = plane ? 2 : 4; + + tpg->contrast_line[plane] = + vzalloc(array_size(pixelsz, max_w)); + if (!tpg->contrast_line[plane]) + return -ENOMEM; + tpg->black_line[plane] = + vzalloc(array_size(pixelsz, max_w)); + if (!tpg->black_line[plane]) + return -ENOMEM; + tpg->random_line[plane] = + vzalloc(array3_size(max_w, 2, pixelsz)); + if (!tpg->random_line[plane]) + return -ENOMEM; + } + return 0; +} +EXPORT_SYMBOL_GPL(tpg_alloc); + +void tpg_free(struct tpg_data *tpg) +{ + unsigned pat; + unsigned plane; + + for (pat = 0; pat < TPG_MAX_PAT_LINES; pat++) + for (plane = 0; plane < TPG_MAX_PLANES; plane++) { + vfree(tpg->lines[pat][plane]); + tpg->lines[pat][plane] = NULL; + if (plane == 0) + continue; + vfree(tpg->downsampled_lines[pat][plane]); + tpg->downsampled_lines[pat][plane] = NULL; + } + for (plane = 0; plane < TPG_MAX_PLANES; plane++) { + vfree(tpg->contrast_line[plane]); + vfree(tpg->black_line[plane]); + vfree(tpg->random_line[plane]); + tpg->contrast_line[plane] = NULL; + tpg->black_line[plane] = NULL; + tpg->random_line[plane] = NULL; + } +} +EXPORT_SYMBOL_GPL(tpg_free); + +bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc) +{ + tpg->fourcc = fourcc; + tpg->planes = 1; + tpg->buffers = 1; + tpg->recalc_colors = true; + tpg->interleaved = false; + tpg->vdownsampling[0] = 1; + tpg->hdownsampling[0] = 1; + tpg->hmask[0] = ~0; + tpg->hmask[1] = ~0; + tpg->hmask[2] = ~0; + + switch (fourcc) { + case V4L2_PIX_FMT_SBGGR8: + case V4L2_PIX_FMT_SGBRG8: + case V4L2_PIX_FMT_SGRBG8: + case V4L2_PIX_FMT_SRGGB8: + case V4L2_PIX_FMT_SBGGR10: + case V4L2_PIX_FMT_SGBRG10: + case V4L2_PIX_FMT_SGRBG10: + case V4L2_PIX_FMT_SRGGB10: + case V4L2_PIX_FMT_SBGGR12: + case V4L2_PIX_FMT_SGBRG12: + case V4L2_PIX_FMT_SGRBG12: + case V4L2_PIX_FMT_SRGGB12: + case V4L2_PIX_FMT_SBGGR16: + case V4L2_PIX_FMT_SGBRG16: + case V4L2_PIX_FMT_SGRBG16: + case V4L2_PIX_FMT_SRGGB16: + tpg->interleaved = true; + 
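+	/*
+	 * Editor's note (annotation, not part of the upstream patch): the
+	 * Bayer formats above are set up as a two-plane format with no
+	 * chroma downsampling; tpg->interleaved appears to indicate that
+	 * both planes end up interleaved in a single buffer (tpg->buffers
+	 * stays 1 while tpg->planes becomes 2).
+	 */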
tpg->vdownsampling[1] = 1; + tpg->hdownsampling[1] = 1; + tpg->planes = 2; + fallthrough; + case V4L2_PIX_FMT_RGB332: + case V4L2_PIX_FMT_RGB565: + case V4L2_PIX_FMT_RGB565X: + case V4L2_PIX_FMT_RGB444: + case V4L2_PIX_FMT_XRGB444: + case V4L2_PIX_FMT_ARGB444: + case V4L2_PIX_FMT_RGBX444: + case V4L2_PIX_FMT_RGBA444: + case V4L2_PIX_FMT_XBGR444: + case V4L2_PIX_FMT_ABGR444: + case V4L2_PIX_FMT_BGRX444: + case V4L2_PIX_FMT_BGRA444: + case V4L2_PIX_FMT_RGB555: + case V4L2_PIX_FMT_XRGB555: + case V4L2_PIX_FMT_ARGB555: + case V4L2_PIX_FMT_RGBX555: + case V4L2_PIX_FMT_RGBA555: + case V4L2_PIX_FMT_XBGR555: + case V4L2_PIX_FMT_ABGR555: + case V4L2_PIX_FMT_BGRX555: + case V4L2_PIX_FMT_BGRA555: + case V4L2_PIX_FMT_RGB555X: + case V4L2_PIX_FMT_XRGB555X: + case V4L2_PIX_FMT_ARGB555X: + case V4L2_PIX_FMT_BGR666: + case V4L2_PIX_FMT_RGB24: + case V4L2_PIX_FMT_BGR24: + case V4L2_PIX_FMT_RGB32: + case V4L2_PIX_FMT_BGR32: + case V4L2_PIX_FMT_XRGB32: + case V4L2_PIX_FMT_XBGR32: + case V4L2_PIX_FMT_ARGB32: + case V4L2_PIX_FMT_ABGR32: + case V4L2_PIX_FMT_RGBX32: + case V4L2_PIX_FMT_BGRX32: + case V4L2_PIX_FMT_RGBA32: + case V4L2_PIX_FMT_BGRA32: + tpg->color_enc = TGP_COLOR_ENC_RGB; + break; + case V4L2_PIX_FMT_GREY: + case V4L2_PIX_FMT_Y10: + case V4L2_PIX_FMT_Y12: + case V4L2_PIX_FMT_Y16: + case V4L2_PIX_FMT_Y16_BE: + case V4L2_PIX_FMT_Z16: + tpg->color_enc = TGP_COLOR_ENC_LUMA; + break; + case V4L2_PIX_FMT_YUV444: + case V4L2_PIX_FMT_YUV555: + case V4L2_PIX_FMT_YUV565: + case V4L2_PIX_FMT_YUV32: + case V4L2_PIX_FMT_AYUV32: + case V4L2_PIX_FMT_XYUV32: + case V4L2_PIX_FMT_VUYA32: + case V4L2_PIX_FMT_VUYX32: + case V4L2_PIX_FMT_YUVA32: + case V4L2_PIX_FMT_YUVX32: + tpg->color_enc = TGP_COLOR_ENC_YCBCR; + break; + case V4L2_PIX_FMT_YUV420M: + case V4L2_PIX_FMT_YVU420M: + tpg->buffers = 3; + fallthrough; + case V4L2_PIX_FMT_YUV420: + case V4L2_PIX_FMT_YVU420: + tpg->vdownsampling[1] = 2; + tpg->vdownsampling[2] = 2; + tpg->hdownsampling[1] = 2; + tpg->hdownsampling[2] = 2; + tpg->planes = 3; + tpg->color_enc = TGP_COLOR_ENC_YCBCR; + break; + case V4L2_PIX_FMT_YUV422M: + case V4L2_PIX_FMT_YVU422M: + tpg->buffers = 3; + fallthrough; + case V4L2_PIX_FMT_YUV422P: + tpg->vdownsampling[1] = 1; + tpg->vdownsampling[2] = 1; + tpg->hdownsampling[1] = 2; + tpg->hdownsampling[2] = 2; + tpg->planes = 3; + tpg->color_enc = TGP_COLOR_ENC_YCBCR; + break; + case V4L2_PIX_FMT_NV16M: + case V4L2_PIX_FMT_NV61M: + tpg->buffers = 2; + fallthrough; + case V4L2_PIX_FMT_NV16: + case V4L2_PIX_FMT_NV61: + tpg->vdownsampling[1] = 1; + tpg->hdownsampling[1] = 1; + tpg->hmask[1] = ~1; + tpg->planes = 2; + tpg->color_enc = TGP_COLOR_ENC_YCBCR; + break; + case V4L2_PIX_FMT_NV12M: + case V4L2_PIX_FMT_NV21M: + tpg->buffers = 2; + fallthrough; + case V4L2_PIX_FMT_NV12: + case V4L2_PIX_FMT_NV21: + tpg->vdownsampling[1] = 2; + tpg->hdownsampling[1] = 1; + tpg->hmask[1] = ~1; + tpg->planes = 2; + tpg->color_enc = TGP_COLOR_ENC_YCBCR; + break; + case V4L2_PIX_FMT_YUV444M: + case V4L2_PIX_FMT_YVU444M: + tpg->buffers = 3; + tpg->planes = 3; + tpg->vdownsampling[1] = 1; + tpg->vdownsampling[2] = 1; + tpg->hdownsampling[1] = 1; + tpg->hdownsampling[2] = 1; + tpg->color_enc = TGP_COLOR_ENC_YCBCR; + break; + case V4L2_PIX_FMT_NV24: + case V4L2_PIX_FMT_NV42: + tpg->vdownsampling[1] = 1; + tpg->hdownsampling[1] = 1; + tpg->planes = 2; + tpg->color_enc = TGP_COLOR_ENC_YCBCR; + break; + case V4L2_PIX_FMT_YUYV: + case V4L2_PIX_FMT_UYVY: + case V4L2_PIX_FMT_YVYU: + case V4L2_PIX_FMT_VYUY: + tpg->hmask[0] = ~1; + tpg->color_enc = TGP_COLOR_ENC_YCBCR; + break; + 
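+	/*
+	 * Editor's note (annotation, not part of the upstream patch): the
+	 * packed HSV formats below carry hue/saturation/value instead of
+	 * Y'CbCr or RGB components; the hue range (0-179 for
+	 * V4L2_HSV_ENC_180 vs. 0-255 for V4L2_HSV_ENC_256) follows
+	 * tpg->hsv_enc, which tpg_init() above sets to V4L2_HSV_ENC_180.
+	 */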
case V4L2_PIX_FMT_HSV24: + case V4L2_PIX_FMT_HSV32: + tpg->color_enc = TGP_COLOR_ENC_HSV; + break; + default: + return false; + } + + switch (fourcc) { + case V4L2_PIX_FMT_GREY: + case V4L2_PIX_FMT_RGB332: + tpg->twopixelsize[0] = 2; + break; + case V4L2_PIX_FMT_RGB565: + case V4L2_PIX_FMT_RGB565X: + case V4L2_PIX_FMT_RGB444: + case V4L2_PIX_FMT_XRGB444: + case V4L2_PIX_FMT_ARGB444: + case V4L2_PIX_FMT_RGBX444: + case V4L2_PIX_FMT_RGBA444: + case V4L2_PIX_FMT_XBGR444: + case V4L2_PIX_FMT_ABGR444: + case V4L2_PIX_FMT_BGRX444: + case V4L2_PIX_FMT_BGRA444: + case V4L2_PIX_FMT_RGB555: + case V4L2_PIX_FMT_XRGB555: + case V4L2_PIX_FMT_ARGB555: + case V4L2_PIX_FMT_RGBX555: + case V4L2_PIX_FMT_RGBA555: + case V4L2_PIX_FMT_XBGR555: + case V4L2_PIX_FMT_ABGR555: + case V4L2_PIX_FMT_BGRX555: + case V4L2_PIX_FMT_BGRA555: + case V4L2_PIX_FMT_RGB555X: + case V4L2_PIX_FMT_XRGB555X: + case V4L2_PIX_FMT_ARGB555X: + case V4L2_PIX_FMT_YUYV: + case V4L2_PIX_FMT_UYVY: + case V4L2_PIX_FMT_YVYU: + case V4L2_PIX_FMT_VYUY: + case V4L2_PIX_FMT_YUV444: + case V4L2_PIX_FMT_YUV555: + case V4L2_PIX_FMT_YUV565: + case V4L2_PIX_FMT_Y10: + case V4L2_PIX_FMT_Y12: + case V4L2_PIX_FMT_Y16: + case V4L2_PIX_FMT_Y16_BE: + case V4L2_PIX_FMT_Z16: + tpg->twopixelsize[0] = 2 * 2; + break; + case V4L2_PIX_FMT_RGB24: + case V4L2_PIX_FMT_BGR24: + case V4L2_PIX_FMT_HSV24: + tpg->twopixelsize[0] = 2 * 3; + break; + case V4L2_PIX_FMT_BGR666: + case V4L2_PIX_FMT_RGB32: + case V4L2_PIX_FMT_BGR32: + case V4L2_PIX_FMT_XRGB32: + case V4L2_PIX_FMT_XBGR32: + case V4L2_PIX_FMT_ARGB32: + case V4L2_PIX_FMT_ABGR32: + case V4L2_PIX_FMT_RGBX32: + case V4L2_PIX_FMT_BGRX32: + case V4L2_PIX_FMT_RGBA32: + case V4L2_PIX_FMT_BGRA32: + case V4L2_PIX_FMT_YUV32: + case V4L2_PIX_FMT_AYUV32: + case V4L2_PIX_FMT_XYUV32: + case V4L2_PIX_FMT_VUYA32: + case V4L2_PIX_FMT_VUYX32: + case V4L2_PIX_FMT_YUVA32: + case V4L2_PIX_FMT_YUVX32: + case V4L2_PIX_FMT_HSV32: + tpg->twopixelsize[0] = 2 * 4; + break; + case V4L2_PIX_FMT_NV12: + case V4L2_PIX_FMT_NV21: + case V4L2_PIX_FMT_NV12M: + case V4L2_PIX_FMT_NV21M: + case V4L2_PIX_FMT_NV16: + case V4L2_PIX_FMT_NV61: + case V4L2_PIX_FMT_NV16M: + case V4L2_PIX_FMT_NV61M: + case V4L2_PIX_FMT_SBGGR8: + case V4L2_PIX_FMT_SGBRG8: + case V4L2_PIX_FMT_SGRBG8: + case V4L2_PIX_FMT_SRGGB8: + tpg->twopixelsize[0] = 2; + tpg->twopixelsize[1] = 2; + break; + case V4L2_PIX_FMT_SRGGB10: + case V4L2_PIX_FMT_SGRBG10: + case V4L2_PIX_FMT_SGBRG10: + case V4L2_PIX_FMT_SBGGR10: + case V4L2_PIX_FMT_SRGGB12: + case V4L2_PIX_FMT_SGRBG12: + case V4L2_PIX_FMT_SGBRG12: + case V4L2_PIX_FMT_SBGGR12: + case V4L2_PIX_FMT_SRGGB16: + case V4L2_PIX_FMT_SGRBG16: + case V4L2_PIX_FMT_SGBRG16: + case V4L2_PIX_FMT_SBGGR16: + tpg->twopixelsize[0] = 4; + tpg->twopixelsize[1] = 4; + break; + case V4L2_PIX_FMT_YUV444M: + case V4L2_PIX_FMT_YVU444M: + case V4L2_PIX_FMT_YUV422M: + case V4L2_PIX_FMT_YVU422M: + case V4L2_PIX_FMT_YUV422P: + case V4L2_PIX_FMT_YUV420: + case V4L2_PIX_FMT_YVU420: + case V4L2_PIX_FMT_YUV420M: + case V4L2_PIX_FMT_YVU420M: + tpg->twopixelsize[0] = 2; + tpg->twopixelsize[1] = 2; + tpg->twopixelsize[2] = 2; + break; + case V4L2_PIX_FMT_NV24: + case V4L2_PIX_FMT_NV42: + tpg->twopixelsize[0] = 2; + tpg->twopixelsize[1] = 4; + break; + } + return true; +} +EXPORT_SYMBOL_GPL(tpg_s_fourcc); + +void tpg_s_crop_compose(struct tpg_data *tpg, const struct v4l2_rect *crop, + const struct v4l2_rect *compose) +{ + tpg->crop = *crop; + tpg->compose = *compose; + tpg->scaled_width = (tpg->src_width * tpg->compose.width + + tpg->crop.width - 1) / tpg->crop.width; + 
tpg->scaled_width &= ~1; + if (tpg->scaled_width > tpg->max_line_width) + tpg->scaled_width = tpg->max_line_width; + if (tpg->scaled_width < 2) + tpg->scaled_width = 2; + tpg->recalc_lines = true; +} +EXPORT_SYMBOL_GPL(tpg_s_crop_compose); + +void tpg_reset_source(struct tpg_data *tpg, unsigned width, unsigned height, + u32 field) +{ + unsigned p; + + tpg->src_width = width; + tpg->src_height = height; + tpg->field = field; + tpg->buf_height = height; + if (V4L2_FIELD_HAS_T_OR_B(field)) + tpg->buf_height /= 2; + tpg->scaled_width = width; + tpg->crop.top = tpg->crop.left = 0; + tpg->crop.width = width; + tpg->crop.height = height; + tpg->compose.top = tpg->compose.left = 0; + tpg->compose.width = width; + tpg->compose.height = tpg->buf_height; + for (p = 0; p < tpg->planes; p++) + tpg->bytesperline[p] = (width * tpg->twopixelsize[p]) / + (2 * tpg->hdownsampling[p]); + tpg->recalc_square_border = true; +} +EXPORT_SYMBOL_GPL(tpg_reset_source); + +static enum tpg_color tpg_get_textbg_color(struct tpg_data *tpg) +{ + switch (tpg->pattern) { + case TPG_PAT_BLACK: + return TPG_COLOR_100_WHITE; + case TPG_PAT_CSC_COLORBAR: + return TPG_COLOR_CSC_BLACK; + default: + return TPG_COLOR_100_BLACK; + } +} + +static enum tpg_color tpg_get_textfg_color(struct tpg_data *tpg) +{ + switch (tpg->pattern) { + case TPG_PAT_75_COLORBAR: + case TPG_PAT_CSC_COLORBAR: + return TPG_COLOR_CSC_WHITE; + case TPG_PAT_BLACK: + return TPG_COLOR_100_BLACK; + default: + return TPG_COLOR_100_WHITE; + } +} + +static inline int rec709_to_linear(int v) +{ + v = clamp(v, 0, 0xff0); + return tpg_rec709_to_linear[v]; +} + +static inline int linear_to_rec709(int v) +{ + v = clamp(v, 0, 0xff0); + return tpg_linear_to_rec709[v]; +} + +static void color_to_hsv(struct tpg_data *tpg, int r, int g, int b, + int *h, int *s, int *v) +{ + int max_rgb, min_rgb, diff_rgb; + int aux; + int third; + int third_size; + + r >>= 4; + g >>= 4; + b >>= 4; + + /* Value */ + max_rgb = max3(r, g, b); + *v = max_rgb; + if (!max_rgb) { + *h = 0; + *s = 0; + return; + } + + /* Saturation */ + min_rgb = min3(r, g, b); + diff_rgb = max_rgb - min_rgb; + aux = 255 * diff_rgb; + aux += max_rgb / 2; + aux /= max_rgb; + *s = aux; + if (!aux) { + *h = 0; + return; + } + + third_size = (tpg->real_hsv_enc == V4L2_HSV_ENC_180) ? 
60 : 85; + + /* Hue */ + if (max_rgb == r) { + aux = g - b; + third = 0; + } else if (max_rgb == g) { + aux = b - r; + third = third_size; + } else { + aux = r - g; + third = third_size * 2; + } + + aux *= third_size / 2; + aux += diff_rgb / 2; + aux /= diff_rgb; + aux += third; + + /* Clamp Hue */ + if (tpg->real_hsv_enc == V4L2_HSV_ENC_180) { + if (aux < 0) + aux += 180; + else if (aux > 180) + aux -= 180; + } else { + aux = aux & 0xff; + } + + *h = aux; +} + +static void rgb2ycbcr(const int m[3][3], int r, int g, int b, + int y_offset, int *y, int *cb, int *cr) +{ + *y = ((m[0][0] * r + m[0][1] * g + m[0][2] * b) >> 16) + (y_offset << 4); + *cb = ((m[1][0] * r + m[1][1] * g + m[1][2] * b) >> 16) + (128 << 4); + *cr = ((m[2][0] * r + m[2][1] * g + m[2][2] * b) >> 16) + (128 << 4); +} + +static void color_to_ycbcr(struct tpg_data *tpg, int r, int g, int b, + int *y, int *cb, int *cr) +{ +#define COEFF(v, r) ((int)(0.5 + (v) * (r) * 256.0)) + + static const int bt601[3][3] = { + { COEFF(0.299, 219), COEFF(0.587, 219), COEFF(0.114, 219) }, + { COEFF(-0.1687, 224), COEFF(-0.3313, 224), COEFF(0.5, 224) }, + { COEFF(0.5, 224), COEFF(-0.4187, 224), COEFF(-0.0813, 224) }, + }; + static const int bt601_full[3][3] = { + { COEFF(0.299, 255), COEFF(0.587, 255), COEFF(0.114, 255) }, + { COEFF(-0.1687, 255), COEFF(-0.3313, 255), COEFF(0.5, 255) }, + { COEFF(0.5, 255), COEFF(-0.4187, 255), COEFF(-0.0813, 255) }, + }; + static const int rec709[3][3] = { + { COEFF(0.2126, 219), COEFF(0.7152, 219), COEFF(0.0722, 219) }, + { COEFF(-0.1146, 224), COEFF(-0.3854, 224), COEFF(0.5, 224) }, + { COEFF(0.5, 224), COEFF(-0.4542, 224), COEFF(-0.0458, 224) }, + }; + static const int rec709_full[3][3] = { + { COEFF(0.2126, 255), COEFF(0.7152, 255), COEFF(0.0722, 255) }, + { COEFF(-0.1146, 255), COEFF(-0.3854, 255), COEFF(0.5, 255) }, + { COEFF(0.5, 255), COEFF(-0.4542, 255), COEFF(-0.0458, 255) }, + }; + static const int smpte240m[3][3] = { + { COEFF(0.212, 219), COEFF(0.701, 219), COEFF(0.087, 219) }, + { COEFF(-0.116, 224), COEFF(-0.384, 224), COEFF(0.5, 224) }, + { COEFF(0.5, 224), COEFF(-0.445, 224), COEFF(-0.055, 224) }, + }; + static const int smpte240m_full[3][3] = { + { COEFF(0.212, 255), COEFF(0.701, 255), COEFF(0.087, 255) }, + { COEFF(-0.116, 255), COEFF(-0.384, 255), COEFF(0.5, 255) }, + { COEFF(0.5, 255), COEFF(-0.445, 255), COEFF(-0.055, 255) }, + }; + static const int bt2020[3][3] = { + { COEFF(0.2627, 219), COEFF(0.6780, 219), COEFF(0.0593, 219) }, + { COEFF(-0.1396, 224), COEFF(-0.3604, 224), COEFF(0.5, 224) }, + { COEFF(0.5, 224), COEFF(-0.4598, 224), COEFF(-0.0402, 224) }, + }; + static const int bt2020_full[3][3] = { + { COEFF(0.2627, 255), COEFF(0.6780, 255), COEFF(0.0593, 255) }, + { COEFF(-0.1396, 255), COEFF(-0.3604, 255), COEFF(0.5, 255) }, + { COEFF(0.5, 255), COEFF(-0.4598, 255), COEFF(-0.0402, 255) }, + }; + static const int bt2020c[4] = { + COEFF(1.0 / 1.9404, 224), COEFF(1.0 / 1.5816, 224), + COEFF(1.0 / 1.7184, 224), COEFF(1.0 / 0.9936, 224), + }; + static const int bt2020c_full[4] = { + COEFF(1.0 / 1.9404, 255), COEFF(1.0 / 1.5816, 255), + COEFF(1.0 / 1.7184, 255), COEFF(1.0 / 0.9936, 255), + }; + + bool full = tpg->real_quantization == V4L2_QUANTIZATION_FULL_RANGE; + unsigned y_offset = full ? 0 : 16; + int lin_y, yc; + + switch (tpg->real_ycbcr_enc) { + case V4L2_YCBCR_ENC_601: + rgb2ycbcr(full ? bt601_full : bt601, r, g, b, y_offset, y, cb, cr); + break; + case V4L2_YCBCR_ENC_XV601: + /* Ignore quantization range, there is only one possible + * Y'CbCr encoding. 
*/ + rgb2ycbcr(bt601, r, g, b, 16, y, cb, cr); + break; + case V4L2_YCBCR_ENC_XV709: + /* Ignore quantization range, there is only one possible + * Y'CbCr encoding. */ + rgb2ycbcr(rec709, r, g, b, 16, y, cb, cr); + break; + case V4L2_YCBCR_ENC_BT2020: + rgb2ycbcr(full ? bt2020_full : bt2020, r, g, b, y_offset, y, cb, cr); + break; + case V4L2_YCBCR_ENC_BT2020_CONST_LUM: + lin_y = (COEFF(0.2627, 255) * rec709_to_linear(r) + + COEFF(0.6780, 255) * rec709_to_linear(g) + + COEFF(0.0593, 255) * rec709_to_linear(b)) >> 16; + yc = linear_to_rec709(lin_y); + *y = full ? yc : (yc * 219) / 255 + (16 << 4); + if (b <= yc) + *cb = (((b - yc) * (full ? bt2020c_full[0] : bt2020c[0])) >> 16) + (128 << 4); + else + *cb = (((b - yc) * (full ? bt2020c_full[1] : bt2020c[1])) >> 16) + (128 << 4); + if (r <= yc) + *cr = (((r - yc) * (full ? bt2020c_full[2] : bt2020c[2])) >> 16) + (128 << 4); + else + *cr = (((r - yc) * (full ? bt2020c_full[3] : bt2020c[3])) >> 16) + (128 << 4); + break; + case V4L2_YCBCR_ENC_SMPTE240M: + rgb2ycbcr(full ? smpte240m_full : smpte240m, r, g, b, y_offset, y, cb, cr); + break; + case V4L2_YCBCR_ENC_709: + default: + rgb2ycbcr(full ? rec709_full : rec709, r, g, b, y_offset, y, cb, cr); + break; + } +} + +static void ycbcr2rgb(const int m[3][3], int y, int cb, int cr, + int y_offset, int *r, int *g, int *b) +{ + y -= y_offset << 4; + cb -= 128 << 4; + cr -= 128 << 4; + *r = m[0][0] * y + m[0][1] * cb + m[0][2] * cr; + *g = m[1][0] * y + m[1][1] * cb + m[1][2] * cr; + *b = m[2][0] * y + m[2][1] * cb + m[2][2] * cr; + *r = clamp(*r >> 12, 0, 0xff0); + *g = clamp(*g >> 12, 0, 0xff0); + *b = clamp(*b >> 12, 0, 0xff0); +} + +static void ycbcr_to_color(struct tpg_data *tpg, int y, int cb, int cr, + int *r, int *g, int *b) +{ +#undef COEFF +#define COEFF(v, r) ((int)(0.5 + (v) * ((255.0 * 255.0 * 16.0) / (r)))) + static const int bt601[3][3] = { + { COEFF(1, 219), COEFF(0, 224), COEFF(1.4020, 224) }, + { COEFF(1, 219), COEFF(-0.3441, 224), COEFF(-0.7141, 224) }, + { COEFF(1, 219), COEFF(1.7720, 224), COEFF(0, 224) }, + }; + static const int bt601_full[3][3] = { + { COEFF(1, 255), COEFF(0, 255), COEFF(1.4020, 255) }, + { COEFF(1, 255), COEFF(-0.3441, 255), COEFF(-0.7141, 255) }, + { COEFF(1, 255), COEFF(1.7720, 255), COEFF(0, 255) }, + }; + static const int rec709[3][3] = { + { COEFF(1, 219), COEFF(0, 224), COEFF(1.5748, 224) }, + { COEFF(1, 219), COEFF(-0.1873, 224), COEFF(-0.4681, 224) }, + { COEFF(1, 219), COEFF(1.8556, 224), COEFF(0, 224) }, + }; + static const int rec709_full[3][3] = { + { COEFF(1, 255), COEFF(0, 255), COEFF(1.5748, 255) }, + { COEFF(1, 255), COEFF(-0.1873, 255), COEFF(-0.4681, 255) }, + { COEFF(1, 255), COEFF(1.8556, 255), COEFF(0, 255) }, + }; + static const int smpte240m[3][3] = { + { COEFF(1, 219), COEFF(0, 224), COEFF(1.5756, 224) }, + { COEFF(1, 219), COEFF(-0.2253, 224), COEFF(-0.4767, 224) }, + { COEFF(1, 219), COEFF(1.8270, 224), COEFF(0, 224) }, + }; + static const int smpte240m_full[3][3] = { + { COEFF(1, 255), COEFF(0, 255), COEFF(1.5756, 255) }, + { COEFF(1, 255), COEFF(-0.2253, 255), COEFF(-0.4767, 255) }, + { COEFF(1, 255), COEFF(1.8270, 255), COEFF(0, 255) }, + }; + static const int bt2020[3][3] = { + { COEFF(1, 219), COEFF(0, 224), COEFF(1.4746, 224) }, + { COEFF(1, 219), COEFF(-0.1646, 224), COEFF(-0.5714, 224) }, + { COEFF(1, 219), COEFF(1.8814, 224), COEFF(0, 224) }, + }; + static const int bt2020_full[3][3] = { + { COEFF(1, 255), COEFF(0, 255), COEFF(1.4746, 255) }, + { COEFF(1, 255), COEFF(-0.1646, 255), COEFF(-0.5714, 255) }, + { COEFF(1, 255), 
COEFF(1.8814, 255), COEFF(0, 255) }, + }; + static const int bt2020c[4] = { + COEFF(1.9404, 224), COEFF(1.5816, 224), + COEFF(1.7184, 224), COEFF(0.9936, 224), + }; + static const int bt2020c_full[4] = { + COEFF(1.9404, 255), COEFF(1.5816, 255), + COEFF(1.7184, 255), COEFF(0.9936, 255), + }; + + bool full = tpg->real_quantization == V4L2_QUANTIZATION_FULL_RANGE; + unsigned y_offset = full ? 0 : 16; + int y_fac = full ? COEFF(1.0, 255) : COEFF(1.0, 219); + int lin_r, lin_g, lin_b, lin_y; + + switch (tpg->real_ycbcr_enc) { + case V4L2_YCBCR_ENC_601: + ycbcr2rgb(full ? bt601_full : bt601, y, cb, cr, y_offset, r, g, b); + break; + case V4L2_YCBCR_ENC_XV601: + /* Ignore quantization range, there is only one possible + * Y'CbCr encoding. */ + ycbcr2rgb(bt601, y, cb, cr, 16, r, g, b); + break; + case V4L2_YCBCR_ENC_XV709: + /* Ignore quantization range, there is only one possible + * Y'CbCr encoding. */ + ycbcr2rgb(rec709, y, cb, cr, 16, r, g, b); + break; + case V4L2_YCBCR_ENC_BT2020: + ycbcr2rgb(full ? bt2020_full : bt2020, y, cb, cr, y_offset, r, g, b); + break; + case V4L2_YCBCR_ENC_BT2020_CONST_LUM: + y -= full ? 0 : 16 << 4; + cb -= 128 << 4; + cr -= 128 << 4; + + if (cb <= 0) + *b = y_fac * y + (full ? bt2020c_full[0] : bt2020c[0]) * cb; + else + *b = y_fac * y + (full ? bt2020c_full[1] : bt2020c[1]) * cb; + *b = *b >> 12; + if (cr <= 0) + *r = y_fac * y + (full ? bt2020c_full[2] : bt2020c[2]) * cr; + else + *r = y_fac * y + (full ? bt2020c_full[3] : bt2020c[3]) * cr; + *r = *r >> 12; + lin_r = rec709_to_linear(*r); + lin_b = rec709_to_linear(*b); + lin_y = rec709_to_linear((y * 255) / (full ? 255 : 219)); + + lin_g = COEFF(1.0 / 0.6780, 255) * lin_y - + COEFF(0.2627 / 0.6780, 255) * lin_r - + COEFF(0.0593 / 0.6780, 255) * lin_b; + *g = linear_to_rec709(lin_g >> 12); + break; + case V4L2_YCBCR_ENC_SMPTE240M: + ycbcr2rgb(full ? smpte240m_full : smpte240m, y, cb, cr, y_offset, r, g, b); + break; + case V4L2_YCBCR_ENC_709: + default: + ycbcr2rgb(full ? rec709_full : rec709, y, cb, cr, y_offset, r, g, b); + break; + } +} + +/* precalculate color bar values to speed up rendering */ +static void precalculate_color(struct tpg_data *tpg, int k) +{ + int col = k; + int r = tpg_colors[col].r; + int g = tpg_colors[col].g; + int b = tpg_colors[col].b; + int y, cb, cr; + bool ycbcr_valid = false; + + if (k == TPG_COLOR_TEXTBG) { + col = tpg_get_textbg_color(tpg); + + r = tpg_colors[col].r; + g = tpg_colors[col].g; + b = tpg_colors[col].b; + } else if (k == TPG_COLOR_TEXTFG) { + col = tpg_get_textfg_color(tpg); + + r = tpg_colors[col].r; + g = tpg_colors[col].g; + b = tpg_colors[col].b; + } else if (tpg->pattern == TPG_PAT_NOISE) { + r = g = b = get_random_u8(); + } else if (k == TPG_COLOR_RANDOM) { + r = g = b = tpg->qual_offset + get_random_u32_below(196); + } else if (k >= TPG_COLOR_RAMP) { + r = g = b = k - TPG_COLOR_RAMP; + } + + if (tpg->pattern == TPG_PAT_CSC_COLORBAR && col <= TPG_COLOR_CSC_BLACK) { + r = tpg_csc_colors[tpg->colorspace][tpg->real_xfer_func][col].r; + g = tpg_csc_colors[tpg->colorspace][tpg->real_xfer_func][col].g; + b = tpg_csc_colors[tpg->colorspace][tpg->real_xfer_func][col].b; + } else { + r <<= 4; + g <<= 4; + b <<= 4; + } + + if (tpg->qual == TPG_QUAL_GRAY || + tpg->color_enc == TGP_COLOR_ENC_LUMA) { + /* Rec. 
709 Luma function */ + /* (0.2126, 0.7152, 0.0722) * (255 * 256) */ + r = g = b = (13879 * r + 46688 * g + 4713 * b) >> 16; + } + + /* + * The assumption is that the RGB output is always full range, + * so only if the rgb_range overrides the 'real' rgb range do + * we need to convert the RGB values. + * + * Remember that r, g and b are still in the 0 - 0xff0 range. + */ + if (tpg->real_rgb_range == V4L2_DV_RGB_RANGE_LIMITED && + tpg->rgb_range == V4L2_DV_RGB_RANGE_FULL && + tpg->color_enc == TGP_COLOR_ENC_RGB) { + /* + * Convert from full range (which is what r, g and b are) + * to limited range (which is the 'real' RGB range), which + * is then interpreted as full range. + */ + r = (r * 219) / 255 + (16 << 4); + g = (g * 219) / 255 + (16 << 4); + b = (b * 219) / 255 + (16 << 4); + } else if (tpg->real_rgb_range != V4L2_DV_RGB_RANGE_LIMITED && + tpg->rgb_range == V4L2_DV_RGB_RANGE_LIMITED && + tpg->color_enc == TGP_COLOR_ENC_RGB) { + + /* + * Clamp r, g and b to the limited range and convert to full + * range since that's what we deliver. + */ + r = clamp(r, 16 << 4, 235 << 4); + g = clamp(g, 16 << 4, 235 << 4); + b = clamp(b, 16 << 4, 235 << 4); + r = (r - (16 << 4)) * 255 / 219; + g = (g - (16 << 4)) * 255 / 219; + b = (b - (16 << 4)) * 255 / 219; + } + + if ((tpg->brightness != 128 || tpg->contrast != 128 || + tpg->saturation != 128 || tpg->hue) && + tpg->color_enc != TGP_COLOR_ENC_LUMA) { + /* Implement these operations */ + int tmp_cb, tmp_cr; + + /* First convert to YCbCr */ + + color_to_ycbcr(tpg, r, g, b, &y, &cb, &cr); + + y = (16 << 4) + ((y - (16 << 4)) * tpg->contrast) / 128; + y += (tpg->brightness << 4) - (128 << 4); + + cb -= 128 << 4; + cr -= 128 << 4; + tmp_cb = (cb * cos(128 + tpg->hue)) / 127 + (cr * sin[128 + tpg->hue]) / 127; + tmp_cr = (cr * cos(128 + tpg->hue)) / 127 - (cb * sin[128 + tpg->hue]) / 127; + + cb = (128 << 4) + (tmp_cb * tpg->contrast * tpg->saturation) / (128 * 128); + cr = (128 << 4) + (tmp_cr * tpg->contrast * tpg->saturation) / (128 * 128); + if (tpg->color_enc == TGP_COLOR_ENC_YCBCR) + ycbcr_valid = true; + else + ycbcr_to_color(tpg, y, cb, cr, &r, &g, &b); + } else if ((tpg->brightness != 128 || tpg->contrast != 128) && + tpg->color_enc == TGP_COLOR_ENC_LUMA) { + r = (16 << 4) + ((r - (16 << 4)) * tpg->contrast) / 128; + r += (tpg->brightness << 4) - (128 << 4); + } + + switch (tpg->color_enc) { + case TGP_COLOR_ENC_HSV: + { + int h, s, v; + + color_to_hsv(tpg, r, g, b, &h, &s, &v); + tpg->colors[k][0] = h; + tpg->colors[k][1] = s; + tpg->colors[k][2] = v; + break; + } + case TGP_COLOR_ENC_YCBCR: + { + /* Convert to YCbCr */ + if (!ycbcr_valid) + color_to_ycbcr(tpg, r, g, b, &y, &cb, &cr); + + y >>= 4; + cb >>= 4; + cr >>= 4; + /* + * XV601/709 use the header/footer margins to encode R', G' + * and B' values outside the range [0-1]. So do not clamp + * XV601/709 values. 
+ */ + if (tpg->real_quantization == V4L2_QUANTIZATION_LIM_RANGE && + tpg->real_ycbcr_enc != V4L2_YCBCR_ENC_XV601 && + tpg->real_ycbcr_enc != V4L2_YCBCR_ENC_XV709) { + y = clamp(y, 16, 235); + cb = clamp(cb, 16, 240); + cr = clamp(cr, 16, 240); + } else { + y = clamp(y, 1, 254); + cb = clamp(cb, 1, 254); + cr = clamp(cr, 1, 254); + } + switch (tpg->fourcc) { + case V4L2_PIX_FMT_YUV444: + y >>= 4; + cb >>= 4; + cr >>= 4; + break; + case V4L2_PIX_FMT_YUV555: + y >>= 3; + cb >>= 3; + cr >>= 3; + break; + case V4L2_PIX_FMT_YUV565: + y >>= 3; + cb >>= 2; + cr >>= 3; + break; + } + tpg->colors[k][0] = y; + tpg->colors[k][1] = cb; + tpg->colors[k][2] = cr; + break; + } + case TGP_COLOR_ENC_LUMA: + { + tpg->colors[k][0] = r >> 4; + break; + } + case TGP_COLOR_ENC_RGB: + { + if (tpg->real_quantization == V4L2_QUANTIZATION_LIM_RANGE) { + r = (r * 219) / 255 + (16 << 4); + g = (g * 219) / 255 + (16 << 4); + b = (b * 219) / 255 + (16 << 4); + } + switch (tpg->fourcc) { + case V4L2_PIX_FMT_RGB332: + r >>= 9; + g >>= 9; + b >>= 10; + break; + case V4L2_PIX_FMT_RGB565: + case V4L2_PIX_FMT_RGB565X: + r >>= 7; + g >>= 6; + b >>= 7; + break; + case V4L2_PIX_FMT_RGB444: + case V4L2_PIX_FMT_XRGB444: + case V4L2_PIX_FMT_ARGB444: + case V4L2_PIX_FMT_RGBX444: + case V4L2_PIX_FMT_RGBA444: + case V4L2_PIX_FMT_XBGR444: + case V4L2_PIX_FMT_ABGR444: + case V4L2_PIX_FMT_BGRX444: + case V4L2_PIX_FMT_BGRA444: + r >>= 8; + g >>= 8; + b >>= 8; + break; + case V4L2_PIX_FMT_RGB555: + case V4L2_PIX_FMT_XRGB555: + case V4L2_PIX_FMT_ARGB555: + case V4L2_PIX_FMT_RGBX555: + case V4L2_PIX_FMT_RGBA555: + case V4L2_PIX_FMT_XBGR555: + case V4L2_PIX_FMT_ABGR555: + case V4L2_PIX_FMT_BGRX555: + case V4L2_PIX_FMT_BGRA555: + case V4L2_PIX_FMT_RGB555X: + case V4L2_PIX_FMT_XRGB555X: + case V4L2_PIX_FMT_ARGB555X: + r >>= 7; + g >>= 7; + b >>= 7; + break; + case V4L2_PIX_FMT_BGR666: + r >>= 6; + g >>= 6; + b >>= 6; + break; + default: + r >>= 4; + g >>= 4; + b >>= 4; + break; + } + + tpg->colors[k][0] = r; + tpg->colors[k][1] = g; + tpg->colors[k][2] = b; + break; + } + } +} + +static void tpg_precalculate_colors(struct tpg_data *tpg) +{ + int k; + + for (k = 0; k < TPG_COLOR_MAX; k++) + precalculate_color(tpg, k); +} + +/* 'odd' is true for pixels 1, 3, 5, etc. and false for pixels 0, 2, 4, etc. */ +static void gen_twopix(struct tpg_data *tpg, + u8 buf[TPG_MAX_PLANES][8], int color, bool odd) +{ + unsigned offset = odd * tpg->twopixelsize[0] / 2; + u8 alpha = tpg->alpha_component; + u8 r_y_h, g_u_s, b_v; + + if (tpg->alpha_red_only && color != TPG_COLOR_CSC_RED && + color != TPG_COLOR_100_RED && + color != TPG_COLOR_75_RED) + alpha = 0; + if (color == TPG_COLOR_RANDOM) + precalculate_color(tpg, color); + r_y_h = tpg->colors[color][0]; /* R or precalculated Y, H */ + g_u_s = tpg->colors[color][1]; /* G or precalculated U, V */ + b_v = tpg->colors[color][2]; /* B or precalculated V */ + + switch (tpg->fourcc) { + case V4L2_PIX_FMT_GREY: + buf[0][offset] = r_y_h; + break; + case V4L2_PIX_FMT_Y10: + buf[0][offset] = (r_y_h << 2) & 0xff; + buf[0][offset+1] = r_y_h >> 6; + break; + case V4L2_PIX_FMT_Y12: + buf[0][offset] = (r_y_h << 4) & 0xff; + buf[0][offset+1] = r_y_h >> 4; + break; + case V4L2_PIX_FMT_Y16: + case V4L2_PIX_FMT_Z16: + /* + * Ideally both bytes should be set to r_y_h, but then you won't + * be able to detect endian problems. So keep it 0 except for + * the corner case where r_y_h is 0xff so white really will be + * white (0xffff). + */ + buf[0][offset] = r_y_h == 0xff ? 
r_y_h : 0; + buf[0][offset+1] = r_y_h; + break; + case V4L2_PIX_FMT_Y16_BE: + /* See comment for V4L2_PIX_FMT_Y16 above */ + buf[0][offset] = r_y_h; + buf[0][offset+1] = r_y_h == 0xff ? r_y_h : 0; + break; + case V4L2_PIX_FMT_YUV422M: + case V4L2_PIX_FMT_YUV422P: + case V4L2_PIX_FMT_YUV420: + case V4L2_PIX_FMT_YUV420M: + buf[0][offset] = r_y_h; + if (odd) { + buf[1][0] = (buf[1][0] + g_u_s) / 2; + buf[2][0] = (buf[2][0] + b_v) / 2; + buf[1][1] = buf[1][0]; + buf[2][1] = buf[2][0]; + break; + } + buf[1][0] = g_u_s; + buf[2][0] = b_v; + break; + case V4L2_PIX_FMT_YVU422M: + case V4L2_PIX_FMT_YVU420: + case V4L2_PIX_FMT_YVU420M: + buf[0][offset] = r_y_h; + if (odd) { + buf[1][0] = (buf[1][0] + b_v) / 2; + buf[2][0] = (buf[2][0] + g_u_s) / 2; + buf[1][1] = buf[1][0]; + buf[2][1] = buf[2][0]; + break; + } + buf[1][0] = b_v; + buf[2][0] = g_u_s; + break; + + case V4L2_PIX_FMT_NV12: + case V4L2_PIX_FMT_NV12M: + case V4L2_PIX_FMT_NV16: + case V4L2_PIX_FMT_NV16M: + buf[0][offset] = r_y_h; + if (odd) { + buf[1][0] = (buf[1][0] + g_u_s) / 2; + buf[1][1] = (buf[1][1] + b_v) / 2; + break; + } + buf[1][0] = g_u_s; + buf[1][1] = b_v; + break; + case V4L2_PIX_FMT_NV21: + case V4L2_PIX_FMT_NV21M: + case V4L2_PIX_FMT_NV61: + case V4L2_PIX_FMT_NV61M: + buf[0][offset] = r_y_h; + if (odd) { + buf[1][0] = (buf[1][0] + b_v) / 2; + buf[1][1] = (buf[1][1] + g_u_s) / 2; + break; + } + buf[1][0] = b_v; + buf[1][1] = g_u_s; + break; + + case V4L2_PIX_FMT_YUV444M: + buf[0][offset] = r_y_h; + buf[1][offset] = g_u_s; + buf[2][offset] = b_v; + break; + + case V4L2_PIX_FMT_YVU444M: + buf[0][offset] = r_y_h; + buf[1][offset] = b_v; + buf[2][offset] = g_u_s; + break; + + case V4L2_PIX_FMT_NV24: + buf[0][offset] = r_y_h; + buf[1][2 * offset] = g_u_s; + buf[1][(2 * offset + 1) % 8] = b_v; + break; + + case V4L2_PIX_FMT_NV42: + buf[0][offset] = r_y_h; + buf[1][2 * offset] = b_v; + buf[1][(2 * offset + 1) % 8] = g_u_s; + break; + + case V4L2_PIX_FMT_YUYV: + buf[0][offset] = r_y_h; + if (odd) { + buf[0][1] = (buf[0][1] + g_u_s) / 2; + buf[0][3] = (buf[0][3] + b_v) / 2; + break; + } + buf[0][1] = g_u_s; + buf[0][3] = b_v; + break; + case V4L2_PIX_FMT_UYVY: + buf[0][offset + 1] = r_y_h; + if (odd) { + buf[0][0] = (buf[0][0] + g_u_s) / 2; + buf[0][2] = (buf[0][2] + b_v) / 2; + break; + } + buf[0][0] = g_u_s; + buf[0][2] = b_v; + break; + case V4L2_PIX_FMT_YVYU: + buf[0][offset] = r_y_h; + if (odd) { + buf[0][1] = (buf[0][1] + b_v) / 2; + buf[0][3] = (buf[0][3] + g_u_s) / 2; + break; + } + buf[0][1] = b_v; + buf[0][3] = g_u_s; + break; + case V4L2_PIX_FMT_VYUY: + buf[0][offset + 1] = r_y_h; + if (odd) { + buf[0][0] = (buf[0][0] + b_v) / 2; + buf[0][2] = (buf[0][2] + g_u_s) / 2; + break; + } + buf[0][0] = b_v; + buf[0][2] = g_u_s; + break; + case V4L2_PIX_FMT_RGB332: + buf[0][offset] = (r_y_h << 5) | (g_u_s << 2) | b_v; + break; + case V4L2_PIX_FMT_YUV565: + case V4L2_PIX_FMT_RGB565: + buf[0][offset] = (g_u_s << 5) | b_v; + buf[0][offset + 1] = (r_y_h << 3) | (g_u_s >> 3); + break; + case V4L2_PIX_FMT_RGB565X: + buf[0][offset] = (r_y_h << 3) | (g_u_s >> 3); + buf[0][offset + 1] = (g_u_s << 5) | b_v; + break; + case V4L2_PIX_FMT_RGB444: + case V4L2_PIX_FMT_XRGB444: + alpha = 0; + fallthrough; + case V4L2_PIX_FMT_YUV444: + case V4L2_PIX_FMT_ARGB444: + buf[0][offset] = (g_u_s << 4) | b_v; + buf[0][offset + 1] = (alpha & 0xf0) | r_y_h; + break; + case V4L2_PIX_FMT_RGBX444: + alpha = 0; + fallthrough; + case V4L2_PIX_FMT_RGBA444: + buf[0][offset] = (b_v << 4) | (alpha >> 4); + buf[0][offset + 1] = (r_y_h << 4) | g_u_s; + break; + case 
V4L2_PIX_FMT_XBGR444: + alpha = 0; + fallthrough; + case V4L2_PIX_FMT_ABGR444: + buf[0][offset] = (g_u_s << 4) | r_y_h; + buf[0][offset + 1] = (alpha & 0xf0) | b_v; + break; + case V4L2_PIX_FMT_BGRX444: + alpha = 0; + fallthrough; + case V4L2_PIX_FMT_BGRA444: + buf[0][offset] = (r_y_h << 4) | (alpha >> 4); + buf[0][offset + 1] = (b_v << 4) | g_u_s; + break; + case V4L2_PIX_FMT_RGB555: + case V4L2_PIX_FMT_XRGB555: + alpha = 0; + fallthrough; + case V4L2_PIX_FMT_YUV555: + case V4L2_PIX_FMT_ARGB555: + buf[0][offset] = (g_u_s << 5) | b_v; + buf[0][offset + 1] = (alpha & 0x80) | (r_y_h << 2) + | (g_u_s >> 3); + break; + case V4L2_PIX_FMT_RGBX555: + alpha = 0; + fallthrough; + case V4L2_PIX_FMT_RGBA555: + buf[0][offset] = (g_u_s << 6) | (b_v << 1) | + ((alpha & 0x80) >> 7); + buf[0][offset + 1] = (r_y_h << 3) | (g_u_s >> 2); + break; + case V4L2_PIX_FMT_XBGR555: + alpha = 0; + fallthrough; + case V4L2_PIX_FMT_ABGR555: + buf[0][offset] = (g_u_s << 5) | r_y_h; + buf[0][offset + 1] = (alpha & 0x80) | (b_v << 2) + | (g_u_s >> 3); + break; + case V4L2_PIX_FMT_BGRX555: + alpha = 0; + fallthrough; + case V4L2_PIX_FMT_BGRA555: + buf[0][offset] = (g_u_s << 6) | (r_y_h << 1) | + ((alpha & 0x80) >> 7); + buf[0][offset + 1] = (b_v << 3) | (g_u_s >> 2); + break; + case V4L2_PIX_FMT_RGB555X: + case V4L2_PIX_FMT_XRGB555X: + alpha = 0; + fallthrough; + case V4L2_PIX_FMT_ARGB555X: + buf[0][offset] = (alpha & 0x80) | (r_y_h << 2) | (g_u_s >> 3); + buf[0][offset + 1] = (g_u_s << 5) | b_v; + break; + case V4L2_PIX_FMT_RGB24: + case V4L2_PIX_FMT_HSV24: + buf[0][offset] = r_y_h; + buf[0][offset + 1] = g_u_s; + buf[0][offset + 2] = b_v; + break; + case V4L2_PIX_FMT_BGR24: + buf[0][offset] = b_v; + buf[0][offset + 1] = g_u_s; + buf[0][offset + 2] = r_y_h; + break; + case V4L2_PIX_FMT_BGR666: + buf[0][offset] = (b_v << 2) | (g_u_s >> 4); + buf[0][offset + 1] = (g_u_s << 4) | (r_y_h >> 2); + buf[0][offset + 2] = r_y_h << 6; + buf[0][offset + 3] = 0; + break; + case V4L2_PIX_FMT_RGB32: + case V4L2_PIX_FMT_XRGB32: + case V4L2_PIX_FMT_HSV32: + case V4L2_PIX_FMT_XYUV32: + alpha = 0; + fallthrough; + case V4L2_PIX_FMT_YUV32: + case V4L2_PIX_FMT_ARGB32: + case V4L2_PIX_FMT_AYUV32: + buf[0][offset] = alpha; + buf[0][offset + 1] = r_y_h; + buf[0][offset + 2] = g_u_s; + buf[0][offset + 3] = b_v; + break; + case V4L2_PIX_FMT_RGBX32: + case V4L2_PIX_FMT_YUVX32: + alpha = 0; + fallthrough; + case V4L2_PIX_FMT_RGBA32: + case V4L2_PIX_FMT_YUVA32: + buf[0][offset] = r_y_h; + buf[0][offset + 1] = g_u_s; + buf[0][offset + 2] = b_v; + buf[0][offset + 3] = alpha; + break; + case V4L2_PIX_FMT_BGR32: + case V4L2_PIX_FMT_XBGR32: + case V4L2_PIX_FMT_VUYX32: + alpha = 0; + fallthrough; + case V4L2_PIX_FMT_ABGR32: + case V4L2_PIX_FMT_VUYA32: + buf[0][offset] = b_v; + buf[0][offset + 1] = g_u_s; + buf[0][offset + 2] = r_y_h; + buf[0][offset + 3] = alpha; + break; + case V4L2_PIX_FMT_BGRX32: + alpha = 0; + fallthrough; + case V4L2_PIX_FMT_BGRA32: + buf[0][offset] = alpha; + buf[0][offset + 1] = b_v; + buf[0][offset + 2] = g_u_s; + buf[0][offset + 3] = r_y_h; + break; + case V4L2_PIX_FMT_SBGGR8: + buf[0][offset] = odd ? g_u_s : b_v; + buf[1][offset] = odd ? r_y_h : g_u_s; + break; + case V4L2_PIX_FMT_SGBRG8: + buf[0][offset] = odd ? b_v : g_u_s; + buf[1][offset] = odd ? g_u_s : r_y_h; + break; + case V4L2_PIX_FMT_SGRBG8: + buf[0][offset] = odd ? r_y_h : g_u_s; + buf[1][offset] = odd ? g_u_s : b_v; + break; + case V4L2_PIX_FMT_SRGGB8: + buf[0][offset] = odd ? g_u_s : r_y_h; + buf[1][offset] = odd ? 
b_v : g_u_s; + break; + case V4L2_PIX_FMT_SBGGR10: + buf[0][offset] = odd ? g_u_s << 2 : b_v << 2; + buf[0][offset + 1] = odd ? g_u_s >> 6 : b_v >> 6; + buf[1][offset] = odd ? r_y_h << 2 : g_u_s << 2; + buf[1][offset + 1] = odd ? r_y_h >> 6 : g_u_s >> 6; + buf[0][offset] |= (buf[0][offset] >> 2) & 3; + buf[1][offset] |= (buf[1][offset] >> 2) & 3; + break; + case V4L2_PIX_FMT_SGBRG10: + buf[0][offset] = odd ? b_v << 2 : g_u_s << 2; + buf[0][offset + 1] = odd ? b_v >> 6 : g_u_s >> 6; + buf[1][offset] = odd ? g_u_s << 2 : r_y_h << 2; + buf[1][offset + 1] = odd ? g_u_s >> 6 : r_y_h >> 6; + buf[0][offset] |= (buf[0][offset] >> 2) & 3; + buf[1][offset] |= (buf[1][offset] >> 2) & 3; + break; + case V4L2_PIX_FMT_SGRBG10: + buf[0][offset] = odd ? r_y_h << 2 : g_u_s << 2; + buf[0][offset + 1] = odd ? r_y_h >> 6 : g_u_s >> 6; + buf[1][offset] = odd ? g_u_s << 2 : b_v << 2; + buf[1][offset + 1] = odd ? g_u_s >> 6 : b_v >> 6; + buf[0][offset] |= (buf[0][offset] >> 2) & 3; + buf[1][offset] |= (buf[1][offset] >> 2) & 3; + break; + case V4L2_PIX_FMT_SRGGB10: + buf[0][offset] = odd ? g_u_s << 2 : r_y_h << 2; + buf[0][offset + 1] = odd ? g_u_s >> 6 : r_y_h >> 6; + buf[1][offset] = odd ? b_v << 2 : g_u_s << 2; + buf[1][offset + 1] = odd ? b_v >> 6 : g_u_s >> 6; + buf[0][offset] |= (buf[0][offset] >> 2) & 3; + buf[1][offset] |= (buf[1][offset] >> 2) & 3; + break; + case V4L2_PIX_FMT_SBGGR12: + buf[0][offset] = odd ? g_u_s << 4 : b_v << 4; + buf[0][offset + 1] = odd ? g_u_s >> 4 : b_v >> 4; + buf[1][offset] = odd ? r_y_h << 4 : g_u_s << 4; + buf[1][offset + 1] = odd ? r_y_h >> 4 : g_u_s >> 4; + buf[0][offset] |= (buf[0][offset] >> 4) & 0xf; + buf[1][offset] |= (buf[1][offset] >> 4) & 0xf; + break; + case V4L2_PIX_FMT_SGBRG12: + buf[0][offset] = odd ? b_v << 4 : g_u_s << 4; + buf[0][offset + 1] = odd ? b_v >> 4 : g_u_s >> 4; + buf[1][offset] = odd ? g_u_s << 4 : r_y_h << 4; + buf[1][offset + 1] = odd ? g_u_s >> 4 : r_y_h >> 4; + buf[0][offset] |= (buf[0][offset] >> 4) & 0xf; + buf[1][offset] |= (buf[1][offset] >> 4) & 0xf; + break; + case V4L2_PIX_FMT_SGRBG12: + buf[0][offset] = odd ? r_y_h << 4 : g_u_s << 4; + buf[0][offset + 1] = odd ? r_y_h >> 4 : g_u_s >> 4; + buf[1][offset] = odd ? g_u_s << 4 : b_v << 4; + buf[1][offset + 1] = odd ? g_u_s >> 4 : b_v >> 4; + buf[0][offset] |= (buf[0][offset] >> 4) & 0xf; + buf[1][offset] |= (buf[1][offset] >> 4) & 0xf; + break; + case V4L2_PIX_FMT_SRGGB12: + buf[0][offset] = odd ? g_u_s << 4 : r_y_h << 4; + buf[0][offset + 1] = odd ? g_u_s >> 4 : r_y_h >> 4; + buf[1][offset] = odd ? b_v << 4 : g_u_s << 4; + buf[1][offset + 1] = odd ? b_v >> 4 : g_u_s >> 4; + buf[0][offset] |= (buf[0][offset] >> 4) & 0xf; + buf[1][offset] |= (buf[1][offset] >> 4) & 0xf; + break; + case V4L2_PIX_FMT_SBGGR16: + buf[0][offset] = buf[0][offset + 1] = odd ? g_u_s : b_v; + buf[1][offset] = buf[1][offset + 1] = odd ? r_y_h : g_u_s; + break; + case V4L2_PIX_FMT_SGBRG16: + buf[0][offset] = buf[0][offset + 1] = odd ? b_v : g_u_s; + buf[1][offset] = buf[1][offset + 1] = odd ? g_u_s : r_y_h; + break; + case V4L2_PIX_FMT_SGRBG16: + buf[0][offset] = buf[0][offset + 1] = odd ? r_y_h : g_u_s; + buf[1][offset] = buf[1][offset + 1] = odd ? g_u_s : b_v; + break; + case V4L2_PIX_FMT_SRGGB16: + buf[0][offset] = buf[0][offset + 1] = odd ? g_u_s : r_y_h; + buf[1][offset] = buf[1][offset + 1] = odd ? 
b_v : g_u_s; + break; + } +} + +unsigned tpg_g_interleaved_plane(const struct tpg_data *tpg, unsigned buf_line) +{ + switch (tpg->fourcc) { + case V4L2_PIX_FMT_SBGGR8: + case V4L2_PIX_FMT_SGBRG8: + case V4L2_PIX_FMT_SGRBG8: + case V4L2_PIX_FMT_SRGGB8: + case V4L2_PIX_FMT_SBGGR10: + case V4L2_PIX_FMT_SGBRG10: + case V4L2_PIX_FMT_SGRBG10: + case V4L2_PIX_FMT_SRGGB10: + case V4L2_PIX_FMT_SBGGR12: + case V4L2_PIX_FMT_SGBRG12: + case V4L2_PIX_FMT_SGRBG12: + case V4L2_PIX_FMT_SRGGB12: + case V4L2_PIX_FMT_SBGGR16: + case V4L2_PIX_FMT_SGBRG16: + case V4L2_PIX_FMT_SGRBG16: + case V4L2_PIX_FMT_SRGGB16: + return buf_line & 1; + default: + return 0; + } +} +EXPORT_SYMBOL_GPL(tpg_g_interleaved_plane); + +/* Return how many pattern lines are used by the current pattern. */ +static unsigned tpg_get_pat_lines(const struct tpg_data *tpg) +{ + switch (tpg->pattern) { + case TPG_PAT_CHECKERS_16X16: + case TPG_PAT_CHECKERS_2X2: + case TPG_PAT_CHECKERS_1X1: + case TPG_PAT_COLOR_CHECKERS_2X2: + case TPG_PAT_COLOR_CHECKERS_1X1: + case TPG_PAT_ALTERNATING_HLINES: + case TPG_PAT_CROSS_1_PIXEL: + case TPG_PAT_CROSS_2_PIXELS: + case TPG_PAT_CROSS_10_PIXELS: + return 2; + case TPG_PAT_100_COLORSQUARES: + case TPG_PAT_100_HCOLORBAR: + return 8; + default: + return 1; + } +} + +/* Which pattern line should be used for the given frame line. */ +static unsigned tpg_get_pat_line(const struct tpg_data *tpg, unsigned line) +{ + switch (tpg->pattern) { + case TPG_PAT_CHECKERS_16X16: + return (line >> 4) & 1; + case TPG_PAT_CHECKERS_1X1: + case TPG_PAT_COLOR_CHECKERS_1X1: + case TPG_PAT_ALTERNATING_HLINES: + return line & 1; + case TPG_PAT_CHECKERS_2X2: + case TPG_PAT_COLOR_CHECKERS_2X2: + return (line & 2) >> 1; + case TPG_PAT_100_COLORSQUARES: + case TPG_PAT_100_HCOLORBAR: + return (line * 8) / tpg->src_height; + case TPG_PAT_CROSS_1_PIXEL: + return line == tpg->src_height / 2; + case TPG_PAT_CROSS_2_PIXELS: + return (line + 1) / 2 == tpg->src_height / 4; + case TPG_PAT_CROSS_10_PIXELS: + return (line + 10) / 20 == tpg->src_height / 40; + default: + return 0; + } +} + +/* + * Which color should be used for the given pattern line and X coordinate. + * Note: x is in the range 0 to 2 * tpg->src_width. 
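+ * (The doubled range comes from tpg_precalculate_line() below, which
+ * generates each pattern line at twice the scaled width so that
+ * tpg_fill_plane_pattern() can add the horizontal movement offset to the
+ * line start and copy a full line's worth of pixels without wrapping.)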
+ */ +static enum tpg_color tpg_get_color(const struct tpg_data *tpg, + unsigned pat_line, unsigned x) +{ + /* Maximum number of bars are TPG_COLOR_MAX - otherwise, the input print code + should be modified */ + static const enum tpg_color bars[3][8] = { + /* Standard ITU-R 75% color bar sequence */ + { TPG_COLOR_CSC_WHITE, TPG_COLOR_75_YELLOW, + TPG_COLOR_75_CYAN, TPG_COLOR_75_GREEN, + TPG_COLOR_75_MAGENTA, TPG_COLOR_75_RED, + TPG_COLOR_75_BLUE, TPG_COLOR_100_BLACK, }, + /* Standard ITU-R 100% color bar sequence */ + { TPG_COLOR_100_WHITE, TPG_COLOR_100_YELLOW, + TPG_COLOR_100_CYAN, TPG_COLOR_100_GREEN, + TPG_COLOR_100_MAGENTA, TPG_COLOR_100_RED, + TPG_COLOR_100_BLUE, TPG_COLOR_100_BLACK, }, + /* Color bar sequence suitable to test CSC */ + { TPG_COLOR_CSC_WHITE, TPG_COLOR_CSC_YELLOW, + TPG_COLOR_CSC_CYAN, TPG_COLOR_CSC_GREEN, + TPG_COLOR_CSC_MAGENTA, TPG_COLOR_CSC_RED, + TPG_COLOR_CSC_BLUE, TPG_COLOR_CSC_BLACK, }, + }; + + switch (tpg->pattern) { + case TPG_PAT_75_COLORBAR: + case TPG_PAT_100_COLORBAR: + case TPG_PAT_CSC_COLORBAR: + return bars[tpg->pattern][((x * 8) / tpg->src_width) % 8]; + case TPG_PAT_100_COLORSQUARES: + return bars[1][(pat_line + (x * 8) / tpg->src_width) % 8]; + case TPG_PAT_100_HCOLORBAR: + return bars[1][pat_line]; + case TPG_PAT_BLACK: + return TPG_COLOR_100_BLACK; + case TPG_PAT_WHITE: + return TPG_COLOR_100_WHITE; + case TPG_PAT_RED: + return TPG_COLOR_100_RED; + case TPG_PAT_GREEN: + return TPG_COLOR_100_GREEN; + case TPG_PAT_BLUE: + return TPG_COLOR_100_BLUE; + case TPG_PAT_CHECKERS_16X16: + return (((x >> 4) & 1) ^ (pat_line & 1)) ? + TPG_COLOR_100_BLACK : TPG_COLOR_100_WHITE; + case TPG_PAT_CHECKERS_1X1: + return ((x & 1) ^ (pat_line & 1)) ? + TPG_COLOR_100_WHITE : TPG_COLOR_100_BLACK; + case TPG_PAT_COLOR_CHECKERS_1X1: + return ((x & 1) ^ (pat_line & 1)) ? + TPG_COLOR_100_RED : TPG_COLOR_100_BLUE; + case TPG_PAT_CHECKERS_2X2: + return (((x >> 1) & 1) ^ (pat_line & 1)) ? + TPG_COLOR_100_WHITE : TPG_COLOR_100_BLACK; + case TPG_PAT_COLOR_CHECKERS_2X2: + return (((x >> 1) & 1) ^ (pat_line & 1)) ? + TPG_COLOR_100_RED : TPG_COLOR_100_BLUE; + case TPG_PAT_ALTERNATING_HLINES: + return pat_line ? TPG_COLOR_100_WHITE : TPG_COLOR_100_BLACK; + case TPG_PAT_ALTERNATING_VLINES: + return (x & 1) ? TPG_COLOR_100_WHITE : TPG_COLOR_100_BLACK; + case TPG_PAT_CROSS_1_PIXEL: + if (pat_line || (x % tpg->src_width) == tpg->src_width / 2) + return TPG_COLOR_100_BLACK; + return TPG_COLOR_100_WHITE; + case TPG_PAT_CROSS_2_PIXELS: + if (pat_line || ((x % tpg->src_width) + 1) / 2 == tpg->src_width / 4) + return TPG_COLOR_100_BLACK; + return TPG_COLOR_100_WHITE; + case TPG_PAT_CROSS_10_PIXELS: + if (pat_line || ((x % tpg->src_width) + 10) / 20 == tpg->src_width / 40) + return TPG_COLOR_100_BLACK; + return TPG_COLOR_100_WHITE; + case TPG_PAT_GRAY_RAMP: + return TPG_COLOR_RAMP + ((x % tpg->src_width) * 256) / tpg->src_width; + default: + return TPG_COLOR_100_RED; + } +} + +/* + * Given the pixel aspect ratio and video aspect ratio calculate the + * coordinates of a centered square and the coordinates of the border of + * the active video area. The coordinates are relative to the source + * frame rectangle. 
+ */ +static void tpg_calculate_square_border(struct tpg_data *tpg) +{ + unsigned w = tpg->src_width; + unsigned h = tpg->src_height; + unsigned sq_w, sq_h; + + sq_w = (w * 2 / 5) & ~1; + if (((w - sq_w) / 2) & 1) + sq_w += 2; + sq_h = sq_w; + tpg->square.width = sq_w; + if (tpg->vid_aspect == TPG_VIDEO_ASPECT_16X9_ANAMORPHIC) { + unsigned ana_sq_w = (sq_w / 4) * 3; + + if (((w - ana_sq_w) / 2) & 1) + ana_sq_w += 2; + tpg->square.width = ana_sq_w; + } + tpg->square.left = (w - tpg->square.width) / 2; + if (tpg->pix_aspect == TPG_PIXEL_ASPECT_NTSC) + sq_h = sq_w * 10 / 11; + else if (tpg->pix_aspect == TPG_PIXEL_ASPECT_PAL) + sq_h = sq_w * 59 / 54; + tpg->square.height = sq_h; + tpg->square.top = (h - sq_h) / 2; + tpg->border.left = 0; + tpg->border.width = w; + tpg->border.top = 0; + tpg->border.height = h; + switch (tpg->vid_aspect) { + case TPG_VIDEO_ASPECT_4X3: + if (tpg->pix_aspect) + return; + if (3 * w >= 4 * h) { + tpg->border.width = ((4 * h) / 3) & ~1; + if (((w - tpg->border.width) / 2) & ~1) + tpg->border.width -= 2; + tpg->border.left = (w - tpg->border.width) / 2; + break; + } + tpg->border.height = ((3 * w) / 4) & ~1; + tpg->border.top = (h - tpg->border.height) / 2; + break; + case TPG_VIDEO_ASPECT_14X9_CENTRE: + if (tpg->pix_aspect) { + tpg->border.height = tpg->pix_aspect == TPG_PIXEL_ASPECT_NTSC ? 420 : 506; + tpg->border.top = (h - tpg->border.height) / 2; + break; + } + if (9 * w >= 14 * h) { + tpg->border.width = ((14 * h) / 9) & ~1; + if (((w - tpg->border.width) / 2) & ~1) + tpg->border.width -= 2; + tpg->border.left = (w - tpg->border.width) / 2; + break; + } + tpg->border.height = ((9 * w) / 14) & ~1; + tpg->border.top = (h - tpg->border.height) / 2; + break; + case TPG_VIDEO_ASPECT_16X9_CENTRE: + if (tpg->pix_aspect) { + tpg->border.height = tpg->pix_aspect == TPG_PIXEL_ASPECT_NTSC ? 368 : 442; + tpg->border.top = (h - tpg->border.height) / 2; + break; + } + if (9 * w >= 16 * h) { + tpg->border.width = ((16 * h) / 9) & ~1; + if (((w - tpg->border.width) / 2) & ~1) + tpg->border.width -= 2; + tpg->border.left = (w - tpg->border.width) / 2; + break; + } + tpg->border.height = ((9 * w) / 16) & ~1; + tpg->border.top = (h - tpg->border.height) / 2; + break; + default: + break; + } +} + +static void tpg_precalculate_line(struct tpg_data *tpg) +{ + enum tpg_color contrast; + u8 pix[TPG_MAX_PLANES][8]; + unsigned pat; + unsigned p; + unsigned x; + + switch (tpg->pattern) { + case TPG_PAT_GREEN: + contrast = TPG_COLOR_100_RED; + break; + case TPG_PAT_CSC_COLORBAR: + contrast = TPG_COLOR_CSC_GREEN; + break; + default: + contrast = TPG_COLOR_100_GREEN; + break; + } + + for (pat = 0; pat < tpg_get_pat_lines(tpg); pat++) { + /* Coarse scaling with Bresenham */ + unsigned int_part = tpg->src_width / tpg->scaled_width; + unsigned fract_part = tpg->src_width % tpg->scaled_width; + unsigned src_x = 0; + unsigned error = 0; + + for (x = 0; x < tpg->scaled_width * 2; x += 2) { + unsigned real_x = src_x; + enum tpg_color color1, color2; + + real_x = tpg->hflip ? tpg->src_width * 2 - real_x - 2 : real_x; + color1 = tpg_get_color(tpg, pat, real_x); + + src_x += int_part; + error += fract_part; + if (error >= tpg->scaled_width) { + error -= tpg->scaled_width; + src_x++; + } + + real_x = src_x; + real_x = tpg->hflip ? tpg->src_width * 2 - real_x - 2 : real_x; + color2 = tpg_get_color(tpg, pat, real_x); + + src_x += int_part; + error += fract_part; + if (error >= tpg->scaled_width) { + error -= tpg->scaled_width; + src_x++; + } + + gen_twopix(tpg, pix, tpg->hflip ? 
color2 : color1, 0); + gen_twopix(tpg, pix, tpg->hflip ? color1 : color2, 1); + for (p = 0; p < tpg->planes; p++) { + unsigned twopixsize = tpg->twopixelsize[p]; + unsigned hdiv = tpg->hdownsampling[p]; + u8 *pos = tpg->lines[pat][p] + tpg_hdiv(tpg, p, x); + + memcpy(pos, pix[p], twopixsize / hdiv); + } + } + } + + if (tpg->vdownsampling[tpg->planes - 1] > 1) { + unsigned pat_lines = tpg_get_pat_lines(tpg); + + for (pat = 0; pat < pat_lines; pat++) { + unsigned next_pat = (pat + 1) % pat_lines; + + for (p = 1; p < tpg->planes; p++) { + unsigned w = tpg_hdiv(tpg, p, tpg->scaled_width * 2); + u8 *pos1 = tpg->lines[pat][p]; + u8 *pos2 = tpg->lines[next_pat][p]; + u8 *dest = tpg->downsampled_lines[pat][p]; + + for (x = 0; x < w; x++, pos1++, pos2++, dest++) + *dest = ((u16)*pos1 + (u16)*pos2) / 2; + } + } + } + + gen_twopix(tpg, pix, contrast, 0); + gen_twopix(tpg, pix, contrast, 1); + for (p = 0; p < tpg->planes; p++) { + unsigned twopixsize = tpg->twopixelsize[p]; + u8 *pos = tpg->contrast_line[p]; + + for (x = 0; x < tpg->scaled_width; x += 2, pos += twopixsize) + memcpy(pos, pix[p], twopixsize); + } + + gen_twopix(tpg, pix, TPG_COLOR_100_BLACK, 0); + gen_twopix(tpg, pix, TPG_COLOR_100_BLACK, 1); + for (p = 0; p < tpg->planes; p++) { + unsigned twopixsize = tpg->twopixelsize[p]; + u8 *pos = tpg->black_line[p]; + + for (x = 0; x < tpg->scaled_width; x += 2, pos += twopixsize) + memcpy(pos, pix[p], twopixsize); + } + + for (x = 0; x < tpg->scaled_width * 2; x += 2) { + gen_twopix(tpg, pix, TPG_COLOR_RANDOM, 0); + gen_twopix(tpg, pix, TPG_COLOR_RANDOM, 1); + for (p = 0; p < tpg->planes; p++) { + unsigned twopixsize = tpg->twopixelsize[p]; + u8 *pos = tpg->random_line[p] + x * twopixsize / 2; + + memcpy(pos, pix[p], twopixsize); + } + } + + gen_twopix(tpg, tpg->textbg, TPG_COLOR_TEXTBG, 0); + gen_twopix(tpg, tpg->textbg, TPG_COLOR_TEXTBG, 1); + gen_twopix(tpg, tpg->textfg, TPG_COLOR_TEXTFG, 0); + gen_twopix(tpg, tpg->textfg, TPG_COLOR_TEXTFG, 1); +} + +/* need this to do rgb24 rendering */ +typedef struct { u16 __; u8 _; } __packed x24; + +#define PRINTSTR(PIXTYPE) do { \ + unsigned vdiv = tpg->vdownsampling[p]; \ + unsigned hdiv = tpg->hdownsampling[p]; \ + int line; \ + PIXTYPE fg; \ + PIXTYPE bg; \ + memcpy(&fg, tpg->textfg[p], sizeof(PIXTYPE)); \ + memcpy(&bg, tpg->textbg[p], sizeof(PIXTYPE)); \ + \ + for (line = first; line < 16; line += vdiv * step) { \ + int l = tpg->vflip ? 15 - line : line; \ + PIXTYPE *pos = (PIXTYPE *)(basep[p][(line / vdiv) & 1] + \ + ((y * step + l) / (vdiv * div)) * tpg->bytesperline[p] + \ + (x / hdiv) * sizeof(PIXTYPE)); \ + unsigned s; \ + \ + for (s = 0; s < len; s++) { \ + u8 chr = font8x16[(u8)text[s] * 16 + line]; \ + \ + if (hdiv == 2 && tpg->hflip) { \ + pos[3] = (chr & (0x01 << 6) ? fg : bg); \ + pos[2] = (chr & (0x01 << 4) ? fg : bg); \ + pos[1] = (chr & (0x01 << 2) ? fg : bg); \ + pos[0] = (chr & (0x01 << 0) ? fg : bg); \ + } else if (hdiv == 2) { \ + pos[0] = (chr & (0x01 << 7) ? fg : bg); \ + pos[1] = (chr & (0x01 << 5) ? fg : bg); \ + pos[2] = (chr & (0x01 << 3) ? fg : bg); \ + pos[3] = (chr & (0x01 << 1) ? fg : bg); \ + } else if (tpg->hflip) { \ + pos[7] = (chr & (0x01 << 7) ? fg : bg); \ + pos[6] = (chr & (0x01 << 6) ? fg : bg); \ + pos[5] = (chr & (0x01 << 5) ? fg : bg); \ + pos[4] = (chr & (0x01 << 4) ? fg : bg); \ + pos[3] = (chr & (0x01 << 3) ? fg : bg); \ + pos[2] = (chr & (0x01 << 2) ? fg : bg); \ + pos[1] = (chr & (0x01 << 1) ? fg : bg); \ + pos[0] = (chr & (0x01 << 0) ? fg : bg); \ + } else { \ + pos[0] = (chr & (0x01 << 7) ? 
fg : bg); \ + pos[1] = (chr & (0x01 << 6) ? fg : bg); \ + pos[2] = (chr & (0x01 << 5) ? fg : bg); \ + pos[3] = (chr & (0x01 << 4) ? fg : bg); \ + pos[4] = (chr & (0x01 << 3) ? fg : bg); \ + pos[5] = (chr & (0x01 << 2) ? fg : bg); \ + pos[6] = (chr & (0x01 << 1) ? fg : bg); \ + pos[7] = (chr & (0x01 << 0) ? fg : bg); \ + } \ + \ + pos += (tpg->hflip ? -8 : 8) / (int)hdiv; \ + } \ + } \ +} while (0) + +static noinline void tpg_print_str_2(const struct tpg_data *tpg, u8 *basep[TPG_MAX_PLANES][2], + unsigned p, unsigned first, unsigned div, unsigned step, + int y, int x, const char *text, unsigned len) +{ + PRINTSTR(u8); +} + +static noinline void tpg_print_str_4(const struct tpg_data *tpg, u8 *basep[TPG_MAX_PLANES][2], + unsigned p, unsigned first, unsigned div, unsigned step, + int y, int x, const char *text, unsigned len) +{ + PRINTSTR(u16); +} + +static noinline void tpg_print_str_6(const struct tpg_data *tpg, u8 *basep[TPG_MAX_PLANES][2], + unsigned p, unsigned first, unsigned div, unsigned step, + int y, int x, const char *text, unsigned len) +{ + PRINTSTR(x24); +} + +static noinline void tpg_print_str_8(const struct tpg_data *tpg, u8 *basep[TPG_MAX_PLANES][2], + unsigned p, unsigned first, unsigned div, unsigned step, + int y, int x, const char *text, unsigned len) +{ + PRINTSTR(u32); +} + +void tpg_gen_text(const struct tpg_data *tpg, u8 *basep[TPG_MAX_PLANES][2], + int y, int x, const char *text) +{ + unsigned step = V4L2_FIELD_HAS_T_OR_B(tpg->field) ? 2 : 1; + unsigned div = step; + unsigned first = 0; + unsigned len; + unsigned p; + + if (font8x16 == NULL || basep == NULL || text == NULL) + return; + + len = strlen(text); + + /* Checks if it is possible to show string */ + if (y + 16 >= tpg->compose.height || x + 8 >= tpg->compose.width) + return; + + if (len > (tpg->compose.width - x) / 8) + len = (tpg->compose.width - x) / 8; + if (tpg->vflip) + y = tpg->compose.height - y - 16; + if (tpg->hflip) + x = tpg->compose.width - x - 8; + y += tpg->compose.top; + x += tpg->compose.left; + if (tpg->field == V4L2_FIELD_BOTTOM) + first = 1; + else if (tpg->field == V4L2_FIELD_SEQ_TB || tpg->field == V4L2_FIELD_SEQ_BT) + div = 2; + + for (p = 0; p < tpg->planes; p++) { + /* Print text */ + switch (tpg->twopixelsize[p]) { + case 2: + tpg_print_str_2(tpg, basep, p, first, div, step, y, x, + text, len); + break; + case 4: + tpg_print_str_4(tpg, basep, p, first, div, step, y, x, + text, len); + break; + case 6: + tpg_print_str_6(tpg, basep, p, first, div, step, y, x, + text, len); + break; + case 8: + tpg_print_str_8(tpg, basep, p, first, div, step, y, x, + text, len); + break; + } + } +} +EXPORT_SYMBOL_GPL(tpg_gen_text); + +const char *tpg_g_color_order(const struct tpg_data *tpg) +{ + switch (tpg->pattern) { + case TPG_PAT_75_COLORBAR: + case TPG_PAT_100_COLORBAR: + case TPG_PAT_CSC_COLORBAR: + case TPG_PAT_100_HCOLORBAR: + return "White, yellow, cyan, green, magenta, red, blue, black"; + case TPG_PAT_BLACK: + return "Black"; + case TPG_PAT_WHITE: + return "White"; + case TPG_PAT_RED: + return "Red"; + case TPG_PAT_GREEN: + return "Green"; + case TPG_PAT_BLUE: + return "Blue"; + default: + return NULL; + } +} +EXPORT_SYMBOL_GPL(tpg_g_color_order); + +void tpg_update_mv_step(struct tpg_data *tpg) +{ + int factor = tpg->mv_hor_mode > TPG_MOVE_NONE ? 
-1 : 1; + + if (tpg->hflip) + factor = -factor; + switch (tpg->mv_hor_mode) { + case TPG_MOVE_NEG_FAST: + case TPG_MOVE_POS_FAST: + tpg->mv_hor_step = ((tpg->src_width + 319) / 320) * 4; + break; + case TPG_MOVE_NEG: + case TPG_MOVE_POS: + tpg->mv_hor_step = ((tpg->src_width + 639) / 640) * 4; + break; + case TPG_MOVE_NEG_SLOW: + case TPG_MOVE_POS_SLOW: + tpg->mv_hor_step = 2; + break; + case TPG_MOVE_NONE: + tpg->mv_hor_step = 0; + break; + } + if (factor < 0) + tpg->mv_hor_step = tpg->src_width - tpg->mv_hor_step; + + factor = tpg->mv_vert_mode > TPG_MOVE_NONE ? -1 : 1; + switch (tpg->mv_vert_mode) { + case TPG_MOVE_NEG_FAST: + case TPG_MOVE_POS_FAST: + tpg->mv_vert_step = ((tpg->src_width + 319) / 320) * 4; + break; + case TPG_MOVE_NEG: + case TPG_MOVE_POS: + tpg->mv_vert_step = ((tpg->src_width + 639) / 640) * 4; + break; + case TPG_MOVE_NEG_SLOW: + case TPG_MOVE_POS_SLOW: + tpg->mv_vert_step = 1; + break; + case TPG_MOVE_NONE: + tpg->mv_vert_step = 0; + break; + } + if (factor < 0) + tpg->mv_vert_step = tpg->src_height - tpg->mv_vert_step; +} +EXPORT_SYMBOL_GPL(tpg_update_mv_step); + +/* Map the line number relative to the crop rectangle to a frame line number */ +static unsigned tpg_calc_frameline(const struct tpg_data *tpg, unsigned src_y, + unsigned field) +{ + switch (field) { + case V4L2_FIELD_TOP: + return tpg->crop.top + src_y * 2; + case V4L2_FIELD_BOTTOM: + return tpg->crop.top + src_y * 2 + 1; + default: + return src_y + tpg->crop.top; + } +} + +/* + * Map the line number relative to the compose rectangle to a destination + * buffer line number. + */ +static unsigned tpg_calc_buffer_line(const struct tpg_data *tpg, unsigned y, + unsigned field) +{ + y += tpg->compose.top; + switch (field) { + case V4L2_FIELD_SEQ_TB: + if (y & 1) + return tpg->buf_height / 2 + y / 2; + return y / 2; + case V4L2_FIELD_SEQ_BT: + if (y & 1) + return y / 2; + return tpg->buf_height / 2 + y / 2; + default: + return y; + } +} + +static void tpg_recalc(struct tpg_data *tpg) +{ + if (tpg->recalc_colors) { + tpg->recalc_colors = false; + tpg->recalc_lines = true; + tpg->real_xfer_func = tpg->xfer_func; + tpg->real_ycbcr_enc = tpg->ycbcr_enc; + tpg->real_hsv_enc = tpg->hsv_enc; + tpg->real_quantization = tpg->quantization; + + if (tpg->xfer_func == V4L2_XFER_FUNC_DEFAULT) + tpg->real_xfer_func = + V4L2_MAP_XFER_FUNC_DEFAULT(tpg->colorspace); + + if (tpg->ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT) + tpg->real_ycbcr_enc = + V4L2_MAP_YCBCR_ENC_DEFAULT(tpg->colorspace); + + if (tpg->quantization == V4L2_QUANTIZATION_DEFAULT) + tpg->real_quantization = + V4L2_MAP_QUANTIZATION_DEFAULT( + tpg->color_enc != TGP_COLOR_ENC_YCBCR, + tpg->colorspace, tpg->real_ycbcr_enc); + + tpg_precalculate_colors(tpg); + } + if (tpg->recalc_square_border) { + tpg->recalc_square_border = false; + tpg_calculate_square_border(tpg); + } + if (tpg->recalc_lines) { + tpg->recalc_lines = false; + tpg_precalculate_line(tpg); + } +} + +void tpg_calc_text_basep(struct tpg_data *tpg, + u8 *basep[TPG_MAX_PLANES][2], unsigned p, u8 *vbuf) +{ + unsigned stride = tpg->bytesperline[p]; + unsigned h = tpg->buf_height; + + tpg_recalc(tpg); + + basep[p][0] = vbuf; + basep[p][1] = vbuf; + h /= tpg->vdownsampling[p]; + if (tpg->field == V4L2_FIELD_SEQ_TB) + basep[p][1] += h * stride / 2; + else if (tpg->field == V4L2_FIELD_SEQ_BT) + basep[p][0] += h * stride / 2; + if (p == 0 && tpg->interleaved) + tpg_calc_text_basep(tpg, basep, 1, vbuf); +} +EXPORT_SYMBOL_GPL(tpg_calc_text_basep); + +static int tpg_pattern_avg(const struct tpg_data *tpg, + unsigned 
pat1, unsigned pat2) +{ + unsigned pat_lines = tpg_get_pat_lines(tpg); + + if (pat1 == (pat2 + 1) % pat_lines) + return pat2; + if (pat2 == (pat1 + 1) % pat_lines) + return pat1; + return -1; +} + +static const char *tpg_color_enc_str(enum tgp_color_enc + color_enc) +{ + switch (color_enc) { + case TGP_COLOR_ENC_HSV: + return "HSV"; + case TGP_COLOR_ENC_YCBCR: + return "Y'CbCr"; + case TGP_COLOR_ENC_LUMA: + return "Luma"; + case TGP_COLOR_ENC_RGB: + default: + return "R'G'B"; + + } +} + +void tpg_log_status(struct tpg_data *tpg) +{ + pr_info("tpg source WxH: %ux%u (%s)\n", + tpg->src_width, tpg->src_height, + tpg_color_enc_str(tpg->color_enc)); + pr_info("tpg field: %u\n", tpg->field); + pr_info("tpg crop: %ux%u@%dx%d\n", tpg->crop.width, tpg->crop.height, + tpg->crop.left, tpg->crop.top); + pr_info("tpg compose: %ux%u@%dx%d\n", tpg->compose.width, tpg->compose.height, + tpg->compose.left, tpg->compose.top); + pr_info("tpg colorspace: %d\n", tpg->colorspace); + pr_info("tpg transfer function: %d/%d\n", tpg->xfer_func, tpg->real_xfer_func); + if (tpg->color_enc == TGP_COLOR_ENC_HSV) + pr_info("tpg HSV encoding: %d/%d\n", + tpg->hsv_enc, tpg->real_hsv_enc); + else if (tpg->color_enc == TGP_COLOR_ENC_YCBCR) + pr_info("tpg Y'CbCr encoding: %d/%d\n", + tpg->ycbcr_enc, tpg->real_ycbcr_enc); + pr_info("tpg quantization: %d/%d\n", tpg->quantization, tpg->real_quantization); + pr_info("tpg RGB range: %d/%d\n", tpg->rgb_range, tpg->real_rgb_range); +} +EXPORT_SYMBOL_GPL(tpg_log_status); + +/* + * This struct contains common parameters used by both the drawing of the + * test pattern and the drawing of the extras (borders, square, etc.) + */ +struct tpg_draw_params { + /* common data */ + bool is_tv; + bool is_60hz; + unsigned twopixsize; + unsigned img_width; + unsigned stride; + unsigned hmax; + unsigned frame_line; + unsigned frame_line_next; + + /* test pattern */ + unsigned mv_hor_old; + unsigned mv_hor_new; + unsigned mv_vert_old; + unsigned mv_vert_new; + + /* extras */ + unsigned wss_width; + unsigned wss_random_offset; + unsigned sav_eav_f; + unsigned left_pillar_width; + unsigned right_pillar_start; +}; + +static void tpg_fill_params_pattern(const struct tpg_data *tpg, unsigned p, + struct tpg_draw_params *params) +{ + params->mv_hor_old = + tpg_hscale_div(tpg, p, tpg->mv_hor_count % tpg->src_width); + params->mv_hor_new = + tpg_hscale_div(tpg, p, (tpg->mv_hor_count + tpg->mv_hor_step) % + tpg->src_width); + params->mv_vert_old = tpg->mv_vert_count % tpg->src_height; + params->mv_vert_new = + (tpg->mv_vert_count + tpg->mv_vert_step) % tpg->src_height; +} + +static void tpg_fill_params_extras(const struct tpg_data *tpg, + unsigned p, + struct tpg_draw_params *params) +{ + unsigned left_pillar_width = 0; + unsigned right_pillar_start = params->img_width; + + params->wss_width = tpg->crop.left < tpg->src_width / 2 ? 
+ tpg->src_width / 2 - tpg->crop.left : 0; + if (params->wss_width > tpg->crop.width) + params->wss_width = tpg->crop.width; + params->wss_width = tpg_hscale_div(tpg, p, params->wss_width); + params->wss_random_offset = + params->twopixsize * get_random_u32_below(tpg->src_width / 2); + + if (tpg->crop.left < tpg->border.left) { + left_pillar_width = tpg->border.left - tpg->crop.left; + if (left_pillar_width > tpg->crop.width) + left_pillar_width = tpg->crop.width; + left_pillar_width = tpg_hscale_div(tpg, p, left_pillar_width); + } + params->left_pillar_width = left_pillar_width; + + if (tpg->crop.left + tpg->crop.width > + tpg->border.left + tpg->border.width) { + right_pillar_start = + tpg->border.left + tpg->border.width - tpg->crop.left; + right_pillar_start = + tpg_hscale_div(tpg, p, right_pillar_start); + if (right_pillar_start > params->img_width) + right_pillar_start = params->img_width; + } + params->right_pillar_start = right_pillar_start; + + params->sav_eav_f = tpg->field == + (params->is_60hz ? V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM); +} + +static void tpg_fill_plane_extras(const struct tpg_data *tpg, + const struct tpg_draw_params *params, + unsigned p, unsigned h, u8 *vbuf) +{ + unsigned twopixsize = params->twopixsize; + unsigned img_width = params->img_width; + unsigned frame_line = params->frame_line; + const struct v4l2_rect *sq = &tpg->square; + const struct v4l2_rect *b = &tpg->border; + const struct v4l2_rect *c = &tpg->crop; + + if (params->is_tv && !params->is_60hz && + frame_line == 0 && params->wss_width) { + /* + * Replace the first half of the top line of a 50 Hz frame + * with random data to simulate a WSS signal. + */ + u8 *wss = tpg->random_line[p] + params->wss_random_offset; + + memcpy(vbuf, wss, params->wss_width); + } + + if (tpg->show_border && frame_line >= b->top && + frame_line < b->top + b->height) { + unsigned bottom = b->top + b->height - 1; + unsigned left = params->left_pillar_width; + unsigned right = params->right_pillar_start; + + if (frame_line == b->top || frame_line == b->top + 1 || + frame_line == bottom || frame_line == bottom - 1) { + memcpy(vbuf + left, tpg->contrast_line[p], + right - left); + } else { + if (b->left >= c->left && + b->left < c->left + c->width) + memcpy(vbuf + left, + tpg->contrast_line[p], twopixsize); + if (b->left + b->width > c->left && + b->left + b->width <= c->left + c->width) + memcpy(vbuf + right - twopixsize, + tpg->contrast_line[p], twopixsize); + } + } + if (tpg->qual != TPG_QUAL_NOISE && frame_line >= b->top && + frame_line < b->top + b->height) { + memcpy(vbuf, tpg->black_line[p], params->left_pillar_width); + memcpy(vbuf + params->right_pillar_start, tpg->black_line[p], + img_width - params->right_pillar_start); + } + if (tpg->show_square && frame_line >= sq->top && + frame_line < sq->top + sq->height && + sq->left < c->left + c->width && + sq->left + sq->width >= c->left) { + unsigned left = sq->left; + unsigned width = sq->width; + + if (c->left > left) { + width -= c->left - left; + left = c->left; + } + if (c->left + c->width < left + width) + width -= left + width - c->left - c->width; + left -= c->left; + left = tpg_hscale_div(tpg, p, left); + width = tpg_hscale_div(tpg, p, width); + memcpy(vbuf + left, tpg->contrast_line[p], width); + } + if (tpg->insert_sav) { + unsigned offset = tpg_hdiv(tpg, p, tpg->compose.width / 3); + u8 *p = vbuf + offset; + unsigned vact = 0, hact = 0; + + p[0] = 0xff; + p[1] = 0; + p[2] = 0; + p[3] = 0x80 | (params->sav_eav_f << 6) | + (vact << 5) | (hact << 4) | + ((hact ^ 
vact) << 3) | + ((hact ^ params->sav_eav_f) << 2) | + ((params->sav_eav_f ^ vact) << 1) | + (hact ^ vact ^ params->sav_eav_f); + } + if (tpg->insert_eav) { + unsigned offset = tpg_hdiv(tpg, p, tpg->compose.width * 2 / 3); + u8 *p = vbuf + offset; + unsigned vact = 0, hact = 1; + + p[0] = 0xff; + p[1] = 0; + p[2] = 0; + p[3] = 0x80 | (params->sav_eav_f << 6) | + (vact << 5) | (hact << 4) | + ((hact ^ vact) << 3) | + ((hact ^ params->sav_eav_f) << 2) | + ((params->sav_eav_f ^ vact) << 1) | + (hact ^ vact ^ params->sav_eav_f); + } + if (tpg->insert_hdmi_video_guard_band) { + unsigned int i; + + switch (tpg->fourcc) { + case V4L2_PIX_FMT_BGR24: + case V4L2_PIX_FMT_RGB24: + for (i = 0; i < 3 * 4; i += 3) { + vbuf[i] = 0xab; + vbuf[i + 1] = 0x55; + vbuf[i + 2] = 0xab; + } + break; + case V4L2_PIX_FMT_RGB32: + case V4L2_PIX_FMT_ARGB32: + case V4L2_PIX_FMT_XRGB32: + case V4L2_PIX_FMT_BGRX32: + case V4L2_PIX_FMT_BGRA32: + for (i = 0; i < 4 * 4; i += 4) { + vbuf[i] = 0x00; + vbuf[i + 1] = 0xab; + vbuf[i + 2] = 0x55; + vbuf[i + 3] = 0xab; + } + break; + case V4L2_PIX_FMT_BGR32: + case V4L2_PIX_FMT_XBGR32: + case V4L2_PIX_FMT_ABGR32: + case V4L2_PIX_FMT_RGBX32: + case V4L2_PIX_FMT_RGBA32: + for (i = 0; i < 4 * 4; i += 4) { + vbuf[i] = 0xab; + vbuf[i + 1] = 0x55; + vbuf[i + 2] = 0xab; + vbuf[i + 3] = 0x00; + } + break; + } + } +} + +static void tpg_fill_plane_pattern(const struct tpg_data *tpg, + const struct tpg_draw_params *params, + unsigned p, unsigned h, u8 *vbuf) +{ + unsigned twopixsize = params->twopixsize; + unsigned img_width = params->img_width; + unsigned mv_hor_old = params->mv_hor_old; + unsigned mv_hor_new = params->mv_hor_new; + unsigned mv_vert_old = params->mv_vert_old; + unsigned mv_vert_new = params->mv_vert_new; + unsigned frame_line = params->frame_line; + unsigned frame_line_next = params->frame_line_next; + unsigned line_offset = tpg_hscale_div(tpg, p, tpg->crop.left); + bool even; + bool fill_blank = false; + unsigned pat_line_old; + unsigned pat_line_new; + u8 *linestart_older; + u8 *linestart_newer; + u8 *linestart_top; + u8 *linestart_bottom; + + even = !(frame_line & 1); + + if (h >= params->hmax) { + if (params->hmax == tpg->compose.height) + return; + if (!tpg->perc_fill_blank) + return; + fill_blank = true; + } + + if (tpg->vflip) { + frame_line = tpg->src_height - frame_line - 1; + frame_line_next = tpg->src_height - frame_line_next - 1; + } + + if (fill_blank) { + linestart_older = tpg->contrast_line[p]; + linestart_newer = tpg->contrast_line[p]; + } else if (tpg->qual != TPG_QUAL_NOISE && + (frame_line < tpg->border.top || + frame_line >= tpg->border.top + tpg->border.height)) { + linestart_older = tpg->black_line[p]; + linestart_newer = tpg->black_line[p]; + } else if (tpg->pattern == TPG_PAT_NOISE || tpg->qual == TPG_QUAL_NOISE) { + linestart_older = tpg->random_line[p] + + twopixsize * get_random_u32_below(tpg->src_width / 2); + linestart_newer = tpg->random_line[p] + + twopixsize * get_random_u32_below(tpg->src_width / 2); + } else { + unsigned frame_line_old = + (frame_line + mv_vert_old) % tpg->src_height; + unsigned frame_line_new = + (frame_line + mv_vert_new) % tpg->src_height; + unsigned pat_line_next_old; + unsigned pat_line_next_new; + + pat_line_old = tpg_get_pat_line(tpg, frame_line_old); + pat_line_new = tpg_get_pat_line(tpg, frame_line_new); + linestart_older = tpg->lines[pat_line_old][p] + mv_hor_old; + linestart_newer = tpg->lines[pat_line_new][p] + mv_hor_new; + + if (tpg->vdownsampling[p] > 1 && frame_line != frame_line_next) { + int avg_pat; + + 
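
[Editorial note, not part of the patch] The insert_sav/insert_eav branches above compose a BT.656-style SAV/EAV code: the bytes 0xFF 0x00 0x00 followed by a status byte carrying F (field), V (vertical blanking) and H (0 for SAV, 1 for EAV) plus four XOR protection bits. A minimal standalone sketch of that last byte, with f standing in for sav_eav_f:

/*
 * Editorial illustration only: the fourth byte of a BT.656-style SAV/EAV
 * code as built by tpg_fill_plane_extras() above.
 * f = field bit, v = vertical blanking, h = 0 for SAV, 1 for EAV.
 */
#include <stdio.h>

static unsigned char sav_eav_status(unsigned f, unsigned v, unsigned h)
{
	return 0x80 | (f << 6) | (v << 5) | (h << 4) |
	       ((h ^ v) << 3) |		/* protection bit P3 = V ^ H */
	       ((h ^ f) << 2) |		/* protection bit P2 = F ^ H */
	       ((f ^ v) << 1) |		/* protection bit P1 = F ^ V */
	       (h ^ v ^ f);		/* protection bit P0 = F ^ V ^ H */
}

int main(void)
{
	/* first field, active video (v = 0) */
	printf("SAV: ff 00 00 %02x\n", sav_eav_status(0, 0, 0));	/* 0x80 */
	printf("EAV: ff 00 00 %02x\n", sav_eav_status(0, 0, 1));	/* 0x9d */
	return 0;
}
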
/* + * Now decide whether we need to use downsampled_lines[]. + * That's necessary if the two lines use different patterns. + */ + pat_line_next_old = tpg_get_pat_line(tpg, + (frame_line_next + mv_vert_old) % tpg->src_height); + pat_line_next_new = tpg_get_pat_line(tpg, + (frame_line_next + mv_vert_new) % tpg->src_height); + + switch (tpg->field) { + case V4L2_FIELD_INTERLACED: + case V4L2_FIELD_INTERLACED_BT: + case V4L2_FIELD_INTERLACED_TB: + avg_pat = tpg_pattern_avg(tpg, pat_line_old, pat_line_new); + if (avg_pat < 0) + break; + linestart_older = tpg->downsampled_lines[avg_pat][p] + mv_hor_old; + linestart_newer = linestart_older; + break; + case V4L2_FIELD_NONE: + case V4L2_FIELD_TOP: + case V4L2_FIELD_BOTTOM: + case V4L2_FIELD_SEQ_BT: + case V4L2_FIELD_SEQ_TB: + avg_pat = tpg_pattern_avg(tpg, pat_line_old, pat_line_next_old); + if (avg_pat >= 0) + linestart_older = tpg->downsampled_lines[avg_pat][p] + + mv_hor_old; + avg_pat = tpg_pattern_avg(tpg, pat_line_new, pat_line_next_new); + if (avg_pat >= 0) + linestart_newer = tpg->downsampled_lines[avg_pat][p] + + mv_hor_new; + break; + } + } + linestart_older += line_offset; + linestart_newer += line_offset; + } + if (tpg->field_alternate) { + linestart_top = linestart_bottom = linestart_older; + } else if (params->is_60hz) { + linestart_top = linestart_newer; + linestart_bottom = linestart_older; + } else { + linestart_top = linestart_older; + linestart_bottom = linestart_newer; + } + + switch (tpg->field) { + case V4L2_FIELD_INTERLACED: + case V4L2_FIELD_INTERLACED_TB: + case V4L2_FIELD_SEQ_TB: + case V4L2_FIELD_SEQ_BT: + if (even) + memcpy(vbuf, linestart_top, img_width); + else + memcpy(vbuf, linestart_bottom, img_width); + break; + case V4L2_FIELD_INTERLACED_BT: + if (even) + memcpy(vbuf, linestart_bottom, img_width); + else + memcpy(vbuf, linestart_top, img_width); + break; + case V4L2_FIELD_TOP: + memcpy(vbuf, linestart_top, img_width); + break; + case V4L2_FIELD_BOTTOM: + memcpy(vbuf, linestart_bottom, img_width); + break; + case V4L2_FIELD_NONE: + default: + memcpy(vbuf, linestart_older, img_width); + break; + } +} + +void tpg_fill_plane_buffer(struct tpg_data *tpg, v4l2_std_id std, + unsigned p, u8 *vbuf) +{ + struct tpg_draw_params params; + unsigned factor = V4L2_FIELD_HAS_T_OR_B(tpg->field) ? 2 : 1; + + /* Coarse scaling with Bresenham */ + unsigned int_part = (tpg->crop.height / factor) / tpg->compose.height; + unsigned fract_part = (tpg->crop.height / factor) % tpg->compose.height; + unsigned src_y = 0; + unsigned error = 0; + unsigned h; + + tpg_recalc(tpg); + + params.is_tv = std; + params.is_60hz = std & V4L2_STD_525_60; + params.twopixsize = tpg->twopixelsize[p]; + params.img_width = tpg_hdiv(tpg, p, tpg->compose.width); + params.stride = tpg->bytesperline[p]; + params.hmax = (tpg->compose.height * tpg->perc_fill) / 100; + + tpg_fill_params_pattern(tpg, p, ¶ms); + tpg_fill_params_extras(tpg, p, ¶ms); + + vbuf += tpg_hdiv(tpg, p, tpg->compose.left); + + for (h = 0; h < tpg->compose.height; h++) { + unsigned buf_line; + + params.frame_line = tpg_calc_frameline(tpg, src_y, tpg->field); + params.frame_line_next = params.frame_line; + buf_line = tpg_calc_buffer_line(tpg, h, tpg->field); + src_y += int_part; + error += fract_part; + if (error >= tpg->compose.height) { + error -= tpg->compose.height; + src_y++; + } + + /* + * For line-interleaved formats determine the 'plane' + * based on the buffer line. 
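
[Editorial note, not part of the patch] tpg_fill_plane_buffer() above maps compose lines back to crop lines with Bresenham-style integer stepping: int_part whole source lines per output line, with the fract_part remainder accumulated in an error term. A standalone sketch of the same mapping; the function and example sizes are illustrative only:

/* Editorial illustration only: the coarse vertical scaler used above. */
#include <stdio.h>

static void map_lines(unsigned crop_h, unsigned compose_h)
{
	unsigned int_part = crop_h / compose_h;   /* whole source lines per output line */
	unsigned fract_part = crop_h % compose_h; /* remainder, spread via 'error' */
	unsigned src_y = 0, error = 0, h;

	for (h = 0; h < compose_h; h++) {
		printf("output line %3u <- source line %3u\n", h, src_y);
		src_y += int_part;
		error += fract_part;
		if (error >= compose_h) {
			error -= compose_h;
			src_y++;
		}
	}
}

int main(void)
{
	map_lines(480, 180);	/* e.g. scale a 480-line crop to a 180-line compose */
	return 0;
}
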
+ */ + if (tpg_g_interleaved(tpg)) + p = tpg_g_interleaved_plane(tpg, buf_line); + + if (tpg->vdownsampling[p] > 1) { + /* + * When doing vertical downsampling the field setting + * matters: for SEQ_BT/TB we downsample each field + * separately (i.e. lines 0+2 are combined, as are + * lines 1+3), for the other field settings we combine + * odd and even lines. Doing that for SEQ_BT/TB would + * be really weird. + */ + if (tpg->field == V4L2_FIELD_SEQ_BT || + tpg->field == V4L2_FIELD_SEQ_TB) { + unsigned next_src_y = src_y; + + if ((h & 3) >= 2) + continue; + next_src_y += int_part; + if (error + fract_part >= tpg->compose.height) + next_src_y++; + params.frame_line_next = + tpg_calc_frameline(tpg, next_src_y, tpg->field); + } else { + if (h & 1) + continue; + params.frame_line_next = + tpg_calc_frameline(tpg, src_y, tpg->field); + } + + buf_line /= tpg->vdownsampling[p]; + } + tpg_fill_plane_pattern(tpg, ¶ms, p, h, + vbuf + buf_line * params.stride); + tpg_fill_plane_extras(tpg, ¶ms, p, h, + vbuf + buf_line * params.stride); + } +} +EXPORT_SYMBOL_GPL(tpg_fill_plane_buffer); + +void tpg_fillbuffer(struct tpg_data *tpg, v4l2_std_id std, unsigned p, u8 *vbuf) +{ + unsigned offset = 0; + unsigned i; + + if (tpg->buffers > 1) { + tpg_fill_plane_buffer(tpg, std, p, vbuf); + return; + } + + for (i = 0; i < tpg_g_planes(tpg); i++) { + tpg_fill_plane_buffer(tpg, std, i, vbuf + offset); + offset += tpg_calc_plane_size(tpg, i); + } +} +EXPORT_SYMBOL_GPL(tpg_fillbuffer); + +MODULE_DESCRIPTION("V4L2 Test Pattern Generator"); +MODULE_AUTHOR("Hans Verkuil"); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/common/videobuf2/Kconfig b/drivers/media/common/videobuf2/Kconfig new file mode 100644 index 0000000000..d2223a12c9 --- /dev/null +++ b/drivers/media/common/videobuf2/Kconfig @@ -0,0 +1,32 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Used by drivers that need Videobuf2 modules +config VIDEOBUF2_CORE + select DMA_SHARED_BUFFER + tristate + +config VIDEOBUF2_V4L2 + tristate + +config VIDEOBUF2_MEMOPS + tristate + +config VIDEOBUF2_DMA_CONTIG + tristate + select VIDEOBUF2_CORE + select VIDEOBUF2_MEMOPS + select DMA_SHARED_BUFFER + +config VIDEOBUF2_VMALLOC + tristate + select VIDEOBUF2_CORE + select VIDEOBUF2_MEMOPS + select DMA_SHARED_BUFFER + +config VIDEOBUF2_DMA_SG + tristate + select VIDEOBUF2_CORE + select VIDEOBUF2_MEMOPS + +config VIDEOBUF2_DVB + tristate + select VIDEOBUF2_CORE diff --git a/drivers/media/common/videobuf2/Makefile b/drivers/media/common/videobuf2/Makefile new file mode 100644 index 0000000000..a6fe3f3046 --- /dev/null +++ b/drivers/media/common/videobuf2/Makefile @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: GPL-2.0 +videobuf2-common-objs := videobuf2-core.o +videobuf2-common-objs += frame_vector.o + +ifeq ($(CONFIG_TRACEPOINTS),y) + videobuf2-common-objs += vb2-trace.o +endif + +# Please keep it alphabetically sorted by Kconfig name +# (e. g. 
LC_ALL=C sort Makefile) +obj-$(CONFIG_VIDEOBUF2_CORE) += videobuf2-common.o +obj-$(CONFIG_VIDEOBUF2_DMA_CONTIG) += videobuf2-dma-contig.o +obj-$(CONFIG_VIDEOBUF2_DMA_SG) += videobuf2-dma-sg.o +obj-$(CONFIG_VIDEOBUF2_DVB) += videobuf2-dvb.o +obj-$(CONFIG_VIDEOBUF2_MEMOPS) += videobuf2-memops.o +obj-$(CONFIG_VIDEOBUF2_V4L2) += videobuf2-v4l2.o +obj-$(CONFIG_VIDEOBUF2_VMALLOC) += videobuf2-vmalloc.o diff --git a/drivers/media/common/videobuf2/frame_vector.c b/drivers/media/common/videobuf2/frame_vector.c new file mode 100644 index 0000000000..fd87747be9 --- /dev/null +++ b/drivers/media/common/videobuf2/frame_vector.c @@ -0,0 +1,197 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/** + * get_vaddr_frames() - map virtual addresses to pfns + * @start: starting user address + * @nr_frames: number of pages / pfns from start to map + * @write: the mapped address has write permission + * @vec: structure which receives pages / pfns of the addresses mapped. + * It should have space for at least nr_frames entries. + * + * This function maps virtual addresses from @start and fills @vec structure + * with page frame numbers or page pointers to corresponding pages (choice + * depends on the type of the vma underlying the virtual address). If @start + * belongs to a normal vma, the function grabs reference to each of the pages + * to pin them in memory. If @start belongs to VM_IO | VM_PFNMAP vma, we don't + * touch page structures and the caller must make sure pfns aren't reused for + * anything else while he is using them. + * + * The function returns number of pages mapped which may be less than + * @nr_frames. In particular we stop mapping if there are more vmas of + * different type underlying the specified range of virtual addresses. + * When the function isn't able to map a single page, it returns error. + * + * Note that get_vaddr_frames() cannot follow VM_IO mappings. It used + * to be able to do that, but that could (racily) return non-refcounted + * pfns. + * + * This function takes care of grabbing mmap_lock as necessary. + */ +int get_vaddr_frames(unsigned long start, unsigned int nr_frames, bool write, + struct frame_vector *vec) +{ + int ret; + unsigned int gup_flags = FOLL_LONGTERM; + + if (nr_frames == 0) + return 0; + + if (WARN_ON_ONCE(nr_frames > vec->nr_allocated)) + nr_frames = vec->nr_allocated; + + start = untagged_addr(start); + + if (write) + gup_flags |= FOLL_WRITE; + + ret = pin_user_pages_fast(start, nr_frames, gup_flags, + (struct page **)(vec->ptrs)); + vec->got_ref = true; + vec->is_pfns = false; + vec->nr_frames = ret; + + if (likely(ret > 0)) + return ret; + + vec->nr_frames = 0; + return ret ? ret : -EFAULT; +} +EXPORT_SYMBOL(get_vaddr_frames); + +/** + * put_vaddr_frames() - drop references to pages if get_vaddr_frames() acquired + * them + * @vec: frame vector to put + * + * Drop references to pages if get_vaddr_frames() acquired them. We also + * invalidate the frame vector so that it is prepared for the next call into + * get_vaddr_frames(). + */ +void put_vaddr_frames(struct frame_vector *vec) +{ + struct page **pages; + + if (!vec->got_ref) + goto out; + pages = frame_vector_pages(vec); + /* + * frame_vector_pages() might needed to do a conversion when + * get_vaddr_frames() got pages but vec was later converted to pfns. + * But it shouldn't really fail to convert pfns back... 
+ */ + if (WARN_ON(IS_ERR(pages))) + goto out; + + unpin_user_pages(pages, vec->nr_frames); + vec->got_ref = false; +out: + vec->nr_frames = 0; +} +EXPORT_SYMBOL(put_vaddr_frames); + +/** + * frame_vector_to_pages - convert frame vector to contain page pointers + * @vec: frame vector to convert + * + * Convert @vec to contain array of page pointers. If the conversion is + * successful, return 0. Otherwise return an error. Note that we do not grab + * page references for the page structures. + */ +int frame_vector_to_pages(struct frame_vector *vec) +{ + int i; + unsigned long *nums; + struct page **pages; + + if (!vec->is_pfns) + return 0; + nums = frame_vector_pfns(vec); + for (i = 0; i < vec->nr_frames; i++) + if (!pfn_valid(nums[i])) + return -EINVAL; + pages = (struct page **)nums; + for (i = 0; i < vec->nr_frames; i++) + pages[i] = pfn_to_page(nums[i]); + vec->is_pfns = false; + return 0; +} +EXPORT_SYMBOL(frame_vector_to_pages); + +/** + * frame_vector_to_pfns - convert frame vector to contain pfns + * @vec: frame vector to convert + * + * Convert @vec to contain array of pfns. + */ +void frame_vector_to_pfns(struct frame_vector *vec) +{ + int i; + unsigned long *nums; + struct page **pages; + + if (vec->is_pfns) + return; + pages = (struct page **)(vec->ptrs); + nums = (unsigned long *)pages; + for (i = 0; i < vec->nr_frames; i++) + nums[i] = page_to_pfn(pages[i]); + vec->is_pfns = true; +} +EXPORT_SYMBOL(frame_vector_to_pfns); + +/** + * frame_vector_create() - allocate & initialize structure for pinned pfns + * @nr_frames: number of pfns slots we should reserve + * + * Allocate and initialize struct pinned_pfns to be able to hold @nr_pfns + * pfns. + */ +struct frame_vector *frame_vector_create(unsigned int nr_frames) +{ + struct frame_vector *vec; + int size = sizeof(struct frame_vector) + sizeof(void *) * nr_frames; + + if (WARN_ON_ONCE(nr_frames == 0)) + return NULL; + /* + * This is absurdly high. It's here just to avoid strange effects when + * arithmetics overflows. + */ + if (WARN_ON_ONCE(nr_frames > INT_MAX / sizeof(void *) / 2)) + return NULL; + /* + * Avoid higher order allocations, use vmalloc instead. It should + * be rare anyway. + */ + vec = kvmalloc(size, GFP_KERNEL); + if (!vec) + return NULL; + vec->nr_allocated = nr_frames; + vec->nr_frames = 0; + return vec; +} +EXPORT_SYMBOL(frame_vector_create); + +/** + * frame_vector_destroy() - free memory allocated to carry frame vector + * @vec: Frame vector to free + * + * Free structure allocated by frame_vector_create() to carry frames. + */ +void frame_vector_destroy(struct frame_vector *vec) +{ + /* Make sure put_vaddr_frames() got called properly... 
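
[Editorial note, not part of the patch] frame_vector_create() above rejects element counts that could overflow the nr_frames * sizeof(void *) size computation before allocating. A standalone sketch of the same overflow-safe pattern; the struct and function names are illustrative:

/* Editorial illustration only: overflow-guarded flexible-array allocation. */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

struct vec {
	unsigned int nr_allocated;
	void *ptrs[];			/* one slot per frame */
};

static struct vec *vec_create(unsigned int nr)
{
	size_t size;
	struct vec *v;

	/* absurdly large counts are rejected before the size can wrap */
	if (nr == 0 || nr > INT_MAX / sizeof(void *) / 2)
		return NULL;

	size = sizeof(struct vec) + sizeof(void *) * nr;
	v = malloc(size);
	if (v)
		v->nr_allocated = nr;
	return v;
}

int main(void)
{
	struct vec *ok = vec_create(16);
	struct vec *bad = vec_create(UINT_MAX);	/* rejected */

	printf("ok=%s bad=%s\n", ok ? "allocated" : "NULL", bad ? "allocated" : "NULL");
	free(ok);
	return 0;
}
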
*/ + VM_BUG_ON(vec->nr_frames > 0); + kvfree(vec); +} +EXPORT_SYMBOL(frame_vector_destroy); diff --git a/drivers/media/common/videobuf2/vb2-trace.c b/drivers/media/common/videobuf2/vb2-trace.c new file mode 100644 index 0000000000..4c0f39d271 --- /dev/null +++ b/drivers/media/common/videobuf2/vb2-trace.c @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 +#include + +#define CREATE_TRACE_POINTS +#include + +EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_buf_done); +EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_buf_queue); +EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_dqbuf); +EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_qbuf); diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c new file mode 100644 index 0000000000..cf6727d9c8 --- /dev/null +++ b/drivers/media/common/videobuf2/videobuf2-core.c @@ -0,0 +1,3076 @@ +/* + * videobuf2-core.c - video buffer 2 core framework + * + * Copyright (C) 2010 Samsung Electronics + * + * Author: Pawel Osciak + * Marek Szyprowski + * + * The vb2_thread implementation was based on code from videobuf-dvb.c: + * (c) 2004 Gerd Knorr [SUSE Labs] + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +static int debug; +module_param(debug, int, 0644); + +#define dprintk(q, level, fmt, arg...) \ + do { \ + if (debug >= level) \ + pr_info("[%s] %s: " fmt, (q)->name, __func__, \ + ## arg); \ + } while (0) + +#ifdef CONFIG_VIDEO_ADV_DEBUG + +/* + * If advanced debugging is on, then count how often each op is called + * successfully, which can either be per-buffer or per-queue. + * + * This makes it easy to check that the 'init' and 'cleanup' + * (and variations thereof) stay balanced. + */ + +#define log_memop(vb, op) \ + dprintk((vb)->vb2_queue, 2, "call_memop(%d, %s)%s\n", \ + (vb)->index, #op, \ + (vb)->vb2_queue->mem_ops->op ? "" : " (nop)") + +#define call_memop(vb, op, args...) \ +({ \ + struct vb2_queue *_q = (vb)->vb2_queue; \ + int err; \ + \ + log_memop(vb, op); \ + err = _q->mem_ops->op ? _q->mem_ops->op(args) : 0; \ + if (!err) \ + (vb)->cnt_mem_ ## op++; \ + err; \ +}) + +#define call_ptr_memop(op, vb, args...) \ +({ \ + struct vb2_queue *_q = (vb)->vb2_queue; \ + void *ptr; \ + \ + log_memop(vb, op); \ + ptr = _q->mem_ops->op ? _q->mem_ops->op(vb, args) : NULL; \ + if (!IS_ERR_OR_NULL(ptr)) \ + (vb)->cnt_mem_ ## op++; \ + ptr; \ +}) + +#define call_void_memop(vb, op, args...) \ +({ \ + struct vb2_queue *_q = (vb)->vb2_queue; \ + \ + log_memop(vb, op); \ + if (_q->mem_ops->op) \ + _q->mem_ops->op(args); \ + (vb)->cnt_mem_ ## op++; \ +}) + +#define log_qop(q, op) \ + dprintk(q, 2, "call_qop(%s)%s\n", #op, \ + (q)->ops->op ? "" : " (nop)") + +#define call_qop(q, op, args...) \ +({ \ + int err; \ + \ + log_qop(q, op); \ + err = (q)->ops->op ? (q)->ops->op(args) : 0; \ + if (!err) \ + (q)->cnt_ ## op++; \ + err; \ +}) + +#define call_void_qop(q, op, args...) \ +({ \ + log_qop(q, op); \ + if ((q)->ops->op) \ + (q)->ops->op(args); \ + (q)->cnt_ ## op++; \ +}) + +#define log_vb_qop(vb, op, args...) \ + dprintk((vb)->vb2_queue, 2, "call_vb_qop(%d, %s)%s\n", \ + (vb)->index, #op, \ + (vb)->vb2_queue->ops->op ? "" : " (nop)") + +#define call_vb_qop(vb, op, args...) \ +({ \ + int err; \ + \ + log_vb_qop(vb, op); \ + err = (vb)->vb2_queue->ops->op ? 
\ + (vb)->vb2_queue->ops->op(args) : 0; \ + if (!err) \ + (vb)->cnt_ ## op++; \ + err; \ +}) + +#define call_void_vb_qop(vb, op, args...) \ +({ \ + log_vb_qop(vb, op); \ + if ((vb)->vb2_queue->ops->op) \ + (vb)->vb2_queue->ops->op(args); \ + (vb)->cnt_ ## op++; \ +}) + +#else + +#define call_memop(vb, op, args...) \ + ((vb)->vb2_queue->mem_ops->op ? \ + (vb)->vb2_queue->mem_ops->op(args) : 0) + +#define call_ptr_memop(op, vb, args...) \ + ((vb)->vb2_queue->mem_ops->op ? \ + (vb)->vb2_queue->mem_ops->op(vb, args) : NULL) + +#define call_void_memop(vb, op, args...) \ + do { \ + if ((vb)->vb2_queue->mem_ops->op) \ + (vb)->vb2_queue->mem_ops->op(args); \ + } while (0) + +#define call_qop(q, op, args...) \ + ((q)->ops->op ? (q)->ops->op(args) : 0) + +#define call_void_qop(q, op, args...) \ + do { \ + if ((q)->ops->op) \ + (q)->ops->op(args); \ + } while (0) + +#define call_vb_qop(vb, op, args...) \ + ((vb)->vb2_queue->ops->op ? (vb)->vb2_queue->ops->op(args) : 0) + +#define call_void_vb_qop(vb, op, args...) \ + do { \ + if ((vb)->vb2_queue->ops->op) \ + (vb)->vb2_queue->ops->op(args); \ + } while (0) + +#endif + +#define call_bufop(q, op, args...) \ +({ \ + int ret = 0; \ + if (q && q->buf_ops && q->buf_ops->op) \ + ret = q->buf_ops->op(args); \ + ret; \ +}) + +#define call_void_bufop(q, op, args...) \ +({ \ + if (q && q->buf_ops && q->buf_ops->op) \ + q->buf_ops->op(args); \ +}) + +static void __vb2_queue_cancel(struct vb2_queue *q); +static void __enqueue_in_driver(struct vb2_buffer *vb); + +static const char *vb2_state_name(enum vb2_buffer_state s) +{ + static const char * const state_names[] = { + [VB2_BUF_STATE_DEQUEUED] = "dequeued", + [VB2_BUF_STATE_IN_REQUEST] = "in request", + [VB2_BUF_STATE_PREPARING] = "preparing", + [VB2_BUF_STATE_QUEUED] = "queued", + [VB2_BUF_STATE_ACTIVE] = "active", + [VB2_BUF_STATE_DONE] = "done", + [VB2_BUF_STATE_ERROR] = "error", + }; + + if ((unsigned int)(s) < ARRAY_SIZE(state_names)) + return state_names[s]; + return "unknown"; +} + +/* + * __vb2_buf_mem_alloc() - allocate video memory for the given buffer + */ +static int __vb2_buf_mem_alloc(struct vb2_buffer *vb) +{ + struct vb2_queue *q = vb->vb2_queue; + void *mem_priv; + int plane; + int ret = -ENOMEM; + + /* + * Allocate memory for all planes in this buffer + * NOTE: mmapped areas should be page aligned + */ + for (plane = 0; plane < vb->num_planes; ++plane) { + /* Memops alloc requires size to be page aligned. */ + unsigned long size = PAGE_ALIGN(vb->planes[plane].length); + + /* Did it wrap around? */ + if (size < vb->planes[plane].length) + goto free; + + mem_priv = call_ptr_memop(alloc, + vb, + q->alloc_devs[plane] ? 
: q->dev, + size); + if (IS_ERR_OR_NULL(mem_priv)) { + if (mem_priv) + ret = PTR_ERR(mem_priv); + goto free; + } + + /* Associate allocator private data with this plane */ + vb->planes[plane].mem_priv = mem_priv; + } + + return 0; +free: + /* Free already allocated memory if one of the allocations failed */ + for (; plane > 0; --plane) { + call_void_memop(vb, put, vb->planes[plane - 1].mem_priv); + vb->planes[plane - 1].mem_priv = NULL; + } + + return ret; +} + +/* + * __vb2_buf_mem_free() - free memory of the given buffer + */ +static void __vb2_buf_mem_free(struct vb2_buffer *vb) +{ + unsigned int plane; + + for (plane = 0; plane < vb->num_planes; ++plane) { + call_void_memop(vb, put, vb->planes[plane].mem_priv); + vb->planes[plane].mem_priv = NULL; + dprintk(vb->vb2_queue, 3, "freed plane %d of buffer %d\n", + plane, vb->index); + } +} + +/* + * __vb2_buf_userptr_put() - release userspace memory associated with + * a USERPTR buffer + */ +static void __vb2_buf_userptr_put(struct vb2_buffer *vb) +{ + unsigned int plane; + + for (plane = 0; plane < vb->num_planes; ++plane) { + if (vb->planes[plane].mem_priv) + call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv); + vb->planes[plane].mem_priv = NULL; + } +} + +/* + * __vb2_plane_dmabuf_put() - release memory associated with + * a DMABUF shared plane + */ +static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p) +{ + if (!p->mem_priv) + return; + + if (p->dbuf_mapped) + call_void_memop(vb, unmap_dmabuf, p->mem_priv); + + call_void_memop(vb, detach_dmabuf, p->mem_priv); + dma_buf_put(p->dbuf); + p->mem_priv = NULL; + p->dbuf = NULL; + p->dbuf_mapped = 0; +} + +/* + * __vb2_buf_dmabuf_put() - release memory associated with + * a DMABUF shared buffer + */ +static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb) +{ + unsigned int plane; + + for (plane = 0; plane < vb->num_planes; ++plane) + __vb2_plane_dmabuf_put(vb, &vb->planes[plane]); +} + +/* + * __vb2_buf_mem_prepare() - call ->prepare() on buffer's private memory + * to sync caches + */ +static void __vb2_buf_mem_prepare(struct vb2_buffer *vb) +{ + unsigned int plane; + + if (vb->synced) + return; + + vb->synced = 1; + for (plane = 0; plane < vb->num_planes; ++plane) + call_void_memop(vb, prepare, vb->planes[plane].mem_priv); +} + +/* + * __vb2_buf_mem_finish() - call ->finish on buffer's private memory + * to sync caches + */ +static void __vb2_buf_mem_finish(struct vb2_buffer *vb) +{ + unsigned int plane; + + if (!vb->synced) + return; + + vb->synced = 0; + for (plane = 0; plane < vb->num_planes; ++plane) + call_void_memop(vb, finish, vb->planes[plane].mem_priv); +} + +/* + * __setup_offsets() - setup unique offsets ("cookies") for every plane in + * the buffer. + */ +static void __setup_offsets(struct vb2_buffer *vb) +{ + struct vb2_queue *q = vb->vb2_queue; + unsigned int plane; + unsigned long off = 0; + + if (vb->index) { + struct vb2_buffer *prev = q->bufs[vb->index - 1]; + struct vb2_plane *p = &prev->planes[prev->num_planes - 1]; + + off = PAGE_ALIGN(p->m.offset + p->length); + } + + for (plane = 0; plane < vb->num_planes; ++plane) { + vb->planes[plane].m.offset = off; + + dprintk(q, 3, "buffer %d, plane %d offset 0x%08lx\n", + vb->index, plane, off); + + off += vb->planes[plane].length; + off = PAGE_ALIGN(off); + } +} + +static void init_buffer_cache_hints(struct vb2_queue *q, struct vb2_buffer *vb) +{ + /* + * DMA exporter should take care of cache syncs, so we can avoid + * explicit ->prepare()/->finish() syncs. 
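
[Editorial note, not part of the patch] __setup_offsets() above gives every plane of a buffer a unique, page-aligned mmap offset ("cookie") by accumulating the page-aligned lengths of the preceding planes. A standalone sketch of the arithmetic for a single buffer; PAGE_SIZE = 4096 and the plane sizes are assumptions for illustration:

/* Editorial illustration only: page-aligned per-plane mmap offsets. */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long lengths[3] = { 614400, 153600, 153600 };	/* e.g. a 3-plane YUV buffer */
	unsigned long off = 0;
	unsigned int i;

	for (i = 0; i < 3; i++) {
		printf("plane %u: offset 0x%08lx length %lu\n", i, off, lengths[i]);
		off += lengths[i];
		off = PAGE_ALIGN(off);	/* next plane starts on a page boundary */
	}
	return 0;
}
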
For other ->memory types + * we always need ->prepare() or/and ->finish() cache sync. + */ + if (q->memory == VB2_MEMORY_DMABUF) { + vb->skip_cache_sync_on_finish = 1; + vb->skip_cache_sync_on_prepare = 1; + return; + } + + /* + * ->finish() cache sync can be avoided when queue direction is + * TO_DEVICE. + */ + if (q->dma_dir == DMA_TO_DEVICE) + vb->skip_cache_sync_on_finish = 1; +} + +/* + * __vb2_queue_alloc() - allocate vb2 buffer structures and (for MMAP type) + * video buffer memory for all buffers/planes on the queue and initializes the + * queue + * + * Returns the number of buffers successfully allocated. + */ +static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory, + unsigned int num_buffers, unsigned int num_planes, + const unsigned plane_sizes[VB2_MAX_PLANES]) +{ + unsigned int buffer, plane; + struct vb2_buffer *vb; + int ret; + + /* Ensure that q->num_buffers+num_buffers is below VB2_MAX_FRAME */ + num_buffers = min_t(unsigned int, num_buffers, + VB2_MAX_FRAME - q->num_buffers); + + for (buffer = 0; buffer < num_buffers; ++buffer) { + /* Allocate vb2 buffer structures */ + vb = kzalloc(q->buf_struct_size, GFP_KERNEL); + if (!vb) { + dprintk(q, 1, "memory alloc for buffer struct failed\n"); + break; + } + + vb->state = VB2_BUF_STATE_DEQUEUED; + vb->vb2_queue = q; + vb->num_planes = num_planes; + vb->index = q->num_buffers + buffer; + vb->type = q->type; + vb->memory = memory; + init_buffer_cache_hints(q, vb); + for (plane = 0; plane < num_planes; ++plane) { + vb->planes[plane].length = plane_sizes[plane]; + vb->planes[plane].min_length = plane_sizes[plane]; + } + call_void_bufop(q, init_buffer, vb); + + q->bufs[vb->index] = vb; + + /* Allocate video buffer memory for the MMAP type */ + if (memory == VB2_MEMORY_MMAP) { + ret = __vb2_buf_mem_alloc(vb); + if (ret) { + dprintk(q, 1, "failed allocating memory for buffer %d\n", + buffer); + q->bufs[vb->index] = NULL; + kfree(vb); + break; + } + __setup_offsets(vb); + /* + * Call the driver-provided buffer initialization + * callback, if given. An error in initialization + * results in queue setup failure. + */ + ret = call_vb_qop(vb, buf_init, vb); + if (ret) { + dprintk(q, 1, "buffer %d %p initialization failed\n", + buffer, vb); + __vb2_buf_mem_free(vb); + q->bufs[vb->index] = NULL; + kfree(vb); + break; + } + } + } + + dprintk(q, 3, "allocated %d buffers, %d plane(s) each\n", + buffer, num_planes); + + return buffer; +} + +/* + * __vb2_free_mem() - release all video buffer memory for a given queue + */ +static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers) +{ + unsigned int buffer; + struct vb2_buffer *vb; + + for (buffer = q->num_buffers - buffers; buffer < q->num_buffers; + ++buffer) { + vb = q->bufs[buffer]; + if (!vb) + continue; + + /* Free MMAP buffers or release USERPTR buffers */ + if (q->memory == VB2_MEMORY_MMAP) + __vb2_buf_mem_free(vb); + else if (q->memory == VB2_MEMORY_DMABUF) + __vb2_buf_dmabuf_put(vb); + else + __vb2_buf_userptr_put(vb); + } +} + +/* + * __vb2_queue_free() - free buffers at the end of the queue - video memory and + * related information, if no buffers are left return the queue to an + * uninitialized state. Might be called even if the queue has already been freed. 
+ */ +static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers) +{ + unsigned int buffer; + + lockdep_assert_held(&q->mmap_lock); + + /* Call driver-provided cleanup function for each buffer, if provided */ + for (buffer = q->num_buffers - buffers; buffer < q->num_buffers; + ++buffer) { + struct vb2_buffer *vb = q->bufs[buffer]; + + if (vb && vb->planes[0].mem_priv) + call_void_vb_qop(vb, buf_cleanup, vb); + } + + /* Release video buffer memory */ + __vb2_free_mem(q, buffers); + +#ifdef CONFIG_VIDEO_ADV_DEBUG + /* + * Check that all the calls were balances during the life-time of this + * queue. If not (or if the debug level is 1 or up), then dump the + * counters to the kernel log. + */ + if (q->num_buffers) { + bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming || + q->cnt_prepare_streaming != q->cnt_unprepare_streaming || + q->cnt_wait_prepare != q->cnt_wait_finish; + + if (unbalanced || debug) { + pr_info("counters for queue %p:%s\n", q, + unbalanced ? " UNBALANCED!" : ""); + pr_info(" setup: %u start_streaming: %u stop_streaming: %u\n", + q->cnt_queue_setup, q->cnt_start_streaming, + q->cnt_stop_streaming); + pr_info(" prepare_streaming: %u unprepare_streaming: %u\n", + q->cnt_prepare_streaming, q->cnt_unprepare_streaming); + pr_info(" wait_prepare: %u wait_finish: %u\n", + q->cnt_wait_prepare, q->cnt_wait_finish); + } + q->cnt_queue_setup = 0; + q->cnt_wait_prepare = 0; + q->cnt_wait_finish = 0; + q->cnt_prepare_streaming = 0; + q->cnt_start_streaming = 0; + q->cnt_stop_streaming = 0; + q->cnt_unprepare_streaming = 0; + } + for (buffer = 0; buffer < q->num_buffers; ++buffer) { + struct vb2_buffer *vb = q->bufs[buffer]; + bool unbalanced = vb->cnt_mem_alloc != vb->cnt_mem_put || + vb->cnt_mem_prepare != vb->cnt_mem_finish || + vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr || + vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf || + vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf || + vb->cnt_buf_queue != vb->cnt_buf_done || + vb->cnt_buf_prepare != vb->cnt_buf_finish || + vb->cnt_buf_init != vb->cnt_buf_cleanup; + + if (unbalanced || debug) { + pr_info(" counters for queue %p, buffer %d:%s\n", + q, buffer, unbalanced ? " UNBALANCED!" 
: ""); + pr_info(" buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n", + vb->cnt_buf_init, vb->cnt_buf_cleanup, + vb->cnt_buf_prepare, vb->cnt_buf_finish); + pr_info(" buf_out_validate: %u buf_queue: %u buf_done: %u buf_request_complete: %u\n", + vb->cnt_buf_out_validate, vb->cnt_buf_queue, + vb->cnt_buf_done, vb->cnt_buf_request_complete); + pr_info(" alloc: %u put: %u prepare: %u finish: %u mmap: %u\n", + vb->cnt_mem_alloc, vb->cnt_mem_put, + vb->cnt_mem_prepare, vb->cnt_mem_finish, + vb->cnt_mem_mmap); + pr_info(" get_userptr: %u put_userptr: %u\n", + vb->cnt_mem_get_userptr, vb->cnt_mem_put_userptr); + pr_info(" attach_dmabuf: %u detach_dmabuf: %u map_dmabuf: %u unmap_dmabuf: %u\n", + vb->cnt_mem_attach_dmabuf, vb->cnt_mem_detach_dmabuf, + vb->cnt_mem_map_dmabuf, vb->cnt_mem_unmap_dmabuf); + pr_info(" get_dmabuf: %u num_users: %u vaddr: %u cookie: %u\n", + vb->cnt_mem_get_dmabuf, + vb->cnt_mem_num_users, + vb->cnt_mem_vaddr, + vb->cnt_mem_cookie); + } + } +#endif + + /* Free vb2 buffers */ + for (buffer = q->num_buffers - buffers; buffer < q->num_buffers; + ++buffer) { + kfree(q->bufs[buffer]); + q->bufs[buffer] = NULL; + } + + q->num_buffers -= buffers; + if (!q->num_buffers) { + q->memory = VB2_MEMORY_UNKNOWN; + INIT_LIST_HEAD(&q->queued_list); + } +} + +bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb) +{ + unsigned int plane; + for (plane = 0; plane < vb->num_planes; ++plane) { + void *mem_priv = vb->planes[plane].mem_priv; + /* + * If num_users() has not been provided, call_memop + * will return 0, apparently nobody cares about this + * case anyway. If num_users() returns more than 1, + * we are not the only user of the plane's memory. + */ + if (mem_priv && call_memop(vb, num_users, mem_priv) > 1) + return true; + } + return false; +} +EXPORT_SYMBOL(vb2_buffer_in_use); + +/* + * __buffers_in_use() - return true if any buffers on the queue are in use and + * the queue cannot be freed (by the means of REQBUFS(0)) call + */ +static bool __buffers_in_use(struct vb2_queue *q) +{ + unsigned int buffer; + for (buffer = 0; buffer < q->num_buffers; ++buffer) { + if (vb2_buffer_in_use(q, q->bufs[buffer])) + return true; + } + return false; +} + +void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb) +{ + call_void_bufop(q, fill_user_buffer, q->bufs[index], pb); +} +EXPORT_SYMBOL_GPL(vb2_core_querybuf); + +/* + * __verify_userptr_ops() - verify that all memory operations required for + * USERPTR queue type have been provided + */ +static int __verify_userptr_ops(struct vb2_queue *q) +{ + if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr || + !q->mem_ops->put_userptr) + return -EINVAL; + + return 0; +} + +/* + * __verify_mmap_ops() - verify that all memory operations required for + * MMAP queue type have been provided + */ +static int __verify_mmap_ops(struct vb2_queue *q) +{ + if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc || + !q->mem_ops->put || !q->mem_ops->mmap) + return -EINVAL; + + return 0; +} + +/* + * __verify_dmabuf_ops() - verify that all memory operations required for + * DMABUF queue type have been provided + */ +static int __verify_dmabuf_ops(struct vb2_queue *q) +{ + if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf || + !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf || + !q->mem_ops->unmap_dmabuf) + return -EINVAL; + + return 0; +} + +int vb2_verify_memory_type(struct vb2_queue *q, + enum vb2_memory memory, unsigned int type) +{ + if (memory != VB2_MEMORY_MMAP && memory != VB2_MEMORY_USERPTR 
&& + memory != VB2_MEMORY_DMABUF) { + dprintk(q, 1, "unsupported memory type\n"); + return -EINVAL; + } + + if (type != q->type) { + dprintk(q, 1, "requested type is incorrect\n"); + return -EINVAL; + } + + /* + * Make sure all the required memory ops for given memory type + * are available. + */ + if (memory == VB2_MEMORY_MMAP && __verify_mmap_ops(q)) { + dprintk(q, 1, "MMAP for current setup unsupported\n"); + return -EINVAL; + } + + if (memory == VB2_MEMORY_USERPTR && __verify_userptr_ops(q)) { + dprintk(q, 1, "USERPTR for current setup unsupported\n"); + return -EINVAL; + } + + if (memory == VB2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) { + dprintk(q, 1, "DMABUF for current setup unsupported\n"); + return -EINVAL; + } + + /* + * Place the busy tests at the end: -EBUSY can be ignored when + * create_bufs is called with count == 0, but count == 0 should still + * do the memory and type validation. + */ + if (vb2_fileio_is_active(q)) { + dprintk(q, 1, "file io in progress\n"); + return -EBUSY; + } + return 0; +} +EXPORT_SYMBOL(vb2_verify_memory_type); + +static void set_queue_coherency(struct vb2_queue *q, bool non_coherent_mem) +{ + q->non_coherent_mem = 0; + + if (!vb2_queue_allows_cache_hints(q)) + return; + q->non_coherent_mem = non_coherent_mem; +} + +static bool verify_coherency_flags(struct vb2_queue *q, bool non_coherent_mem) +{ + if (non_coherent_mem != q->non_coherent_mem) { + dprintk(q, 1, "memory coherency model mismatch\n"); + return false; + } + return true; +} + +int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, + unsigned int flags, unsigned int *count) +{ + unsigned int num_buffers, allocated_buffers, num_planes = 0; + unsigned plane_sizes[VB2_MAX_PLANES] = { }; + bool non_coherent_mem = flags & V4L2_MEMORY_FLAG_NON_COHERENT; + unsigned int i; + int ret; + + if (q->streaming) { + dprintk(q, 1, "streaming active\n"); + return -EBUSY; + } + + if (q->waiting_in_dqbuf && *count) { + dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n"); + return -EBUSY; + } + + if (*count == 0 || q->num_buffers != 0 || + (q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory) || + !verify_coherency_flags(q, non_coherent_mem)) { + /* + * We already have buffers allocated, so first check if they + * are not in use and can be freed. + */ + mutex_lock(&q->mmap_lock); + if (debug && q->memory == VB2_MEMORY_MMAP && + __buffers_in_use(q)) + dprintk(q, 1, "memory in use, orphaning buffers\n"); + + /* + * Call queue_cancel to clean up any buffers in the + * QUEUED state which is possible if buffers were prepared or + * queued without ever calling STREAMON. + */ + __vb2_queue_cancel(q); + __vb2_queue_free(q, q->num_buffers); + mutex_unlock(&q->mmap_lock); + + /* + * In case of REQBUFS(0) return immediately without calling + * driver's queue_setup() callback and allocating resources. + */ + if (*count == 0) + return 0; + } + + /* + * Make sure the requested values and current defaults are sane. + */ + WARN_ON(q->min_buffers_needed > VB2_MAX_FRAME); + num_buffers = max_t(unsigned int, *count, q->min_buffers_needed); + num_buffers = min_t(unsigned int, num_buffers, VB2_MAX_FRAME); + memset(q->alloc_devs, 0, sizeof(q->alloc_devs)); + /* + * Set this now to ensure that drivers see the correct q->memory value + * in the queue_setup op. + */ + mutex_lock(&q->mmap_lock); + q->memory = memory; + mutex_unlock(&q->mmap_lock); + set_queue_coherency(q, non_coherent_mem); + + /* + * Ask the driver how many buffers and planes per buffer it requires. 
+ * Driver also sets the size and allocator context for each plane. + */ + ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes, + plane_sizes, q->alloc_devs); + if (ret) + goto error; + + /* Check that driver has set sane values */ + if (WARN_ON(!num_planes)) { + ret = -EINVAL; + goto error; + } + + for (i = 0; i < num_planes; i++) + if (WARN_ON(!plane_sizes[i])) { + ret = -EINVAL; + goto error; + } + + /* Finally, allocate buffers and video memory */ + allocated_buffers = + __vb2_queue_alloc(q, memory, num_buffers, num_planes, plane_sizes); + if (allocated_buffers == 0) { + dprintk(q, 1, "memory allocation failed\n"); + ret = -ENOMEM; + goto error; + } + + /* + * There is no point in continuing if we can't allocate the minimum + * number of buffers needed by this vb2_queue. + */ + if (allocated_buffers < q->min_buffers_needed) + ret = -ENOMEM; + + /* + * Check if driver can handle the allocated number of buffers. + */ + if (!ret && allocated_buffers < num_buffers) { + num_buffers = allocated_buffers; + /* + * num_planes is set by the previous queue_setup(), but since it + * signals to queue_setup() whether it is called from create_bufs() + * vs reqbufs() we zero it here to signal that queue_setup() is + * called for the reqbufs() case. + */ + num_planes = 0; + + ret = call_qop(q, queue_setup, q, &num_buffers, + &num_planes, plane_sizes, q->alloc_devs); + + if (!ret && allocated_buffers < num_buffers) + ret = -ENOMEM; + + /* + * Either the driver has accepted a smaller number of buffers, + * or .queue_setup() returned an error + */ + } + + mutex_lock(&q->mmap_lock); + q->num_buffers = allocated_buffers; + + if (ret < 0) { + /* + * Note: __vb2_queue_free() will subtract 'allocated_buffers' + * from q->num_buffers and it will reset q->memory to + * VB2_MEMORY_UNKNOWN. + */ + __vb2_queue_free(q, allocated_buffers); + mutex_unlock(&q->mmap_lock); + return ret; + } + mutex_unlock(&q->mmap_lock); + + /* + * Return the number of successfully allocated buffers + * to the userspace. + */ + *count = allocated_buffers; + q->waiting_for_buffers = !q->is_output; + + return 0; + +error: + mutex_lock(&q->mmap_lock); + q->memory = VB2_MEMORY_UNKNOWN; + mutex_unlock(&q->mmap_lock); + return ret; +} +EXPORT_SYMBOL_GPL(vb2_core_reqbufs); + +int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, + unsigned int flags, unsigned int *count, + unsigned int requested_planes, + const unsigned int requested_sizes[]) +{ + unsigned int num_planes = 0, num_buffers, allocated_buffers; + unsigned plane_sizes[VB2_MAX_PLANES] = { }; + bool non_coherent_mem = flags & V4L2_MEMORY_FLAG_NON_COHERENT; + bool no_previous_buffers = !q->num_buffers; + int ret; + + if (q->num_buffers == VB2_MAX_FRAME) { + dprintk(q, 1, "maximum number of buffers already allocated\n"); + return -ENOBUFS; + } + + if (no_previous_buffers) { + if (q->waiting_in_dqbuf && *count) { + dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n"); + return -EBUSY; + } + memset(q->alloc_devs, 0, sizeof(q->alloc_devs)); + /* + * Set this now to ensure that drivers see the correct q->memory + * value in the queue_setup op. 
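
[Editorial note, not part of the patch] vb2_core_reqbufs() above is the backend of the VIDIOC_REQBUFS ioctl: userspace asks for a count, the driver's queue_setup() may adjust it, and the granted count is written back. A hedged userspace sketch of that negotiation; the device path and buffer count are illustrative assumptions:

/* Editorial illustration only: the userspace side of buffer negotiation. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_requestbuffers req;
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.count = 4;					/* request 4 MMAP buffers... */
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory = V4L2_MEMORY_MMAP;

	if (ioctl(fd, VIDIOC_REQBUFS, &req) == 0)
		printf("driver granted %u buffers\n", req.count);	/* ...driver may adjust */

	close(fd);
	return 0;
}
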
+ */ + mutex_lock(&q->mmap_lock); + q->memory = memory; + mutex_unlock(&q->mmap_lock); + q->waiting_for_buffers = !q->is_output; + set_queue_coherency(q, non_coherent_mem); + } else { + if (q->memory != memory) { + dprintk(q, 1, "memory model mismatch\n"); + return -EINVAL; + } + if (!verify_coherency_flags(q, non_coherent_mem)) + return -EINVAL; + } + + num_buffers = min(*count, VB2_MAX_FRAME - q->num_buffers); + + if (requested_planes && requested_sizes) { + num_planes = requested_planes; + memcpy(plane_sizes, requested_sizes, sizeof(plane_sizes)); + } + + /* + * Ask the driver, whether the requested number of buffers, planes per + * buffer and their sizes are acceptable + */ + ret = call_qop(q, queue_setup, q, &num_buffers, + &num_planes, plane_sizes, q->alloc_devs); + if (ret) + goto error; + + /* Finally, allocate buffers and video memory */ + allocated_buffers = __vb2_queue_alloc(q, memory, num_buffers, + num_planes, plane_sizes); + if (allocated_buffers == 0) { + dprintk(q, 1, "memory allocation failed\n"); + ret = -ENOMEM; + goto error; + } + + /* + * Check if driver can handle the so far allocated number of buffers. + */ + if (allocated_buffers < num_buffers) { + num_buffers = allocated_buffers; + + /* + * q->num_buffers contains the total number of buffers, that the + * queue driver has set up + */ + ret = call_qop(q, queue_setup, q, &num_buffers, + &num_planes, plane_sizes, q->alloc_devs); + + if (!ret && allocated_buffers < num_buffers) + ret = -ENOMEM; + + /* + * Either the driver has accepted a smaller number of buffers, + * or .queue_setup() returned an error + */ + } + + mutex_lock(&q->mmap_lock); + q->num_buffers += allocated_buffers; + + if (ret < 0) { + /* + * Note: __vb2_queue_free() will subtract 'allocated_buffers' + * from q->num_buffers and it will reset q->memory to + * VB2_MEMORY_UNKNOWN. + */ + __vb2_queue_free(q, allocated_buffers); + mutex_unlock(&q->mmap_lock); + return -ENOMEM; + } + mutex_unlock(&q->mmap_lock); + + /* + * Return the number of successfully allocated buffers + * to the userspace. + */ + *count = allocated_buffers; + + return 0; + +error: + if (no_previous_buffers) { + mutex_lock(&q->mmap_lock); + q->memory = VB2_MEMORY_UNKNOWN; + mutex_unlock(&q->mmap_lock); + } + return ret; +} +EXPORT_SYMBOL_GPL(vb2_core_create_bufs); + +void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no) +{ + if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv) + return NULL; + + return call_ptr_memop(vaddr, vb, vb->planes[plane_no].mem_priv); + +} +EXPORT_SYMBOL_GPL(vb2_plane_vaddr); + +void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no) +{ + if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv) + return NULL; + + return call_ptr_memop(cookie, vb, vb->planes[plane_no].mem_priv); +} +EXPORT_SYMBOL_GPL(vb2_plane_cookie); + +void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state) +{ + struct vb2_queue *q = vb->vb2_queue; + unsigned long flags; + + if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE)) + return; + + if (WARN_ON(state != VB2_BUF_STATE_DONE && + state != VB2_BUF_STATE_ERROR && + state != VB2_BUF_STATE_QUEUED)) + state = VB2_BUF_STATE_ERROR; + +#ifdef CONFIG_VIDEO_ADV_DEBUG + /* + * Although this is not a callback, it still does have to balance + * with the buf_queue op. So update this counter manually. 
+ */ + vb->cnt_buf_done++; +#endif + dprintk(q, 4, "done processing on buffer %d, state: %s\n", + vb->index, vb2_state_name(state)); + + if (state != VB2_BUF_STATE_QUEUED) + __vb2_buf_mem_finish(vb); + + spin_lock_irqsave(&q->done_lock, flags); + if (state == VB2_BUF_STATE_QUEUED) { + vb->state = VB2_BUF_STATE_QUEUED; + } else { + /* Add the buffer to the done buffers list */ + list_add_tail(&vb->done_entry, &q->done_list); + vb->state = state; + } + atomic_dec(&q->owned_by_drv_count); + + if (state != VB2_BUF_STATE_QUEUED && vb->req_obj.req) { + media_request_object_unbind(&vb->req_obj); + media_request_object_put(&vb->req_obj); + } + + spin_unlock_irqrestore(&q->done_lock, flags); + + trace_vb2_buf_done(q, vb); + + switch (state) { + case VB2_BUF_STATE_QUEUED: + return; + default: + /* Inform any processes that may be waiting for buffers */ + wake_up(&q->done_wq); + break; + } +} +EXPORT_SYMBOL_GPL(vb2_buffer_done); + +void vb2_discard_done(struct vb2_queue *q) +{ + struct vb2_buffer *vb; + unsigned long flags; + + spin_lock_irqsave(&q->done_lock, flags); + list_for_each_entry(vb, &q->done_list, done_entry) + vb->state = VB2_BUF_STATE_ERROR; + spin_unlock_irqrestore(&q->done_lock, flags); +} +EXPORT_SYMBOL_GPL(vb2_discard_done); + +/* + * __prepare_mmap() - prepare an MMAP buffer + */ +static int __prepare_mmap(struct vb2_buffer *vb) +{ + int ret = 0; + + ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, + vb, vb->planes); + return ret ? ret : call_vb_qop(vb, buf_prepare, vb); +} + +/* + * __prepare_userptr() - prepare a USERPTR buffer + */ +static int __prepare_userptr(struct vb2_buffer *vb) +{ + struct vb2_plane planes[VB2_MAX_PLANES]; + struct vb2_queue *q = vb->vb2_queue; + void *mem_priv; + unsigned int plane; + int ret = 0; + bool reacquired = vb->planes[0].mem_priv == NULL; + + memset(planes, 0, sizeof(planes[0]) * vb->num_planes); + /* Copy relevant information provided by the userspace */ + ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, + vb, planes); + if (ret) + return ret; + + for (plane = 0; plane < vb->num_planes; ++plane) { + /* Skip the plane if already verified */ + if (vb->planes[plane].m.userptr && + vb->planes[plane].m.userptr == planes[plane].m.userptr + && vb->planes[plane].length == planes[plane].length) + continue; + + dprintk(q, 3, "userspace address for plane %d changed, reacquiring memory\n", + plane); + + /* Check if the provided plane buffer is large enough */ + if (planes[plane].length < vb->planes[plane].min_length) { + dprintk(q, 1, "provided buffer size %u is less than setup size %u for plane %d\n", + planes[plane].length, + vb->planes[plane].min_length, + plane); + ret = -EINVAL; + goto err; + } + + /* Release previously acquired memory if present */ + if (vb->planes[plane].mem_priv) { + if (!reacquired) { + reacquired = true; + vb->copied_timestamp = 0; + call_void_vb_qop(vb, buf_cleanup, vb); + } + call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv); + } + + vb->planes[plane].mem_priv = NULL; + vb->planes[plane].bytesused = 0; + vb->planes[plane].length = 0; + vb->planes[plane].m.userptr = 0; + vb->planes[plane].data_offset = 0; + + /* Acquire each plane's memory */ + mem_priv = call_ptr_memop(get_userptr, + vb, + q->alloc_devs[plane] ? 
: q->dev, + planes[plane].m.userptr, + planes[plane].length); + if (IS_ERR(mem_priv)) { + dprintk(q, 1, "failed acquiring userspace memory for plane %d\n", + plane); + ret = PTR_ERR(mem_priv); + goto err; + } + vb->planes[plane].mem_priv = mem_priv; + } + + /* + * Now that everything is in order, copy relevant information + * provided by userspace. + */ + for (plane = 0; plane < vb->num_planes; ++plane) { + vb->planes[plane].bytesused = planes[plane].bytesused; + vb->planes[plane].length = planes[plane].length; + vb->planes[plane].m.userptr = planes[plane].m.userptr; + vb->planes[plane].data_offset = planes[plane].data_offset; + } + + if (reacquired) { + /* + * One or more planes changed, so we must call buf_init to do + * the driver-specific initialization on the newly acquired + * buffer, if provided. + */ + ret = call_vb_qop(vb, buf_init, vb); + if (ret) { + dprintk(q, 1, "buffer initialization failed\n"); + goto err; + } + } + + ret = call_vb_qop(vb, buf_prepare, vb); + if (ret) { + dprintk(q, 1, "buffer preparation failed\n"); + call_void_vb_qop(vb, buf_cleanup, vb); + goto err; + } + + return 0; +err: + /* In case of errors, release planes that were already acquired */ + for (plane = 0; plane < vb->num_planes; ++plane) { + if (vb->planes[plane].mem_priv) + call_void_memop(vb, put_userptr, + vb->planes[plane].mem_priv); + vb->planes[plane].mem_priv = NULL; + vb->planes[plane].m.userptr = 0; + vb->planes[plane].length = 0; + } + + return ret; +} + +/* + * __prepare_dmabuf() - prepare a DMABUF buffer + */ +static int __prepare_dmabuf(struct vb2_buffer *vb) +{ + struct vb2_plane planes[VB2_MAX_PLANES]; + struct vb2_queue *q = vb->vb2_queue; + void *mem_priv; + unsigned int plane; + int ret = 0; + bool reacquired = vb->planes[0].mem_priv == NULL; + + memset(planes, 0, sizeof(planes[0]) * vb->num_planes); + /* Copy relevant information provided by the userspace */ + ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, + vb, planes); + if (ret) + return ret; + + for (plane = 0; plane < vb->num_planes; ++plane) { + struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd); + + if (IS_ERR_OR_NULL(dbuf)) { + dprintk(q, 1, "invalid dmabuf fd for plane %d\n", + plane); + ret = -EINVAL; + goto err; + } + + /* use DMABUF size if length is not provided */ + if (planes[plane].length == 0) + planes[plane].length = dbuf->size; + + if (planes[plane].length < vb->planes[plane].min_length) { + dprintk(q, 1, "invalid dmabuf length %u for plane %d, minimum length %u\n", + planes[plane].length, plane, + vb->planes[plane].min_length); + dma_buf_put(dbuf); + ret = -EINVAL; + goto err; + } + + /* Skip the plane if already verified */ + if (dbuf == vb->planes[plane].dbuf && + vb->planes[plane].length == planes[plane].length) { + dma_buf_put(dbuf); + continue; + } + + dprintk(q, 3, "buffer for plane %d changed\n", plane); + + if (!reacquired) { + reacquired = true; + vb->copied_timestamp = 0; + call_void_vb_qop(vb, buf_cleanup, vb); + } + + /* Release previously acquired memory if present */ + __vb2_plane_dmabuf_put(vb, &vb->planes[plane]); + vb->planes[plane].bytesused = 0; + vb->planes[plane].length = 0; + vb->planes[plane].m.fd = 0; + vb->planes[plane].data_offset = 0; + + /* Acquire each plane's memory */ + mem_priv = call_ptr_memop(attach_dmabuf, + vb, + q->alloc_devs[plane] ? 
: q->dev, + dbuf, + planes[plane].length); + if (IS_ERR(mem_priv)) { + dprintk(q, 1, "failed to attach dmabuf\n"); + ret = PTR_ERR(mem_priv); + dma_buf_put(dbuf); + goto err; + } + + vb->planes[plane].dbuf = dbuf; + vb->planes[plane].mem_priv = mem_priv; + } + + /* + * This pins the buffer(s) with dma_buf_map_attachment()). It's done + * here instead just before the DMA, while queueing the buffer(s) so + * userspace knows sooner rather than later if the dma-buf map fails. + */ + for (plane = 0; plane < vb->num_planes; ++plane) { + if (vb->planes[plane].dbuf_mapped) + continue; + + ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv); + if (ret) { + dprintk(q, 1, "failed to map dmabuf for plane %d\n", + plane); + goto err; + } + vb->planes[plane].dbuf_mapped = 1; + } + + /* + * Now that everything is in order, copy relevant information + * provided by userspace. + */ + for (plane = 0; plane < vb->num_planes; ++plane) { + vb->planes[plane].bytesused = planes[plane].bytesused; + vb->planes[plane].length = planes[plane].length; + vb->planes[plane].m.fd = planes[plane].m.fd; + vb->planes[plane].data_offset = planes[plane].data_offset; + } + + if (reacquired) { + /* + * Call driver-specific initialization on the newly acquired buffer, + * if provided. + */ + ret = call_vb_qop(vb, buf_init, vb); + if (ret) { + dprintk(q, 1, "buffer initialization failed\n"); + goto err; + } + } + + ret = call_vb_qop(vb, buf_prepare, vb); + if (ret) { + dprintk(q, 1, "buffer preparation failed\n"); + call_void_vb_qop(vb, buf_cleanup, vb); + goto err; + } + + return 0; +err: + /* In case of errors, release planes that were already acquired */ + __vb2_buf_dmabuf_put(vb); + + return ret; +} + +/* + * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing + */ +static void __enqueue_in_driver(struct vb2_buffer *vb) +{ + struct vb2_queue *q = vb->vb2_queue; + + vb->state = VB2_BUF_STATE_ACTIVE; + atomic_inc(&q->owned_by_drv_count); + + trace_vb2_buf_queue(q, vb); + + call_void_vb_qop(vb, buf_queue, vb); +} + +static int __buf_prepare(struct vb2_buffer *vb) +{ + struct vb2_queue *q = vb->vb2_queue; + enum vb2_buffer_state orig_state = vb->state; + int ret; + + if (q->error) { + dprintk(q, 1, "fatal error occurred on queue\n"); + return -EIO; + } + + if (vb->prepared) + return 0; + WARN_ON(vb->synced); + + if (q->is_output) { + ret = call_vb_qop(vb, buf_out_validate, vb); + if (ret) { + dprintk(q, 1, "buffer validation failed\n"); + return ret; + } + } + + vb->state = VB2_BUF_STATE_PREPARING; + + switch (q->memory) { + case VB2_MEMORY_MMAP: + ret = __prepare_mmap(vb); + break; + case VB2_MEMORY_USERPTR: + ret = __prepare_userptr(vb); + break; + case VB2_MEMORY_DMABUF: + ret = __prepare_dmabuf(vb); + break; + default: + WARN(1, "Invalid queue type\n"); + ret = -EINVAL; + break; + } + + if (ret) { + dprintk(q, 1, "buffer preparation failed: %d\n", ret); + vb->state = orig_state; + return ret; + } + + __vb2_buf_mem_prepare(vb); + vb->prepared = 1; + vb->state = orig_state; + + return 0; +} + +static int vb2_req_prepare(struct media_request_object *obj) +{ + struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); + int ret; + + if (WARN_ON(vb->state != VB2_BUF_STATE_IN_REQUEST)) + return -EINVAL; + + mutex_lock(vb->vb2_queue->lock); + ret = __buf_prepare(vb); + mutex_unlock(vb->vb2_queue->lock); + return ret; +} + +static void __vb2_dqbuf(struct vb2_buffer *vb); + +static void vb2_req_unprepare(struct media_request_object *obj) +{ + struct vb2_buffer *vb = container_of(obj, struct 
vb2_buffer, req_obj); + + mutex_lock(vb->vb2_queue->lock); + __vb2_dqbuf(vb); + vb->state = VB2_BUF_STATE_IN_REQUEST; + mutex_unlock(vb->vb2_queue->lock); + WARN_ON(!vb->req_obj.req); +} + +int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb, + struct media_request *req); + +static void vb2_req_queue(struct media_request_object *obj) +{ + struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); + int err; + + mutex_lock(vb->vb2_queue->lock); + /* + * There is no method to propagate an error from vb2_core_qbuf(), + * so if this returns a non-0 value, then WARN. + * + * The only exception is -EIO which is returned if q->error is + * set. We just ignore that, and expect this will be caught the + * next time vb2_req_prepare() is called. + */ + err = vb2_core_qbuf(vb->vb2_queue, vb->index, NULL, NULL); + WARN_ON_ONCE(err && err != -EIO); + mutex_unlock(vb->vb2_queue->lock); +} + +static void vb2_req_unbind(struct media_request_object *obj) +{ + struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); + + if (vb->state == VB2_BUF_STATE_IN_REQUEST) + call_void_bufop(vb->vb2_queue, init_buffer, vb); +} + +static void vb2_req_release(struct media_request_object *obj) +{ + struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); + + if (vb->state == VB2_BUF_STATE_IN_REQUEST) { + vb->state = VB2_BUF_STATE_DEQUEUED; + if (vb->request) + media_request_put(vb->request); + vb->request = NULL; + } +} + +static const struct media_request_object_ops vb2_core_req_ops = { + .prepare = vb2_req_prepare, + .unprepare = vb2_req_unprepare, + .queue = vb2_req_queue, + .unbind = vb2_req_unbind, + .release = vb2_req_release, +}; + +bool vb2_request_object_is_buffer(struct media_request_object *obj) +{ + return obj->ops == &vb2_core_req_ops; +} +EXPORT_SYMBOL_GPL(vb2_request_object_is_buffer); + +unsigned int vb2_request_buffer_cnt(struct media_request *req) +{ + struct media_request_object *obj; + unsigned long flags; + unsigned int buffer_cnt = 0; + + spin_lock_irqsave(&req->lock, flags); + list_for_each_entry(obj, &req->objects, list) + if (vb2_request_object_is_buffer(obj)) + buffer_cnt++; + spin_unlock_irqrestore(&req->lock, flags); + + return buffer_cnt; +} +EXPORT_SYMBOL_GPL(vb2_request_buffer_cnt); + +int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb) +{ + struct vb2_buffer *vb; + int ret; + + vb = q->bufs[index]; + if (vb->state != VB2_BUF_STATE_DEQUEUED) { + dprintk(q, 1, "invalid buffer state %s\n", + vb2_state_name(vb->state)); + return -EINVAL; + } + if (vb->prepared) { + dprintk(q, 1, "buffer already prepared\n"); + return -EINVAL; + } + + ret = __buf_prepare(vb); + if (ret) + return ret; + + /* Fill buffer information for the userspace */ + call_void_bufop(q, fill_user_buffer, vb, pb); + + dprintk(q, 2, "prepare of buffer %d succeeded\n", vb->index); + + return 0; +} +EXPORT_SYMBOL_GPL(vb2_core_prepare_buf); + +/* + * vb2_start_streaming() - Attempt to start streaming. + * @q: videobuf2 queue + * + * Attempt to start streaming. When this function is called there must be + * at least q->min_buffers_needed buffers queued up (i.e. the minimum + * number of buffers required for the DMA engine to function). If the + * @start_streaming op fails it is supposed to return all the driver-owned + * buffers back to vb2 in state QUEUED. Check if that happened and if + * not warn and reclaim them forcefully. 
+ */ +static int vb2_start_streaming(struct vb2_queue *q) +{ + struct vb2_buffer *vb; + int ret; + + /* + * If any buffers were queued before streamon, + * we can now pass them to driver for processing. + */ + list_for_each_entry(vb, &q->queued_list, queued_entry) + __enqueue_in_driver(vb); + + /* Tell the driver to start streaming */ + q->start_streaming_called = 1; + ret = call_qop(q, start_streaming, q, + atomic_read(&q->owned_by_drv_count)); + if (!ret) + return 0; + + q->start_streaming_called = 0; + + dprintk(q, 1, "driver refused to start streaming\n"); + /* + * If you see this warning, then the driver isn't cleaning up properly + * after a failed start_streaming(). See the start_streaming() + * documentation in videobuf2-core.h for more information how buffers + * should be returned to vb2 in start_streaming(). + */ + if (WARN_ON(atomic_read(&q->owned_by_drv_count))) { + unsigned i; + + /* + * Forcefully reclaim buffers if the driver did not + * correctly return them to vb2. + */ + for (i = 0; i < q->num_buffers; ++i) { + vb = q->bufs[i]; + if (vb->state == VB2_BUF_STATE_ACTIVE) + vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED); + } + /* Must be zero now */ + WARN_ON(atomic_read(&q->owned_by_drv_count)); + } + /* + * If done_list is not empty, then start_streaming() didn't call + * vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED) but STATE_ERROR or + * STATE_DONE. + */ + WARN_ON(!list_empty(&q->done_list)); + return ret; +} + +int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb, + struct media_request *req) +{ + struct vb2_buffer *vb; + enum vb2_buffer_state orig_state; + int ret; + + if (q->error) { + dprintk(q, 1, "fatal error occurred on queue\n"); + return -EIO; + } + + vb = q->bufs[index]; + + if (!req && vb->state != VB2_BUF_STATE_IN_REQUEST && + q->requires_requests) { + dprintk(q, 1, "qbuf requires a request\n"); + return -EBADR; + } + + if ((req && q->uses_qbuf) || + (!req && vb->state != VB2_BUF_STATE_IN_REQUEST && + q->uses_requests)) { + dprintk(q, 1, "queue in wrong mode (qbuf vs requests)\n"); + return -EBUSY; + } + + if (req) { + int ret; + + q->uses_requests = 1; + if (vb->state != VB2_BUF_STATE_DEQUEUED) { + dprintk(q, 1, "buffer %d not in dequeued state\n", + vb->index); + return -EINVAL; + } + + if (q->is_output && !vb->prepared) { + ret = call_vb_qop(vb, buf_out_validate, vb); + if (ret) { + dprintk(q, 1, "buffer validation failed\n"); + return ret; + } + } + + media_request_object_init(&vb->req_obj); + + /* Make sure the request is in a safe state for updating. */ + ret = media_request_lock_for_update(req); + if (ret) + return ret; + ret = media_request_object_bind(req, &vb2_core_req_ops, + q, true, &vb->req_obj); + media_request_unlock_for_update(req); + if (ret) + return ret; + + vb->state = VB2_BUF_STATE_IN_REQUEST; + + /* + * Increment the refcount and store the request. + * The request refcount is decremented again when the + * buffer is dequeued. This is to prevent vb2_buffer_done() + * from freeing the request from interrupt context, which can + * happen if the application closed the request fd after + * queueing the request. 
+ */ + media_request_get(req); + vb->request = req; + + /* Fill buffer information for the userspace */ + if (pb) { + call_void_bufop(q, copy_timestamp, vb, pb); + call_void_bufop(q, fill_user_buffer, vb, pb); + } + + dprintk(q, 2, "qbuf of buffer %d succeeded\n", vb->index); + return 0; + } + + if (vb->state != VB2_BUF_STATE_IN_REQUEST) + q->uses_qbuf = 1; + + switch (vb->state) { + case VB2_BUF_STATE_DEQUEUED: + case VB2_BUF_STATE_IN_REQUEST: + if (!vb->prepared) { + ret = __buf_prepare(vb); + if (ret) + return ret; + } + break; + case VB2_BUF_STATE_PREPARING: + dprintk(q, 1, "buffer still being prepared\n"); + return -EINVAL; + default: + dprintk(q, 1, "invalid buffer state %s\n", + vb2_state_name(vb->state)); + return -EINVAL; + } + + /* + * Add to the queued buffers list, a buffer will stay on it until + * dequeued in dqbuf. + */ + orig_state = vb->state; + list_add_tail(&vb->queued_entry, &q->queued_list); + q->queued_count++; + q->waiting_for_buffers = false; + vb->state = VB2_BUF_STATE_QUEUED; + + if (pb) + call_void_bufop(q, copy_timestamp, vb, pb); + + trace_vb2_qbuf(q, vb); + + /* + * If already streaming, give the buffer to driver for processing. + * If not, the buffer will be given to driver on next streamon. + */ + if (q->start_streaming_called) + __enqueue_in_driver(vb); + + /* Fill buffer information for the userspace */ + if (pb) + call_void_bufop(q, fill_user_buffer, vb, pb); + + /* + * If streamon has been called, and we haven't yet called + * start_streaming() since not enough buffers were queued, and + * we now have reached the minimum number of queued buffers, + * then we can finally call start_streaming(). + */ + if (q->streaming && !q->start_streaming_called && + q->queued_count >= q->min_buffers_needed) { + ret = vb2_start_streaming(q); + if (ret) { + /* + * Since vb2_core_qbuf will return with an error, + * we should return it to state DEQUEUED since + * the error indicates that the buffer wasn't queued. + */ + list_del(&vb->queued_entry); + q->queued_count--; + vb->state = orig_state; + return ret; + } + } + + dprintk(q, 2, "qbuf of buffer %d succeeded\n", vb->index); + return 0; +} +EXPORT_SYMBOL_GPL(vb2_core_qbuf); + +/* + * __vb2_wait_for_done_vb() - wait for a buffer to become available + * for dequeuing + * + * Will sleep if required for nonblocking == false. + */ +static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking) +{ + /* + * All operations on vb_done_list are performed under done_lock + * spinlock protection. However, buffers may be removed from + * it and returned to userspace only while holding both driver's + * lock and the done_lock spinlock. Thus we can be sure that as + * long as we hold the driver's lock, the list will remain not + * empty if list_empty() check succeeds. + */ + + for (;;) { + int ret; + + if (q->waiting_in_dqbuf) { + dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n"); + return -EBUSY; + } + + if (!q->streaming) { + dprintk(q, 1, "streaming off, will not wait for buffers\n"); + return -EINVAL; + } + + if (q->error) { + dprintk(q, 1, "Queue in error state, will not wait for buffers\n"); + return -EIO; + } + + if (q->last_buffer_dequeued) { + dprintk(q, 3, "last buffer dequeued already, will not wait for buffers\n"); + return -EPIPE; + } + + if (!list_empty(&q->done_list)) { + /* + * Found a buffer that we were waiting for. 
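Buffers land on done_list through the driver's completion path: when the hardware finishes with a buffer, the driver calls vb2_buffer_done(), which moves the buffer onto done_list and wakes done_wq so the dequeue side here can proceed. A hedged sketch of such a completion handler, where the "mydrv" names and frame_size are hypothetical:

    static irqreturn_t mydrv_irq(int irq, void *priv)
    {
            struct mydrv_dev *dev = priv;
            struct mydrv_buf *buf = mydrv_take_current_buffer(dev); /* hypothetical */

            /* Record how many bytes the hardware wrote into plane 0. */
            vb2_set_plane_payload(&buf->vb.vb2_buf, 0, dev->frame_size);
            buf->vb.vb2_buf.timestamp = ktime_get_ns();

            /* Hand the buffer back to vb2; this wakes waiters on done_wq. */
            vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
            return IRQ_HANDLED;
    }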
+ */ + break; + } + + if (nonblocking) { + dprintk(q, 3, "nonblocking and no buffers to dequeue, will not wait\n"); + return -EAGAIN; + } + + q->waiting_in_dqbuf = 1; + /* + * We are streaming and blocking, wait for another buffer to + * become ready or for streamoff. Driver's lock is released to + * allow streamoff or qbuf to be called while waiting. + */ + call_void_qop(q, wait_prepare, q); + + /* + * All locks have been released, it is safe to sleep now. + */ + dprintk(q, 3, "will sleep waiting for buffers\n"); + ret = wait_event_interruptible(q->done_wq, + !list_empty(&q->done_list) || !q->streaming || + q->error); + + /* + * We need to reevaluate both conditions again after reacquiring + * the locks or return an error if one occurred. + */ + call_void_qop(q, wait_finish, q); + q->waiting_in_dqbuf = 0; + if (ret) { + dprintk(q, 1, "sleep was interrupted\n"); + return ret; + } + } + return 0; +} + +/* + * __vb2_get_done_vb() - get a buffer ready for dequeuing + * + * Will sleep if required for nonblocking == false. + */ +static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb, + void *pb, int nonblocking) +{ + unsigned long flags; + int ret = 0; + + /* + * Wait for at least one buffer to become available on the done_list. + */ + ret = __vb2_wait_for_done_vb(q, nonblocking); + if (ret) + return ret; + + /* + * Driver's lock has been held since we last verified that done_list + * is not empty, so no need for another list_empty(done_list) check. + */ + spin_lock_irqsave(&q->done_lock, flags); + *vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry); + /* + * Only remove the buffer from done_list if all planes can be + * handled. Some cases such as V4L2 file I/O and DVB have pb + * == NULL; skip the check then as there's nothing to verify. 
+ */ + if (pb) + ret = call_bufop(q, verify_planes_array, *vb, pb); + if (!ret) + list_del(&(*vb)->done_entry); + spin_unlock_irqrestore(&q->done_lock, flags); + + return ret; +} + +int vb2_wait_for_all_buffers(struct vb2_queue *q) +{ + if (!q->streaming) { + dprintk(q, 1, "streaming off, will not wait for buffers\n"); + return -EINVAL; + } + + if (q->start_streaming_called) + wait_event(q->done_wq, !atomic_read(&q->owned_by_drv_count)); + return 0; +} +EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers); + +/* + * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state + */ +static void __vb2_dqbuf(struct vb2_buffer *vb) +{ + struct vb2_queue *q = vb->vb2_queue; + + /* nothing to do if the buffer is already dequeued */ + if (vb->state == VB2_BUF_STATE_DEQUEUED) + return; + + vb->state = VB2_BUF_STATE_DEQUEUED; + + call_void_bufop(q, init_buffer, vb); +} + +int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb, + bool nonblocking) +{ + struct vb2_buffer *vb = NULL; + int ret; + + ret = __vb2_get_done_vb(q, &vb, pb, nonblocking); + if (ret < 0) + return ret; + + switch (vb->state) { + case VB2_BUF_STATE_DONE: + dprintk(q, 3, "returning done buffer\n"); + break; + case VB2_BUF_STATE_ERROR: + dprintk(q, 3, "returning done buffer with errors\n"); + break; + default: + dprintk(q, 1, "invalid buffer state %s\n", + vb2_state_name(vb->state)); + return -EINVAL; + } + + call_void_vb_qop(vb, buf_finish, vb); + vb->prepared = 0; + + if (pindex) + *pindex = vb->index; + + /* Fill buffer information for the userspace */ + if (pb) + call_void_bufop(q, fill_user_buffer, vb, pb); + + /* Remove from vb2 queue */ + list_del(&vb->queued_entry); + q->queued_count--; + + trace_vb2_dqbuf(q, vb); + + /* go back to dequeued state */ + __vb2_dqbuf(vb); + + if (WARN_ON(vb->req_obj.req)) { + media_request_object_unbind(&vb->req_obj); + media_request_object_put(&vb->req_obj); + } + if (vb->request) + media_request_put(vb->request); + vb->request = NULL; + + dprintk(q, 2, "dqbuf of buffer %d, state: %s\n", + vb->index, vb2_state_name(vb->state)); + + return 0; + +} +EXPORT_SYMBOL_GPL(vb2_core_dqbuf); + +/* + * __vb2_queue_cancel() - cancel and stop (pause) streaming + * + * Removes all queued buffers from driver's queue and all buffers queued by + * userspace from vb2's queue. Returns to state after reqbufs. + */ +static void __vb2_queue_cancel(struct vb2_queue *q) +{ + unsigned int i; + + /* + * Tell driver to stop all transactions and release all queued + * buffers. + */ + if (q->start_streaming_called) + call_void_qop(q, stop_streaming, q); + + if (q->streaming) + call_void_qop(q, unprepare_streaming, q); + + /* + * If you see this warning, then the driver isn't cleaning up properly + * in stop_streaming(). See the stop_streaming() documentation in + * videobuf2-core.h for more information how buffers should be returned + * to vb2 in stop_streaming(). + */ + if (WARN_ON(atomic_read(&q->owned_by_drv_count))) { + for (i = 0; i < q->num_buffers; ++i) + if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE) { + pr_warn("driver bug: stop_streaming operation is leaving buf %p in active state\n", + q->bufs[i]); + vb2_buffer_done(q->bufs[i], VB2_BUF_STATE_ERROR); + } + /* Must be zero now */ + WARN_ON(atomic_read(&q->owned_by_drv_count)); + } + + q->streaming = 0; + q->start_streaming_called = 0; + q->queued_count = 0; + q->error = 0; + q->uses_requests = 0; + q->uses_qbuf = 0; + + /* + * Remove all buffers from vb2's list... 
+ */ + INIT_LIST_HEAD(&q->queued_list); + /* + * ...and done list; userspace will not receive any buffers it + * has not already dequeued before initiating cancel. + */ + INIT_LIST_HEAD(&q->done_list); + atomic_set(&q->owned_by_drv_count, 0); + wake_up_all(&q->done_wq); + + /* + * Reinitialize all buffers for next use. + * Make sure to call buf_finish for any queued buffers. Normally + * that's done in dqbuf, but that's not going to happen when we + * cancel the whole queue. Note: this code belongs here, not in + * __vb2_dqbuf() since in vb2_core_dqbuf() there is a critical + * call to __fill_user_buffer() after buf_finish(). That order can't + * be changed, so we can't move the buf_finish() to __vb2_dqbuf(). + */ + for (i = 0; i < q->num_buffers; ++i) { + struct vb2_buffer *vb = q->bufs[i]; + struct media_request *req = vb->req_obj.req; + + /* + * If a request is associated with this buffer, then + * call buf_request_cancel() to give the driver to complete() + * related request objects. Otherwise those objects would + * never complete. + */ + if (req) { + enum media_request_state state; + unsigned long flags; + + spin_lock_irqsave(&req->lock, flags); + state = req->state; + spin_unlock_irqrestore(&req->lock, flags); + + if (state == MEDIA_REQUEST_STATE_QUEUED) + call_void_vb_qop(vb, buf_request_complete, vb); + } + + __vb2_buf_mem_finish(vb); + + if (vb->prepared) { + call_void_vb_qop(vb, buf_finish, vb); + vb->prepared = 0; + } + __vb2_dqbuf(vb); + + if (vb->req_obj.req) { + media_request_object_unbind(&vb->req_obj); + media_request_object_put(&vb->req_obj); + } + if (vb->request) + media_request_put(vb->request); + vb->request = NULL; + vb->copied_timestamp = 0; + } +} + +int vb2_core_streamon(struct vb2_queue *q, unsigned int type) +{ + int ret; + + if (type != q->type) { + dprintk(q, 1, "invalid stream type\n"); + return -EINVAL; + } + + if (q->streaming) { + dprintk(q, 3, "already streaming\n"); + return 0; + } + + if (!q->num_buffers) { + dprintk(q, 1, "no buffers have been allocated\n"); + return -EINVAL; + } + + if (q->num_buffers < q->min_buffers_needed) { + dprintk(q, 1, "need at least %u allocated buffers\n", + q->min_buffers_needed); + return -EINVAL; + } + + ret = call_qop(q, prepare_streaming, q); + if (ret) + return ret; + + /* + * Tell driver to start streaming provided sufficient buffers + * are available. + */ + if (q->queued_count >= q->min_buffers_needed) { + ret = vb2_start_streaming(q); + if (ret) + goto unprepare; + } + + q->streaming = 1; + + dprintk(q, 3, "successful\n"); + return 0; + +unprepare: + call_void_qop(q, unprepare_streaming, q); + return ret; +} +EXPORT_SYMBOL_GPL(vb2_core_streamon); + +void vb2_queue_error(struct vb2_queue *q) +{ + q->error = 1; + + wake_up_all(&q->done_wq); +} +EXPORT_SYMBOL_GPL(vb2_queue_error); + +int vb2_core_streamoff(struct vb2_queue *q, unsigned int type) +{ + if (type != q->type) { + dprintk(q, 1, "invalid stream type\n"); + return -EINVAL; + } + + /* + * Cancel will pause streaming and remove all buffers from the driver + * and vb2, effectively returning control over them to userspace. + * + * Note that we do this even if q->streaming == 0: if you prepare or + * queue buffers, and then call streamoff without ever having called + * streamon, you would still expect those buffers to be returned to + * their normal dequeued state. 
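__vb2_queue_cancel() above depends on the driver honouring the stop_streaming() contract: every buffer still owned by the hardware must be returned before that op completes. A minimal sketch of the driver-side counterpart, again with hypothetical "mydrv" names:

    static void mydrv_stop_streaming(struct vb2_queue *q)
    {
            struct mydrv_dev *dev = vb2_get_drv_priv(q);
            struct mydrv_buf *buf, *tmp;

            mydrv_hw_stop(dev);     /* hypothetical: quiesce DMA first */

            /* Return every buffer the driver still owns; ERROR is fine here. */
            list_for_each_entry_safe(buf, tmp, &dev->buf_list, list) {
                    list_del(&buf->list);
                    vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
            }
    }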
+ */ + __vb2_queue_cancel(q); + q->waiting_for_buffers = !q->is_output; + q->last_buffer_dequeued = false; + + dprintk(q, 3, "successful\n"); + return 0; +} +EXPORT_SYMBOL_GPL(vb2_core_streamoff); + +/* + * __find_plane_by_offset() - find plane associated with the given offset off + */ +static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off, + unsigned int *_buffer, unsigned int *_plane) +{ + struct vb2_buffer *vb; + unsigned int buffer, plane; + + /* + * Sanity checks to ensure the lock is held, MEMORY_MMAP is + * used and fileio isn't active. + */ + lockdep_assert_held(&q->mmap_lock); + + if (q->memory != VB2_MEMORY_MMAP) { + dprintk(q, 1, "queue is not currently set up for mmap\n"); + return -EINVAL; + } + + if (vb2_fileio_is_active(q)) { + dprintk(q, 1, "file io in progress\n"); + return -EBUSY; + } + + /* + * Go over all buffers and their planes, comparing the given offset + * with an offset assigned to each plane. If a match is found, + * return its buffer and plane numbers. + */ + for (buffer = 0; buffer < q->num_buffers; ++buffer) { + vb = q->bufs[buffer]; + + for (plane = 0; plane < vb->num_planes; ++plane) { + if (vb->planes[plane].m.offset == off) { + *_buffer = buffer; + *_plane = plane; + return 0; + } + } + } + + return -EINVAL; +} + +int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type, + unsigned int index, unsigned int plane, unsigned int flags) +{ + struct vb2_buffer *vb = NULL; + struct vb2_plane *vb_plane; + int ret; + struct dma_buf *dbuf; + + if (q->memory != VB2_MEMORY_MMAP) { + dprintk(q, 1, "queue is not currently set up for mmap\n"); + return -EINVAL; + } + + if (!q->mem_ops->get_dmabuf) { + dprintk(q, 1, "queue does not support DMA buffer exporting\n"); + return -EINVAL; + } + + if (flags & ~(O_CLOEXEC | O_ACCMODE)) { + dprintk(q, 1, "queue does support only O_CLOEXEC and access mode flags\n"); + return -EINVAL; + } + + if (type != q->type) { + dprintk(q, 1, "invalid buffer type\n"); + return -EINVAL; + } + + if (index >= q->num_buffers) { + dprintk(q, 1, "buffer index out of range\n"); + return -EINVAL; + } + + vb = q->bufs[index]; + + if (plane >= vb->num_planes) { + dprintk(q, 1, "buffer plane out of range\n"); + return -EINVAL; + } + + if (vb2_fileio_is_active(q)) { + dprintk(q, 1, "expbuf: file io in progress\n"); + return -EBUSY; + } + + vb_plane = &vb->planes[plane]; + + dbuf = call_ptr_memop(get_dmabuf, + vb, + vb_plane->mem_priv, + flags & O_ACCMODE); + if (IS_ERR_OR_NULL(dbuf)) { + dprintk(q, 1, "failed to export buffer %d, plane %d\n", + index, plane); + return -EINVAL; + } + + ret = dma_buf_fd(dbuf, flags & ~O_ACCMODE); + if (ret < 0) { + dprintk(q, 3, "buffer %d, plane %d failed to export (%d)\n", + index, plane, ret); + dma_buf_put(dbuf); + return ret; + } + + dprintk(q, 3, "buffer %d, plane %d exported as %d descriptor\n", + index, plane, ret); + *fd = ret; + + return 0; +} +EXPORT_SYMBOL_GPL(vb2_core_expbuf); + +int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma) +{ + unsigned long off = vma->vm_pgoff << PAGE_SHIFT; + struct vb2_buffer *vb; + unsigned int buffer = 0, plane = 0; + int ret; + unsigned long length; + + /* + * Check memory area access mode. 
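From userspace, the offset matched by __find_plane_by_offset() is the opaque m.offset cookie reported by VIDIOC_QUERYBUF; it is only meaningful as an mmap() offset, never as an offset into the buffer. A hedged sketch of the usual single-planar capture mapping (headers and error handling omitted, fd is an open video node):

    struct v4l2_buffer buf = {
            .type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
            .memory = V4L2_MEMORY_MMAP,
            .index  = 0,
    };

    ioctl(fd, VIDIOC_QUERYBUF, &buf);

    /* MAP_SHARED is mandatory, matching the VM_SHARED check below. */
    void *mem = mmap(NULL, buf.length, PROT_READ, MAP_SHARED,
                     fd, buf.m.offset);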
+ */ + if (!(vma->vm_flags & VM_SHARED)) { + dprintk(q, 1, "invalid vma flags, VM_SHARED needed\n"); + return -EINVAL; + } + if (q->is_output) { + if (!(vma->vm_flags & VM_WRITE)) { + dprintk(q, 1, "invalid vma flags, VM_WRITE needed\n"); + return -EINVAL; + } + } else { + if (!(vma->vm_flags & VM_READ)) { + dprintk(q, 1, "invalid vma flags, VM_READ needed\n"); + return -EINVAL; + } + } + + mutex_lock(&q->mmap_lock); + + /* + * Find the plane corresponding to the offset passed by userspace. This + * will return an error if not MEMORY_MMAP or file I/O is in progress. + */ + ret = __find_plane_by_offset(q, off, &buffer, &plane); + if (ret) + goto unlock; + + vb = q->bufs[buffer]; + + /* + * MMAP requires page_aligned buffers. + * The buffer length was page_aligned at __vb2_buf_mem_alloc(), + * so, we need to do the same here. + */ + length = PAGE_ALIGN(vb->planes[plane].length); + if (length < (vma->vm_end - vma->vm_start)) { + dprintk(q, 1, + "MMAP invalid, as it would overflow buffer length\n"); + ret = -EINVAL; + goto unlock; + } + + /* + * vm_pgoff is treated in V4L2 API as a 'cookie' to select a buffer, + * not as a in-buffer offset. We always want to mmap a whole buffer + * from its beginning. + */ + vma->vm_pgoff = 0; + + ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma); + +unlock: + mutex_unlock(&q->mmap_lock); + if (ret) + return ret; + + dprintk(q, 3, "buffer %d, plane %d successfully mapped\n", buffer, plane); + return 0; +} +EXPORT_SYMBOL_GPL(vb2_mmap); + +#ifndef CONFIG_MMU +unsigned long vb2_get_unmapped_area(struct vb2_queue *q, + unsigned long addr, + unsigned long len, + unsigned long pgoff, + unsigned long flags) +{ + unsigned long off = pgoff << PAGE_SHIFT; + struct vb2_buffer *vb; + unsigned int buffer, plane; + void *vaddr; + int ret; + + mutex_lock(&q->mmap_lock); + + /* + * Find the plane corresponding to the offset passed by userspace. This + * will return an error if not MEMORY_MMAP or file I/O is in progress. + */ + ret = __find_plane_by_offset(q, off, &buffer, &plane); + if (ret) + goto unlock; + + vb = q->bufs[buffer]; + + vaddr = vb2_plane_vaddr(vb, plane); + mutex_unlock(&q->mmap_lock); + return vaddr ? (unsigned long)vaddr : -EINVAL; + +unlock: + mutex_unlock(&q->mmap_lock); + return ret; +} +EXPORT_SYMBOL_GPL(vb2_get_unmapped_area); +#endif + +int vb2_core_queue_init(struct vb2_queue *q) +{ + /* + * Sanity check + */ + if (WARN_ON(!q) || + WARN_ON(!q->ops) || + WARN_ON(!q->mem_ops) || + WARN_ON(!q->type) || + WARN_ON(!q->io_modes) || + WARN_ON(!q->ops->queue_setup) || + WARN_ON(!q->ops->buf_queue)) + return -EINVAL; + + if (WARN_ON(q->requires_requests && !q->supports_requests)) + return -EINVAL; + + /* + * This combination is not allowed since a non-zero value of + * q->min_buffers_needed can cause vb2_core_qbuf() to fail if + * it has to call start_streaming(), and the Request API expects + * that queueing a request (and thus queueing a buffer contained + * in that request) will always succeed. There is no method of + * propagating an error back to userspace. + */ + if (WARN_ON(q->supports_requests && q->min_buffers_needed)) + return -EINVAL; + + INIT_LIST_HEAD(&q->queued_list); + INIT_LIST_HEAD(&q->done_list); + spin_lock_init(&q->done_lock); + mutex_init(&q->mmap_lock); + init_waitqueue_head(&q->done_wq); + + q->memory = VB2_MEMORY_UNKNOWN; + + if (q->buf_struct_size == 0) + q->buf_struct_size = sizeof(struct vb2_buffer); + + if (q->bidirectional) + q->dma_dir = DMA_BIDIRECTIONAL; + else + q->dma_dir = q->is_output ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE; + + if (q->name[0] == '\0') + snprintf(q->name, sizeof(q->name), "%s-%p", + q->is_output ? "out" : "cap", q); + + return 0; +} +EXPORT_SYMBOL_GPL(vb2_core_queue_init); + +static int __vb2_init_fileio(struct vb2_queue *q, int read); +static int __vb2_cleanup_fileio(struct vb2_queue *q); +void vb2_core_queue_release(struct vb2_queue *q) +{ + __vb2_cleanup_fileio(q); + __vb2_queue_cancel(q); + mutex_lock(&q->mmap_lock); + __vb2_queue_free(q, q->num_buffers); + mutex_unlock(&q->mmap_lock); +} +EXPORT_SYMBOL_GPL(vb2_core_queue_release); + +__poll_t vb2_core_poll(struct vb2_queue *q, struct file *file, + poll_table *wait) +{ + __poll_t req_events = poll_requested_events(wait); + struct vb2_buffer *vb = NULL; + unsigned long flags; + + /* + * poll_wait() MUST be called on the first invocation on all the + * potential queues of interest, even if we are not interested in their + * events during this first call. Failure to do so will result in + * queue's events to be ignored because the poll_table won't be capable + * of adding new wait queues thereafter. + */ + poll_wait(file, &q->done_wq, wait); + + if (!q->is_output && !(req_events & (EPOLLIN | EPOLLRDNORM))) + return 0; + if (q->is_output && !(req_events & (EPOLLOUT | EPOLLWRNORM))) + return 0; + + /* + * Start file I/O emulator only if streaming API has not been used yet. + */ + if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) { + if (!q->is_output && (q->io_modes & VB2_READ) && + (req_events & (EPOLLIN | EPOLLRDNORM))) { + if (__vb2_init_fileio(q, 1)) + return EPOLLERR; + } + if (q->is_output && (q->io_modes & VB2_WRITE) && + (req_events & (EPOLLOUT | EPOLLWRNORM))) { + if (__vb2_init_fileio(q, 0)) + return EPOLLERR; + /* + * Write to OUTPUT queue can be done immediately. + */ + return EPOLLOUT | EPOLLWRNORM; + } + } + + /* + * There is nothing to wait for if the queue isn't streaming, or if the + * error flag is set. + */ + if (!vb2_is_streaming(q) || q->error) + return EPOLLERR; + + /* + * If this quirk is set and QBUF hasn't been called yet then + * return EPOLLERR as well. This only affects capture queues, output + * queues will always initialize waiting_for_buffers to false. + * This quirk is set by V4L2 for backwards compatibility reasons. + */ + if (q->quirk_poll_must_check_waiting_for_buffers && + q->waiting_for_buffers && (req_events & (EPOLLIN | EPOLLRDNORM))) + return EPOLLERR; + + /* + * For output streams you can call write() as long as there are fewer + * buffers queued than there are buffers available. + */ + if (q->is_output && q->fileio && q->queued_count < q->num_buffers) + return EPOLLOUT | EPOLLWRNORM; + + if (list_empty(&q->done_list)) { + /* + * If the last buffer was dequeued from a capture queue, + * return immediately. DQBUF will return -EPIPE. + */ + if (q->last_buffer_dequeued) + return EPOLLIN | EPOLLRDNORM; + } + + /* + * Take first buffer available for dequeuing. + */ + spin_lock_irqsave(&q->done_lock, flags); + if (!list_empty(&q->done_list)) + vb = list_first_entry(&q->done_list, struct vb2_buffer, + done_entry); + spin_unlock_irqrestore(&q->done_lock, flags); + + if (vb && (vb->state == VB2_BUF_STATE_DONE + || vb->state == VB2_BUF_STATE_ERROR)) { + return (q->is_output) ? + EPOLLOUT | EPOLLWRNORM : + EPOLLIN | EPOLLRDNORM; + } + return 0; +} +EXPORT_SYMBOL_GPL(vb2_core_poll); + +/* + * struct vb2_fileio_buf - buffer context used by file io emulator + * + * vb2 provides a compatibility layer and emulator of file io (read and + * write) calls on top of streaming API. 
This structure is used for + * tracking context related to the buffers. + */ +struct vb2_fileio_buf { + void *vaddr; + unsigned int size; + unsigned int pos; + unsigned int queued:1; +}; + +/* + * struct vb2_fileio_data - queue context used by file io emulator + * + * @cur_index: the index of the buffer currently being read from or + * written to. If equal to q->num_buffers then a new buffer + * must be dequeued. + * @initial_index: in the read() case all buffers are queued up immediately + * in __vb2_init_fileio() and __vb2_perform_fileio() just cycles + * buffers. However, in the write() case no buffers are initially + * queued, instead whenever a buffer is full it is queued up by + * __vb2_perform_fileio(). Only once all available buffers have + * been queued up will __vb2_perform_fileio() start to dequeue + * buffers. This means that initially __vb2_perform_fileio() + * needs to know what buffer index to use when it is queuing up + * the buffers for the first time. That initial index is stored + * in this field. Once it is equal to q->num_buffers all + * available buffers have been queued and __vb2_perform_fileio() + * should start the normal dequeue/queue cycle. + * + * vb2 provides a compatibility layer and emulator of file io (read and + * write) calls on top of streaming API. For proper operation it required + * this structure to save the driver state between each call of the read + * or write function. + */ +struct vb2_fileio_data { + unsigned int count; + unsigned int type; + unsigned int memory; + struct vb2_fileio_buf bufs[VB2_MAX_FRAME]; + unsigned int cur_index; + unsigned int initial_index; + unsigned int q_count; + unsigned int dq_count; + unsigned read_once:1; + unsigned write_immediately:1; +}; + +/* + * __vb2_init_fileio() - initialize file io emulator + * @q: videobuf2 queue + * @read: mode selector (1 means read, 0 means write) + */ +static int __vb2_init_fileio(struct vb2_queue *q, int read) +{ + struct vb2_fileio_data *fileio; + int i, ret; + unsigned int count = 0; + + /* + * Sanity check + */ + if (WARN_ON((read && !(q->io_modes & VB2_READ)) || + (!read && !(q->io_modes & VB2_WRITE)))) + return -EINVAL; + + /* + * Check if device supports mapping buffers to kernel virtual space. + */ + if (!q->mem_ops->vaddr) + return -EBUSY; + + /* + * Check if streaming api has not been already activated. + */ + if (q->streaming || q->num_buffers > 0) + return -EBUSY; + + /* + * Start with count 1, driver can increase it in queue_setup() + */ + count = 1; + + dprintk(q, 3, "setting up file io: mode %s, count %d, read_once %d, write_immediately %d\n", + (read) ? "read" : "write", count, q->fileio_read_once, + q->fileio_write_immediately); + + fileio = kzalloc(sizeof(*fileio), GFP_KERNEL); + if (fileio == NULL) + return -ENOMEM; + + fileio->read_once = q->fileio_read_once; + fileio->write_immediately = q->fileio_write_immediately; + + /* + * Request buffers and use MMAP type to force driver + * to allocate buffers by itself. + */ + fileio->count = count; + fileio->memory = VB2_MEMORY_MMAP; + fileio->type = q->type; + q->fileio = fileio; + ret = vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count); + if (ret) + goto err_kfree; + + /* + * Check if plane_count is correct + * (multiplane buffers are not supported). + */ + if (q->bufs[0]->num_planes != 1) { + ret = -EBUSY; + goto err_reqbufs; + } + + /* + * Get kernel address of each buffer. 
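The emulator initialized here is what lets plain read() and write() work on a video node without any of the streaming ioctls: on the first call vb2 silently allocates MMAP buffers, queues them and starts streaming. A hedged userspace sketch of the read() path this enables (the device path and FRAME_SIZE are illustrative):

    int fd = open("/dev/video0", O_RDONLY);
    char frame[FRAME_SIZE];

    /* The first read() triggers __vb2_init_fileio() behind the scenes. */
    ssize_t n = read(fd, frame, sizeof(frame));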
+ */ + for (i = 0; i < q->num_buffers; i++) { + fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0); + if (fileio->bufs[i].vaddr == NULL) { + ret = -EINVAL; + goto err_reqbufs; + } + fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0); + } + + /* + * Read mode requires pre queuing of all buffers. + */ + if (read) { + /* + * Queue all buffers. + */ + for (i = 0; i < q->num_buffers; i++) { + ret = vb2_core_qbuf(q, i, NULL, NULL); + if (ret) + goto err_reqbufs; + fileio->bufs[i].queued = 1; + } + /* + * All buffers have been queued, so mark that by setting + * initial_index to q->num_buffers + */ + fileio->initial_index = q->num_buffers; + fileio->cur_index = q->num_buffers; + } + + /* + * Start streaming. + */ + ret = vb2_core_streamon(q, q->type); + if (ret) + goto err_reqbufs; + + return ret; + +err_reqbufs: + fileio->count = 0; + vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count); + +err_kfree: + q->fileio = NULL; + kfree(fileio); + return ret; +} + +/* + * __vb2_cleanup_fileio() - free resourced used by file io emulator + * @q: videobuf2 queue + */ +static int __vb2_cleanup_fileio(struct vb2_queue *q) +{ + struct vb2_fileio_data *fileio = q->fileio; + + if (fileio) { + vb2_core_streamoff(q, q->type); + q->fileio = NULL; + fileio->count = 0; + vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count); + kfree(fileio); + dprintk(q, 3, "file io emulator closed\n"); + } + return 0; +} + +/* + * __vb2_perform_fileio() - perform a single file io (read or write) operation + * @q: videobuf2 queue + * @data: pointed to target userspace buffer + * @count: number of bytes to read or write + * @ppos: file handle position tracking pointer + * @nonblock: mode selector (1 means blocking calls, 0 means nonblocking) + * @read: access mode selector (1 means read, 0 means write) + */ +static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count, + loff_t *ppos, int nonblock, int read) +{ + struct vb2_fileio_data *fileio; + struct vb2_fileio_buf *buf; + bool is_multiplanar = q->is_multiplanar; + /* + * When using write() to write data to an output video node the vb2 core + * should copy timestamps if V4L2_BUF_FLAG_TIMESTAMP_COPY is set. Nobody + * else is able to provide this information with the write() operation. + */ + bool copy_timestamp = !read && q->copy_timestamp; + unsigned index; + int ret; + + dprintk(q, 3, "mode %s, offset %ld, count %zd, %sblocking\n", + read ? "read" : "write", (long)*ppos, count, + nonblock ? "non" : ""); + + if (!data) + return -EINVAL; + + if (q->waiting_in_dqbuf) { + dprintk(q, 3, "another dup()ped fd is %s\n", + read ? "reading" : "writing"); + return -EBUSY; + } + + /* + * Initialize emulator on first call. + */ + if (!vb2_fileio_is_active(q)) { + ret = __vb2_init_fileio(q, read); + dprintk(q, 3, "vb2_init_fileio result: %d\n", ret); + if (ret) + return ret; + } + fileio = q->fileio; + + /* + * Check if we need to dequeue the buffer. + */ + index = fileio->cur_index; + if (index >= q->num_buffers) { + struct vb2_buffer *b; + + /* + * Call vb2_dqbuf to get buffer back. + */ + ret = vb2_core_dqbuf(q, &index, NULL, nonblock); + dprintk(q, 5, "vb2_dqbuf result: %d\n", ret); + if (ret) + return ret; + fileio->dq_count += 1; + + fileio->cur_index = index; + buf = &fileio->bufs[index]; + b = q->bufs[index]; + + /* + * Get number of bytes filled by the driver + */ + buf->pos = 0; + buf->queued = 0; + buf->size = read ? 
vb2_get_plane_payload(q->bufs[index], 0) + : vb2_plane_size(q->bufs[index], 0); + /* Compensate for data_offset on read in the multiplanar case. */ + if (is_multiplanar && read && + b->planes[0].data_offset < buf->size) { + buf->pos = b->planes[0].data_offset; + buf->size -= buf->pos; + } + } else { + buf = &fileio->bufs[index]; + } + + /* + * Limit count on last few bytes of the buffer. + */ + if (buf->pos + count > buf->size) { + count = buf->size - buf->pos; + dprintk(q, 5, "reducing read count: %zd\n", count); + } + + /* + * Transfer data to userspace. + */ + dprintk(q, 3, "copying %zd bytes - buffer %d, offset %u\n", + count, index, buf->pos); + if (read) + ret = copy_to_user(data, buf->vaddr + buf->pos, count); + else + ret = copy_from_user(buf->vaddr + buf->pos, data, count); + if (ret) { + dprintk(q, 3, "error copying data\n"); + return -EFAULT; + } + + /* + * Update counters. + */ + buf->pos += count; + *ppos += count; + + /* + * Queue next buffer if required. + */ + if (buf->pos == buf->size || (!read && fileio->write_immediately)) { + struct vb2_buffer *b = q->bufs[index]; + + /* + * Check if this is the last buffer to read. + */ + if (read && fileio->read_once && fileio->dq_count == 1) { + dprintk(q, 3, "read limit reached\n"); + return __vb2_cleanup_fileio(q); + } + + /* + * Call vb2_qbuf and give buffer to the driver. + */ + b->planes[0].bytesused = buf->pos; + + if (copy_timestamp) + b->timestamp = ktime_get_ns(); + ret = vb2_core_qbuf(q, index, NULL, NULL); + dprintk(q, 5, "vb2_dbuf result: %d\n", ret); + if (ret) + return ret; + + /* + * Buffer has been queued, update the status + */ + buf->pos = 0; + buf->queued = 1; + buf->size = vb2_plane_size(q->bufs[index], 0); + fileio->q_count += 1; + /* + * If we are queuing up buffers for the first time, then + * increase initial_index by one. + */ + if (fileio->initial_index < q->num_buffers) + fileio->initial_index++; + /* + * The next buffer to use is either a buffer that's going to be + * queued for the first time (initial_index < q->num_buffers) + * or it is equal to q->num_buffers, meaning that the next + * time we need to dequeue a buffer since we've now queued up + * all the 'first time' buffers. + */ + fileio->cur_index = fileio->initial_index; + } + + /* + * Return proper number of bytes processed. + */ + if (ret == 0) + ret = count; + return ret; +} + +size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count, + loff_t *ppos, int nonblocking) +{ + return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1); +} +EXPORT_SYMBOL_GPL(vb2_read); + +size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count, + loff_t *ppos, int nonblocking) +{ + return __vb2_perform_fileio(q, (char __user *) data, count, + ppos, nonblocking, 0); +} +EXPORT_SYMBOL_GPL(vb2_write); + +struct vb2_threadio_data { + struct task_struct *thread; + vb2_thread_fnc fnc; + void *priv; + bool stop; +}; + +static int vb2_thread(void *data) +{ + struct vb2_queue *q = data; + struct vb2_threadio_data *threadio = q->threadio; + bool copy_timestamp = false; + unsigned prequeue = 0; + unsigned index = 0; + int ret = 0; + + if (q->is_output) { + prequeue = q->num_buffers; + copy_timestamp = q->copy_timestamp; + } + + set_freezable(); + + for (;;) { + struct vb2_buffer *vb; + + /* + * Call vb2_dqbuf to get buffer back. 
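The loop below is normally driven through vb2_thread_start(), which (as noted further down) exists for the videobuf2-dvb glue: each dequeued buffer is fed to a caller-supplied callback. A hedged sketch of that usage pattern, with the "mydvb" names being hypothetical:

    /* Callback invoked for every dequeued buffer. */
    static int mydvb_feed(struct vb2_buffer *vb, void *priv)
    {
            struct mydvb_dev *dev = priv;

            mydvb_push_ts(dev, vb2_plane_vaddr(vb, 0),
                          vb2_get_plane_payload(vb, 0));   /* hypothetical */
            return 0;
    }

    /* During stream setup: */
    ret = vb2_thread_start(q, mydvb_feed, dev, "mydvb");

    /* And on teardown: */
    vb2_thread_stop(q);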
+ */ + if (prequeue) { + vb = q->bufs[index++]; + prequeue--; + } else { + call_void_qop(q, wait_finish, q); + if (!threadio->stop) + ret = vb2_core_dqbuf(q, &index, NULL, 0); + call_void_qop(q, wait_prepare, q); + dprintk(q, 5, "file io: vb2_dqbuf result: %d\n", ret); + if (!ret) + vb = q->bufs[index]; + } + if (ret || threadio->stop) + break; + try_to_freeze(); + + if (vb->state != VB2_BUF_STATE_ERROR) + if (threadio->fnc(vb, threadio->priv)) + break; + call_void_qop(q, wait_finish, q); + if (copy_timestamp) + vb->timestamp = ktime_get_ns(); + if (!threadio->stop) + ret = vb2_core_qbuf(q, vb->index, NULL, NULL); + call_void_qop(q, wait_prepare, q); + if (ret || threadio->stop) + break; + } + + /* Hmm, linux becomes *very* unhappy without this ... */ + while (!kthread_should_stop()) { + set_current_state(TASK_INTERRUPTIBLE); + schedule(); + } + return 0; +} + +/* + * This function should not be used for anything else but the videobuf2-dvb + * support. If you think you have another good use-case for this, then please + * contact the linux-media mailinglist first. + */ +int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv, + const char *thread_name) +{ + struct vb2_threadio_data *threadio; + int ret = 0; + + if (q->threadio) + return -EBUSY; + if (vb2_is_busy(q)) + return -EBUSY; + if (WARN_ON(q->fileio)) + return -EBUSY; + + threadio = kzalloc(sizeof(*threadio), GFP_KERNEL); + if (threadio == NULL) + return -ENOMEM; + threadio->fnc = fnc; + threadio->priv = priv; + + ret = __vb2_init_fileio(q, !q->is_output); + dprintk(q, 3, "file io: vb2_init_fileio result: %d\n", ret); + if (ret) + goto nomem; + q->threadio = threadio; + threadio->thread = kthread_run(vb2_thread, q, "vb2-%s", thread_name); + if (IS_ERR(threadio->thread)) { + ret = PTR_ERR(threadio->thread); + threadio->thread = NULL; + goto nothread; + } + return 0; + +nothread: + __vb2_cleanup_fileio(q); +nomem: + kfree(threadio); + return ret; +} +EXPORT_SYMBOL_GPL(vb2_thread_start); + +int vb2_thread_stop(struct vb2_queue *q) +{ + struct vb2_threadio_data *threadio = q->threadio; + int err; + + if (threadio == NULL) + return 0; + threadio->stop = true; + /* Wake up all pending sleeps in the thread */ + vb2_queue_error(q); + err = kthread_stop(threadio->thread); + __vb2_cleanup_fileio(q); + threadio->thread = NULL; + kfree(threadio); + q->threadio = NULL; + return err; +} +EXPORT_SYMBOL_GPL(vb2_thread_stop); + +MODULE_DESCRIPTION("Media buffer core framework"); +MODULE_AUTHOR("Pawel Osciak , Marek Szyprowski"); +MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(DMA_BUF); diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c new file mode 100644 index 0000000000..2fa455d4a0 --- /dev/null +++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c @@ -0,0 +1,865 @@ +/* + * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2 + * + * Copyright (C) 2010 Samsung Electronics + * + * Author: Pawel Osciak + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +struct vb2_dc_buf { + struct device *dev; + void *vaddr; + unsigned long size; + void *cookie; + dma_addr_t dma_addr; + unsigned long attrs; + enum dma_data_direction dma_dir; + struct sg_table *dma_sgt; + struct frame_vector *vec; + + /* MMAP related */ + struct vb2_vmarea_handler handler; + refcount_t refcount; + struct sg_table *sgt_base; + + /* DMABUF related */ + struct dma_buf_attachment *db_attach; + + struct vb2_buffer *vb; + bool non_coherent_mem; +}; + +/*********************************************/ +/* scatterlist table functions */ +/*********************************************/ + +static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt) +{ + struct scatterlist *s; + dma_addr_t expected = sg_dma_address(sgt->sgl); + unsigned int i; + unsigned long size = 0; + + for_each_sgtable_dma_sg(sgt, s, i) { + if (sg_dma_address(s) != expected) + break; + expected += sg_dma_len(s); + size += sg_dma_len(s); + } + return size; +} + +/*********************************************/ +/* callbacks for all buffers */ +/*********************************************/ + +static void *vb2_dc_cookie(struct vb2_buffer *vb, void *buf_priv) +{ + struct vb2_dc_buf *buf = buf_priv; + + return &buf->dma_addr; +} + +/* + * This function may fail if: + * + * - dma_buf_vmap() fails + * E.g. due to lack of virtual mapping address space, or due to + * dmabuf->ops misconfiguration. + * + * - dma_vmap_noncontiguous() fails + * For instance, when requested buffer size is larger than totalram_pages(). + * Relevant for buffers that use non-coherent memory. + * + * - Queue DMA attrs have DMA_ATTR_NO_KERNEL_MAPPING set + * Relevant for buffers that use coherent memory. 
+ */ +static void *vb2_dc_vaddr(struct vb2_buffer *vb, void *buf_priv) +{ + struct vb2_dc_buf *buf = buf_priv; + + if (buf->vaddr) + return buf->vaddr; + + if (buf->db_attach) { + struct iosys_map map; + + if (!dma_buf_vmap_unlocked(buf->db_attach->dmabuf, &map)) + buf->vaddr = map.vaddr; + + return buf->vaddr; + } + + if (buf->non_coherent_mem) + buf->vaddr = dma_vmap_noncontiguous(buf->dev, buf->size, + buf->dma_sgt); + return buf->vaddr; +} + +static unsigned int vb2_dc_num_users(void *buf_priv) +{ + struct vb2_dc_buf *buf = buf_priv; + + return refcount_read(&buf->refcount); +} + +static void vb2_dc_prepare(void *buf_priv) +{ + struct vb2_dc_buf *buf = buf_priv; + struct sg_table *sgt = buf->dma_sgt; + + /* This takes care of DMABUF and user-enforced cache sync hint */ + if (buf->vb->skip_cache_sync_on_prepare) + return; + + if (!buf->non_coherent_mem) + return; + + /* Non-coherent MMAP only */ + if (buf->vaddr) + flush_kernel_vmap_range(buf->vaddr, buf->size); + + /* For both USERPTR and non-coherent MMAP */ + dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir); +} + +static void vb2_dc_finish(void *buf_priv) +{ + struct vb2_dc_buf *buf = buf_priv; + struct sg_table *sgt = buf->dma_sgt; + + /* This takes care of DMABUF and user-enforced cache sync hint */ + if (buf->vb->skip_cache_sync_on_finish) + return; + + if (!buf->non_coherent_mem) + return; + + /* Non-coherent MMAP only */ + if (buf->vaddr) + invalidate_kernel_vmap_range(buf->vaddr, buf->size); + + /* For both USERPTR and non-coherent MMAP */ + dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir); +} + +/*********************************************/ +/* callbacks for MMAP buffers */ +/*********************************************/ + +static void vb2_dc_put(void *buf_priv) +{ + struct vb2_dc_buf *buf = buf_priv; + + if (!refcount_dec_and_test(&buf->refcount)) + return; + + if (buf->non_coherent_mem) { + if (buf->vaddr) + dma_vunmap_noncontiguous(buf->dev, buf->vaddr); + dma_free_noncontiguous(buf->dev, buf->size, + buf->dma_sgt, buf->dma_dir); + } else { + if (buf->sgt_base) { + sg_free_table(buf->sgt_base); + kfree(buf->sgt_base); + } + dma_free_attrs(buf->dev, buf->size, buf->cookie, + buf->dma_addr, buf->attrs); + } + put_device(buf->dev); + kfree(buf); +} + +static int vb2_dc_alloc_coherent(struct vb2_dc_buf *buf) +{ + struct vb2_queue *q = buf->vb->vb2_queue; + + buf->cookie = dma_alloc_attrs(buf->dev, + buf->size, + &buf->dma_addr, + GFP_KERNEL | q->gfp_flags, + buf->attrs); + if (!buf->cookie) + return -ENOMEM; + + if (q->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING) + return 0; + + buf->vaddr = buf->cookie; + return 0; +} + +static int vb2_dc_alloc_non_coherent(struct vb2_dc_buf *buf) +{ + struct vb2_queue *q = buf->vb->vb2_queue; + + buf->dma_sgt = dma_alloc_noncontiguous(buf->dev, + buf->size, + buf->dma_dir, + GFP_KERNEL | q->gfp_flags, + buf->attrs); + if (!buf->dma_sgt) + return -ENOMEM; + + buf->dma_addr = sg_dma_address(buf->dma_sgt->sgl); + + /* + * For non-coherent buffers the kernel mapping is created on demand + * in vb2_dc_vaddr(). 
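Drivers using this allocator never touch dma_addr directly; they read it back through the cookie op shown earlier via vb2_dma_contig_plane_dma_addr(). A hedged sketch of the typical use in a driver's buf_queue() op, where the register write is hypothetical:

    static void mydrv_buf_queue(struct vb2_buffer *vb)
    {
            struct mydrv_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
            dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);

            mydrv_hw_set_dma_addr(dev, addr);       /* hypothetical */
    }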
+ */ + return 0; +} + +static void *vb2_dc_alloc(struct vb2_buffer *vb, + struct device *dev, + unsigned long size) +{ + struct vb2_dc_buf *buf; + int ret; + + if (WARN_ON(!dev)) + return ERR_PTR(-EINVAL); + + buf = kzalloc(sizeof *buf, GFP_KERNEL); + if (!buf) + return ERR_PTR(-ENOMEM); + + buf->attrs = vb->vb2_queue->dma_attrs; + buf->dma_dir = vb->vb2_queue->dma_dir; + buf->vb = vb; + buf->non_coherent_mem = vb->vb2_queue->non_coherent_mem; + + buf->size = size; + /* Prevent the device from being released while the buffer is used */ + buf->dev = get_device(dev); + + if (buf->non_coherent_mem) + ret = vb2_dc_alloc_non_coherent(buf); + else + ret = vb2_dc_alloc_coherent(buf); + + if (ret) { + dev_err(dev, "dma alloc of size %lu failed\n", size); + kfree(buf); + return ERR_PTR(-ENOMEM); + } + + buf->handler.refcount = &buf->refcount; + buf->handler.put = vb2_dc_put; + buf->handler.arg = buf; + + refcount_set(&buf->refcount, 1); + + return buf; +} + +static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma) +{ + struct vb2_dc_buf *buf = buf_priv; + int ret; + + if (!buf) { + printk(KERN_ERR "No buffer to map\n"); + return -EINVAL; + } + + if (buf->non_coherent_mem) + ret = dma_mmap_noncontiguous(buf->dev, vma, buf->size, + buf->dma_sgt); + else + ret = dma_mmap_attrs(buf->dev, vma, buf->cookie, buf->dma_addr, + buf->size, buf->attrs); + if (ret) { + pr_err("Remapping memory failed, error: %d\n", ret); + return ret; + } + + vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP); + vma->vm_private_data = &buf->handler; + vma->vm_ops = &vb2_common_vm_ops; + + vma->vm_ops->open(vma); + + pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %lu\n", + __func__, (unsigned long)buf->dma_addr, vma->vm_start, + buf->size); + + return 0; +} + +/*********************************************/ +/* DMABUF ops for exporters */ +/*********************************************/ + +struct vb2_dc_attachment { + struct sg_table sgt; + enum dma_data_direction dma_dir; +}; + +static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, + struct dma_buf_attachment *dbuf_attach) +{ + struct vb2_dc_attachment *attach; + unsigned int i; + struct scatterlist *rd, *wr; + struct sg_table *sgt; + struct vb2_dc_buf *buf = dbuf->priv; + int ret; + + attach = kzalloc(sizeof(*attach), GFP_KERNEL); + if (!attach) + return -ENOMEM; + + sgt = &attach->sgt; + /* Copy the buf->base_sgt scatter list to the attachment, as we can't + * map the same scatter list to multiple attachments at the same time. + */ + ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL); + if (ret) { + kfree(attach); + return -ENOMEM; + } + + rd = buf->sgt_base->sgl; + wr = sgt->sgl; + for (i = 0; i < sgt->orig_nents; ++i) { + sg_set_page(wr, sg_page(rd), rd->length, rd->offset); + rd = sg_next(rd); + wr = sg_next(wr); + } + + attach->dma_dir = DMA_NONE; + dbuf_attach->priv = attach; + + return 0; +} + +static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf, + struct dma_buf_attachment *db_attach) +{ + struct vb2_dc_attachment *attach = db_attach->priv; + struct sg_table *sgt; + + if (!attach) + return; + + sgt = &attach->sgt; + + /* release the scatterlist cache */ + if (attach->dma_dir != DMA_NONE) + /* + * Cache sync can be skipped here, as the vb2_dc memory is + * allocated from device coherent memory, which means the + * memory locations do not require any explicit cache + * maintenance prior or after being used by the device. 
+ */ + dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, + DMA_ATTR_SKIP_CPU_SYNC); + sg_free_table(sgt); + kfree(attach); + db_attach->priv = NULL; +} + +static struct sg_table *vb2_dc_dmabuf_ops_map( + struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir) +{ + struct vb2_dc_attachment *attach = db_attach->priv; + struct sg_table *sgt; + + sgt = &attach->sgt; + /* return previously mapped sg table */ + if (attach->dma_dir == dma_dir) + return sgt; + + /* release any previous cache */ + if (attach->dma_dir != DMA_NONE) { + dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, + DMA_ATTR_SKIP_CPU_SYNC); + attach->dma_dir = DMA_NONE; + } + + /* + * mapping to the client with new direction, no cache sync + * required see comment in vb2_dc_dmabuf_ops_detach() + */ + if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, + DMA_ATTR_SKIP_CPU_SYNC)) { + pr_err("failed to map scatterlist\n"); + return ERR_PTR(-EIO); + } + + attach->dma_dir = dma_dir; + + return sgt; +} + +static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach, + struct sg_table *sgt, enum dma_data_direction dma_dir) +{ + /* nothing to be done here */ +} + +static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf) +{ + /* drop reference obtained in vb2_dc_get_dmabuf */ + vb2_dc_put(dbuf->priv); +} + +static int +vb2_dc_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf, + enum dma_data_direction direction) +{ + return 0; +} + +static int +vb2_dc_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf, + enum dma_data_direction direction) +{ + return 0; +} + +static int vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct iosys_map *map) +{ + struct vb2_dc_buf *buf; + void *vaddr; + + buf = dbuf->priv; + vaddr = vb2_dc_vaddr(buf->vb, buf); + if (!vaddr) + return -EINVAL; + + iosys_map_set_vaddr(map, vaddr); + + return 0; +} + +static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf, + struct vm_area_struct *vma) +{ + return vb2_dc_mmap(dbuf->priv, vma); +} + +static const struct dma_buf_ops vb2_dc_dmabuf_ops = { + .attach = vb2_dc_dmabuf_ops_attach, + .detach = vb2_dc_dmabuf_ops_detach, + .map_dma_buf = vb2_dc_dmabuf_ops_map, + .unmap_dma_buf = vb2_dc_dmabuf_ops_unmap, + .begin_cpu_access = vb2_dc_dmabuf_ops_begin_cpu_access, + .end_cpu_access = vb2_dc_dmabuf_ops_end_cpu_access, + .vmap = vb2_dc_dmabuf_ops_vmap, + .mmap = vb2_dc_dmabuf_ops_mmap, + .release = vb2_dc_dmabuf_ops_release, +}; + +static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf) +{ + int ret; + struct sg_table *sgt; + + if (buf->non_coherent_mem) + return buf->dma_sgt; + + sgt = kmalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) { + dev_err(buf->dev, "failed to alloc sg table\n"); + return NULL; + } + + ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr, + buf->size, buf->attrs); + if (ret < 0) { + dev_err(buf->dev, "failed to get scatterlist from DMA API\n"); + kfree(sgt); + return NULL; + } + + return sgt; +} + +static struct dma_buf *vb2_dc_get_dmabuf(struct vb2_buffer *vb, + void *buf_priv, + unsigned long flags) +{ + struct vb2_dc_buf *buf = buf_priv; + struct dma_buf *dbuf; + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + + exp_info.ops = &vb2_dc_dmabuf_ops; + exp_info.size = buf->size; + exp_info.flags = flags; + exp_info.priv = buf; + + if (!buf->sgt_base) + buf->sgt_base = vb2_dc_get_base_sgt(buf); + + if (WARN_ON(!buf->sgt_base)) + return NULL; + + dbuf = dma_buf_export(&exp_info); + if (IS_ERR(dbuf)) + return NULL; + + /* dmabuf keeps reference to vb2 buffer */ + refcount_inc(&buf->refcount); + + return 
dbuf; +} + +/*********************************************/ +/* callbacks for USERPTR buffers */ +/*********************************************/ + +static void vb2_dc_put_userptr(void *buf_priv) +{ + struct vb2_dc_buf *buf = buf_priv; + struct sg_table *sgt = buf->dma_sgt; + int i; + struct page **pages; + + if (sgt) { + /* + * No need to sync to CPU, it's already synced to the CPU + * since the finish() memop will have been called before this. + */ + dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, + DMA_ATTR_SKIP_CPU_SYNC); + pages = frame_vector_pages(buf->vec); + /* sgt should exist only if vector contains pages... */ + BUG_ON(IS_ERR(pages)); + if (buf->dma_dir == DMA_FROM_DEVICE || + buf->dma_dir == DMA_BIDIRECTIONAL) + for (i = 0; i < frame_vector_count(buf->vec); i++) + set_page_dirty_lock(pages[i]); + sg_free_table(sgt); + kfree(sgt); + } else { + dma_unmap_resource(buf->dev, buf->dma_addr, buf->size, + buf->dma_dir, 0); + } + vb2_destroy_framevec(buf->vec); + kfree(buf); +} + +static void *vb2_dc_get_userptr(struct vb2_buffer *vb, struct device *dev, + unsigned long vaddr, unsigned long size) +{ + struct vb2_dc_buf *buf; + struct frame_vector *vec; + unsigned int offset; + int n_pages, i; + int ret = 0; + struct sg_table *sgt; + unsigned long contig_size; + unsigned long dma_align = dma_get_cache_alignment(); + + /* Only cache aligned DMA transfers are reliable */ + if (!IS_ALIGNED(vaddr | size, dma_align)) { + pr_debug("user data must be aligned to %lu bytes\n", dma_align); + return ERR_PTR(-EINVAL); + } + + if (!size) { + pr_debug("size is zero\n"); + return ERR_PTR(-EINVAL); + } + + if (WARN_ON(!dev)) + return ERR_PTR(-EINVAL); + + buf = kzalloc(sizeof *buf, GFP_KERNEL); + if (!buf) + return ERR_PTR(-ENOMEM); + + buf->dev = dev; + buf->dma_dir = vb->vb2_queue->dma_dir; + buf->vb = vb; + + offset = lower_32_bits(offset_in_page(vaddr)); + vec = vb2_create_framevec(vaddr, size, buf->dma_dir == DMA_FROM_DEVICE || + buf->dma_dir == DMA_BIDIRECTIONAL); + if (IS_ERR(vec)) { + ret = PTR_ERR(vec); + goto fail_buf; + } + buf->vec = vec; + n_pages = frame_vector_count(vec); + ret = frame_vector_to_pages(vec); + if (ret < 0) { + unsigned long *nums = frame_vector_pfns(vec); + + /* + * Failed to convert to pages... Check the memory is physically + * contiguous and use direct mapping + */ + for (i = 1; i < n_pages; i++) + if (nums[i-1] + 1 != nums[i]) + goto fail_pfnvec; + buf->dma_addr = dma_map_resource(buf->dev, + __pfn_to_phys(nums[0]), size, buf->dma_dir, 0); + if (dma_mapping_error(buf->dev, buf->dma_addr)) { + ret = -ENOMEM; + goto fail_pfnvec; + } + goto out; + } + + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) { + pr_err("failed to allocate sg table\n"); + ret = -ENOMEM; + goto fail_pfnvec; + } + + ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages, + offset, size, GFP_KERNEL); + if (ret) { + pr_err("failed to initialize sg table\n"); + goto fail_sgt; + } + + /* + * No need to sync to the device, this will happen later when the + * prepare() memop is called. 
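The alignment check at the top of vb2_dc_get_userptr() is what userspace has to satisfy when it queues USERPTR memory: the pointer and size must be cache-line aligned, and page alignment is the safe choice. A hedged userspace sketch (headers and error handling omitted, fd is an open video node, buf_size is illustrative):

    void *mem;
    posix_memalign(&mem, sysconf(_SC_PAGESIZE), buf_size);

    struct v4l2_buffer buf = {
            .type      = V4L2_BUF_TYPE_VIDEO_CAPTURE,
            .memory    = V4L2_MEMORY_USERPTR,
            .index     = 0,
            .m.userptr = (unsigned long)mem,
            .length    = buf_size,
    };

    ioctl(fd, VIDIOC_QBUF, &buf);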
+ */ + if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir, + DMA_ATTR_SKIP_CPU_SYNC)) { + pr_err("failed to map scatterlist\n"); + ret = -EIO; + goto fail_sgt_init; + } + + contig_size = vb2_dc_get_contiguous_size(sgt); + if (contig_size < size) { + pr_err("contiguous mapping is too small %lu/%lu\n", + contig_size, size); + ret = -EFAULT; + goto fail_map_sg; + } + + buf->dma_addr = sg_dma_address(sgt->sgl); + buf->dma_sgt = sgt; + buf->non_coherent_mem = 1; + +out: + buf->size = size; + + return buf; + +fail_map_sg: + dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC); + +fail_sgt_init: + sg_free_table(sgt); + +fail_sgt: + kfree(sgt); + +fail_pfnvec: + vb2_destroy_framevec(vec); + +fail_buf: + kfree(buf); + + return ERR_PTR(ret); +} + +/*********************************************/ +/* callbacks for DMABUF buffers */ +/*********************************************/ + +static int vb2_dc_map_dmabuf(void *mem_priv) +{ + struct vb2_dc_buf *buf = mem_priv; + struct sg_table *sgt; + unsigned long contig_size; + + if (WARN_ON(!buf->db_attach)) { + pr_err("trying to pin a non attached buffer\n"); + return -EINVAL; + } + + if (WARN_ON(buf->dma_sgt)) { + pr_err("dmabuf buffer is already pinned\n"); + return 0; + } + + /* get the associated scatterlist for this buffer */ + sgt = dma_buf_map_attachment_unlocked(buf->db_attach, buf->dma_dir); + if (IS_ERR(sgt)) { + pr_err("Error getting dmabuf scatterlist\n"); + return -EINVAL; + } + + /* checking if dmabuf is big enough to store contiguous chunk */ + contig_size = vb2_dc_get_contiguous_size(sgt); + if (contig_size < buf->size) { + pr_err("contiguous chunk is too small %lu/%lu\n", + contig_size, buf->size); + dma_buf_unmap_attachment_unlocked(buf->db_attach, sgt, + buf->dma_dir); + return -EFAULT; + } + + buf->dma_addr = sg_dma_address(sgt->sgl); + buf->dma_sgt = sgt; + buf->vaddr = NULL; + + return 0; +} + +static void vb2_dc_unmap_dmabuf(void *mem_priv) +{ + struct vb2_dc_buf *buf = mem_priv; + struct sg_table *sgt = buf->dma_sgt; + struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr); + + if (WARN_ON(!buf->db_attach)) { + pr_err("trying to unpin a not attached buffer\n"); + return; + } + + if (WARN_ON(!sgt)) { + pr_err("dmabuf buffer is already unpinned\n"); + return; + } + + if (buf->vaddr) { + dma_buf_vunmap_unlocked(buf->db_attach->dmabuf, &map); + buf->vaddr = NULL; + } + dma_buf_unmap_attachment_unlocked(buf->db_attach, sgt, buf->dma_dir); + + buf->dma_addr = 0; + buf->dma_sgt = NULL; +} + +static void vb2_dc_detach_dmabuf(void *mem_priv) +{ + struct vb2_dc_buf *buf = mem_priv; + + /* if vb2 works correctly you should never detach mapped buffer */ + if (WARN_ON(buf->dma_addr)) + vb2_dc_unmap_dmabuf(buf); + + /* detach this attachment */ + dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach); + kfree(buf); +} + +static void *vb2_dc_attach_dmabuf(struct vb2_buffer *vb, struct device *dev, + struct dma_buf *dbuf, unsigned long size) +{ + struct vb2_dc_buf *buf; + struct dma_buf_attachment *dba; + + if (dbuf->size < size) + return ERR_PTR(-EFAULT); + + if (WARN_ON(!dev)) + return ERR_PTR(-EINVAL); + + buf = kzalloc(sizeof(*buf), GFP_KERNEL); + if (!buf) + return ERR_PTR(-ENOMEM); + + buf->dev = dev; + buf->vb = vb; + + /* create attachment for the dmabuf with the user device */ + dba = dma_buf_attach(dbuf, buf->dev); + if (IS_ERR(dba)) { + pr_err("failed to attach dmabuf\n"); + kfree(buf); + return dba; + } + + buf->dma_dir = vb->vb2_queue->dma_dir; + buf->size = size; + buf->db_attach = dba; + + return buf; +} + 
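The attach/map helpers above plug into the vb2_mem_ops table exported just below. A hedged sketch of how a capture driver typically selects this allocator while setting up its queue; everything prefixed "mydrv" (and the embedding dev structure) is hypothetical, the remaining symbols are the vb2/V4L2 ones used by real drivers:

    struct vb2_queue *q = &dev->queue;

    q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
    q->dev = dev->dev;                              /* struct device doing the DMA */
    q->drv_priv = dev;
    q->buf_struct_size = sizeof(struct mydrv_buf);  /* embeds vb2_v4l2_buffer */
    q->ops = &mydrv_vb2_ops;                        /* queue_setup/buf_queue/... */
    q->mem_ops = &vb2_dma_contig_memops;
    q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
    q->min_buffers_needed = 2;
    q->lock = &dev->lock;

    /* Let an IOMMU, if present, map a whole buffer as one DMA segment. */
    vb2_dma_contig_set_max_seg_size(dev->dev, DMA_BIT_MASK(32));

    ret = vb2_queue_init(q);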
+/*********************************************/ +/* DMA CONTIG exported functions */ +/*********************************************/ + +const struct vb2_mem_ops vb2_dma_contig_memops = { + .alloc = vb2_dc_alloc, + .put = vb2_dc_put, + .get_dmabuf = vb2_dc_get_dmabuf, + .cookie = vb2_dc_cookie, + .vaddr = vb2_dc_vaddr, + .mmap = vb2_dc_mmap, + .get_userptr = vb2_dc_get_userptr, + .put_userptr = vb2_dc_put_userptr, + .prepare = vb2_dc_prepare, + .finish = vb2_dc_finish, + .map_dmabuf = vb2_dc_map_dmabuf, + .unmap_dmabuf = vb2_dc_unmap_dmabuf, + .attach_dmabuf = vb2_dc_attach_dmabuf, + .detach_dmabuf = vb2_dc_detach_dmabuf, + .num_users = vb2_dc_num_users, +}; +EXPORT_SYMBOL_GPL(vb2_dma_contig_memops); + +/** + * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size + * @dev: device for configuring DMA parameters + * @size: size of DMA max segment size to set + * + * To allow mapping the scatter-list into a single chunk in the DMA + * address space, the device is required to have the DMA max segment + * size parameter set to a value larger than the buffer size. Otherwise, + * the DMA-mapping subsystem will split the mapping into max segment + * size chunks. This function sets the DMA max segment size + * parameter to let DMA-mapping map a buffer as a single chunk in DMA + * address space. + * This code assumes that the DMA-mapping subsystem will merge all + * scatterlist segments if this is really possible (for example when + * an IOMMU is available and enabled). + * Ideally, this parameter should be set by the generic bus code, but it + * is left with the default 64KiB value due to historical litmiations in + * other subsystems (like limited USB host drivers) and there no good + * place to set it to the proper value. + * This function should be called from the drivers, which are known to + * operate on platforms with IOMMU and provide access to shared buffers + * (either USERPTR or DMABUF). This should be done before initializing + * videobuf2 queue. + */ +int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size) +{ + if (!dev->dma_parms) { + dev_err(dev, "Failed to set max_seg_size: dma_parms is NULL\n"); + return -ENODEV; + } + if (dma_get_max_seg_size(dev) < size) + return dma_set_max_seg_size(dev, size); + + return 0; +} +EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size); + +MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2"); +MODULE_AUTHOR("Pawel Osciak "); +MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(DMA_BUF); diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c new file mode 100644 index 0000000000..6975a71d74 --- /dev/null +++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c @@ -0,0 +1,679 @@ +/* + * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2 + * + * Copyright (C) 2010 Samsung Electronics + * + * Author: Andrzej Pietrasiewicz + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +static int debug; +module_param(debug, int, 0644); + +#define dprintk(level, fmt, arg...) 
\ + do { \ + if (debug >= level) \ + printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg); \ + } while (0) + +struct vb2_dma_sg_buf { + struct device *dev; + void *vaddr; + struct page **pages; + struct frame_vector *vec; + int offset; + enum dma_data_direction dma_dir; + struct sg_table sg_table; + /* + * This will point to sg_table when used with the MMAP or USERPTR + * memory model, and to the dma_buf sglist when used with the + * DMABUF memory model. + */ + struct sg_table *dma_sgt; + size_t size; + unsigned int num_pages; + refcount_t refcount; + struct vb2_vmarea_handler handler; + + struct dma_buf_attachment *db_attach; + + struct vb2_buffer *vb; +}; + +static void vb2_dma_sg_put(void *buf_priv); + +static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf, + gfp_t gfp_flags) +{ + unsigned int last_page = 0; + unsigned long size = buf->size; + + while (size > 0) { + struct page *pages; + int order; + int i; + + order = get_order(size); + /* Don't over allocate*/ + if ((PAGE_SIZE << order) > size) + order--; + + pages = NULL; + while (!pages) { + pages = alloc_pages(GFP_KERNEL | __GFP_ZERO | + __GFP_NOWARN | gfp_flags, order); + if (pages) + break; + + if (order == 0) { + while (last_page--) + __free_page(buf->pages[last_page]); + return -ENOMEM; + } + order--; + } + + split_page(pages, order); + for (i = 0; i < (1 << order); i++) + buf->pages[last_page++] = &pages[i]; + + size -= PAGE_SIZE << order; + } + + return 0; +} + +static void *vb2_dma_sg_alloc(struct vb2_buffer *vb, struct device *dev, + unsigned long size) +{ + struct vb2_dma_sg_buf *buf; + struct sg_table *sgt; + int ret; + int num_pages; + + if (WARN_ON(!dev) || WARN_ON(!size)) + return ERR_PTR(-EINVAL); + + buf = kzalloc(sizeof *buf, GFP_KERNEL); + if (!buf) + return ERR_PTR(-ENOMEM); + + buf->vaddr = NULL; + buf->dma_dir = vb->vb2_queue->dma_dir; + buf->offset = 0; + buf->size = size; + /* size is already page aligned */ + buf->num_pages = size >> PAGE_SHIFT; + buf->dma_sgt = &buf->sg_table; + + /* + * NOTE: dma-sg allocates memory using the page allocator directly, so + * there is no memory consistency guarantee, hence dma-sg ignores DMA + * attributes passed from the upper layer. + */ + buf->pages = kvcalloc(buf->num_pages, sizeof(struct page *), GFP_KERNEL); + if (!buf->pages) + goto fail_pages_array_alloc; + + ret = vb2_dma_sg_alloc_compacted(buf, vb->vb2_queue->gfp_flags); + if (ret) + goto fail_pages_alloc; + + ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages, + buf->num_pages, 0, size, GFP_KERNEL); + if (ret) + goto fail_table_alloc; + + /* Prevent the device from being released while the buffer is used */ + buf->dev = get_device(dev); + + sgt = &buf->sg_table; + /* + * No need to sync to the device, this will happen later when the + * prepare() memop is called. 
+ */ + if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir, + DMA_ATTR_SKIP_CPU_SYNC)) + goto fail_map; + + buf->handler.refcount = &buf->refcount; + buf->handler.put = vb2_dma_sg_put; + buf->handler.arg = buf; + buf->vb = vb; + + refcount_set(&buf->refcount, 1); + + dprintk(1, "%s: Allocated buffer of %d pages\n", + __func__, buf->num_pages); + return buf; + +fail_map: + put_device(buf->dev); + sg_free_table(buf->dma_sgt); +fail_table_alloc: + num_pages = buf->num_pages; + while (num_pages--) + __free_page(buf->pages[num_pages]); +fail_pages_alloc: + kvfree(buf->pages); +fail_pages_array_alloc: + kfree(buf); + return ERR_PTR(-ENOMEM); +} + +static void vb2_dma_sg_put(void *buf_priv) +{ + struct vb2_dma_sg_buf *buf = buf_priv; + struct sg_table *sgt = &buf->sg_table; + int i = buf->num_pages; + + if (refcount_dec_and_test(&buf->refcount)) { + dprintk(1, "%s: Freeing buffer of %d pages\n", __func__, + buf->num_pages); + dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, + DMA_ATTR_SKIP_CPU_SYNC); + if (buf->vaddr) + vm_unmap_ram(buf->vaddr, buf->num_pages); + sg_free_table(buf->dma_sgt); + while (--i >= 0) + __free_page(buf->pages[i]); + kvfree(buf->pages); + put_device(buf->dev); + kfree(buf); + } +} + +static void vb2_dma_sg_prepare(void *buf_priv) +{ + struct vb2_dma_sg_buf *buf = buf_priv; + struct sg_table *sgt = buf->dma_sgt; + + if (buf->vb->skip_cache_sync_on_prepare) + return; + + dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir); +} + +static void vb2_dma_sg_finish(void *buf_priv) +{ + struct vb2_dma_sg_buf *buf = buf_priv; + struct sg_table *sgt = buf->dma_sgt; + + if (buf->vb->skip_cache_sync_on_finish) + return; + + dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir); +} + +static void *vb2_dma_sg_get_userptr(struct vb2_buffer *vb, struct device *dev, + unsigned long vaddr, unsigned long size) +{ + struct vb2_dma_sg_buf *buf; + struct sg_table *sgt; + struct frame_vector *vec; + + if (WARN_ON(!dev)) + return ERR_PTR(-EINVAL); + + buf = kzalloc(sizeof *buf, GFP_KERNEL); + if (!buf) + return ERR_PTR(-ENOMEM); + + buf->vaddr = NULL; + buf->dev = dev; + buf->dma_dir = vb->vb2_queue->dma_dir; + buf->offset = vaddr & ~PAGE_MASK; + buf->size = size; + buf->dma_sgt = &buf->sg_table; + buf->vb = vb; + vec = vb2_create_framevec(vaddr, size, + buf->dma_dir == DMA_FROM_DEVICE || + buf->dma_dir == DMA_BIDIRECTIONAL); + if (IS_ERR(vec)) + goto userptr_fail_pfnvec; + buf->vec = vec; + + buf->pages = frame_vector_pages(vec); + if (IS_ERR(buf->pages)) + goto userptr_fail_sgtable; + buf->num_pages = frame_vector_count(vec); + + if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages, + buf->num_pages, buf->offset, size, 0)) + goto userptr_fail_sgtable; + + sgt = &buf->sg_table; + /* + * No need to sync to the device, this will happen later when the + * prepare() memop is called. 
+ */ + if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir, + DMA_ATTR_SKIP_CPU_SYNC)) + goto userptr_fail_map; + + return buf; + +userptr_fail_map: + sg_free_table(&buf->sg_table); +userptr_fail_sgtable: + vb2_destroy_framevec(vec); +userptr_fail_pfnvec: + kfree(buf); + return ERR_PTR(-ENOMEM); +} + +/* + * @put_userptr: inform the allocator that a USERPTR buffer will no longer + * be used + */ +static void vb2_dma_sg_put_userptr(void *buf_priv) +{ + struct vb2_dma_sg_buf *buf = buf_priv; + struct sg_table *sgt = &buf->sg_table; + int i = buf->num_pages; + + dprintk(1, "%s: Releasing userspace buffer of %d pages\n", + __func__, buf->num_pages); + dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC); + if (buf->vaddr) + vm_unmap_ram(buf->vaddr, buf->num_pages); + sg_free_table(buf->dma_sgt); + if (buf->dma_dir == DMA_FROM_DEVICE || + buf->dma_dir == DMA_BIDIRECTIONAL) + while (--i >= 0) + set_page_dirty_lock(buf->pages[i]); + vb2_destroy_framevec(buf->vec); + kfree(buf); +} + +static void *vb2_dma_sg_vaddr(struct vb2_buffer *vb, void *buf_priv) +{ + struct vb2_dma_sg_buf *buf = buf_priv; + struct iosys_map map; + int ret; + + BUG_ON(!buf); + + if (!buf->vaddr) { + if (buf->db_attach) { + ret = dma_buf_vmap_unlocked(buf->db_attach->dmabuf, &map); + buf->vaddr = ret ? NULL : map.vaddr; + } else { + buf->vaddr = vm_map_ram(buf->pages, buf->num_pages, -1); + } + } + + /* add offset in case userptr is not page-aligned */ + return buf->vaddr ? buf->vaddr + buf->offset : NULL; +} + +static unsigned int vb2_dma_sg_num_users(void *buf_priv) +{ + struct vb2_dma_sg_buf *buf = buf_priv; + + return refcount_read(&buf->refcount); +} + +static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma) +{ + struct vb2_dma_sg_buf *buf = buf_priv; + int err; + + if (!buf) { + printk(KERN_ERR "No memory to map\n"); + return -EINVAL; + } + + err = vm_map_pages(vma, buf->pages, buf->num_pages); + if (err) { + printk(KERN_ERR "Remapping memory, error: %d\n", err); + return err; + } + + /* + * Use common vm_area operations to track buffer refcount. + */ + vma->vm_private_data = &buf->handler; + vma->vm_ops = &vb2_common_vm_ops; + + vma->vm_ops->open(vma); + + return 0; +} + +/*********************************************/ +/* DMABUF ops for exporters */ +/*********************************************/ + +struct vb2_dma_sg_attachment { + struct sg_table sgt; + enum dma_data_direction dma_dir; +}; + +static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf, + struct dma_buf_attachment *dbuf_attach) +{ + struct vb2_dma_sg_attachment *attach; + unsigned int i; + struct scatterlist *rd, *wr; + struct sg_table *sgt; + struct vb2_dma_sg_buf *buf = dbuf->priv; + int ret; + + attach = kzalloc(sizeof(*attach), GFP_KERNEL); + if (!attach) + return -ENOMEM; + + sgt = &attach->sgt; + /* Copy the buf->base_sgt scatter list to the attachment, as we can't + * map the same scatter list to multiple attachments at the same time. 
+ */ + ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL); + if (ret) { + kfree(attach); + return -ENOMEM; + } + + rd = buf->dma_sgt->sgl; + wr = sgt->sgl; + for (i = 0; i < sgt->orig_nents; ++i) { + sg_set_page(wr, sg_page(rd), rd->length, rd->offset); + rd = sg_next(rd); + wr = sg_next(wr); + } + + attach->dma_dir = DMA_NONE; + dbuf_attach->priv = attach; + + return 0; +} + +static void vb2_dma_sg_dmabuf_ops_detach(struct dma_buf *dbuf, + struct dma_buf_attachment *db_attach) +{ + struct vb2_dma_sg_attachment *attach = db_attach->priv; + struct sg_table *sgt; + + if (!attach) + return; + + sgt = &attach->sgt; + + /* release the scatterlist cache */ + if (attach->dma_dir != DMA_NONE) + dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0); + sg_free_table(sgt); + kfree(attach); + db_attach->priv = NULL; +} + +static struct sg_table *vb2_dma_sg_dmabuf_ops_map( + struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir) +{ + struct vb2_dma_sg_attachment *attach = db_attach->priv; + struct sg_table *sgt; + + sgt = &attach->sgt; + /* return previously mapped sg table */ + if (attach->dma_dir == dma_dir) + return sgt; + + /* release any previous cache */ + if (attach->dma_dir != DMA_NONE) { + dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0); + attach->dma_dir = DMA_NONE; + } + + /* mapping to the client with new direction */ + if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) { + pr_err("failed to map scatterlist\n"); + return ERR_PTR(-EIO); + } + + attach->dma_dir = dma_dir; + + return sgt; +} + +static void vb2_dma_sg_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach, + struct sg_table *sgt, enum dma_data_direction dma_dir) +{ + /* nothing to be done here */ +} + +static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf) +{ + /* drop reference obtained in vb2_dma_sg_get_dmabuf */ + vb2_dma_sg_put(dbuf->priv); +} + +static int +vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf, + enum dma_data_direction direction) +{ + struct vb2_dma_sg_buf *buf = dbuf->priv; + struct sg_table *sgt = buf->dma_sgt; + + dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir); + return 0; +} + +static int +vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf, + enum dma_data_direction direction) +{ + struct vb2_dma_sg_buf *buf = dbuf->priv; + struct sg_table *sgt = buf->dma_sgt; + + dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir); + return 0; +} + +static int vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf, + struct iosys_map *map) +{ + struct vb2_dma_sg_buf *buf; + void *vaddr; + + buf = dbuf->priv; + vaddr = vb2_dma_sg_vaddr(buf->vb, buf); + if (!vaddr) + return -EINVAL; + + iosys_map_set_vaddr(map, vaddr); + + return 0; +} + +static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf, + struct vm_area_struct *vma) +{ + return vb2_dma_sg_mmap(dbuf->priv, vma); +} + +static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = { + .attach = vb2_dma_sg_dmabuf_ops_attach, + .detach = vb2_dma_sg_dmabuf_ops_detach, + .map_dma_buf = vb2_dma_sg_dmabuf_ops_map, + .unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap, + .begin_cpu_access = vb2_dma_sg_dmabuf_ops_begin_cpu_access, + .end_cpu_access = vb2_dma_sg_dmabuf_ops_end_cpu_access, + .vmap = vb2_dma_sg_dmabuf_ops_vmap, + .mmap = vb2_dma_sg_dmabuf_ops_mmap, + .release = vb2_dma_sg_dmabuf_ops_release, +}; + +static struct dma_buf *vb2_dma_sg_get_dmabuf(struct vb2_buffer *vb, + void *buf_priv, + unsigned long flags) +{ + struct vb2_dma_sg_buf *buf = buf_priv; + struct dma_buf *dbuf; 
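+ /* exp_info collects the dma_buf_export() parameters filled in below: ops, size, flags and the vb2 buffer as priv */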
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + + exp_info.ops = &vb2_dma_sg_dmabuf_ops; + exp_info.size = buf->size; + exp_info.flags = flags; + exp_info.priv = buf; + + if (WARN_ON(!buf->dma_sgt)) + return NULL; + + dbuf = dma_buf_export(&exp_info); + if (IS_ERR(dbuf)) + return NULL; + + /* dmabuf keeps reference to vb2 buffer */ + refcount_inc(&buf->refcount); + + return dbuf; +} + +/*********************************************/ +/* callbacks for DMABUF buffers */ +/*********************************************/ + +static int vb2_dma_sg_map_dmabuf(void *mem_priv) +{ + struct vb2_dma_sg_buf *buf = mem_priv; + struct sg_table *sgt; + + if (WARN_ON(!buf->db_attach)) { + pr_err("trying to pin a non attached buffer\n"); + return -EINVAL; + } + + if (WARN_ON(buf->dma_sgt)) { + pr_err("dmabuf buffer is already pinned\n"); + return 0; + } + + /* get the associated scatterlist for this buffer */ + sgt = dma_buf_map_attachment_unlocked(buf->db_attach, buf->dma_dir); + if (IS_ERR(sgt)) { + pr_err("Error getting dmabuf scatterlist\n"); + return -EINVAL; + } + + buf->dma_sgt = sgt; + buf->vaddr = NULL; + + return 0; +} + +static void vb2_dma_sg_unmap_dmabuf(void *mem_priv) +{ + struct vb2_dma_sg_buf *buf = mem_priv; + struct sg_table *sgt = buf->dma_sgt; + struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr); + + if (WARN_ON(!buf->db_attach)) { + pr_err("trying to unpin a not attached buffer\n"); + return; + } + + if (WARN_ON(!sgt)) { + pr_err("dmabuf buffer is already unpinned\n"); + return; + } + + if (buf->vaddr) { + dma_buf_vunmap_unlocked(buf->db_attach->dmabuf, &map); + buf->vaddr = NULL; + } + dma_buf_unmap_attachment_unlocked(buf->db_attach, sgt, buf->dma_dir); + + buf->dma_sgt = NULL; +} + +static void vb2_dma_sg_detach_dmabuf(void *mem_priv) +{ + struct vb2_dma_sg_buf *buf = mem_priv; + + /* if vb2 works correctly you should never detach mapped buffer */ + if (WARN_ON(buf->dma_sgt)) + vb2_dma_sg_unmap_dmabuf(buf); + + /* detach this attachment */ + dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach); + kfree(buf); +} + +static void *vb2_dma_sg_attach_dmabuf(struct vb2_buffer *vb, struct device *dev, + struct dma_buf *dbuf, unsigned long size) +{ + struct vb2_dma_sg_buf *buf; + struct dma_buf_attachment *dba; + + if (WARN_ON(!dev)) + return ERR_PTR(-EINVAL); + + if (dbuf->size < size) + return ERR_PTR(-EFAULT); + + buf = kzalloc(sizeof(*buf), GFP_KERNEL); + if (!buf) + return ERR_PTR(-ENOMEM); + + buf->dev = dev; + /* create attachment for the dmabuf with the user device */ + dba = dma_buf_attach(dbuf, buf->dev); + if (IS_ERR(dba)) { + pr_err("failed to attach dmabuf\n"); + kfree(buf); + return dba; + } + + buf->dma_dir = vb->vb2_queue->dma_dir; + buf->size = size; + buf->db_attach = dba; + buf->vb = vb; + + return buf; +} + +static void *vb2_dma_sg_cookie(struct vb2_buffer *vb, void *buf_priv) +{ + struct vb2_dma_sg_buf *buf = buf_priv; + + return buf->dma_sgt; +} + +const struct vb2_mem_ops vb2_dma_sg_memops = { + .alloc = vb2_dma_sg_alloc, + .put = vb2_dma_sg_put, + .get_userptr = vb2_dma_sg_get_userptr, + .put_userptr = vb2_dma_sg_put_userptr, + .prepare = vb2_dma_sg_prepare, + .finish = vb2_dma_sg_finish, + .vaddr = vb2_dma_sg_vaddr, + .mmap = vb2_dma_sg_mmap, + .num_users = vb2_dma_sg_num_users, + .get_dmabuf = vb2_dma_sg_get_dmabuf, + .map_dmabuf = vb2_dma_sg_map_dmabuf, + .unmap_dmabuf = vb2_dma_sg_unmap_dmabuf, + .attach_dmabuf = vb2_dma_sg_attach_dmabuf, + .detach_dmabuf = vb2_dma_sg_detach_dmabuf, + .cookie = vb2_dma_sg_cookie, +}; +EXPORT_SYMBOL_GPL(vb2_dma_sg_memops); + 
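The queue wiring is the same as in the dma-contig sketch earlier, except q->mem_ops points at vb2_dma_sg_memops and the .cookie memop — vb2_dma_sg_plane_desc() — returns the mapped sg_table rather than a single DMA address. A hedged sketch of the driver side, with my_hw_add_descriptor() as a hypothetical stand-in for real descriptor programming:

#include <linux/scatterlist.h>
#include <media/videobuf2-dma-sg.h>

/* hypothetical stand-in for appending one hardware DMA descriptor */
static void my_hw_add_descriptor(dma_addr_t addr, unsigned int len) { }

/* buf_queue callback: with dma-sg the buffer is a scatterlist, so the
 * driver walks the already dma-mapped table and feeds each segment to
 * its descriptor chain */
static void my_sg_buf_queue(struct vb2_buffer *vb)
{
	struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
	struct scatterlist *sg;
	unsigned int i;

	for_each_sgtable_dma_sg(sgt, sg, i)
		my_hw_add_descriptor(sg_dma_address(sg), sg_dma_len(sg));
}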
+MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2"); +MODULE_AUTHOR("Andrzej Pietrasiewicz"); +MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(DMA_BUF); diff --git a/drivers/media/common/videobuf2/videobuf2-dvb.c b/drivers/media/common/videobuf2/videobuf2-dvb.c new file mode 100644 index 0000000000..8c15bcd07e --- /dev/null +++ b/drivers/media/common/videobuf2/videobuf2-dvb.c @@ -0,0 +1,341 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * + * some helper function for simple DVB cards which simply DMA the + * complete transport stream and let the computer sort everything else + * (i.e. we are using the software demux, ...). Also uses vb2 + * to manage DMA buffers. + * + * (c) 2004 Gerd Knorr [SUSE Labs] + */ + +#include +#include +#include +#include + +#include + +/* ------------------------------------------------------------------ */ + +MODULE_AUTHOR("Gerd Knorr [SuSE Labs]"); +MODULE_LICENSE("GPL"); + +/* ------------------------------------------------------------------ */ + +static int dvb_fnc(struct vb2_buffer *vb, void *priv) +{ + struct vb2_dvb *dvb = priv; + + dvb_dmx_swfilter(&dvb->demux, vb2_plane_vaddr(vb, 0), + vb2_get_plane_payload(vb, 0)); + return 0; +} + +static int vb2_dvb_start_feed(struct dvb_demux_feed *feed) +{ + struct dvb_demux *demux = feed->demux; + struct vb2_dvb *dvb = demux->priv; + int rc = 0; + + if (!demux->dmx.frontend) + return -EINVAL; + + mutex_lock(&dvb->lock); + dvb->nfeeds++; + + if (!dvb->dvbq.threadio) { + rc = vb2_thread_start(&dvb->dvbq, dvb_fnc, dvb, dvb->name); + if (rc) + dvb->nfeeds--; + } + if (!rc) + rc = dvb->nfeeds; + mutex_unlock(&dvb->lock); + return rc; +} + +static int vb2_dvb_stop_feed(struct dvb_demux_feed *feed) +{ + struct dvb_demux *demux = feed->demux; + struct vb2_dvb *dvb = demux->priv; + int err = 0; + + mutex_lock(&dvb->lock); + dvb->nfeeds--; + if (0 == dvb->nfeeds) + err = vb2_thread_stop(&dvb->dvbq); + mutex_unlock(&dvb->lock); + return err; +} + +static int vb2_dvb_register_adapter(struct vb2_dvb_frontends *fe, + struct module *module, + void *adapter_priv, + struct device *device, + struct media_device *mdev, + char *adapter_name, + short *adapter_nr, + int mfe_shared) +{ + int result; + + mutex_init(&fe->lock); + + /* register adapter */ + result = dvb_register_adapter(&fe->adapter, adapter_name, module, + device, adapter_nr); + if (result < 0) { + pr_warn("%s: dvb_register_adapter failed (errno = %d)\n", + adapter_name, result); + } + fe->adapter.priv = adapter_priv; + fe->adapter.mfe_shared = mfe_shared; +#ifdef CONFIG_MEDIA_CONTROLLER_DVB + if (mdev) + fe->adapter.mdev = mdev; +#endif + return result; +} + +static int vb2_dvb_register_frontend(struct dvb_adapter *adapter, + struct vb2_dvb *dvb) +{ + int result; + + /* register frontend */ + result = dvb_register_frontend(adapter, dvb->frontend); + if (result < 0) { + pr_warn("%s: dvb_register_frontend failed (errno = %d)\n", + dvb->name, result); + goto fail_frontend; + } + + /* register demux stuff */ + dvb->demux.dmx.capabilities = + DMX_TS_FILTERING | DMX_SECTION_FILTERING | + DMX_MEMORY_BASED_FILTERING; + dvb->demux.priv = dvb; + dvb->demux.filternum = 256; + dvb->demux.feednum = 256; + dvb->demux.start_feed = vb2_dvb_start_feed; + dvb->demux.stop_feed = vb2_dvb_stop_feed; + result = dvb_dmx_init(&dvb->demux); + if (result < 0) { + pr_warn("%s: dvb_dmx_init failed (errno = %d)\n", + dvb->name, result); + goto fail_dmx; + } + + dvb->dmxdev.filternum = 256; + dvb->dmxdev.demux = &dvb->demux.dmx; + dvb->dmxdev.capabilities = 0; + result 
= dvb_dmxdev_init(&dvb->dmxdev, adapter); + + if (result < 0) { + pr_warn("%s: dvb_dmxdev_init failed (errno = %d)\n", + dvb->name, result); + goto fail_dmxdev; + } + + dvb->fe_hw.source = DMX_FRONTEND_0; + result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_hw); + if (result < 0) { + pr_warn("%s: add_frontend failed (DMX_FRONTEND_0, errno = %d)\n", + dvb->name, result); + goto fail_fe_hw; + } + + dvb->fe_mem.source = DMX_MEMORY_FE; + result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_mem); + if (result < 0) { + pr_warn("%s: add_frontend failed (DMX_MEMORY_FE, errno = %d)\n", + dvb->name, result); + goto fail_fe_mem; + } + + result = dvb->demux.dmx.connect_frontend(&dvb->demux.dmx, &dvb->fe_hw); + if (result < 0) { + pr_warn("%s: connect_frontend failed (errno = %d)\n", + dvb->name, result); + goto fail_fe_conn; + } + + /* register network adapter */ + result = dvb_net_init(adapter, &dvb->net, &dvb->demux.dmx); + if (result < 0) { + pr_warn("%s: dvb_net_init failed (errno = %d)\n", + dvb->name, result); + goto fail_fe_conn; + } + return 0; + +fail_fe_conn: + dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_mem); +fail_fe_mem: + dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_hw); +fail_fe_hw: + dvb_dmxdev_release(&dvb->dmxdev); +fail_dmxdev: + dvb_dmx_release(&dvb->demux); +fail_dmx: + dvb_unregister_frontend(dvb->frontend); +fail_frontend: + dvb_frontend_detach(dvb->frontend); + dvb->frontend = NULL; + + return result; +} + +/* ------------------------------------------------------------------ */ +/* Register a single adapter and one or more frontends */ +int vb2_dvb_register_bus(struct vb2_dvb_frontends *f, + struct module *module, + void *adapter_priv, + struct device *device, + struct media_device *mdev, + short *adapter_nr, + int mfe_shared) +{ + struct list_head *list, *q; + struct vb2_dvb_frontend *fe; + int res; + + fe = vb2_dvb_get_frontend(f, 1); + if (!fe) { + pr_warn("Unable to register the adapter which has no frontends\n"); + return -EINVAL; + } + + /* Bring up the adapter */ + res = vb2_dvb_register_adapter(f, module, adapter_priv, device, mdev, + fe->dvb.name, adapter_nr, mfe_shared); + if (res < 0) { + pr_warn("vb2_dvb_register_adapter failed (errno = %d)\n", res); + return res; + } + + /* Attach all of the frontends to the adapter */ + mutex_lock(&f->lock); + list_for_each_safe(list, q, &f->felist) { + fe = list_entry(list, struct vb2_dvb_frontend, felist); + res = vb2_dvb_register_frontend(&f->adapter, &fe->dvb); + if (res < 0) { + pr_warn("%s: vb2_dvb_register_frontend failed (errno = %d)\n", + fe->dvb.name, res); + goto err; + } + res = dvb_create_media_graph(&f->adapter, false); + if (res < 0) + goto err; + } + + mutex_unlock(&f->lock); + return 0; + +err: + mutex_unlock(&f->lock); + vb2_dvb_unregister_bus(f); + return res; +} +EXPORT_SYMBOL(vb2_dvb_register_bus); + +void vb2_dvb_unregister_bus(struct vb2_dvb_frontends *f) +{ + vb2_dvb_dealloc_frontends(f); + + dvb_unregister_adapter(&f->adapter); +} +EXPORT_SYMBOL(vb2_dvb_unregister_bus); + +struct vb2_dvb_frontend *vb2_dvb_get_frontend( + struct vb2_dvb_frontends *f, int id) +{ + struct list_head *list, *q; + struct vb2_dvb_frontend *fe, *ret = NULL; + + mutex_lock(&f->lock); + + list_for_each_safe(list, q, &f->felist) { + fe = list_entry(list, struct vb2_dvb_frontend, felist); + if (fe->id == id) { + ret = fe; + break; + } + } + + mutex_unlock(&f->lock); + + return ret; +} +EXPORT_SYMBOL(vb2_dvb_get_frontend); + +int vb2_dvb_find_frontend(struct vb2_dvb_frontends *f, + struct 
dvb_frontend *p) +{ + struct list_head *list, *q; + struct vb2_dvb_frontend *fe = NULL; + int ret = 0; + + mutex_lock(&f->lock); + + list_for_each_safe(list, q, &f->felist) { + fe = list_entry(list, struct vb2_dvb_frontend, felist); + if (fe->dvb.frontend == p) { + ret = fe->id; + break; + } + } + + mutex_unlock(&f->lock); + + return ret; +} +EXPORT_SYMBOL(vb2_dvb_find_frontend); + +struct vb2_dvb_frontend *vb2_dvb_alloc_frontend( + struct vb2_dvb_frontends *f, int id) +{ + struct vb2_dvb_frontend *fe; + + fe = kzalloc(sizeof(struct vb2_dvb_frontend), GFP_KERNEL); + if (fe == NULL) + return NULL; + + fe->id = id; + mutex_init(&fe->dvb.lock); + + mutex_lock(&f->lock); + list_add_tail(&fe->felist, &f->felist); + mutex_unlock(&f->lock); + return fe; +} +EXPORT_SYMBOL(vb2_dvb_alloc_frontend); + +void vb2_dvb_dealloc_frontends(struct vb2_dvb_frontends *f) +{ + struct list_head *list, *q; + struct vb2_dvb_frontend *fe; + + mutex_lock(&f->lock); + list_for_each_safe(list, q, &f->felist) { + fe = list_entry(list, struct vb2_dvb_frontend, felist); + if (fe->dvb.net.dvbdev) { + dvb_net_release(&fe->dvb.net); + fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx, + &fe->dvb.fe_mem); + fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx, + &fe->dvb.fe_hw); + dvb_dmxdev_release(&fe->dvb.dmxdev); + dvb_dmx_release(&fe->dvb.demux); + dvb_unregister_frontend(fe->dvb.frontend); + } + if (fe->dvb.frontend) + /* always allocated, may have been reset */ + dvb_frontend_detach(fe->dvb.frontend); + list_del(list); /* remove list entry */ + kfree(fe); /* free frontend allocation */ + } + mutex_unlock(&f->lock); +} +EXPORT_SYMBOL(vb2_dvb_dealloc_frontends); diff --git a/drivers/media/common/videobuf2/videobuf2-memops.c b/drivers/media/common/videobuf2/videobuf2-memops.c new file mode 100644 index 0000000000..f9a4ec4442 --- /dev/null +++ b/drivers/media/common/videobuf2/videobuf2-memops.c @@ -0,0 +1,131 @@ +/* + * videobuf2-memops.c - generic memory handling routines for videobuf2 + * + * Copyright (C) 2010 Samsung Electronics + * + * Author: Pawel Osciak + * Marek Szyprowski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +/** + * vb2_create_framevec() - map virtual addresses to pfns + * @start: Virtual user address where we start mapping + * @length: Length of a range to map + * @write: Should we map for writing into the area + * + * This function allocates and fills in a vector with pfns corresponding to + * virtual address range passed in arguments. If pfns have corresponding pages, + * page references are also grabbed to pin pages in memory. The function + * returns pointer to the vector on success and error pointer in case of + * failure. Returned vector needs to be freed via vb2_destroy_pfnvec(). 
+ */ +struct frame_vector *vb2_create_framevec(unsigned long start, + unsigned long length, + bool write) +{ + int ret; + unsigned long first, last; + unsigned long nr; + struct frame_vector *vec; + + first = start >> PAGE_SHIFT; + last = (start + length - 1) >> PAGE_SHIFT; + nr = last - first + 1; + vec = frame_vector_create(nr); + if (!vec) + return ERR_PTR(-ENOMEM); + ret = get_vaddr_frames(start & PAGE_MASK, nr, write, vec); + if (ret < 0) + goto out_destroy; + /* We accept only complete set of PFNs */ + if (ret != nr) { + ret = -EFAULT; + goto out_release; + } + return vec; +out_release: + put_vaddr_frames(vec); +out_destroy: + frame_vector_destroy(vec); + return ERR_PTR(ret); +} +EXPORT_SYMBOL(vb2_create_framevec); + +/** + * vb2_destroy_framevec() - release vector of mapped pfns + * @vec: vector of pfns / pages to release + * + * This releases references to all pages in the vector @vec (if corresponding + * pfns are backed by pages) and frees the passed vector. + */ +void vb2_destroy_framevec(struct frame_vector *vec) +{ + put_vaddr_frames(vec); + frame_vector_destroy(vec); +} +EXPORT_SYMBOL(vb2_destroy_framevec); + +/** + * vb2_common_vm_open() - increase refcount of the vma + * @vma: virtual memory region for the mapping + * + * This function adds another user to the provided vma. It expects + * struct vb2_vmarea_handler pointer in vma->vm_private_data. + */ +static void vb2_common_vm_open(struct vm_area_struct *vma) +{ + struct vb2_vmarea_handler *h = vma->vm_private_data; + + pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n", + __func__, h, refcount_read(h->refcount), vma->vm_start, + vma->vm_end); + + refcount_inc(h->refcount); +} + +/** + * vb2_common_vm_close() - decrease refcount of the vma + * @vma: virtual memory region for the mapping + * + * This function releases the user from the provided vma. It expects + * struct vb2_vmarea_handler pointer in vma->vm_private_data. + */ +static void vb2_common_vm_close(struct vm_area_struct *vma) +{ + struct vb2_vmarea_handler *h = vma->vm_private_data; + + pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n", + __func__, h, refcount_read(h->refcount), vma->vm_start, + vma->vm_end); + + h->put(h->arg); +} + +/* + * vb2_common_vm_ops - common vm_ops used for tracking refcount of mmapped + * video buffers + */ +const struct vm_operations_struct vb2_common_vm_ops = { + .open = vb2_common_vm_open, + .close = vb2_common_vm_close, +}; +EXPORT_SYMBOL_GPL(vb2_common_vm_ops); + +MODULE_DESCRIPTION("common memory handling routines for videobuf2"); +MODULE_AUTHOR("Pawel Osciak "); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c new file mode 100644 index 0000000000..c7a54d82a5 --- /dev/null +++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c @@ -0,0 +1,1332 @@ +/* + * videobuf2-v4l2.c - V4L2 driver helper framework + * + * Copyright (C) 2010 Samsung Electronics + * + * Author: Pawel Osciak + * Marek Szyprowski + * + * The vb2_thread implementation was based on code from videobuf-dvb.c: + * (c) 2004 Gerd Knorr [SUSE Labs] + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +static int debug; +module_param(debug, int, 0644); + +#define dprintk(q, level, fmt, arg...) \ + do { \ + if (debug >= level) \ + pr_info("vb2-v4l2: [%p] %s: " fmt, \ + (q)->name, __func__, ## arg); \ + } while (0) + +/* Flags that are set by us */ +#define V4L2_BUFFER_MASK_FLAGS (V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \ + V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \ + V4L2_BUF_FLAG_PREPARED | \ + V4L2_BUF_FLAG_IN_REQUEST | \ + V4L2_BUF_FLAG_REQUEST_FD | \ + V4L2_BUF_FLAG_TIMESTAMP_MASK) +/* Output buffer flags that should be passed on to the driver */ +#define V4L2_BUFFER_OUT_FLAGS (V4L2_BUF_FLAG_PFRAME | \ + V4L2_BUF_FLAG_BFRAME | \ + V4L2_BUF_FLAG_KEYFRAME | \ + V4L2_BUF_FLAG_TIMECODE | \ + V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF) + +/* + * __verify_planes_array() - verify that the planes array passed in struct + * v4l2_buffer from userspace can be safely used + */ +static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b) +{ + if (!V4L2_TYPE_IS_MULTIPLANAR(b->type)) + return 0; + + /* Is memory for copying plane information present? */ + if (b->m.planes == NULL) { + dprintk(vb->vb2_queue, 1, + "multi-planar buffer passed but planes array not provided\n"); + return -EINVAL; + } + + if (b->length < vb->num_planes || b->length > VB2_MAX_PLANES) { + dprintk(vb->vb2_queue, 1, + "incorrect planes array length, expected %d, got %d\n", + vb->num_planes, b->length); + return -EINVAL; + } + + return 0; +} + +static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb) +{ + return __verify_planes_array(vb, pb); +} + +/* + * __verify_length() - Verify that the bytesused value for each plane fits in + * the plane length and that the data offset doesn't exceed the bytesused value. + */ +static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b) +{ + unsigned int length; + unsigned int bytesused; + unsigned int plane; + + if (V4L2_TYPE_IS_CAPTURE(b->type)) + return 0; + + if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) { + for (plane = 0; plane < vb->num_planes; ++plane) { + length = (b->memory == VB2_MEMORY_USERPTR || + b->memory == VB2_MEMORY_DMABUF) + ? b->m.planes[plane].length + : vb->planes[plane].length; + bytesused = b->m.planes[plane].bytesused + ? b->m.planes[plane].bytesused : length; + + if (b->m.planes[plane].bytesused > length) + return -EINVAL; + + if (b->m.planes[plane].data_offset > 0 && + b->m.planes[plane].data_offset >= bytesused) + return -EINVAL; + } + } else { + length = (b->memory == VB2_MEMORY_USERPTR) + ? b->length : vb->planes[0].length; + + if (b->bytesused > length) + return -EINVAL; + } + + return 0; +} + +/* + * __init_vb2_v4l2_buffer() - initialize the vb2_v4l2_buffer struct + */ +static void __init_vb2_v4l2_buffer(struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + + vbuf->request_fd = -1; +} + +static void __copy_timestamp(struct vb2_buffer *vb, const void *pb) +{ + const struct v4l2_buffer *b = pb; + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct vb2_queue *q = vb->vb2_queue; + + if (q->is_output) { + /* + * For output buffers copy the timestamp if needed, + * and the timecode field and flag if needed. 
+ */ + if (q->copy_timestamp) + vb->timestamp = v4l2_buffer_get_timestamp(b); + vbuf->flags |= b->flags & V4L2_BUF_FLAG_TIMECODE; + if (b->flags & V4L2_BUF_FLAG_TIMECODE) + vbuf->timecode = b->timecode; + } +}; + +static void vb2_warn_zero_bytesused(struct vb2_buffer *vb) +{ + static bool check_once; + + if (check_once) + return; + + check_once = true; + + pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n"); + if (vb->vb2_queue->allow_zero_bytesused) + pr_warn("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n"); + else + pr_warn("use the actual size instead.\n"); +} + +static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b) +{ + struct vb2_queue *q = vb->vb2_queue; + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct vb2_plane *planes = vbuf->planes; + unsigned int plane; + int ret; + + ret = __verify_length(vb, b); + if (ret < 0) { + dprintk(q, 1, "plane parameters verification failed: %d\n", ret); + return ret; + } + if (b->field == V4L2_FIELD_ALTERNATE && q->is_output) { + /* + * If the format's field is ALTERNATE, then the buffer's field + * should be either TOP or BOTTOM, not ALTERNATE since that + * makes no sense. The driver has to know whether the + * buffer represents a top or a bottom field in order to + * program any DMA correctly. Using ALTERNATE is wrong, since + * that just says that it is either a top or a bottom field, + * but not which of the two it is. + */ + dprintk(q, 1, "the field is incorrectly set to ALTERNATE for an output buffer\n"); + return -EINVAL; + } + vbuf->sequence = 0; + vbuf->request_fd = -1; + vbuf->is_held = false; + + if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) { + switch (b->memory) { + case VB2_MEMORY_USERPTR: + for (plane = 0; plane < vb->num_planes; ++plane) { + planes[plane].m.userptr = + b->m.planes[plane].m.userptr; + planes[plane].length = + b->m.planes[plane].length; + } + break; + case VB2_MEMORY_DMABUF: + for (plane = 0; plane < vb->num_planes; ++plane) { + planes[plane].m.fd = + b->m.planes[plane].m.fd; + planes[plane].length = + b->m.planes[plane].length; + } + break; + default: + for (plane = 0; plane < vb->num_planes; ++plane) { + planes[plane].m.offset = + vb->planes[plane].m.offset; + planes[plane].length = + vb->planes[plane].length; + } + break; + } + + /* Fill in driver-provided information for OUTPUT types */ + if (V4L2_TYPE_IS_OUTPUT(b->type)) { + /* + * Will have to go up to b->length when API starts + * accepting variable number of planes. + * + * If bytesused == 0 for the output buffer, then fall + * back to the full buffer size. In that case + * userspace clearly never bothered to set it and + * it's a safe assumption that they really meant to + * use the full plane sizes. + * + * Some drivers, e.g. old codec drivers, use bytesused == 0 + * as a way to indicate that streaming is finished. + * In that case, the driver should use the + * allow_zero_bytesused flag to keep old userspace + * applications working. + */ + for (plane = 0; plane < vb->num_planes; ++plane) { + struct vb2_plane *pdst = &planes[plane]; + struct v4l2_plane *psrc = &b->m.planes[plane]; + + if (psrc->bytesused == 0) + vb2_warn_zero_bytesused(vb); + + if (vb->vb2_queue->allow_zero_bytesused) + pdst->bytesused = psrc->bytesused; + else + pdst->bytesused = psrc->bytesused ? + psrc->bytesused : pdst->length; + pdst->data_offset = psrc->data_offset; + } + } + } else { + /* + * Single-planar buffers do not use planes array, + * so fill in relevant v4l2_buffer struct fields instead. 
+ * In vb2 we use our internal V4l2_planes struct for + * single-planar buffers as well, for simplicity. + * + * If bytesused == 0 for the output buffer, then fall back + * to the full buffer size as that's a sensible default. + * + * Some drivers, e.g. old codec drivers, use bytesused == 0 as + * a way to indicate that streaming is finished. In that case, + * the driver should use the allow_zero_bytesused flag to keep + * old userspace applications working. + */ + switch (b->memory) { + case VB2_MEMORY_USERPTR: + planes[0].m.userptr = b->m.userptr; + planes[0].length = b->length; + break; + case VB2_MEMORY_DMABUF: + planes[0].m.fd = b->m.fd; + planes[0].length = b->length; + break; + default: + planes[0].m.offset = vb->planes[0].m.offset; + planes[0].length = vb->planes[0].length; + break; + } + + planes[0].data_offset = 0; + if (V4L2_TYPE_IS_OUTPUT(b->type)) { + if (b->bytesused == 0) + vb2_warn_zero_bytesused(vb); + + if (vb->vb2_queue->allow_zero_bytesused) + planes[0].bytesused = b->bytesused; + else + planes[0].bytesused = b->bytesused ? + b->bytesused : planes[0].length; + } else + planes[0].bytesused = 0; + + } + + /* Zero flags that we handle */ + vbuf->flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS; + if (!vb->vb2_queue->copy_timestamp || V4L2_TYPE_IS_CAPTURE(b->type)) { + /* + * Non-COPY timestamps and non-OUTPUT queues will get + * their timestamp and timestamp source flags from the + * queue. + */ + vbuf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; + } + + if (V4L2_TYPE_IS_OUTPUT(b->type)) { + /* + * For output buffers mask out the timecode flag: + * this will be handled later in vb2_qbuf(). + * The 'field' is valid metadata for this output buffer + * and so that needs to be copied here. + */ + vbuf->flags &= ~V4L2_BUF_FLAG_TIMECODE; + vbuf->field = b->field; + if (!(q->subsystem_flags & VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF)) + vbuf->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF; + } else { + /* Zero any output buffer flags as this is a capture buffer */ + vbuf->flags &= ~V4L2_BUFFER_OUT_FLAGS; + /* Zero last flag, this is a signal from driver to userspace */ + vbuf->flags &= ~V4L2_BUF_FLAG_LAST; + } + + return 0; +} + +static void set_buffer_cache_hints(struct vb2_queue *q, + struct vb2_buffer *vb, + struct v4l2_buffer *b) +{ + if (!vb2_queue_allows_cache_hints(q)) { + /* + * Clear buffer cache flags if queue does not support user + * space hints. That's to indicate to userspace that these + * flags won't work. + */ + b->flags &= ~V4L2_BUF_FLAG_NO_CACHE_INVALIDATE; + b->flags &= ~V4L2_BUF_FLAG_NO_CACHE_CLEAN; + return; + } + + if (b->flags & V4L2_BUF_FLAG_NO_CACHE_INVALIDATE) + vb->skip_cache_sync_on_finish = 1; + + if (b->flags & V4L2_BUF_FLAG_NO_CACHE_CLEAN) + vb->skip_cache_sync_on_prepare = 1; +} + +static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev, + struct v4l2_buffer *b, bool is_prepare, + struct media_request **p_req) +{ + const char *opname = is_prepare ? 
"prepare_buf" : "qbuf"; + struct media_request *req; + struct vb2_v4l2_buffer *vbuf; + struct vb2_buffer *vb; + int ret; + + if (b->type != q->type) { + dprintk(q, 1, "%s: invalid buffer type\n", opname); + return -EINVAL; + } + + if (b->index >= q->num_buffers) { + dprintk(q, 1, "%s: buffer index out of range\n", opname); + return -EINVAL; + } + + if (q->bufs[b->index] == NULL) { + /* Should never happen */ + dprintk(q, 1, "%s: buffer is NULL\n", opname); + return -EINVAL; + } + + if (b->memory != q->memory) { + dprintk(q, 1, "%s: invalid memory type\n", opname); + return -EINVAL; + } + + vb = q->bufs[b->index]; + vbuf = to_vb2_v4l2_buffer(vb); + ret = __verify_planes_array(vb, b); + if (ret) + return ret; + + if (!is_prepare && (b->flags & V4L2_BUF_FLAG_REQUEST_FD) && + vb->state != VB2_BUF_STATE_DEQUEUED) { + dprintk(q, 1, "%s: buffer is not in dequeued state\n", opname); + return -EINVAL; + } + + if (!vb->prepared) { + set_buffer_cache_hints(q, vb, b); + /* Copy relevant information provided by the userspace */ + memset(vbuf->planes, 0, + sizeof(vbuf->planes[0]) * vb->num_planes); + ret = vb2_fill_vb2_v4l2_buffer(vb, b); + if (ret) + return ret; + } + + if (is_prepare) + return 0; + + if (!(b->flags & V4L2_BUF_FLAG_REQUEST_FD)) { + if (q->requires_requests) { + dprintk(q, 1, "%s: queue requires requests\n", opname); + return -EBADR; + } + if (q->uses_requests) { + dprintk(q, 1, "%s: queue uses requests\n", opname); + return -EBUSY; + } + return 0; + } else if (!q->supports_requests) { + dprintk(q, 1, "%s: queue does not support requests\n", opname); + return -EBADR; + } else if (q->uses_qbuf) { + dprintk(q, 1, "%s: queue does not use requests\n", opname); + return -EBUSY; + } + + /* + * For proper locking when queueing a request you need to be able + * to lock access to the vb2 queue, so check that there is a lock + * that we can use. In addition p_req must be non-NULL. + */ + if (WARN_ON(!q->lock || !p_req)) + return -EINVAL; + + /* + * Make sure this op is implemented by the driver. It's easy to forget + * this callback, but is it important when canceling a buffer in a + * queued request. + */ + if (WARN_ON(!q->ops->buf_request_complete)) + return -EINVAL; + /* + * Make sure this op is implemented by the driver for the output queue. + * It's easy to forget this callback, but is it important to correctly + * validate the 'field' value at QBUF time. + */ + if (WARN_ON((q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT || + q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) && + !q->ops->buf_out_validate)) + return -EINVAL; + + req = media_request_get_by_fd(mdev, b->request_fd); + if (IS_ERR(req)) { + dprintk(q, 1, "%s: invalid request_fd\n", opname); + return PTR_ERR(req); + } + + /* + * Early sanity check. This is checked again when the buffer + * is bound to the request in vb2_core_qbuf(). + */ + if (req->state != MEDIA_REQUEST_STATE_IDLE && + req->state != MEDIA_REQUEST_STATE_UPDATING) { + dprintk(q, 1, "%s: request is not idle\n", opname); + media_request_put(req); + return -EBUSY; + } + + *p_req = req; + vbuf->request_fd = b->request_fd; + + return 0; +} + +/* + * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be + * returned to userspace + */ +static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb) +{ + struct v4l2_buffer *b = pb; + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct vb2_queue *q = vb->vb2_queue; + unsigned int plane; + + /* Copy back data such as timestamp, flags, etc. 
*/ + b->index = vb->index; + b->type = vb->type; + b->memory = vb->memory; + b->bytesused = 0; + + b->flags = vbuf->flags; + b->field = vbuf->field; + v4l2_buffer_set_timestamp(b, vb->timestamp); + b->timecode = vbuf->timecode; + b->sequence = vbuf->sequence; + b->reserved2 = 0; + b->request_fd = 0; + + if (q->is_multiplanar) { + /* + * Fill in plane-related data if userspace provided an array + * for it. The caller has already verified memory and size. + */ + b->length = vb->num_planes; + for (plane = 0; plane < vb->num_planes; ++plane) { + struct v4l2_plane *pdst = &b->m.planes[plane]; + struct vb2_plane *psrc = &vb->planes[plane]; + + pdst->bytesused = psrc->bytesused; + pdst->length = psrc->length; + if (q->memory == VB2_MEMORY_MMAP) + pdst->m.mem_offset = psrc->m.offset; + else if (q->memory == VB2_MEMORY_USERPTR) + pdst->m.userptr = psrc->m.userptr; + else if (q->memory == VB2_MEMORY_DMABUF) + pdst->m.fd = psrc->m.fd; + pdst->data_offset = psrc->data_offset; + memset(pdst->reserved, 0, sizeof(pdst->reserved)); + } + } else { + /* + * We use length and offset in v4l2_planes array even for + * single-planar buffers, but userspace does not. + */ + b->length = vb->planes[0].length; + b->bytesused = vb->planes[0].bytesused; + if (q->memory == VB2_MEMORY_MMAP) + b->m.offset = vb->planes[0].m.offset; + else if (q->memory == VB2_MEMORY_USERPTR) + b->m.userptr = vb->planes[0].m.userptr; + else if (q->memory == VB2_MEMORY_DMABUF) + b->m.fd = vb->planes[0].m.fd; + } + + /* + * Clear any buffer state related flags. + */ + b->flags &= ~V4L2_BUFFER_MASK_FLAGS; + b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK; + if (!q->copy_timestamp) { + /* + * For non-COPY timestamps, drop timestamp source bits + * and obtain the timestamp source from the queue. + */ + b->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; + b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK; + } + + switch (vb->state) { + case VB2_BUF_STATE_QUEUED: + case VB2_BUF_STATE_ACTIVE: + b->flags |= V4L2_BUF_FLAG_QUEUED; + break; + case VB2_BUF_STATE_IN_REQUEST: + b->flags |= V4L2_BUF_FLAG_IN_REQUEST; + break; + case VB2_BUF_STATE_ERROR: + b->flags |= V4L2_BUF_FLAG_ERROR; + fallthrough; + case VB2_BUF_STATE_DONE: + b->flags |= V4L2_BUF_FLAG_DONE; + break; + case VB2_BUF_STATE_PREPARING: + case VB2_BUF_STATE_DEQUEUED: + /* nothing */ + break; + } + + if ((vb->state == VB2_BUF_STATE_DEQUEUED || + vb->state == VB2_BUF_STATE_IN_REQUEST) && + vb->synced && vb->prepared) + b->flags |= V4L2_BUF_FLAG_PREPARED; + + if (vb2_buffer_in_use(q, vb)) + b->flags |= V4L2_BUF_FLAG_MAPPED; + if (vbuf->request_fd >= 0) { + b->flags |= V4L2_BUF_FLAG_REQUEST_FD; + b->request_fd = vbuf->request_fd; + } +} + +/* + * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a + * v4l2_buffer by the userspace. It also verifies that struct + * v4l2_buffer has a valid number of planes. 
+ */ +static int __fill_vb2_buffer(struct vb2_buffer *vb, struct vb2_plane *planes) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + unsigned int plane; + + if (!vb->vb2_queue->copy_timestamp) + vb->timestamp = 0; + + for (plane = 0; plane < vb->num_planes; ++plane) { + if (vb->vb2_queue->memory != VB2_MEMORY_MMAP) { + planes[plane].m = vbuf->planes[plane].m; + planes[plane].length = vbuf->planes[plane].length; + } + planes[plane].bytesused = vbuf->planes[plane].bytesused; + planes[plane].data_offset = vbuf->planes[plane].data_offset; + } + return 0; +} + +static const struct vb2_buf_ops v4l2_buf_ops = { + .verify_planes_array = __verify_planes_array_core, + .init_buffer = __init_vb2_v4l2_buffer, + .fill_user_buffer = __fill_v4l2_buffer, + .fill_vb2_buffer = __fill_vb2_buffer, + .copy_timestamp = __copy_timestamp, +}; + +struct vb2_buffer *vb2_find_buffer(struct vb2_queue *q, u64 timestamp) +{ + unsigned int i; + + for (i = 0; i < q->num_buffers; i++) + if (q->bufs[i]->copied_timestamp && + q->bufs[i]->timestamp == timestamp) + return vb2_get_buffer(q, i); + return NULL; +} +EXPORT_SYMBOL_GPL(vb2_find_buffer); + +/* + * vb2_querybuf() - query video buffer information + * @q: vb2 queue + * @b: buffer struct passed from userspace to vidioc_querybuf handler + * in driver + * + * Should be called from vidioc_querybuf ioctl handler in driver. + * This function will verify the passed v4l2_buffer structure and fill the + * relevant information for the userspace. + * + * The return values from this function are intended to be directly returned + * from vidioc_querybuf handler in driver. + */ +int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b) +{ + struct vb2_buffer *vb; + int ret; + + if (b->type != q->type) { + dprintk(q, 1, "wrong buffer type\n"); + return -EINVAL; + } + + if (b->index >= q->num_buffers) { + dprintk(q, 1, "buffer index out of range\n"); + return -EINVAL; + } + vb = q->bufs[b->index]; + ret = __verify_planes_array(vb, b); + if (!ret) + vb2_core_querybuf(q, b->index, b); + return ret; +} +EXPORT_SYMBOL(vb2_querybuf); + +static void fill_buf_caps(struct vb2_queue *q, u32 *caps) +{ + *caps = V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS; + if (q->io_modes & VB2_MMAP) + *caps |= V4L2_BUF_CAP_SUPPORTS_MMAP; + if (q->io_modes & VB2_USERPTR) + *caps |= V4L2_BUF_CAP_SUPPORTS_USERPTR; + if (q->io_modes & VB2_DMABUF) + *caps |= V4L2_BUF_CAP_SUPPORTS_DMABUF; + if (q->subsystem_flags & VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF) + *caps |= V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF; + if (q->allow_cache_hints && q->io_modes & VB2_MMAP) + *caps |= V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS; +#ifdef CONFIG_MEDIA_CONTROLLER_REQUEST_API + if (q->supports_requests) + *caps |= V4L2_BUF_CAP_SUPPORTS_REQUESTS; +#endif +} + +static void validate_memory_flags(struct vb2_queue *q, + int memory, + u32 *flags) +{ + if (!q->allow_cache_hints || memory != V4L2_MEMORY_MMAP) { + /* + * This needs to clear V4L2_MEMORY_FLAG_NON_COHERENT only, + * but in order to avoid bugs we zero out all bits. + */ + *flags = 0; + } else { + /* Clear all unknown flags. */ + *flags &= V4L2_MEMORY_FLAG_NON_COHERENT; + } +} + +int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req) +{ + int ret = vb2_verify_memory_type(q, req->memory, req->type); + u32 flags = req->flags; + + fill_buf_caps(q, &req->capabilities); + validate_memory_flags(q, req->memory, &flags); + req->flags = flags; + return ret ? 
ret : vb2_core_reqbufs(q, req->memory, + req->flags, &req->count); +} +EXPORT_SYMBOL_GPL(vb2_reqbufs); + +int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev, + struct v4l2_buffer *b) +{ + int ret; + + if (vb2_fileio_is_active(q)) { + dprintk(q, 1, "file io in progress\n"); + return -EBUSY; + } + + if (b->flags & V4L2_BUF_FLAG_REQUEST_FD) + return -EINVAL; + + ret = vb2_queue_or_prepare_buf(q, mdev, b, true, NULL); + + return ret ? ret : vb2_core_prepare_buf(q, b->index, b); +} +EXPORT_SYMBOL_GPL(vb2_prepare_buf); + +int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create) +{ + unsigned requested_planes = 1; + unsigned requested_sizes[VIDEO_MAX_PLANES]; + struct v4l2_format *f = &create->format; + int ret = vb2_verify_memory_type(q, create->memory, f->type); + unsigned i; + + fill_buf_caps(q, &create->capabilities); + validate_memory_flags(q, create->memory, &create->flags); + create->index = q->num_buffers; + if (create->count == 0) + return ret != -EBUSY ? ret : 0; + + switch (f->type) { + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + requested_planes = f->fmt.pix_mp.num_planes; + if (requested_planes == 0 || + requested_planes > VIDEO_MAX_PLANES) + return -EINVAL; + for (i = 0; i < requested_planes; i++) + requested_sizes[i] = + f->fmt.pix_mp.plane_fmt[i].sizeimage; + break; + case V4L2_BUF_TYPE_VIDEO_CAPTURE: + case V4L2_BUF_TYPE_VIDEO_OUTPUT: + requested_sizes[0] = f->fmt.pix.sizeimage; + break; + case V4L2_BUF_TYPE_VBI_CAPTURE: + case V4L2_BUF_TYPE_VBI_OUTPUT: + requested_sizes[0] = f->fmt.vbi.samples_per_line * + (f->fmt.vbi.count[0] + f->fmt.vbi.count[1]); + break; + case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE: + case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: + requested_sizes[0] = f->fmt.sliced.io_size; + break; + case V4L2_BUF_TYPE_SDR_CAPTURE: + case V4L2_BUF_TYPE_SDR_OUTPUT: + requested_sizes[0] = f->fmt.sdr.buffersize; + break; + case V4L2_BUF_TYPE_META_CAPTURE: + case V4L2_BUF_TYPE_META_OUTPUT: + requested_sizes[0] = f->fmt.meta.buffersize; + break; + default: + return -EINVAL; + } + for (i = 0; i < requested_planes; i++) + if (requested_sizes[i] == 0) + return -EINVAL; + return ret ? ret : vb2_core_create_bufs(q, create->memory, + create->flags, + &create->count, + requested_planes, + requested_sizes); +} +EXPORT_SYMBOL_GPL(vb2_create_bufs); + +int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev, + struct v4l2_buffer *b) +{ + struct media_request *req = NULL; + int ret; + + if (vb2_fileio_is_active(q)) { + dprintk(q, 1, "file io in progress\n"); + return -EBUSY; + } + + ret = vb2_queue_or_prepare_buf(q, mdev, b, false, &req); + if (ret) + return ret; + ret = vb2_core_qbuf(q, b->index, b, req); + if (req) + media_request_put(req); + return ret; +} +EXPORT_SYMBOL_GPL(vb2_qbuf); + +int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking) +{ + int ret; + + if (vb2_fileio_is_active(q)) { + dprintk(q, 1, "file io in progress\n"); + return -EBUSY; + } + + if (b->type != q->type) { + dprintk(q, 1, "invalid buffer type\n"); + return -EINVAL; + } + + ret = vb2_core_dqbuf(q, NULL, b, nonblocking); + + if (!q->is_output && + b->flags & V4L2_BUF_FLAG_DONE && + b->flags & V4L2_BUF_FLAG_LAST) + q->last_buffer_dequeued = true; + + /* + * After calling the VIDIOC_DQBUF V4L2_BUF_FLAG_DONE must be + * cleared. 
+ */ + b->flags &= ~V4L2_BUF_FLAG_DONE; + + return ret; +} +EXPORT_SYMBOL_GPL(vb2_dqbuf); + +int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type) +{ + if (vb2_fileio_is_active(q)) { + dprintk(q, 1, "file io in progress\n"); + return -EBUSY; + } + return vb2_core_streamon(q, type); +} +EXPORT_SYMBOL_GPL(vb2_streamon); + +int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type) +{ + if (vb2_fileio_is_active(q)) { + dprintk(q, 1, "file io in progress\n"); + return -EBUSY; + } + return vb2_core_streamoff(q, type); +} +EXPORT_SYMBOL_GPL(vb2_streamoff); + +int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb) +{ + return vb2_core_expbuf(q, &eb->fd, eb->type, eb->index, + eb->plane, eb->flags); +} +EXPORT_SYMBOL_GPL(vb2_expbuf); + +int vb2_queue_init_name(struct vb2_queue *q, const char *name) +{ + /* + * Sanity check + */ + if (WARN_ON(!q) || + WARN_ON(q->timestamp_flags & + ~(V4L2_BUF_FLAG_TIMESTAMP_MASK | + V4L2_BUF_FLAG_TSTAMP_SRC_MASK))) + return -EINVAL; + + /* Warn that the driver should choose an appropriate timestamp type */ + WARN_ON((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) == + V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN); + + /* Warn that vb2_memory should match with v4l2_memory */ + if (WARN_ON(VB2_MEMORY_MMAP != (int)V4L2_MEMORY_MMAP) + || WARN_ON(VB2_MEMORY_USERPTR != (int)V4L2_MEMORY_USERPTR) + || WARN_ON(VB2_MEMORY_DMABUF != (int)V4L2_MEMORY_DMABUF)) + return -EINVAL; + + if (q->buf_struct_size == 0) + q->buf_struct_size = sizeof(struct vb2_v4l2_buffer); + + q->buf_ops = &v4l2_buf_ops; + q->is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(q->type); + q->is_output = V4L2_TYPE_IS_OUTPUT(q->type); + q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) + == V4L2_BUF_FLAG_TIMESTAMP_COPY; + /* + * For compatibility with vb1: if QBUF hasn't been called yet, then + * return EPOLLERR as well. This only affects capture queues, output + * queues will always initialize waiting_for_buffers to false. + */ + q->quirk_poll_must_check_waiting_for_buffers = true; + + if (name) + strscpy(q->name, name, sizeof(q->name)); + else + q->name[0] = '\0'; + + return vb2_core_queue_init(q); +} +EXPORT_SYMBOL_GPL(vb2_queue_init_name); + +int vb2_queue_init(struct vb2_queue *q) +{ + return vb2_queue_init_name(q, NULL); +} +EXPORT_SYMBOL_GPL(vb2_queue_init); + +void vb2_queue_release(struct vb2_queue *q) +{ + vb2_core_queue_release(q); +} +EXPORT_SYMBOL_GPL(vb2_queue_release); + +int vb2_queue_change_type(struct vb2_queue *q, unsigned int type) +{ + if (type == q->type) + return 0; + + if (vb2_is_busy(q)) + return -EBUSY; + + q->type = type; + + return 0; +} +EXPORT_SYMBOL_GPL(vb2_queue_change_type); + +__poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait) +{ + struct video_device *vfd = video_devdata(file); + __poll_t res; + + res = vb2_core_poll(q, file, wait); + + if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) { + struct v4l2_fh *fh = file->private_data; + + poll_wait(file, &fh->wait, wait); + if (v4l2_event_pending(fh)) + res |= EPOLLPRI; + } + + return res; +} +EXPORT_SYMBOL_GPL(vb2_poll); + +/* + * The following functions are not part of the vb2 core API, but are helper + * functions that plug into struct v4l2_ioctl_ops, struct v4l2_file_operations + * and struct vb2_ops. + * They contain boilerplate code that most if not all drivers have to do + * and so they simplify the driver code. 
+ */ + +/* vb2 ioctl helpers */ + +int vb2_ioctl_reqbufs(struct file *file, void *priv, + struct v4l2_requestbuffers *p) +{ + struct video_device *vdev = video_devdata(file); + int res = vb2_verify_memory_type(vdev->queue, p->memory, p->type); + u32 flags = p->flags; + + fill_buf_caps(vdev->queue, &p->capabilities); + validate_memory_flags(vdev->queue, p->memory, &flags); + p->flags = flags; + if (res) + return res; + if (vb2_queue_is_busy(vdev->queue, file)) + return -EBUSY; + res = vb2_core_reqbufs(vdev->queue, p->memory, p->flags, &p->count); + /* If count == 0, then the owner has released all buffers and he + is no longer owner of the queue. Otherwise we have a new owner. */ + if (res == 0) + vdev->queue->owner = p->count ? file->private_data : NULL; + return res; +} +EXPORT_SYMBOL_GPL(vb2_ioctl_reqbufs); + +int vb2_ioctl_create_bufs(struct file *file, void *priv, + struct v4l2_create_buffers *p) +{ + struct video_device *vdev = video_devdata(file); + int res = vb2_verify_memory_type(vdev->queue, p->memory, + p->format.type); + + p->index = vdev->queue->num_buffers; + fill_buf_caps(vdev->queue, &p->capabilities); + validate_memory_flags(vdev->queue, p->memory, &p->flags); + /* + * If count == 0, then just check if memory and type are valid. + * Any -EBUSY result from vb2_verify_memory_type can be mapped to 0. + */ + if (p->count == 0) + return res != -EBUSY ? res : 0; + if (res) + return res; + if (vb2_queue_is_busy(vdev->queue, file)) + return -EBUSY; + + res = vb2_create_bufs(vdev->queue, p); + if (res == 0) + vdev->queue->owner = file->private_data; + return res; +} +EXPORT_SYMBOL_GPL(vb2_ioctl_create_bufs); + +int vb2_ioctl_prepare_buf(struct file *file, void *priv, + struct v4l2_buffer *p) +{ + struct video_device *vdev = video_devdata(file); + + if (vb2_queue_is_busy(vdev->queue, file)) + return -EBUSY; + return vb2_prepare_buf(vdev->queue, vdev->v4l2_dev->mdev, p); +} +EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf); + +int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p) +{ + struct video_device *vdev = video_devdata(file); + + /* No need to call vb2_queue_is_busy(), anyone can query buffers. 
*/ + return vb2_querybuf(vdev->queue, p); +} +EXPORT_SYMBOL_GPL(vb2_ioctl_querybuf); + +int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p) +{ + struct video_device *vdev = video_devdata(file); + + if (vb2_queue_is_busy(vdev->queue, file)) + return -EBUSY; + return vb2_qbuf(vdev->queue, vdev->v4l2_dev->mdev, p); +} +EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf); + +int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p) +{ + struct video_device *vdev = video_devdata(file); + + if (vb2_queue_is_busy(vdev->queue, file)) + return -EBUSY; + return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK); +} +EXPORT_SYMBOL_GPL(vb2_ioctl_dqbuf); + +int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i) +{ + struct video_device *vdev = video_devdata(file); + + if (vb2_queue_is_busy(vdev->queue, file)) + return -EBUSY; + return vb2_streamon(vdev->queue, i); +} +EXPORT_SYMBOL_GPL(vb2_ioctl_streamon); + +int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i) +{ + struct video_device *vdev = video_devdata(file); + + if (vb2_queue_is_busy(vdev->queue, file)) + return -EBUSY; + return vb2_streamoff(vdev->queue, i); +} +EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff); + +int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p) +{ + struct video_device *vdev = video_devdata(file); + + if (vb2_queue_is_busy(vdev->queue, file)) + return -EBUSY; + return vb2_expbuf(vdev->queue, p); +} +EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf); + +/* v4l2_file_operations helpers */ + +int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct video_device *vdev = video_devdata(file); + + return vb2_mmap(vdev->queue, vma); +} +EXPORT_SYMBOL_GPL(vb2_fop_mmap); + +int _vb2_fop_release(struct file *file, struct mutex *lock) +{ + struct video_device *vdev = video_devdata(file); + + if (lock) + mutex_lock(lock); + if (file->private_data == vdev->queue->owner) { + vb2_queue_release(vdev->queue); + vdev->queue->owner = NULL; + } + if (lock) + mutex_unlock(lock); + return v4l2_fh_release(file); +} +EXPORT_SYMBOL_GPL(_vb2_fop_release); + +int vb2_fop_release(struct file *file) +{ + struct video_device *vdev = video_devdata(file); + struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock; + + return _vb2_fop_release(file, lock); +} +EXPORT_SYMBOL_GPL(vb2_fop_release); + +ssize_t vb2_fop_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct video_device *vdev = video_devdata(file); + struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock; + int err = -EBUSY; + + if (!(vdev->queue->io_modes & VB2_WRITE)) + return -EINVAL; + if (lock && mutex_lock_interruptible(lock)) + return -ERESTARTSYS; + if (vb2_queue_is_busy(vdev->queue, file)) + goto exit; + err = vb2_write(vdev->queue, buf, count, ppos, + file->f_flags & O_NONBLOCK); + if (vdev->queue->fileio) + vdev->queue->owner = file->private_data; +exit: + if (lock) + mutex_unlock(lock); + return err; +} +EXPORT_SYMBOL_GPL(vb2_fop_write); + +ssize_t vb2_fop_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct video_device *vdev = video_devdata(file); + struct mutex *lock = vdev->queue->lock ? 
vdev->queue->lock : vdev->lock; + int err = -EBUSY; + + if (!(vdev->queue->io_modes & VB2_READ)) + return -EINVAL; + if (lock && mutex_lock_interruptible(lock)) + return -ERESTARTSYS; + if (vb2_queue_is_busy(vdev->queue, file)) + goto exit; + vdev->queue->owner = file->private_data; + err = vb2_read(vdev->queue, buf, count, ppos, + file->f_flags & O_NONBLOCK); + if (!vdev->queue->fileio) + vdev->queue->owner = NULL; +exit: + if (lock) + mutex_unlock(lock); + return err; +} +EXPORT_SYMBOL_GPL(vb2_fop_read); + +__poll_t vb2_fop_poll(struct file *file, poll_table *wait) +{ + struct video_device *vdev = video_devdata(file); + struct vb2_queue *q = vdev->queue; + struct mutex *lock = q->lock ? q->lock : vdev->lock; + __poll_t res; + void *fileio; + + /* + * If this helper doesn't know how to lock, then you shouldn't be using + * it but you should write your own. + */ + WARN_ON(!lock); + + if (lock && mutex_lock_interruptible(lock)) + return EPOLLERR; + + fileio = q->fileio; + + res = vb2_poll(vdev->queue, file, wait); + + /* If fileio was started, then we have a new queue owner. */ + if (!fileio && q->fileio) + q->owner = file->private_data; + if (lock) + mutex_unlock(lock); + return res; +} +EXPORT_SYMBOL_GPL(vb2_fop_poll); + +#ifndef CONFIG_MMU +unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + struct video_device *vdev = video_devdata(file); + + return vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags); +} +EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area); +#endif + +void vb2_video_unregister_device(struct video_device *vdev) +{ + /* Check if vdev was ever registered at all */ + if (!vdev || !video_is_registered(vdev)) + return; + + /* + * Calling this function only makes sense if vdev->queue is set. + * If it is NULL, then just call video_unregister_device() instead. + */ + WARN_ON(!vdev->queue); + + /* + * Take a reference to the device since video_unregister_device() + * calls device_unregister(), but we don't want that to release + * the device since we want to clean up the queue first. + */ + get_device(&vdev->dev); + video_unregister_device(vdev); + if (vdev->queue && vdev->queue->owner) { + struct mutex *lock = vdev->queue->lock ? + vdev->queue->lock : vdev->lock; + + if (lock) + mutex_lock(lock); + vb2_queue_release(vdev->queue); + vdev->queue->owner = NULL; + if (lock) + mutex_unlock(lock); + } + /* + * Now we put the device, and in most cases this will release + * everything. + */ + put_device(&vdev->dev); +} +EXPORT_SYMBOL_GPL(vb2_video_unregister_device); + +/* vb2_ops helpers. Only use if vq->lock is non-NULL. */ + +void vb2_ops_wait_prepare(struct vb2_queue *vq) +{ + mutex_unlock(vq->lock); +} +EXPORT_SYMBOL_GPL(vb2_ops_wait_prepare); + +void vb2_ops_wait_finish(struct vb2_queue *vq) +{ + mutex_lock(vq->lock); +} +EXPORT_SYMBOL_GPL(vb2_ops_wait_finish); + +/* + * Note that this function is called during validation time and + * thus the req_queue_mutex is held to ensure no request objects + * can be added or deleted while validating. So there is no need + * to protect the objects list. 
+ */ +int vb2_request_validate(struct media_request *req) +{ + struct media_request_object *obj; + int ret = 0; + + if (!vb2_request_buffer_cnt(req)) + return -ENOENT; + + list_for_each_entry(obj, &req->objects, list) { + if (!obj->ops->prepare) + continue; + + ret = obj->ops->prepare(obj); + if (ret) + break; + } + + if (ret) { + list_for_each_entry_continue_reverse(obj, &req->objects, list) + if (obj->ops->unprepare) + obj->ops->unprepare(obj); + return ret; + } + return 0; +} +EXPORT_SYMBOL_GPL(vb2_request_validate); + +void vb2_request_queue(struct media_request *req) +{ + struct media_request_object *obj, *obj_safe; + + /* + * Queue all objects. Note that buffer objects are at the end of the + * objects list, after all other object types. Once buffer objects + * are queued, the driver might delete them immediately (if the driver + * processes the buffer at once), so we have to use + * list_for_each_entry_safe() to handle the case where the object we + * queue is deleted. + */ + list_for_each_entry_safe(obj, obj_safe, &req->objects, list) + if (obj->ops->queue) + obj->ops->queue(obj); +} +EXPORT_SYMBOL_GPL(vb2_request_queue); + +MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2"); +MODULE_AUTHOR("Pawel Osciak , Marek Szyprowski"); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/common/videobuf2/videobuf2-vmalloc.c b/drivers/media/common/videobuf2/videobuf2-vmalloc.c new file mode 100644 index 0000000000..7c635e2921 --- /dev/null +++ b/drivers/media/common/videobuf2/videobuf2-vmalloc.c @@ -0,0 +1,443 @@ +/* + * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2 + * + * Copyright (C) 2010 Samsung Electronics + * + * Author: Pawel Osciak + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/refcount.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-vmalloc.h>
+#include <media/videobuf2-memops.h>
+
+struct vb2_vmalloc_buf {
+	void *vaddr;
+	struct frame_vector *vec;
+	enum dma_data_direction dma_dir;
+	unsigned long size;
+	refcount_t refcount;
+	struct vb2_vmarea_handler handler;
+	struct dma_buf *dbuf;
+};
+
+static void vb2_vmalloc_put(void *buf_priv);
+
+static void *vb2_vmalloc_alloc(struct vb2_buffer *vb, struct device *dev,
+			       unsigned long size)
+{
+	struct vb2_vmalloc_buf *buf;
+
+	buf = kzalloc(sizeof(*buf), GFP_KERNEL | vb->vb2_queue->gfp_flags);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+
+	buf->size = size;
+	buf->vaddr = vmalloc_user(buf->size);
+	if (!buf->vaddr) {
+		pr_debug("vmalloc of size %ld failed\n", buf->size);
+		kfree(buf);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	buf->dma_dir = vb->vb2_queue->dma_dir;
+	buf->handler.refcount = &buf->refcount;
+	buf->handler.put = vb2_vmalloc_put;
+	buf->handler.arg = buf;
+
+	refcount_set(&buf->refcount, 1);
+	return buf;
+}
+
+static void vb2_vmalloc_put(void *buf_priv)
+{
+	struct vb2_vmalloc_buf *buf = buf_priv;
+
+	if (refcount_dec_and_test(&buf->refcount)) {
+		vfree(buf->vaddr);
+		kfree(buf);
+	}
+}
+
+static void *vb2_vmalloc_get_userptr(struct vb2_buffer *vb, struct device *dev,
+				     unsigned long vaddr, unsigned long size)
+{
+	struct vb2_vmalloc_buf *buf;
+	struct frame_vector *vec;
+	int n_pages, offset, i;
+	int ret = -ENOMEM;
+
+	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+
+	buf->dma_dir = vb->vb2_queue->dma_dir;
+	offset = vaddr & ~PAGE_MASK;
+	buf->size = size;
+	vec = vb2_create_framevec(vaddr, size,
+				  buf->dma_dir == DMA_FROM_DEVICE ||
+				  buf->dma_dir == DMA_BIDIRECTIONAL);
+	if (IS_ERR(vec)) {
+		ret = PTR_ERR(vec);
+		goto fail_pfnvec_create;
+	}
+	buf->vec = vec;
+	n_pages = frame_vector_count(vec);
+	if (frame_vector_to_pages(vec) < 0) {
+		unsigned long *nums = frame_vector_pfns(vec);
+
+		/*
+		 * We cannot get page pointers for these pfns. Check memory is
+		 * physically contiguous and use direct mapping.
+ */ + for (i = 1; i < n_pages; i++) + if (nums[i-1] + 1 != nums[i]) + goto fail_map; + buf->vaddr = (__force void *) + ioremap(__pfn_to_phys(nums[0]), size + offset); + } else { + buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1); + } + + if (!buf->vaddr) + goto fail_map; + buf->vaddr += offset; + return buf; + +fail_map: + vb2_destroy_framevec(vec); +fail_pfnvec_create: + kfree(buf); + + return ERR_PTR(ret); +} + +static void vb2_vmalloc_put_userptr(void *buf_priv) +{ + struct vb2_vmalloc_buf *buf = buf_priv; + unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK; + unsigned int i; + struct page **pages; + unsigned int n_pages; + + if (!buf->vec->is_pfns) { + n_pages = frame_vector_count(buf->vec); + pages = frame_vector_pages(buf->vec); + if (vaddr) + vm_unmap_ram((void *)vaddr, n_pages); + if (buf->dma_dir == DMA_FROM_DEVICE || + buf->dma_dir == DMA_BIDIRECTIONAL) + for (i = 0; i < n_pages; i++) + set_page_dirty_lock(pages[i]); + } else { + iounmap((__force void __iomem *)buf->vaddr); + } + vb2_destroy_framevec(buf->vec); + kfree(buf); +} + +static void *vb2_vmalloc_vaddr(struct vb2_buffer *vb, void *buf_priv) +{ + struct vb2_vmalloc_buf *buf = buf_priv; + + if (!buf->vaddr) { + pr_err("Address of an unallocated plane requested or cannot map user pointer\n"); + return NULL; + } + + return buf->vaddr; +} + +static unsigned int vb2_vmalloc_num_users(void *buf_priv) +{ + struct vb2_vmalloc_buf *buf = buf_priv; + return refcount_read(&buf->refcount); +} + +static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma) +{ + struct vb2_vmalloc_buf *buf = buf_priv; + int ret; + + if (!buf) { + pr_err("No memory to map\n"); + return -EINVAL; + } + + ret = remap_vmalloc_range(vma, buf->vaddr, 0); + if (ret) { + pr_err("Remapping vmalloc memory, error: %d\n", ret); + return ret; + } + + /* + * Make sure that vm_areas for 2 buffers won't be merged together + */ + vm_flags_set(vma, VM_DONTEXPAND); + + /* + * Use common vm_area operations to track buffer refcount. 
+ */ + vma->vm_private_data = &buf->handler; + vma->vm_ops = &vb2_common_vm_ops; + + vma->vm_ops->open(vma); + + return 0; +} + +#ifdef CONFIG_HAS_DMA +/*********************************************/ +/* DMABUF ops for exporters */ +/*********************************************/ + +struct vb2_vmalloc_attachment { + struct sg_table sgt; + enum dma_data_direction dma_dir; +}; + +static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf, + struct dma_buf_attachment *dbuf_attach) +{ + struct vb2_vmalloc_attachment *attach; + struct vb2_vmalloc_buf *buf = dbuf->priv; + int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE; + struct sg_table *sgt; + struct scatterlist *sg; + void *vaddr = buf->vaddr; + int ret; + int i; + + attach = kzalloc(sizeof(*attach), GFP_KERNEL); + if (!attach) + return -ENOMEM; + + sgt = &attach->sgt; + ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL); + if (ret) { + kfree(attach); + return ret; + } + for_each_sgtable_sg(sgt, sg, i) { + struct page *page = vmalloc_to_page(vaddr); + + if (!page) { + sg_free_table(sgt); + kfree(attach); + return -ENOMEM; + } + sg_set_page(sg, page, PAGE_SIZE, 0); + vaddr += PAGE_SIZE; + } + + attach->dma_dir = DMA_NONE; + dbuf_attach->priv = attach; + return 0; +} + +static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf, + struct dma_buf_attachment *db_attach) +{ + struct vb2_vmalloc_attachment *attach = db_attach->priv; + struct sg_table *sgt; + + if (!attach) + return; + + sgt = &attach->sgt; + + /* release the scatterlist cache */ + if (attach->dma_dir != DMA_NONE) + dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0); + sg_free_table(sgt); + kfree(attach); + db_attach->priv = NULL; +} + +static struct sg_table *vb2_vmalloc_dmabuf_ops_map( + struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir) +{ + struct vb2_vmalloc_attachment *attach = db_attach->priv; + struct sg_table *sgt; + + sgt = &attach->sgt; + /* return previously mapped sg table */ + if (attach->dma_dir == dma_dir) + return sgt; + + /* release any previous cache */ + if (attach->dma_dir != DMA_NONE) { + dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0); + attach->dma_dir = DMA_NONE; + } + + /* mapping to the client with new direction */ + if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) { + pr_err("failed to map scatterlist\n"); + return ERR_PTR(-EIO); + } + + attach->dma_dir = dma_dir; + + return sgt; +} + +static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach, + struct sg_table *sgt, enum dma_data_direction dma_dir) +{ + /* nothing to be done here */ +} + +static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf) +{ + /* drop reference obtained in vb2_vmalloc_get_dmabuf */ + vb2_vmalloc_put(dbuf->priv); +} + +static int vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf, + struct iosys_map *map) +{ + struct vb2_vmalloc_buf *buf = dbuf->priv; + + iosys_map_set_vaddr(map, buf->vaddr); + + return 0; +} + +static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf, + struct vm_area_struct *vma) +{ + return vb2_vmalloc_mmap(dbuf->priv, vma); +} + +static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = { + .attach = vb2_vmalloc_dmabuf_ops_attach, + .detach = vb2_vmalloc_dmabuf_ops_detach, + .map_dma_buf = vb2_vmalloc_dmabuf_ops_map, + .unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap, + .vmap = vb2_vmalloc_dmabuf_ops_vmap, + .mmap = vb2_vmalloc_dmabuf_ops_mmap, + .release = vb2_vmalloc_dmabuf_ops_release, +}; + +static struct dma_buf *vb2_vmalloc_get_dmabuf(struct vb2_buffer *vb, + void *buf_priv, + 
unsigned long flags) +{ + struct vb2_vmalloc_buf *buf = buf_priv; + struct dma_buf *dbuf; + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + + exp_info.ops = &vb2_vmalloc_dmabuf_ops; + exp_info.size = buf->size; + exp_info.flags = flags; + exp_info.priv = buf; + + if (WARN_ON(!buf->vaddr)) + return NULL; + + dbuf = dma_buf_export(&exp_info); + if (IS_ERR(dbuf)) + return NULL; + + /* dmabuf keeps reference to vb2 buffer */ + refcount_inc(&buf->refcount); + + return dbuf; +} +#endif /* CONFIG_HAS_DMA */ + + +/*********************************************/ +/* callbacks for DMABUF buffers */ +/*********************************************/ + +static int vb2_vmalloc_map_dmabuf(void *mem_priv) +{ + struct vb2_vmalloc_buf *buf = mem_priv; + struct iosys_map map; + int ret; + + ret = dma_buf_vmap_unlocked(buf->dbuf, &map); + if (ret) + return -EFAULT; + buf->vaddr = map.vaddr; + + return 0; +} + +static void vb2_vmalloc_unmap_dmabuf(void *mem_priv) +{ + struct vb2_vmalloc_buf *buf = mem_priv; + struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr); + + dma_buf_vunmap_unlocked(buf->dbuf, &map); + buf->vaddr = NULL; +} + +static void vb2_vmalloc_detach_dmabuf(void *mem_priv) +{ + struct vb2_vmalloc_buf *buf = mem_priv; + struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr); + + if (buf->vaddr) + dma_buf_vunmap_unlocked(buf->dbuf, &map); + + kfree(buf); +} + +static void *vb2_vmalloc_attach_dmabuf(struct vb2_buffer *vb, + struct device *dev, + struct dma_buf *dbuf, + unsigned long size) +{ + struct vb2_vmalloc_buf *buf; + + if (dbuf->size < size) + return ERR_PTR(-EFAULT); + + buf = kzalloc(sizeof(*buf), GFP_KERNEL); + if (!buf) + return ERR_PTR(-ENOMEM); + + buf->dbuf = dbuf; + buf->dma_dir = vb->vb2_queue->dma_dir; + buf->size = size; + + return buf; +} + + +const struct vb2_mem_ops vb2_vmalloc_memops = { + .alloc = vb2_vmalloc_alloc, + .put = vb2_vmalloc_put, + .get_userptr = vb2_vmalloc_get_userptr, + .put_userptr = vb2_vmalloc_put_userptr, +#ifdef CONFIG_HAS_DMA + .get_dmabuf = vb2_vmalloc_get_dmabuf, +#endif + .map_dmabuf = vb2_vmalloc_map_dmabuf, + .unmap_dmabuf = vb2_vmalloc_unmap_dmabuf, + .attach_dmabuf = vb2_vmalloc_attach_dmabuf, + .detach_dmabuf = vb2_vmalloc_detach_dmabuf, + .vaddr = vb2_vmalloc_vaddr, + .mmap = vb2_vmalloc_mmap, + .num_users = vb2_vmalloc_num_users, +}; +EXPORT_SYMBOL_GPL(vb2_vmalloc_memops); + +MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2"); +MODULE_AUTHOR("Pawel Osciak "); +MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(DMA_BUF); -- cgit v1.2.3
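
Usage note (not part of the upstream patch above): the vb2_ioctl_*, vb2_fop_* and vb2_ops_wait_* helpers exported by videobuf2-v4l2.c are meant to be plugged directly into a driver's v4l2_ioctl_ops, v4l2_file_operations and vb2_ops, so the buffer-handling boilerplate lives in one place. The sketch below shows roughly how a hypothetical capture driver could wire them together with the vmalloc allocator from videobuf2-vmalloc.c; the "mydrv" names, the frame size and the queue settings are illustrative assumptions, not taken from this commit.

/* Illustrative sketch only -- "mydrv" and its callbacks are hypothetical. */
#include <linux/module.h>
#include <linux/mutex.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>

static int mydrv_queue_setup(struct vb2_queue *q, unsigned int *nbuffers,
			     unsigned int *nplanes, unsigned int sizes[],
			     struct device *alloc_devs[])
{
	*nplanes = 1;
	sizes[0] = 640 * 480 * 2;	/* assumed frame size for the sketch */
	return 0;
}

static void mydrv_buf_queue(struct vb2_buffer *vb)
{
	/* hand the buffer to the (hypothetical) capture engine here */
}

static const struct vb2_ops mydrv_vb2_ops = {
	.queue_setup	= mydrv_queue_setup,
	.buf_queue	= mydrv_buf_queue,
	.wait_prepare	= vb2_ops_wait_prepare,	/* helpers added by this patch */
	.wait_finish	= vb2_ops_wait_finish,
};

static const struct v4l2_ioctl_ops mydrv_ioctl_ops = {
	.vidioc_reqbufs		= vb2_ioctl_reqbufs,
	.vidioc_create_bufs	= vb2_ioctl_create_bufs,
	.vidioc_prepare_buf	= vb2_ioctl_prepare_buf,
	.vidioc_querybuf	= vb2_ioctl_querybuf,
	.vidioc_qbuf		= vb2_ioctl_qbuf,
	.vidioc_dqbuf		= vb2_ioctl_dqbuf,
	.vidioc_expbuf		= vb2_ioctl_expbuf,
	.vidioc_streamon	= vb2_ioctl_streamon,
	.vidioc_streamoff	= vb2_ioctl_streamoff,
};

static const struct v4l2_file_operations mydrv_fops = {
	.owner		= THIS_MODULE,
	.open		= v4l2_fh_open,
	.release	= vb2_fop_release,
	.read		= vb2_fop_read,
	.poll		= vb2_fop_poll,
	.mmap		= vb2_fop_mmap,
	.unlocked_ioctl	= video_ioctl2,
};

static int mydrv_init_queue(struct vb2_queue *q, struct mutex *lock, void *priv)
{
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_READ | VB2_DMABUF;
	q->drv_priv = priv;
	q->buf_struct_size = sizeof(struct vb2_v4l2_buffer);
	q->ops = &mydrv_vb2_ops;
	q->mem_ops = &vb2_vmalloc_memops;	/* allocator from videobuf2-vmalloc.c */
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->lock = lock;		/* required by the vb2_fop_* and vb2_ops_wait_* helpers */
	return vb2_queue_init(q);
}

The driver would then point its video_device at this queue (vdev->queue = q, vdev->fops = &mydrv_fops, vdev->ioctl_ops = &mydrv_ioctl_ops) so that every buffer ioctl and file operation funnels into the helper code added by this patch.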